Diffstat (limited to 'roles')
709 files changed, 30639 insertions, 6448 deletions
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
index 12929b354..dc05b03b5 100644
--- a/roles/ansible_service_broker/defaults/main.yml
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -1,10 +1,16 @@
 ---
 ansible_service_broker_remove: false
+ansible_service_broker_install: false
 ansible_service_broker_log_level: info
 ansible_service_broker_output_request: false
 ansible_service_broker_recovery: true
 ansible_service_broker_bootstrap_on_startup: true
-# Recommended you do not enable this for now
 ansible_service_broker_dev_broker: false
+ansible_service_broker_refresh_interval: 600s
+# Recommended you do not enable this for now
 ansible_service_broker_launch_apb_on_bind: false
+
+ansible_service_broker_image_pull_policy: IfNotPresent
+ansible_service_broker_sandbox_role: edit
+ansible_service_broker_auto_escalate: false
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index b3797ef96..89a84c4df 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -17,10 +17,13 @@
     ansible_service_broker_etcd_image_etcd_path: "{{ ansible_service_broker_etcd_image_etcd_path | default(__ansible_service_broker_etcd_image_etcd_path) }}"
     ansible_service_broker_registry_type: "{{ ansible_service_broker_registry_type | default(__ansible_service_broker_registry_type) }}"
+    ansible_service_broker_registry_name: "{{ ansible_service_broker_registry_name | default(__ansible_service_broker_registry_name) }}"
     ansible_service_broker_registry_url: "{{ ansible_service_broker_registry_url | default(__ansible_service_broker_registry_url) }}"
     ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}"
     ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}"
     ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}"
+    ansible_service_broker_registry_tag: "{{ ansible_service_broker_registry_tag | default(__ansible_service_broker_registry_tag) }}"
+    ansible_service_broker_registry_whitelist: "{{ ansible_service_broker_registry_whitelist | default(__ansible_service_broker_registry_whitelist) }}"

 - name: set ansible-service-broker image facts using set prefix and tag
   set_fact:
@@ -42,53 +45,128 @@
     namespace: openshift-ansible-service-broker
     state: present

-- name: Set SA cluster-role
+- name: create ansible-service-broker client serviceaccount
+  oc_serviceaccount:
+    name: asb-client
+    namespace: openshift-ansible-service-broker
+    state: present
+
+- name: Create asb-auth cluster role
+  oc_clusterrole:
+    state: present
+    name: asb-auth
+    rules:
+    - apiGroups: [""]
+      resources: ["namespaces"]
+      verbs: ["create", "delete"]
+    - apiGroups: ["authorization.openshift.io"]
+      resources: ["subjectrulesreview"]
+      verbs: ["create"]
+    - apiGroups: ["authorization.k8s.io"]
+      resources: ["subjectaccessreviews"]
+      verbs: ["create"]
+    - apiGroups: ["authentication.k8s.io"]
+      resources: ["tokenreviews"]
+      verbs: ["create"]
+
+- name: Create asb-access cluster role
+  oc_clusterrole:
+    state: present
+    name: asb-access
+    rules:
+    - nonResourceURLs: ["/ansible-service-broker", "/ansible-service-broker/*"]
+      verbs: ["get", "post", "put", "patch", "delete"]
+
+- name: Bind admin cluster-role to asb serviceaccount
   oc_adm_policy_user:
     state: present
-    namespace: "openshift-ansible-service-broker"
     resource_kind: cluster-role
     resource_name: admin
     user: "system:serviceaccount:openshift-ansible-service-broker:asb"

-- name: create ansible-service-broker service
-  oc_service:
-    name: asb
+- name: Bind auth cluster role to asb service account
+  oc_adm_policy_user:
+    state: present
+    resource_kind: cluster-role
+    resource_name: asb-auth
+    user: "system:serviceaccount:openshift-ansible-service-broker:asb"
+
+- name: Bind asb-access role to asb-client service account
+  oc_adm_policy_user:
+    state: present
+    resource_kind: cluster-role
+    resource_name: asb-access
+    user: "system:serviceaccount:openshift-ansible-service-broker:asb-client"
+
+- name: create asb-client token secret
+  oc_obj:
+    name: asb-client
     namespace: openshift-ansible-service-broker
     state: present
-    labels:
-      app: openshift-ansible-service-broker
-      service: asb
-    ports:
-      - name: port-1338
-        port: 1338
-    selector:
-      app: openshift-ansible-service-broker
-      service: asb
+    kind: Secret
+    content:
+      path: /tmp/asbclientsecretout
+      data:
+        apiVersion: v1
+        kind: Secret
+        metadata:
+          name: asb-client
+          namespace: openshift-ansible-service-broker
+          annotations:
+            kubernetes.io/service-account.name: asb-client
+        type: kubernetes.io/service-account-token

-- name: create etcd service
-  oc_service:
-    name: etcd
+- oc_secret:
+    state: list
+    namespace: openshift-ansible-service-broker
+    name: asb-client
+  register: asb_client_secret
+
+- set_fact:
+    service_ca_crt: "{{ asb_client_secret.results.results.0.data['service-ca.crt'] }}"
+
+# Using oc_obj because oc_service doesn't seem to allow annotations
+# TODO: Extend oc_service to allow annotations
+- name: create ansible-service-broker service
+  oc_obj:
+    name: asb
     namespace: openshift-ansible-service-broker
     state: present
-    ports:
-      - name: etcd-advertise
-        port: 2379
-    selector:
-      app: openshift-ansible-service-broker
-      service: etcd
+    kind: Service
+    content:
+      path: /tmp/asbsvcout
+      data:
+        apiVersion: v1
+        kind: Service
+        metadata:
+          name: asb
+          namespace: openshift-ansible-service-broker
+          labels:
+            app: openshift-ansible-service-broker
+            service: asb
+          annotations:
+            service.alpha.openshift.io/serving-cert-secret-name: asb-tls
+        spec:
+          ports:
+            - name: port-1338
+              port: 1338
+              targetPort: 1338
+              protocol: TCP
+          selector:
+            app: openshift-ansible-service-broker
+            service: asb

 - name: create route for ansible-service-broker service
   oc_route:
     name: asb-1338
     namespace: openshift-ansible-service-broker
     state: present
+    labels:
+      app: openshift-ansible-service-broker
+      service: asb
     service_name: asb
     port: 1338
-  register: asb_route_out
-
-- name: get ansible-service-broker route name
-  set_fact:
-    ansible_service_broker_route: "{{ asb_route_out.results.results[0].spec.host }}"
+    tls_termination: Reencrypt

 - name: create persistent volume claim for etcd
   oc_obj:
@@ -97,7 +175,7 @@
     state: present
     kind: PersistentVolumeClaim
     content:
-      path: /tmp/dcout
+      path: /tmp/pvcout
       data:
         apiVersion: v1
         kind: PersistentVolumeClaim
@@ -111,50 +189,75 @@
           requests:
             storage: 1Gi

-- name: create etcd deployment
+- name: Create Ansible Service Broker deployment config
   oc_obj:
-    name: etcd
+    name: asb
     namespace: openshift-ansible-service-broker
     state: present
-    kind: Deployment
+    kind: DeploymentConfig
    content:
       path: /tmp/dcout
       data:
-        apiVersion: extensions/v1beta1
-        kind: Deployment
+        apiVersion: v1
+        kind: DeploymentConfig
         metadata:
-          name: etcd
-          namespace: openshift-ansible-service-broker
+          name: asb
           labels:
             app: openshift-ansible-service-broker
-            service: etcd
+            service: asb
         spec:
+          replicas: 1
           selector:
-            matchLabels:
-              app: openshift-ansible-service-broker
-              service: etcd
+            app: openshift-ansible-service-broker
           strategy:
-            type: RollingUpdate
-            rollingUpdate:
-              maxSurge: 1
-              maxUnavailable: 1
-          replicas: 1
+            type: Rolling
           template:
             metadata:
               labels:
                 app: openshift-ansible-service-broker
-                service: etcd
+                service: asb
             spec:
-              restartPolicy: Always
+              serviceAccount: asb
               containers:
+              - image: "{{ ansible_service_broker_image }}"
+                name: asb
+                imagePullPolicy: IfNotPresent
+                volumeMounts:
+                  - name: config-volume
+                    mountPath: /etc/ansible-service-broker
+                  - name: asb-tls
+                    mountPath: /etc/tls/private
+                ports:
+                  - containerPort: 1338
+                    protocol: TCP
+                env:
+                - name: BROKER_CONFIG
+                  value: /etc/ansible-service-broker/config.yaml
+                resources: {}
+                terminationMessagePath: /tmp/termination-log
+                readinessProbe:
+                  httpGet:
+                    port: 1338
+                    path: /healthz
+                    scheme: HTTPS
+                  initialDelaySeconds: 15
+                  timeoutSeconds: 1
+                livenessProbe:
+                  httpGet:
+                    port: 1338
+                    path: /healthz
+                    scheme: HTTPS
+                  initialDelaySeconds: 15
+                  timeoutSeconds: 1
+
               - image: "{{ ansible_service_broker_etcd_image }}"
                 name: etcd
                 imagePullPolicy: IfNotPresent
                 terminationMessagePath: /tmp/termination-log
                 workingDir: /etcd
                 args:
-                - '{{ ansible_service_broker_etcd_image_etcd_path }}'
-                - --data-dir=/data
+                - "{{ ansible_service_broker_etcd_image_etcd_path }}"
+                - "--data-dir=/data"
                 - "--listen-client-urls=http://0.0.0.0:2379"
                 - "--advertise-client-urls=http://0.0.0.0:2379"
                 ports:
@@ -170,57 +273,15 @@
               - name: etcd
                 persistentVolumeClaim:
                   claimName: etcd
-
-- name: create ansible-service-broker deployment
-  oc_obj:
-    name: asb
-    namespace: openshift-ansible-service-broker
-    state: present
-    kind: Deployment
-    content:
-      path: /tmp/dcout
-      data:
-        apiVersion: extensions/v1beta1
-        kind: Deployment
-        metadata:
-          name: asb
-          namespace: openshift-ansible-service-broker
-          labels:
-            app: openshift-ansible-service-broker
-            service: asb
-        spec:
-          strategy:
-            type: Recreate
-          replicas: 1
-          template:
-            metadata:
-              labels:
-                app: openshift-ansible-service-broker
-                service: asb
-            spec:
-              serviceAccount: asb
-              restartPolicy: Always
-              containers:
-              - image: "{{ ansible_service_broker_image }}"
-                name: asb
-                imagePullPolicy: IfNotPresent
-                volumeMounts:
-                  - name: config-volume
-                    mountPath: /etc/ansible-service-broker
-                ports:
-                  - containerPort: 1338
-                    protocol: TCP
-                env:
-                - name: BROKER_CONFIG
-                  value: /etc/ansible-service-broker/config.yaml
-                terminationMessagePath: /tmp/termination-log
-              volumes:
                 - name: config-volume
                   configMap:
                     name: broker-config
                     items:
                     - key: broker-config
                       path: config.yaml
+                - name: asb-tls
+                  secret:
+                    secretName: asb-tls

 # TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
@@ -239,42 +300,65 @@
       name: broker-config
       namespace: openshift-ansible-service-broker
       labels:
-        app: ansible-service-broker
+        app: openshift-ansible-service-broker
       data:
         broker-config: |
           registry:
-            name: "{{ ansible_service_broker_registry_type }}"
-            url: "{{ ansible_service_broker_registry_url }}"
-            user: "{{ ansible_service_broker_registry_user }}"
-            pass: "{{ ansible_service_broker_registry_password }}"
-            org: "{{ ansible_service_broker_registry_organization }}"
+            - type: {{ ansible_service_broker_registry_type }}
+              name: {{ ansible_service_broker_registry_name }}
+              url: {{ ansible_service_broker_registry_url }}
+              user: {{ ansible_service_broker_registry_user }}
+              pass: {{ ansible_service_broker_registry_password }}
+              org: {{ ansible_service_broker_registry_organization }}
+              tag: {{ ansible_service_broker_registry_tag }}
+              white_list: {{ ansible_service_broker_registry_whitelist }}
           dao:
-            etcd_host: etcd
+            etcd_host: 0.0.0.0
             etcd_port: 2379
           log:
             logfile: /var/log/ansible-service-broker/asb.log
             stdout: true
-            level: "{{ ansible_service_broker_log_level }}"
+            level: {{ ansible_service_broker_log_level }}
             color: true
-          openshift: {}
+          openshift:
+            host: ""
+            ca_file: ""
+            bearer_token_file: ""
+            sandbox_role: {{ ansible_service_broker_sandbox_role }}
+            image_pull_policy: {{ ansible_service_broker_image_pull_policy }}
           broker:
             dev_broker: {{ ansible_service_broker_dev_broker | bool | lower }}
+            bootstrap_on_startup: {{ ansible_service_broker_bootstrap_on_startup | bool | lower }}
+            refresh_interval: {{ ansible_service_broker_refresh_interval }}
             launch_apb_on_bind: {{ ansible_service_broker_launch_apb_on_bind | bool | lower }}
-            recovery: {{ ansible_service_broker_recovery | bool | lower }}
             output_request: {{ ansible_service_broker_output_request | bool | lower }}
-            bootstrap_on_startup: {{ ansible_service_broker_bootstrap_on_startup | bool | lower }}
+            recovery: {{ ansible_service_broker_recovery | bool | lower }}
+            ssl_cert_key: /etc/tls/private/tls.key
+            ssl_cert: /etc/tls/private/tls.crt
+            auto_escalate: {{ ansible_service_broker_auto_escalate }}
+            auth:
+              - type: basic
+                enabled: false
+

 - name: Create the Broker resource in the catalog
   oc_obj:
     name: ansible-service-broker
     state: present
-    kind: Broker
+    kind: ClusterServiceBroker
     content:
       path: /tmp/brokerout
       data:
-        apiVersion: servicecatalog.k8s.io/v1alpha1
-        kind: Broker
+        apiVersion: servicecatalog.k8s.io/v1beta1
+        kind: ClusterServiceBroker
         metadata:
           name: ansible-service-broker
         spec:
-          url: http://asb.openshift-ansible-service-broker.svc:1338
+          url: https://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker
+          authInfo:
+            bearer:
+              secretRef:
+                name: asb-client
+                namespace: openshift-ansible-service-broker
+                kind: Secret
+          caBundle: "{{ service_ca_crt }}"
diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml
index b46ce8233..d8695bd3a 100644
--- a/roles/ansible_service_broker/tasks/main.yml
+++ b/roles/ansible_service_broker/tasks/main.yml
@@ -2,7 +2,7 @@
 # do any asserts here

 - include: install.yml
-  when: not ansible_service_broker_remove|default(false) | bool
+  when: ansible_service_broker_install | default(false) | bool

 - include: remove.yml
-  when: ansible_service_broker_remove|default(false) | bool
+  when: ansible_service_broker_remove | default(false) | bool
diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml
index 2519f9f4c..51b86fb26 100644
--- a/roles/ansible_service_broker/tasks/remove.yml
+++ b/roles/ansible_service_broker/tasks/remove.yml
@@ -1,16 +1,57 @@
 ---
-- name: remove openshift-ansible-service-broker project
-  oc_project:
-    name: openshift-ansible-service-broker
-    state: absent
-
 - name: remove ansible-service-broker serviceaccount
   oc_serviceaccount:
     name: asb
     namespace: openshift-ansible-service-broker
     state: absent

+- name: remove ansible-service-broker client serviceaccount
+  oc_serviceaccount:
+    name: asb-client
+    namespace: openshift-ansible-service-broker
+    state: absent
+
+- name: remove asb-auth cluster role
+  oc_clusterrole:
+    state: absent
+    name: asb-auth
+
+- name: remove asb-access cluster role
+  oc_clusterrole:
+    state: absent
+    name: asb-access
+
+- name: Unbind admin cluster-role to asb serviceaccount
+  oc_adm_policy_user:
+    state: absent
+    namespace: openshift-ansible-service-broker
+    resource_kind: cluster-role
+    resource_name: admin
+    user: "system:serviceaccount:openshift-ansible-service-broker:asb"
+
+- name: Unbind auth cluster role to asb service account
+  oc_adm_policy_user:
+    state: absent
+    namespace: openshift-ansible-service-broker
+    resource_kind: cluster-role
+    resource_name: asb-auth
+    user: "system:serviceaccount:openshift-ansible-service-broker:asb"
+
+- name: Unbind asb-access role to asb-client service account
+  oc_adm_policy_user:
+    state: absent
+    namespace: openshift-ansible-service-broker
+    resource_kind: cluster-role
+    resource_name: asb-access
+    user: "system:serviceaccount:openshift-ansible-service-broker:asb-client"
+
+- name: remove asb-client token secret
+  oc_secret:
+    state: absent
+    name: asb-client
+    namespace: openshift-ansible-service-broker
+
 - name: remove ansible-service-broker service
   oc_service:
     name: asb
@@ -35,19 +76,19 @@
     namespace: openshift-ansible-service-broker
     state: absent

-- name: remove etcd deployment
+- name: remove Ansible Service Broker deployment config
   oc_obj:
-    name: etcd
+    name: asb
     namespace: openshift-ansible-service-broker
+    kind: DeploymentConfig
     state: absent
-    kind: Deployment

-- name: remove ansible-service-broker deployment
+- name: remove secret for broker auth
   oc_obj:
-    name: asb
+    name: asb-client
     namespace: openshift-ansible-service-broker
+    kind: Secret
     state: absent
-    kind: Deployment

 # TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
 - name: remove config map for ansible-service-broker
@@ -58,8 +99,19 @@
     kind: ConfigMap

 # TODO: Is this going to work?
+- shell: >
+    oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+  register: get_apiservices
+  changed_when: no
+
 - name: remove broker object from the catalog
   oc_obj:
     name: ansible-service-broker
     state: absent
-    kind: Broker
+    kind: ClusterServiceBroker
+  when: not "'not found' in get_apiservices.stdout"
+
+- name: remove openshift-ansible-service-broker project
+  oc_project:
+    name: openshift-ansible-service-broker
+    state: absent
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
index 15e448515..8438e993f 100644
--- a/roles/ansible_service_broker/vars/default_images.yml
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -8,7 +8,10 @@ __ansible_service_broker_etcd_image_tag: latest
 __ansible_service_broker_etcd_image_etcd_path: /usr/local/bin/etcd

 __ansible_service_broker_registry_type: dockerhub
+__ansible_service_broker_registry_name: dh
 __ansible_service_broker_registry_url: null
 __ansible_service_broker_registry_user: null
 __ansible_service_broker_registry_password: null
 __ansible_service_broker_registry_organization: null
+__ansible_service_broker_registry_tag: latest
+__ansible_service_broker_registry_whitelist: []
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
index ce2ae8365..fc58b4fd8 100644
--- a/roles/ansible_service_broker/vars/openshift-enterprise.yml
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -1,14 +1,19 @@
 ---
 __ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose-
-__ansible_service_broker_image_tag: v3.6
+__ansible_service_broker_image_tag: v3.7

 __ansible_service_broker_etcd_image_prefix: rhel7/
 __ansible_service_broker_etcd_image_tag: latest
 __ansible_service_broker_etcd_image_etcd_path: /bin/etcd
+
 __ansible_service_broker_registry_type: rhcc
+__ansible_service_broker_registry_name: rh
 __ansible_service_broker_registry_url: "https://registry.access.redhat.com"
 __ansible_service_broker_registry_user: null
 __ansible_service_broker_registry_password: null
 __ansible_service_broker_registry_organization: null
+__ansible_service_broker_registry_tag: v3.7
+__ansible_service_broker_registry_whitelist:
+  - '.*-apb$'
diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml
index b1907f8cb..be73e8a73 100644
--- a/roles/calico/defaults/main.yaml
+++ b/roles/calico/defaults/main.yaml
@@ -11,4 +11,4 @@ calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/
 calico_ipv4pool_ipip: "always"

 calico_log_dir: "/var/log/calico"
-calico_node_image: "calico/node:v2.4.1"
+calico_node_image: "calico/node:v2.5.0"
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index 39f730462..0e3863304 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -2,10 +2,14 @@
 - name: Calico Node | Error if invalid cert arguments
   fail:
     msg: "Must provide all or none for the following etcd params: calico_etcd_cert_dir, calico_etcd_ca_cert_file, calico_etcd_cert_file, calico_etcd_key_file, calico_etcd_endpoints"
-  when: (calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined) and not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
+  when:
+  - calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined
+  - not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)

 - name: Calico Node | Generate OpenShift-etcd certs
-  include: ../../../roles/etcd_client_certificates/tasks/main.yml
+  include_role:
+    name: etcd
+    tasks_from: client_certificates
   when: calico_etcd_ca_cert_file is not defined or calico_etcd_cert_file is not defined or calico_etcd_key_file is not defined or calico_etcd_endpoints is not defined or calico_etcd_cert_dir is not defined
   vars:
     etcd_cert_prefix: calico.etcd-
@@ -28,18 +32,18 @@
     msg: "Invalid etcd configuration for calico."
   when: item is not defined or item == ''
   with_items:
-  - calico_etcd_ca_cert_file
-  - calico_etcd_cert_file
-  - calico_etcd_key_file
-  - calico_etcd_endpoints
+    - calico_etcd_ca_cert_file
+    - calico_etcd_cert_file
+    - calico_etcd_key_file
+    - calico_etcd_endpoints

 - name: Calico Node | Assure the calico certs are present
   stat:
     path: "{{ item }}"
   with_items:
-  - "{{ calico_etcd_ca_cert_file }}"
-  - "{{ calico_etcd_cert_file }}"
-  - "{{ calico_etcd_key_file }}"
+    - "{{ calico_etcd_ca_cert_file }}"
+    - "{{ calico_etcd_cert_file }}"
+    - "{{ calico_etcd_key_file }}"

 - name: Calico Node | Configure Calico service unit file
   template:
diff --git a/roles/calico_master/defaults/main.yaml b/roles/calico_master/defaults/main.yaml
index d40286aba..01a2b9529 100644
--- a/roles/calico_master/defaults/main.yaml
+++ b/roles/calico_master/defaults/main.yaml
@@ -3,5 +3,5 @@ kubeconfig: "{{ openshift.common.config_base }}/master/openshift-master.kubeconf
 calicoctl_bin_dir: "/usr/local/bin/"

-calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.4.0/calicoctl"
+calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.5.0/calicoctl"
 calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.7.0"
diff --git a/roles/cockpit-ui/defaults/main.yml b/roles/cockpit-ui/defaults/main.yml
new file mode 100644
index 000000000..b1696f1b8
--- /dev/null
+++ b/roles/cockpit-ui/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+openshift_config_base: "/etc/origin"
+openshift_master_config_dir: "{{ openshift.common.config_base | default(openshift_config_base) }}/master"
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index 0114498f8..244e2cc41 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -50,7 +50,9 @@
     -n default
   register: deploy_registry_console
   changed_when: "'already exists' not in deploy_registry_console.stderr"
-  failed_when: "'already exists' not in deploy_registry_console.stderr and deploy_registry_console.rc != 0"
+  failed_when:
+  - "'already exists' not in deploy_registry_console.stderr"
+  - "deploy_registry_console.rc != 0"

 - name: Delete temp directory
   file:
diff --git a/roles/dns/README.md b/roles/dns/README.md
deleted file mode 100644
index 9a88ce97c..000000000
--- a/roles/dns/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
-dns
-===
-
-Configure a DNS server serving IPs of all the nodes of the cluster
-
-Requirements
-------------
-
-Ansible 2.2
-
-Role Variables
---------------
-
-| Name | Mandatory / Optional | Description |
-|------|----------------------|-------------|
-| `dns_zones` | Mandatory | DNS zones in which we must find the hosts |
-| `dns_forwarders` | If not set, the DNS will be a recursive non-forwarding DNS server | DNS forwarders to delegate the requests for hosts outside of `dns_zones` |
-| `dns_all_hosts` | Mandatory | Exhaustive list of hosts |
-| `base_docker_image` | Optional | Base docker image to build Bind image from, used only in containerized deployments |
-
-Dependencies
-------------
-
-None
-
-Example Playbook
-----------------
-
-    - hosts: dns_hosts
-      roles:
-      - role: dns
-        dns_forwarders: [ '8.8.8.8', '8.8.4.4' ]
-        dns_zones: [ novalocal, openstacklocal ]
-        dns_all_hosts: "{{ g_all_hosts }}"
-        base_docker_image: 'centos:centos7'
-
-License
--------
-
-ASL 2.0
-
-Author Information
-------------------
-
-OpenShift operations, Red Hat, Inc
diff --git a/roles/dns/defaults/main.yml b/roles/dns/defaults/main.yml
deleted file mode 100644
index 82055c8cd..000000000
--- a/roles/dns/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-base_docker_image: "{{ 'centos:centos7' if openshift.common.deployment_type == 'origin' else 'rhel7' }}"
diff --git a/roles/dns/handlers/main.yml b/roles/dns/handlers/main.yml
deleted file mode 100644
index 61fd7a10e..000000000
--- a/roles/dns/handlers/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- name: restart bind
-  systemd:
-    name: named
-    state: restarted
diff --git a/roles/dns/meta/main.yml b/roles/dns/meta/main.yml
deleted file mode 100644
index 64d56114e..000000000
--- a/roles/dns/meta/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-galaxy_info:
-  author: Lénaïc Huard
-  description: Deploy and configure a DNS server
-  company: Amadeus SAS
-  license: ASL 2.0
-  min_ansible_version: 2.2
-dependencies:
-- { role: openshift_facts }
diff --git a/roles/dns/tasks/main.yml b/roles/dns/tasks/main.yml
deleted file mode 100644
index c5ab53b4d..000000000
--- a/roles/dns/tasks/main.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: Install Bind
-  package: name=bind state=present
-  when: not openshift.common.is_containerized | bool
-
-- name: Create docker build dir
-  file: path=/tmp/dockerbuild state=directory
-  when: openshift.common.is_containerized | bool
-
-- name: Install dockerfile
-  template:
-    dest: "/tmp/dockerbuild/Dockerfile"
-    src: Dockerfile
-  when: openshift.common.is_containerized | bool
-
-- name: Build Bind image
-  docker_image: path="/tmp/dockerbuild" name="bind" state=present
-  when: openshift.common.is_containerized | bool
-
-- name: Install bind service file
-  template:
-    dest: "/etc/systemd/system/named.service"
-    src: named.service.j2
-  when: openshift.common.is_containerized | bool
-
-- name: Create bind zone dir
-  file: path=/var/named state=directory
-  when: openshift.common.is_containerized | bool
-
-- name: Configure Bind
-  template:
-    src: "{{ item.src }}"
-    dest: "{{ item.dest }}"
-  with_items:
-  - src: openshift-cluster.zone
-    dest: /var/named/openshift-cluster.zone
-  - src: named.conf
-    dest: /etc/named.conf
-  notify: restart bind
-
-- name: Enable Bind
-  systemd:
-    name: named
-    state: started
-    enabled: yes
-    daemon_reload: yes
diff --git a/roles/dns/templates/Dockerfile b/roles/dns/templates/Dockerfile
deleted file mode 100644
index cdff0a228..000000000
--- a/roles/dns/templates/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM {{ base_docker_image }}
-MAINTAINER Jan Provaznik <jprovazn@redhat.com>
-
-# install main packages:
-RUN yum -y update; yum clean all;
-RUN yum -y install bind-utils bind
-
-EXPOSE 53
-
-# start services:
-CMD ["/usr/sbin/named", "-f"]
diff --git a/roles/dns/templates/named.conf b/roles/dns/templates/named.conf
deleted file mode 100644
index 22c1ff935..000000000
--- a/roles/dns/templates/named.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-options
-{
-  directory "/var/named";
-
-  allow-query { {{ ansible_default_ipv4.network }}/24; };
-
-  recursion yes;
-
-{% if dns_forwarders is defined %}
-  forwarders {
-  {% for dns in dns_forwarders %}
-    {{ dns }};
-  {% endfor %}
-  };
-{% endif %}
-};
-{% for zone in dns_zones %}
-
-zone "{{ zone }}" IN {
-  type master;
-  file "openshift-cluster.zone";
-};
-{% endfor %}
diff --git a/roles/dns/templates/named.service.j2 b/roles/dns/templates/named.service.j2
deleted file mode 100644
index 6e0a7a640..000000000
--- a/roles/dns/templates/named.service.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Requires={{ openshift.docker.service_name }}.service
-After={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.docker.service_name }}.service
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-ExecStartPre=/usr/bin/docker run --rm -v /etc/named.conf:/etc/named.conf -v /var/named:/var/named:z bind named-checkconf -z /etc/named.conf
-ExecStartPre=-/usr/bin/docker rm -f bind
-ExecStart=/usr/bin/docker run --name bind -p 53:53/udp -v /var/log:/var/log -v /etc/named.conf:/etc/named.conf -v /var/named:/var/named:z bind
-ExecStop=/usr/bin/docker stop bind
-
-[Install]
-WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/dns/templates/openshift-cluster.zone b/roles/dns/templates/openshift-cluster.zone
deleted file mode 100644
index 03f5dc089..000000000
--- a/roles/dns/templates/openshift-cluster.zone
+++ /dev/null
@@ -1,14 +0,0 @@
-$TTL 1d
-@  IN  SOA  {{ ansible_hostname }} openshift (
-    {{ ansible_date_time.epoch }}  ; Serial (To be fixed before 2039)
-    12h    ; Refresh
-    3m    ; Retry
-    4w    ; Expire
-    3h    ; TTL for negative replies
-    )
-
-  IN  NS  {{ ansible_hostname }}
-{{ ansible_hostname }}  IN  A  {{ ansible_default_ipv4.address }}
-{% for host in dns_all_hosts %}
-{{ hostvars[host].ansible_hostname }}  IN  A  {{ hostvars[host]['ansible_default_ipv4'].address }}
-{% endfor %}
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index ed97d539c..f6f2bd77e 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1 +1,35 @@
 ---
+docker_cli_auth_config_path: '/root/.docker'
+openshift_docker_signature_verification: False
+
+# oreg_url is defined by user input.
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+oreg_auth_credentials_replace: False
+
+openshift_docker_additional_registries: []
+openshift_docker_blocked_registries: []
+openshift_docker_insecure_registries: []
+
+openshift_docker_ent_reg: 'registry.access.redhat.com'
+
+# The l2_docker_* variables convert csv strings to lists, if
+# necessary. These variables should be used in place of their respective
+# openshift_docker_* counterparts to ensure the properly formatted lists are
+# utilized.
+l2_docker_additional_registries: "{% if openshift_docker_additional_registries is string %}{% if openshift_docker_additional_registries == '' %}[]{% elif ',' in openshift_docker_additional_registries %}{{ openshift_docker_additional_registries.split(',') | list }}{% else %}{{ [ openshift_docker_additional_registries ] }}{% endif %}{% else %}{{ openshift_docker_additional_registries }}{% endif %}"
+l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}"
+l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"
+
+openshift_docker_use_etc_containers: False
+containers_registries_conf_path: /etc/containers/registries.conf
+
+r_crio_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_crio_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+r_crio_os_firewall_deny: []
+r_crio_os_firewall_allow:
+- service: crio
+  port: 10010/tcp
+
+
+openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['oo_masters_to_config']|default([])) or inventory_hostname in (groups['oo_nodes_to_config']|default([])) else False | bool }}"
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index 591367467..866ed0452 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -4,6 +4,7 @@
   systemd:
     name: "{{ openshift.docker.service_name }}"
     state: restarted
+    daemon_reload: yes
   register: r_docker_restart_docker_result
   until: not r_docker_restart_docker_result | failed
   retries: 3
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index b773a417c..62b8a2eb5 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -11,3 +11,4 @@ galaxy_info:
   - 7
 dependencies:
 - role: lib_openshift
+- role: lib_os_firewall
diff --git a/roles/docker/tasks/crio_firewall.yml b/roles/docker/tasks/crio_firewall.yml
new file mode 100644
index 000000000..fbd1ff515
--- /dev/null
+++ b/roles/docker/tasks/crio_firewall.yml
@@ -0,0 +1,40 @@
+---
+- when: r_crio_firewall_enabled | bool and not r_crio_use_firewalld | bool
+  block:
+  - name: Add iptables allow rules
+    os_firewall_manage_iptables:
+      name: "{{ item.service }}"
+      action: add
+      protocol: "{{ item.port.split('/')[1] }}"
+      port: "{{ item.port.split('/')[0] }}"
+    when: item.cond | default(True)
+    with_items: "{{ r_crio_os_firewall_allow }}"
+
+  - name: Remove iptables rules
+    os_firewall_manage_iptables:
+      name: "{{ item.service }}"
+      action: remove
+      protocol: "{{ item.port.split('/')[1] }}"
+      port: "{{ item.port.split('/')[0] }}"
+    when: item.cond | default(True)
+    with_items: "{{ r_crio_os_firewall_deny }}"
+
+- when: r_crio_firewall_enabled | bool and r_crio_use_firewalld | bool
+  block:
+  - name: Add firewalld allow rules
+    firewalld:
+      port: "{{ item.port }}"
+      permanent: true
+      immediate: true
+      state: enabled
+    when: item.cond | default(True)
+    with_items: "{{ r_crio_os_firewall_allow }}"
+
+  - name: Remove firewalld allow rules
+    firewalld:
+      port: "{{ item.port }}"
+      permanent: true
+      immediate: true
+      state: disabled
+    when: item.cond | default(True)
+    with_items: "{{ r_crio_os_firewall_deny }}"
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 78c6671d8..1539af53f 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -10,19 +10,29 @@
     l_use_crio: "{{ openshift_use_crio | default(False) }}"
     l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}"

+- name: Add enterprise registry, if necessary
+  set_fact:
+    l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
+  when:
+  - openshift.common.deployment_type == 'openshift-enterprise'
+  - openshift_docker_ent_reg != ''
+  - openshift_docker_ent_reg not in l2_docker_additional_registries
+  - not l_use_crio_only
+
 - name: Use Package Docker if Requested
   include: package_docker.yml
   when:
-  - not l_use_system_container
-  - not l_use_crio_only
+    - not l_use_system_container
+    - not l_use_crio_only

 - name: Use System Container Docker if Requested
   include: systemcontainer_docker.yml
   when:
-  - l_use_system_container
-  - not l_use_crio_only
+    - l_use_system_container
+    - not l_use_crio_only

 - name: Add CRI-O usage Requested
   include: systemcontainer_crio.yml
   when:
-  - l_use_crio
+    - l_use_crio
+    - openshift_docker_is_node_or_master | bool
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index bc52ab60c..c1aedf879 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -3,6 +3,8 @@
   command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
   when: not openshift.common.is_atomic | bool
   register: curr_docker_version
+  retries: 4
+  until: curr_docker_version | succeeded
   changed_when: false

 - name: Error out if Docker pre-installed but too old
@@ -46,7 +48,9 @@
   template:
     dest: "{{ docker_systemd_dir }}/custom.conf"
     src: custom.conf.j2
-  when: not os_firewall_use_firewalld | default(False) | bool
+  notify:
+  - restart docker
+  when: not (os_firewall_use_firewalld | default(False)) | bool

 - stat: path=/etc/sysconfig/docker
   register: docker_check
@@ -56,20 +60,31 @@
     dest: /etc/sysconfig/docker
     regexp: '^{{ item.reg_conf_var }}=.*$'
     line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
-  when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg
+  when:
+  - item.reg_fact_val != []
+  - docker_check.stat.isreg is defined
+  - docker_check.stat.isreg
   with_items:
   - reg_conf_var: ADD_REGISTRY
-    reg_fact_val: "{{ docker_additional_registries | default(None, true)}}"
+    reg_fact_val: "{{ l2_docker_additional_registries }}"
     reg_flag: --add-registry
   - reg_conf_var: BLOCK_REGISTRY
-    reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}"
+    reg_fact_val: "{{ l2_docker_blocked_registries }}"
     reg_flag: --block-registry
   - reg_conf_var: INSECURE_REGISTRY
-    reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}"
+    reg_fact_val: "{{ l2_docker_insecure_registries }}"
     reg_flag: --insecure-registry
   notify:
   - restart docker

+- name: Place additional/blocked/insecure registries in /etc/containers/registries.conf
+  template:
+    dest: "{{ containers_registries_conf_path }}"
+    src: registries.conf
+  when: openshift_docker_use_etc_containers | bool
+  notify:
+  - restart docker
+
 - name: Set Proxy Settings
   lineinfile:
     dest: /etc/sysconfig/docker
@@ -93,11 +108,12 @@
     dest: /etc/sysconfig/docker
     regexp: '^OPTIONS=.*$'
     line: "OPTIONS='\
-      {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %}\
-      {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\
-      {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\
-      {% if docker_options is defined %} {{ docker_options }}{% endif %}\
-      {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
+      {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \
+      {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %} \
+      {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
+      {% if docker_options is defined %} {{ docker_options }}{% endif %} \
+      {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %} \
+      --signature-verification={{ openshift_docker_signature_verification | bool }}'"
   when: docker_check.stat.isreg is defined and docker_check.stat.isreg
   notify:
   - restart docker
@@ -117,6 +133,13 @@
   notify:
   - restart docker

+# The following task is needed as the systemd module may report a change in
+# state even though docker is already running.
+- name: Detect if docker is already started
+  command: "systemctl show docker -p ActiveState"
+  changed_when: False
+  register: r_docker_already_running_result
+
 - name: Start the Docker service
   systemd:
     name: docker
@@ -129,6 +152,8 @@
   delay: 30

 - set_fact:
-    docker_service_status_changed: "{{ r_docker_package_docker_start_result | changed }}"
+    docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
+
+- include: registry_auth.yml

 - meta: flush_handlers
diff --git a/roles/docker/tasks/registry_auth.yml b/roles/docker/tasks/registry_auth.yml
new file mode 100644
index 000000000..d05b7f2b8
--- /dev/null
+++ b/roles/docker/tasks/registry_auth.yml
@@ -0,0 +1,16 @@
+---
+- name: Check for credentials file for registry auth
+  stat:
+    path: "{{ docker_cli_auth_config_path }}/config.json"
+  when: oreg_auth_user is defined
+  register: docker_cli_auth_credentials_stat
+
+- name: Create credentials for docker cli registry auth
+  command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+  register: openshift_docker_credentials_create_res
+  retries: 3
+  delay: 5
+  until: openshift_docker_credentials_create_res.rc == 0
+  when:
+  - oreg_auth_user is defined
+  - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 787f51f94..67ede0d21 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -1,8 +1,34 @@
 ---
+
 # TODO: Much of this file is shared with container engine tasks
 - set_fact:
-    l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(openshift.docker.insecure_registries)) }}"
-  when: openshift.docker.insecure_registries
+    l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
+  when: l2_docker_insecure_registries | bool
+- set_fact:
+    l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
+  when: l2_docker_additional_registries | bool
+- set_fact:
+    l_crio_registries: "{{ ['docker.io'] }}"
+  when: not (l2_docker_additional_registries | bool)
+- set_fact:
+    l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
+  when: l2_docker_additional_registries | bool
+
+- set_fact:
+    l_openshift_image_tag: "{{ openshift_image_tag | string }}"
+  when: openshift_image_tag is defined
+
+- set_fact:
+    l_openshift_image_tag: "latest"
+  when:
+  - openshift_image_tag is not defined
+  - openshift_release == "latest"
+
+- set_fact:
+    l_openshift_image_tag: "{{ openshift_release | string }}"
+  when:
+  - openshift_image_tag is not defined
+  - openshift_release != "latest"

 - name: Ensure container-selinux is installed
   package:
@@ -10,6 +36,12 @@
     state: present
   when: not openshift.common.is_atomic | bool

+- name: Check we are not using node as a Docker container with CRI-O
+  fail: msg='Cannot use CRI-O with node configured as a Docker container'
+  when:
+  - openshift.common.is_containerized | bool
+  - not openshift.common.is_node_system_container | bool
+
 # Used to pull and install the system container
 - name: Ensure atomic is installed
   package:
@@ -30,7 +62,7 @@
   shell: lsmod | grep overlay
   register: l_has_overlay_in_kernel
   ignore_errors: yes
-
+  failed_when: false

 - when: l_has_overlay_in_kernel.rc != 0
   block:
@@ -50,60 +82,51 @@
       enabled: yes
       state: restarted

+- name: Ensure proxies are in the atomic.conf
+  include_role:
+    name: openshift_atomic
+    tasks_from: proxy

 - block:

-  - name: Add http_proxy to /etc/atomic.conf
-    lineinfile:
-      dest: /etc/atomic.conf
-      regexp: "^#?http_proxy[:=]{1}"
-      line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
-    when:
-    - openshift.common.http_proxy is defined
-    - openshift.common.http_proxy != ''
-
-  - name: Add https_proxy to /etc/atomic.conf
-    lineinfile:
-      dest: /etc/atomic.conf
-      regexp: "^#?https_proxy[:=]{1}"
-      line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
-    when:
-    - openshift.common.https_proxy is defined
-    - openshift.common.https_proxy != ''
-
-  - name: Add no_proxy to /etc/atomic.conf
-    lineinfile:
-      dest: /etc/atomic.conf
-      regexp: "^#?no_proxy[:=]{1}"
-      line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
-    when:
-    - openshift.common.no_proxy is defined
-    - openshift.common.no_proxy != ''
-
-
-- block:
-
-  - name: Set to default prepend
+  - name: Set CRI-O image defaults
     set_fact:
       l_crio_image_prepend: "docker.io/gscrivano"
-      l_crio_image_name: "crio-o-fedora"
+      l_crio_image_name: "cri-o-fedora"
+      l_crio_image_tag: "latest"

-  - name: Use Centos based image when distribution is Red Hat or CentOS
+  - name: Use Centos based image when distribution is CentOS
     set_fact:
       l_crio_image_name: "cri-o-centos"
-    when: ansible_distribution in ['RedHat', 'CentOS']
+    when: ansible_distribution == "CentOS"

-  # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
-  - name: Use a testing registry if requested
+  - name: Set CRI-O image tag
     set_fact:
-      l_crio_image_prepend: "{{ openshift_crio_systemcontainer_image_registry_override }}"
+      l_crio_image_tag: "{{ l_openshift_image_tag }}"
     when:
-    - openshift_crio_systemcontainer_image_registry_override is defined
-    - openshift_crio_systemcontainer_image_registry_override != ""
+    - openshift_deployment_type == 'openshift-enterprise'
+
+  - name: Use RHEL based image when distribution is Red Hat
+    set_fact:
+      l_crio_image_prepend: "registry.access.redhat.com/openshift3"
+      l_crio_image_name: "cri-o"
+    when: ansible_distribution == "RedHat"

   - name: Set the full image name
     set_fact:
-      l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:latest"
+      l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
+
+  # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548
+  - name: Use a specific image if requested
+    set_fact:
+      l_crio_image: "{{ openshift_crio_systemcontainer_image_override }}"
+    when:
+    - openshift_crio_systemcontainer_image_override is defined
+    - openshift_crio_systemcontainer_image_override != ""
+
+  # Be nice and let the user see the variable result
+  - debug:
+      var: l_crio_image

 # NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
 - name: Pre-pull CRI-O System Container image
@@ -119,6 +142,14 @@
     image: "{{ l_crio_image }}"
     state: latest

+- name: Remove CRI-O default configuration files
+  file:
+    path: "{{ item }}"
+    state: absent
+  with_items:
+  - /etc/cni/net.d/200-loopback.conf
+  - /etc/cni/net.d/100-crio-bridge.conf
+
 - name: Create the CRI-O configuration
   template:
     dest: /etc/crio/crio.conf
@@ -130,11 +161,19 @@
     path: /etc/cni/net.d/
     state: directory

+- name: setup firewall for CRI-O
+  include: crio_firewall.yml
+  static: yes
+
 - name: Configure the CNI network
   template:
     dest: /etc/cni/net.d/openshift-sdn.conf
     src: 80-openshift-sdn.conf.j2

+- name: Fix SELinux Permissions on /var/lib/containers
+  command: "restorecon -R /var/lib/containers/"
+  changed_when: false
+
 - name: Start the CRI-O service
   systemd:
     name: "cri-o"
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 57a84bc2c..aa3b35ddd 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -1,4 +1,21 @@
 ---
+
+- set_fact:
+    l_openshift_image_tag: "{{ openshift_image_tag | string }}"
+  when: openshift_image_tag is defined
+
+- set_fact:
+    l_openshift_image_tag: "latest"
+  when:
+  - openshift_image_tag is not defined
+  - openshift_release == "latest"
+
+- set_fact:
+    l_openshift_image_tag: "{{ openshift_release | string }}"
+  when:
+  - openshift_image_tag is not defined
+  - openshift_release != "latest"
+
 # If docker_options are provided we should fail. We should not install docker and ignore
 # the users configuration. NOTE: docker_options == inventory:openshift_docker_options
 - name: Fail quickly if openshift_docker_options are set
@@ -51,44 +68,23 @@
   retries: 3
   delay: 30

-
-# Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf
-# regexp: the line starts with or without #, followed by the string
-# http_proxy, then either : or =
-- block:
-
-  - name: Add http_proxy to /etc/atomic.conf
-    lineinfile:
-      dest: /etc/atomic.conf
-      regexp: "^#?http_proxy[:=]{1}"
-      line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
-    when:
-    - openshift.common.http_proxy is defined
-    - openshift.common.http_proxy != ''
-
-  - name: Add https_proxy to /etc/atomic.conf
-    lineinfile:
-      dest: /etc/atomic.conf
-      regexp: "^#?https_proxy[:=]{1}"
-      line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
-    when:
-    - openshift.common.https_proxy is defined
-    - openshift.common.https_proxy != ''
-
-  - name: Add no_proxy to /etc/atomic.conf
-    lineinfile:
-      dest: /etc/atomic.conf
-      regexp: "^#?no_proxy[:=]{1}"
-      line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
-    when:
-    - openshift.common.no_proxy is defined
-    - openshift.common.no_proxy != ''
+- name: Ensure proxies are in the atomic.conf
+  include_role:
+    name: openshift_atomic
+    tasks_from: proxy

 - block:

   - name: Set to default prepend
     set_fact:
       l_docker_image_prepend: "gscrivano"
+      l_docker_image_tag: "latest"
+
+  - name: Set container engine image tag
+    set_fact:
+      l_docker_image_tag: "{{ l_openshift_image_tag }}"
+    when:
+    - openshift_deployment_type == 'openshift-enterprise'

   - name: Use Red Hat Registry for image when distribution is Red Hat
     set_fact:
@@ -100,17 +96,21 @@
       l_docker_image_prepend: "registry.fedoraproject.org/f25"
     when: ansible_distribution == 'Fedora'

-  # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
-  - name: Use a testing registry if requested
+  - name: Set the full image name
     set_fact:
-      l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}"
-    when:
-    - openshift_docker_systemcontainer_image_registry_override is defined
-    - openshift_docker_systemcontainer_image_registry_override != ""
+      l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:{{ l_docker_image_tag }}"

-  - name: Set the full image name
+  # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
+  - name: Use a specific image if requested
     set_fact:
-      l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
+      l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}"
+    when:
+    - openshift_docker_systemcontainer_image_override is defined
+    - openshift_docker_systemcontainer_image_override != ""
+
+  # Be nice and let the user see the variable result
+  - debug:
+      var: l_docker_image

 # NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
 - name: Pre-pull Container Engine System Container image
@@ -144,10 +144,10 @@
 # Set local versions of facts that must be in json format for container-daemon.json
 # NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
 - set_fact:
-    l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}"
+    l_docker_insecure_registries: "{{ l2_docker_insecure_registries | default([]) | to_json }}"
     l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}"
-    l_docker_additional_registries: "{{ docker_additional_registries | default([]) | to_json }}"
-    l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}"
+    l_docker_additional_registries: "{{ l2_docker_additional_registries | default([]) | to_json }}"
+    l_docker_blocked_registries: "{{ l2_docker_blocked_registries | default([]) | to_json }}"
     l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}"

 # Configure container-engine using the container-daemon.json file
@@ -173,4 +173,6 @@
 - set_fact:
     docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"

+- include: registry_auth.yml
+
 - meta: flush_handlers
diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2
index 5b31932b1..93014a80d 100644
--- a/roles/docker/templates/crio.conf.j2
+++ b/roles/docker/templates/crio.conf.j2
@@ -13,12 +13,12 @@ runroot = "/var/run/containers/storage"

 # storage_driver select which storage driver is used to manage storage
 # of images and containers.
-storage_driver = "overlay2"
+storage_driver = "overlay"

 # storage_option is used to pass an option to the storage driver.
 storage_option = [
 {% if ansible_distribution in ['RedHat', 'CentOS'] %}
-    "overlay2.override_kernel_check=1"
+    "overlay.override_kernel_check=1"
 {% endif %}
 ]
@@ -35,6 +35,10 @@ stream_address = ""
 # stream_port is the port on which the stream server will listen
 stream_port = "10010"

+# file_locking is whether file-based locking will be used instead of
+# in-memory locking
+file_locking = true
+
 # The "crio.runtime" table contains settings pertaining to the OCI
 # runtime used and options for how to set up and manage the OCI runtime.
 [crio.runtime]
@@ -67,6 +71,9 @@ runtime_untrusted_workload = ""
 # container runtime for all containers.
 default_workload_trust = "trusted"

+# no_pivot instructs the runtime to not use pivot_root, but instead use MS_MOVE
+no_pivot = false
+
 # conmon is the path to conmon binary, used for managing the runtime.
 conmon = "/usr/libexec/crio/conmon"
@@ -93,6 +100,16 @@ apparmor_profile = "crio-default"
 # for the runtime.
 cgroup_manager = "systemd"

+# hooks_dir_path is the oci hooks directory for automatically executed hooks
+hooks_dir_path = "/usr/share/containers/oci/hooks.d"
+
+# pids_limit is the number of processes allowed in a container
+pids_limit = 1024
+
+# log_size_max is the max limit for the container log size in bytes.
+# Negative values indicate that no limit is imposed.
+log_size_max = 52428800
+
 # The "crio.image" table contains settings pertaining to the
 # management of OCI images.
 [crio.image]
@@ -115,11 +132,21 @@ pause_command = "/pause"
 # unspecified so that the default system-wide policy will be used.
 signature_policy = ""

+# image_volumes controls how image volumes are handled.
+# The valid values are mkdir and ignore.
+image_volumes = "mkdir"
+
 # insecure_registries is used to skip TLS verification when pulling images.
 insecure_registries = [
 {{ l_insecure_crio_registries|default("") }}
 ]

+# registries is used to specify a comma separated list of registries to be used
+# when pulling an unqualified image (e.g. fedora:rawhide).
+registries = [
+{{ l_additional_crio_registries|default("") }}
+]
+
 # The "crio.network" table contains settings pertaining to the
 # management of CNI plugins.
 [crio.network]
diff --git a/roles/docker/templates/custom.conf.j2 b/roles/docker/templates/custom.conf.j2
index 9b47cb6ab..713412473 100644
--- a/roles/docker/templates/custom.conf.j2
+++ b/roles/docker/templates/custom.conf.j2
@@ -3,3 +3,9 @@
 [Unit]
 Wants=iptables.service
 After=iptables.service
+
+# The following line is a work-around to ensure docker is restarted whenever
+# iptables is restarted. This ensures the proper iptables rules will be in
+# place for docker.
+# Note: This will also cause docker to be stopped if iptables is stopped.
+PartOf=iptables.service
diff --git a/roles/docker/templates/registries.conf b/roles/docker/templates/registries.conf
new file mode 100644
index 000000000..d379b2be0
--- /dev/null
+++ b/roles/docker/templates/registries.conf
@@ -0,0 +1,46 @@
+# {{ ansible_managed }}
+# This is a system-wide configuration file used to
+# keep track of registries for various container backends.
+# It adheres to YAML format and does not support recursive
+# lists of registries.
+
+# The default location for this configuration file is /etc/containers/registries.conf.
+
+# The only valid categories are: 'registries', 'insecure_registries',
+# and 'block_registries'.
+
+
+#registries:
+#  - registry.access.redhat.com
+
+{% if l2_docker_additional_registries %}
+registries:
+{% for reg in l2_docker_additional_registries %}
+  - {{ reg }}
+{% endfor %}
+{% endif %}
+
+# If you need to access insecure registries, uncomment the section below
+# and add the registries fully-qualified name. An insecure registry is one
+# that does not have a valid SSL certificate or only does HTTP.
+#insecure_registries:
+#  -
+
+{% if l2_docker_insecure_registries %}
+insecure_registries:
+{% for reg in l2_docker_insecure_registries %}
+  - {{ reg }}
+{% endfor %}
+{% endif %}
+
+# If you need to block pull access from a registry, uncomment the section below
+# and add the registries fully-qualified name.
+#block_registries:
+#  -
+
+{% if l2_docker_blocked_registries %}
+block_registries:
+{% for reg in l2_docker_blocked_registries %}
+  - {{ reg }}
+{% endfor %}
+{% endif %}
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index d12d7a358..4b734d4ed 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -1,6 +1,64 @@
 ---
-r_etcd_firewall_enabled: True
-r_etcd_use_firewalld: False
+r_etcd_common_backup_tag: ''
+r_etcd_common_backup_sufix_name: ''
+
+# runc, docker, host
+r_etcd_common_etcd_runtime: "docker"
+r_etcd_common_embedded_etcd: false
+
+# etcd run on a host => use etcdctl command directly
+# etcd run as a docker container => use docker exec
+# etcd run as a runc container => use runc exec
+r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
+
+# etcd server vars
+etcd_conf_dir: '/etc/etcd'
+etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
+etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
+etcd_key_file: "{{ etcd_conf_dir }}/server.key"
+etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
+etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"
+
+# etcd ca vars
+etcd_ca_dir: "{{ etcd_conf_dir}}/ca"
+etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs"
+etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt"
+etcd_ca_key: "{{ etcd_ca_dir }}/ca.key"
+etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf"
+etcd_ca_name: etcd_ca
+etcd_req_ext: etcd_v3_req
+etcd_ca_exts_peer: etcd_v3_ca_peer
+etcd_ca_exts_server: etcd_v3_ca_server
+etcd_ca_exts_self: etcd_v3_ca_self
+etcd_ca_exts_client: etcd_v3_ca_client
+etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl"
+etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs"
+etcd_ca_db: "{{ etcd_ca_dir }}/index.txt"
+etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
+etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
+etcd_ca_default_days: 1825
+
+r_etcd_common_master_peer_cert_file: /etc/origin/master/master.etcd-client.crt
+r_etcd_common_master_peer_key_file: /etc/origin/master/master.etcd-client.key
+r_etcd_common_master_peer_ca_file: /etc/origin/master/master.etcd-ca.crt
+
+# etcd server & certificate vars
+etcd_hostname: "{{ inventory_hostname }}"
+etcd_ip: "{{ ansible_default_ipv4.address }}"
+etcd_is_atomic: False
+etcd_is_containerized: False
+etcd_is_thirdparty: False
+
+# etcd dir vars
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
+
+# etcd ports and protocols
+etcd_client_port: 2379
+etcd_peer_port: 2380
+etcd_url_scheme: http
+etcd_peer_url_scheme: http

 etcd_initial_cluster_state: new
 etcd_initial_cluster_token: etcd-cluster-1
@@ -10,8 +68,16 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_
 etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
 etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"

-etcd_client_port: 2379
-etcd_peer_port: 2380
+# required role variable
+#etcd_peer: 127.0.0.1
+etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
+
+etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}"
+# Location of the service file is fixed and not meant to be changed
+etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service"
+
+r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"

 etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
 r_etcd_os_firewall_deny: []
@@ -20,3 +86,6 @@ r_etcd_os_firewall_allow:
   port: "{{etcd_client_port}}/tcp"
 - service: etcd peering
   port: "{{ etcd_peer_port }}/tcp"
+
+# set the backend quota to 4GB by default
+etcd_quota_backend_bytes: 4294967296
diff --git a/roles/etcd_common/library/delegated_serial_command.py b/roles/etcd/library/delegated_serial_command.py
index 0cab1ca88..0cab1ca88 100755
--- a/roles/etcd_common/library/delegated_serial_command.py
+++ b/roles/etcd/library/delegated_serial_command.py
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 9a955c822..879ca4f4e 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -18,5 +18,4 @@ galaxy_info:
 dependencies:
 - role: lib_openshift
 - role: lib_os_firewall
-- role: etcd_server_certificates
-- role: etcd_common
+- role: lib_utils
diff --git a/roles/etcd_migrate/tasks/clean_data.yml b/roles/etcd/tasks/auxiliary/clean_data.yml
index 95a0e7c0a..1ed2db5bc 100644
--- a/roles/etcd_migrate/tasks/clean_data.yml
+++ b/roles/etcd/tasks/auxiliary/clean_data.yml
@@ -1,5 +1,5 @@
 ---
 - name: Remove member data
   file:
-    path: /var/lib/etcd/member
+    path: "{{ etcd_data_dir }}/member"
     state: absent
diff --git a/roles/etcd/tasks/auxiliary/disable_etcd.yml b/roles/etcd/tasks/auxiliary/disable_etcd.yml
new file mode 100644
index 000000000..7c6d0409d
--- /dev/null
+++ b/roles/etcd/tasks/auxiliary/disable_etcd.yml
@@ -0,0 +1,5 @@
+---
+- name: Disable etcd members
+  service:
+    name: "{{ etcd_service }}"
+    state: stopped
diff --git a/roles/etcd_common/tasks/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
index 6cb456677..11bd2310e 100644
--- a/roles/etcd_common/tasks/drop_etcdctl.yml
+++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
@@ -3,7 +3,7 @@
   package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
   when: not openshift.common.is_atomic | bool

-- name: Configure etcd profile.d alises
+- name: Configure etcd profile.d aliases
   template:
     dest: "/etc/profile.d/etcdctl.sh"
     src: etcdctl.sh.j2
diff --git a/roles/etcd/tasks/auxiliary/force_new_cluster.yml b/roles/etcd/tasks/auxiliary/force_new_cluster.yml
new file mode 100644
index 000000000..ae8a36130
--- /dev/null
+++ b/roles/etcd/tasks/auxiliary/force_new_cluster.yml
@@ -0,0 +1,31 @@
+---
+- name: Set ETCD_FORCE_NEW_CLUSTER=true on first etcd host
+  lineinfile:
+    line: "ETCD_FORCE_NEW_CLUSTER=true"
+    dest: /etc/etcd/etcd.conf
+    backup: true
+
+- name: Start etcd
+  systemd:
+    name: "{{ etcd_service }}"
+    state: started
+
+- name: Wait for cluster to become healthy after bringing up first member
+  command: >
+    etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health
+  register: l_etcd_migrate_health
+  until: l_etcd_migrate_health.rc == 0
+  retries: 3
+  delay: 30
+
+- name: Unset ETCD_FORCE_NEW_CLUSTER=true on first etcd host
+  lineinfile:
+    line: "ETCD_FORCE_NEW_CLUSTER=true"
+    dest: /etc/etcd/etcd.conf
+    state: absent
+    backup: true
+
+- name: Restart first etcd host
+  systemd:
+    name: "{{ etcd_service }}"
+    state: restarted
diff --git a/roles/etcd/tasks/backup.archive.yml b/roles/etcd/tasks/backup.archive.yml
new file mode 100644
index 000000000..6daa6dc51
--- /dev/null
+++ b/roles/etcd/tasks/backup.archive.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/archive.yml
diff --git a/roles/etcd/tasks/backup.copy.yml b/roles/etcd/tasks/backup.copy.yml
new file mode 100644
index 000000000..cc540cbca
--- /dev/null
+++ b/roles/etcd/tasks/backup.copy.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/copy.yml
diff --git a/roles/etcd/tasks/backup.fetch.yml b/roles/etcd/tasks/backup.fetch.yml
new file mode 100644
index 000000000..26ec15043
--- /dev/null
+++ b/roles/etcd/tasks/backup.fetch.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/fetch.yml
diff --git a/roles/etcd/tasks/backup.force_new_cluster.yml b/roles/etcd/tasks/backup.force_new_cluster.yml
new file mode 100644
index 000000000..d2e866416
--- /dev/null
+++ b/roles/etcd/tasks/backup.force_new_cluster.yml
@@ -0,0 +1,12 @@
+---
+- include: backup/vars.yml
+
+- name: Move content of etcd backup under the etcd data directory
+  command: >
+    mv "{{ l_etcd_backup_dir }}/member" "{{ etcd_data_dir }}"
+
+- name: Set etcd group for the etcd data directory
+  command: >
+    chown -R etcd:etcd "{{ etcd_data_dir }}"
+
+- include: auxiliary/force_new_cluster.yml
diff --git a/roles/etcd/tasks/backup.unarchive.yml b/roles/etcd/tasks/backup.unarchive.yml
new file mode 100644
index 000000000..77a637360
--- /dev/null
+++ b/roles/etcd/tasks/backup.unarchive.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/unarchive.yml
diff --git a/roles/etcd/tasks/backup.yml b/roles/etcd/tasks/backup.yml
new file mode 100644
index 000000000..c0538e596
--- /dev/null
+++ b/roles/etcd/tasks/backup.yml
@@ -0,0 +1,2 @@
+---
+- include: backup/backup.yml
diff --git a/roles/etcd/tasks/backup/archive.yml b/roles/etcd/tasks/backup/archive.yml
new file mode 100644
index 000000000..f6aa68a6e
--- /dev/null
+++ b/roles/etcd/tasks/backup/archive.yml
@@ -0,0 +1,5 @@
+---
+- name: Archive backup
+  archive:
+    path: "{{ l_etcd_backup_dir }}"
+    dest: "{{ l_etcd_backup_dir }}.tgz"
diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd/tasks/backup/backup.yml
index 2bc486d3f..ca0d29155 100644
--- a/roles/etcd_common/tasks/backup.yml
+++ b/roles/etcd/tasks/backup/backup.yml
@@ -1,25 +1,9 @@
 ---
-# set the etcd backup directory name here in case the tag or sufix consists of dynamic value that changes over time
-# e.g.
openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} value will change every second so if the date changes -# right after setting l_etcd_incontainer_backup_dir and before l_etcd_backup_dir facts, the backup directory name is different -- set_fact: - l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}" - -- set_fact: - l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}" - -- set_fact: - l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}" - -- set_fact: - l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}" - -- set_fact: - l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}" +- include: vars.yml # TODO: replace shell module with command and update later checks - name: Check available disk space for etcd backup - shell: df --output=avail -k {{ l_etcd_data_dir }} | tail -n 1 + shell: df --output=avail -k {{ etcd_data_dir }} | tail -n 1 register: l_avail_disk # AUDIT:changed_when: `false` because we are only inspecting # state, not manipulating anything @@ -27,9 +11,8 @@ # TODO: replace shell module with command and update later checks - name: Check current etcd disk usage - shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1 + shell: du --exclude='*openshift-backup*' -k {{ etcd_data_dir }} | tail -n 1 | cut -f1 register: l_etcd_disk_usage - when: r_etcd_common_embedded_etcd | bool # AUDIT:changed_when: `false` because we are only inspecting # state, not manipulating anything changed_when: false @@ -37,9 +20,9 @@ - name: Abort if insufficient disk space for etcd backup fail: msg: > - {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup, + {{ l_etcd_disk_usage.stdout|int*2 }} Kb disk space required for etcd backup, {{ l_avail_disk.stdout }} Kb available. - when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int) + when: l_etcd_disk_usage.stdout|int*2 > l_avail_disk.stdout|int # For non containerized and non embedded we should have the correct version of # etcd installed already. So don't do anything. 
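The reworked guard above drops the embedded-etcd restriction and now requires free space of at least twice the current etcd data usage. A minimal standalone sketch of the same check, with hypothetical sizes plugged in:

```yaml
# Sketch only: the sizes are hypothetical; the condition mirrors the task above.
- set_fact:
    l_usage_kb: 1048576     # pretend `du` reported 1 GiB under etcd_data_dir
    l_avail_kb: 1572864     # pretend `df` reported 1.5 GiB available

- name: Abort if insufficient disk space for etcd backup
  fail:
    msg: "{{ l_usage_kb|int * 2 }} Kb required, {{ l_avail_kb }} Kb available"
  when: l_usage_kb|int * 2 > l_avail_kb|int   # 2097152 > 1572864, so this aborts
```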
@@ -61,17 +44,17 @@ - r_etcd_common_embedded_etcd | bool - not l_ostree_booted.stat.exists | bool -- name: Check selinux label of '{{ l_etcd_data_dir }}' +- name: Check selinux label of '{{ etcd_data_dir }}' command: > - stat -c '%C' {{ l_etcd_data_dir }} + stat -c '%C' {{ etcd_data_dir }} register: l_etcd_selinux_labels - debug: msg: "{{ l_etcd_selinux_labels }}" -- name: Make sure the '{{ l_etcd_data_dir }}' has the proper label +- name: Make sure the '{{ etcd_data_dir }}' has the proper label command: > - chcon -t svirt_sandbox_file_t "{{ l_etcd_data_dir }}" + chcon -t svirt_sandbox_file_t "{{ etcd_data_dir }}" when: - l_etcd_selinux_labels.rc == 0 - "'svirt_sandbox_file_t' not in l_etcd_selinux_labels.stdout" @@ -85,12 +68,12 @@ # https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6 - name: Check for v3 data store stat: - path: "{{ l_etcd_data_dir }}/member/snap/db" + path: "{{ etcd_data_dir }}/member/snap/db" register: l_v3_db - name: Copy etcd v3 data store command: > - cp -a {{ l_etcd_data_dir }}/member/snap/db + cp -a {{ etcd_data_dir }}/member/snap/db {{ l_etcd_backup_dir }}/member/snap/ when: l_v3_db.stat.exists diff --git a/roles/etcd/tasks/backup/copy.yml b/roles/etcd/tasks/backup/copy.yml new file mode 100644 index 000000000..967e5ee66 --- /dev/null +++ b/roles/etcd/tasks/backup/copy.yml @@ -0,0 +1,5 @@ +--- +- name: Copy etcd backup + copy: + src: "{{ etcd_backup_sync_directory }}/{{ l_backup_dir_name }}.tgz" + dest: "{{ etcd_data_dir }}" diff --git a/roles/etcd/tasks/backup/fetch.yml b/roles/etcd/tasks/backup/fetch.yml new file mode 100644 index 000000000..610ce1960 --- /dev/null +++ b/roles/etcd/tasks/backup/fetch.yml @@ -0,0 +1,8 @@ +--- +- name: Fetch etcd backup + fetch: + src: "{{ l_etcd_backup_dir }}.tgz" + dest: "{{ etcd_backup_sync_directory }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes diff --git a/roles/etcd/tasks/backup/unarchive.yml b/roles/etcd/tasks/backup/unarchive.yml new file mode 100644 index 000000000..a85f533c2 --- /dev/null +++ b/roles/etcd/tasks/backup/unarchive.yml @@ -0,0 +1,14 @@ +--- +- shell: ls /var/lib/etcd + register: output + +- debug: + msg: "output: {{ output }}" + +- name: Unarchive backup + # can't use unarchive https://github.com/ansible/ansible/issues/30821 + # unarchive: + # src: "{{ l_etcd_backup_dir }}.tgz" + # dest: "{{ l_etcd_backup_dir }}" + command: > + tar -xf "{{ l_etcd_backup_dir }}.tgz" -C "{{ etcd_data_dir }}" diff --git a/roles/etcd/tasks/backup/vars.yml b/roles/etcd/tasks/backup/vars.yml new file mode 100644 index 000000000..3ffa641b3 --- /dev/null +++ b/roles/etcd/tasks/backup/vars.yml @@ -0,0 +1,15 @@ +--- +# set the etcd backup directory name here in case the tag or sufix consists of dynamic value that changes over time +# e.g. 
openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} value will change every second so if the date changes +# right after setting l_etcd_incontainer_backup_dir and before l_etcd_backup_dir facts, the backup directory name is different +- set_fact: + l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}" + +- set_fact: + l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}" + +- set_fact: + l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}" + +- set_fact: + l_etcd_backup_dir: "{{ etcd_data_dir }}/{{ l_backup_dir_name }}" diff --git a/roles/etcd/tasks/backup_ca_certificates.yml b/roles/etcd/tasks/backup_ca_certificates.yml new file mode 100644 index 000000000..a41b032f3 --- /dev/null +++ b/roles/etcd/tasks/backup_ca_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/backup_ca_certificates.yml diff --git a/roles/etcd/tasks/backup_generated_certificates.yml b/roles/etcd/tasks/backup_generated_certificates.yml new file mode 100644 index 000000000..8cf2a10cc --- /dev/null +++ b/roles/etcd/tasks/backup_generated_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/backup_generated_certificates.yml diff --git a/roles/etcd/tasks/backup_master_etcd_certificates.yml b/roles/etcd/tasks/backup_master_etcd_certificates.yml new file mode 100644 index 000000000..129e1831c --- /dev/null +++ b/roles/etcd/tasks/backup_master_etcd_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/backup_master_etcd_certificates.yml diff --git a/roles/etcd/tasks/backup_server_certificates.yml b/roles/etcd/tasks/backup_server_certificates.yml new file mode 100644 index 000000000..267ffeb4d --- /dev/null +++ b/roles/etcd/tasks/backup_server_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/backup_server_certificates.yml diff --git a/roles/etcd/tasks/ca.yml b/roles/etcd/tasks/ca.yml new file mode 100644 index 000000000..cca1e9ad7 --- /dev/null +++ b/roles/etcd/tasks/ca.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/deploy_ca.yml diff --git a/roles/etcd/tasks/certificates/backup_ca_certificates.yml b/roles/etcd/tasks/certificates/backup_ca_certificates.yml new file mode 100644 index 000000000..f60eb82ef --- /dev/null +++ b/roles/etcd/tasks/certificates/backup_ca_certificates.yml @@ -0,0 +1,12 @@ +--- +- name: Determine if CA certificate directory exists + stat: + path: "{{ etcd_ca_dir }}" + register: etcd_ca_certs_dir_stat +- name: Backup generated etcd certificates + command: > + tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz + {{ etcd_ca_dir }} + args: + warn: no + when: etcd_ca_certs_dir_stat.stat.exists | bool diff --git a/roles/etcd/tasks/certificates/backup_generated_certificates.yml b/roles/etcd/tasks/certificates/backup_generated_certificates.yml new file mode 100644 index 000000000..6a24cfcb3 --- /dev/null +++ b/roles/etcd/tasks/certificates/backup_generated_certificates.yml @@ -0,0 +1,13 @@ +--- +- name: Determine if generated etcd certificates exist + stat: + path: "{{ etcd_conf_dir }}/generated_certs" + register: etcd_generated_certs_dir_stat + +- name: Backup generated etcd certificates + command: > + tar -czf {{ etcd_conf_dir }}/etcd-generated-certificate-backup-{{ ansible_date_time.epoch }}.tgz + {{ etcd_conf_dir }}/generated_certs + args: + warn: no + when: etcd_generated_certs_dir_stat.stat.exists | bool diff --git a/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml 
b/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml new file mode 100644 index 000000000..e65b3e5a2 --- /dev/null +++ b/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml @@ -0,0 +1,7 @@ +--- +- name: Backup master etcd certificates + shell: > + tar -czvf /etc/origin/master/master-etcd-certificate-backup-{{ ansible_date_time.epoch }}.tgz + /etc/origin/master/master.etcd-* + args: + warn: no diff --git a/roles/etcd/tasks/certificates/backup_server_certificates.yml b/roles/etcd/tasks/certificates/backup_server_certificates.yml new file mode 100644 index 000000000..8e6cc6965 --- /dev/null +++ b/roles/etcd/tasks/certificates/backup_server_certificates.yml @@ -0,0 +1,11 @@ +--- +- name: Backup etcd certificates + command: > + tar -czvf /etc/etcd/etcd-server-certificate-backup-{{ ansible_date_time.epoch }}.tgz + {{ etcd_conf_dir }}/ca.crt + {{ etcd_conf_dir }}/server.crt + {{ etcd_conf_dir }}/server.key + {{ etcd_conf_dir }}/peer.crt + {{ etcd_conf_dir }}/peer.key + args: + warn: no diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd/tasks/certificates/deploy_ca.yml index b4dea4a07..3d32290a2 100644 --- a/roles/etcd_ca/tasks/main.yml +++ b/roles/etcd/tasks/certificates/deploy_ca.yml @@ -1,6 +1,8 @@ --- - name: Install openssl - package: name=openssl state=present + package: + name: openssl + state: present when: not etcd_is_atomic | bool delegate_to: "{{ etcd_ca_host }}" run_once: true diff --git a/roles/etcd/tasks/certificates/distribute_ca.yml b/roles/etcd/tasks/certificates/distribute_ca.yml new file mode 100644 index 000000000..632ac15dd --- /dev/null +++ b/roles/etcd/tasks/certificates/distribute_ca.yml @@ -0,0 +1,47 @@ +--- +- name: Create a tarball of the etcd ca certs + command: > + tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz + -C {{ etcd_ca_dir }} . + args: + creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + warn: no + delegate_to: "{{ etcd_ca_host }}" + run_once: true + +- name: Retrieve etcd ca cert tarball + fetch: + src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + dest: "{{ etcd_sync_cert_dir }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + delegate_to: "{{ etcd_ca_host }}" + run_once: true + +- name: Ensure ca directory exists + file: + path: "{{ etcd_ca_dir }}" + state: directory + +- name: Unarchive etcd ca cert tarballs + unarchive: + src: "{{ etcd_sync_cert_dir }}/{{ etcd_ca_name }}.tgz" + dest: "{{ etcd_ca_dir }}" + +- name: Read current etcd CA + slurp: + src: "{{ etcd_conf_dir }}/ca.crt" + register: g_current_etcd_ca_output + +- name: Read new etcd CA + slurp: + src: "{{ etcd_ca_dir }}/ca.crt" + register: g_new_etcd_ca_output + +- copy: + content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}" + dest: "{{ item }}/ca.crt" + with_items: + - "{{ etcd_conf_dir }}" + - "{{ etcd_ca_dir }}" diff --git a/roles/etcd_client_certificates/tasks/main.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml index bbd29ece1..119071a72 100644 --- a/roles/etcd_client_certificates/tasks/main.yml +++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml @@ -9,7 +9,7 @@ - fail: msg: > CA certificate {{ etcd_ca_cert }} doesn't exist on CA host - {{ etcd_ca_host }}. Apply 'etcd_ca' role to + {{ etcd_ca_host }}. Apply 'etcd_ca' action from `etcd` role to {{ etcd_ca_host }}. 
when: not g_ca_cert_stat_result.stat.exists | bool run_once: true diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml index 4795188a6..00b8f4a0b 100644 --- a/roles/etcd_server_certificates/tasks/main.yml +++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml @@ -1,6 +1,8 @@ --- - name: Install etcd - package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present + package: + name: "etcd{{ '-' + etcd_version if etcd_version is defined else '' }}" + state: present when: not etcd_is_containerized | bool - name: Check status of etcd certificates @@ -10,9 +12,6 @@ - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt" - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt" - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt" - - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt" - - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt" - - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt" register: g_etcd_server_cert_stat_result when: not etcd_certificates_redeploy | default(false) | bool @@ -139,7 +138,6 @@ state: directory with_items: - "{{ etcd_cert_config_dir }}" - - "{{ etcd_system_container_cert_config_dir }}" when: etcd_server_certs_missing | bool - name: Unarchive cert tarball @@ -174,25 +172,8 @@ state: directory with_items: - "{{ etcd_ca_dir }}" - - "{{ etcd_system_container_cert_config_dir }}/ca" when: etcd_server_certs_missing | bool -- name: Unarchive cert tarball for the system container - unarchive: - src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz" - dest: "{{ etcd_system_container_cert_config_dir }}" - when: - - etcd_server_certs_missing | bool - - r_etcd_common_etcd_runtime == 'runc' - -- name: Unarchive etcd ca cert tarballs for the system container - unarchive: - src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_ca_name }}.tgz" - dest: "{{ etcd_system_container_cert_config_dir }}/ca" - when: - - etcd_server_certs_missing | bool - - r_etcd_common_etcd_runtime == 'runc' - - name: Delete temporary directory local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent become: no diff --git a/roles/etcd/tasks/certificates/remove_ca_certificates.yml b/roles/etcd/tasks/certificates/remove_ca_certificates.yml new file mode 100644 index 000000000..4a86eb60d --- /dev/null +++ b/roles/etcd/tasks/certificates/remove_ca_certificates.yml @@ -0,0 +1,5 @@ +--- +- name: Remove CA certificate directory + file: + path: "{{ etcd_ca_dir }}" + state: absent diff --git a/roles/etcd/tasks/certificates/remove_generated_certificates.yml b/roles/etcd/tasks/certificates/remove_generated_certificates.yml new file mode 100644 index 000000000..993b18de2 --- /dev/null +++ b/roles/etcd/tasks/certificates/remove_generated_certificates.yml @@ -0,0 +1,5 @@ +--- +- name: Remove generated etcd certificates + file: + path: "{{ etcd_conf_dir }}/generated_certs" + state: absent diff --git a/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml b/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml new file mode 100644 index 000000000..70b5c6523 --- /dev/null +++ b/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml @@ -0,0 +1,8 @@ +--- +- name: Retrieve etcd CA certificate + fetch: + src: "{{ etcd_conf_dir }}/ca.crt" + dest: "{{ etcd_sync_cert_dir }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes diff --git 
a/roles/etcd/tasks/check_cluster_health.yml b/roles/etcd/tasks/check_cluster_health.yml new file mode 100644 index 000000000..75c110972 --- /dev/null +++ b/roles/etcd/tasks/check_cluster_health.yml @@ -0,0 +1,2 @@ +--- +- include: migration/check_cluster_health.yml diff --git a/roles/etcd/tasks/clean_data.yml b/roles/etcd/tasks/clean_data.yml new file mode 100644 index 000000000..d131ffd21 --- /dev/null +++ b/roles/etcd/tasks/clean_data.yml @@ -0,0 +1,2 @@ +--- +- include: auxiliary/clean_data.yml diff --git a/roles/etcd/tasks/client_certificates.yml b/roles/etcd/tasks/client_certificates.yml new file mode 100644 index 000000000..2f4108a0d --- /dev/null +++ b/roles/etcd/tasks/client_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/fetch_client_certificates_from_ca.yml diff --git a/roles/etcd/tasks/disable_etcd.yml b/roles/etcd/tasks/disable_etcd.yml new file mode 100644 index 000000000..9202e6e48 --- /dev/null +++ b/roles/etcd/tasks/disable_etcd.yml @@ -0,0 +1,2 @@ +--- +- include: auxiliary/disable_etcd.yml diff --git a/roles/etcd/tasks/distribute_ca b/roles/etcd/tasks/distribute_ca new file mode 100644 index 000000000..040c5f7af --- /dev/null +++ b/roles/etcd/tasks/distribute_ca @@ -0,0 +1,2 @@ +--- +- include: certificates/distribute_ca.yml diff --git a/roles/etcd/tasks/drop_etcdctl.yml b/roles/etcd/tasks/drop_etcdctl.yml new file mode 100644 index 000000000..4c1f609f7 --- /dev/null +++ b/roles/etcd/tasks/drop_etcdctl.yml @@ -0,0 +1,2 @@ +--- +- include: auxiliary/drop_etcdctl.yml diff --git a/roles/etcd/tasks/fetch_backup.yml b/roles/etcd/tasks/fetch_backup.yml new file mode 100644 index 000000000..513eed17a --- /dev/null +++ b/roles/etcd/tasks/fetch_backup.yml @@ -0,0 +1,8 @@ +--- +- include: backup/vars.yml + +- include: backup/archive.yml + +- include: backup/sync_backup.yml + +- include: backup/ diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 78e543ef1..3e69af314 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -14,10 +14,7 @@ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present when: not etcd_is_containerized | bool -- include_role: - name: etcd_common - vars: - r_etcd_common_action: drop_etcdctl +- include: drop_etcdctl.yml when: - openshift_etcd_etcdctl_profile | default(true) | bool diff --git a/roles/etcd/tasks/migrate.add_ttls.yml b/roles/etcd/tasks/migrate.add_ttls.yml new file mode 100644 index 000000000..bc27e4ea1 --- /dev/null +++ b/roles/etcd/tasks/migrate.add_ttls.yml @@ -0,0 +1,2 @@ +--- +- include: migration/add_ttls.yml diff --git a/roles/etcd/tasks/migrate.configure_master.yml b/roles/etcd/tasks/migrate.configure_master.yml new file mode 100644 index 000000000..3ada6e362 --- /dev/null +++ b/roles/etcd/tasks/migrate.configure_master.yml @@ -0,0 +1,2 @@ +--- +- include: migration/configure_master.yml diff --git a/roles/etcd/tasks/migrate.pre_check.yml b/roles/etcd/tasks/migrate.pre_check.yml new file mode 100644 index 000000000..124d21561 --- /dev/null +++ b/roles/etcd/tasks/migrate.pre_check.yml @@ -0,0 +1,2 @@ +--- +- include: migration/check.yml diff --git a/roles/etcd/tasks/migrate.yml b/roles/etcd/tasks/migrate.yml new file mode 100644 index 000000000..5d5385873 --- /dev/null +++ b/roles/etcd/tasks/migrate.yml @@ -0,0 +1,2 @@ +--- +- include: migration/migrate.yml diff --git a/roles/etcd_migrate/tasks/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml index c10465af9..14625e49e 100644 --- a/roles/etcd_migrate/tasks/add_ttls.yml +++ 
b/roles/etcd/tasks/migration/add_ttls.yml @@ -8,6 +8,7 @@ accessTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.accessTokenMaxAgeSeconds | default(86400) }}" authroizeTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.authroizeTokenMaxAgeSeconds | default(500) }}" controllerLeaseTTL: "{{ (g_master_config_output.content|b64decode|from_yaml).controllerLeaseTTL | default(30) }}" + - name: Re-introduce leases (as a replacement for key TTLs) command: > oadm migrate etcd-ttl \ diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd/tasks/migration/check.yml index 0804d9e1c..0804d9e1c 100644 --- a/roles/etcd_migrate/tasks/check.yml +++ b/roles/etcd/tasks/migration/check.yml diff --git a/roles/etcd_migrate/tasks/check_cluster_health.yml b/roles/etcd/tasks/migration/check_cluster_health.yml index 201d83f99..201d83f99 100644 --- a/roles/etcd_migrate/tasks/check_cluster_health.yml +++ b/roles/etcd/tasks/migration/check_cluster_health.yml diff --git a/roles/etcd_migrate/tasks/check_cluster_status.yml b/roles/etcd/tasks/migration/check_cluster_status.yml index b69fb5a52..b69fb5a52 100644 --- a/roles/etcd_migrate/tasks/check_cluster_status.yml +++ b/roles/etcd/tasks/migration/check_cluster_status.yml diff --git a/roles/etcd_migrate/tasks/configure.yml b/roles/etcd/tasks/migration/configure_master.yml index a305d5bf3..a305d5bf3 100644 --- a/roles/etcd_migrate/tasks/configure.yml +++ b/roles/etcd/tasks/migration/configure_master.yml diff --git a/roles/etcd_migrate/tasks/migrate.yml b/roles/etcd/tasks/migration/migrate.yml index 173de77f4..54a9c74ff 100644 --- a/roles/etcd_migrate/tasks/migrate.yml +++ b/roles/etcd/tasks/migration/migrate.yml @@ -21,15 +21,24 @@ lineinfile: line: "ETCD_FORCE_NEW_CLUSTER=true" dest: /etc/etcd/etcd.conf + backup: true - name: Start etcd systemd: name: "{{ l_etcd_service }}" state: started +- name: Wait for cluster to become healthy after bringing up first member + command: > + etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health + register: l_etcd_migrate_health + until: l_etcd_migrate_health.rc == 0 + retries: 3 + delay: 30 - name: Unset ETCD_FORCE_NEW_CLUSTER=true on first etcd host lineinfile: line: "ETCD_FORCE_NEW_CLUSTER=true" dest: /etc/etcd/etcd.conf state: absent + backup: true - name: Restart first etcd host systemd: name: "{{ l_etcd_service }}" diff --git a/roles/etcd/tasks/remove_ca_certificates.yml b/roles/etcd/tasks/remove_ca_certificates.yml new file mode 100644 index 000000000..36df1a1cc --- /dev/null +++ b/roles/etcd/tasks/remove_ca_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/remove_ca_certificates.yml diff --git a/roles/etcd/tasks/remove_generated_certificates.yml b/roles/etcd/tasks/remove_generated_certificates.yml new file mode 100644 index 000000000..b10a4b32d --- /dev/null +++ b/roles/etcd/tasks/remove_generated_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/remove_generated_certificates.yml diff --git a/roles/etcd/tasks/retrieve_ca_certificates.yml b/roles/etcd/tasks/retrieve_ca_certificates.yml new file mode 100644 index 000000000..bd6c4ec85 --- /dev/null +++ b/roles/etcd/tasks/retrieve_ca_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/retrieve_ca_certificates.yml diff --git a/roles/etcd/tasks/server_certificates.yml b/roles/etcd/tasks/server_certificates.yml new 
file mode 100644 index 000000000..ae26079f9 --- /dev/null +++ b/roles/etcd/tasks/server_certificates.yml @@ -0,0 +1,6 @@ +--- +- include: ca.yml + when: + - etcd_ca_setup | default(True) | bool + +- include: certificates/fetch_server_certificates_from_ca.yml diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml index e735bf50a..f71d9b551 100644 --- a/roles/etcd/tasks/system_container.yml +++ b/roles/etcd/tasks/system_container.yml @@ -1,6 +1,8 @@ --- -- set_fact: - l_etcd_src_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}" +- name: Ensure proxies are in the atomic.conf + include_role: + name: openshift_atomic + tasks_from: proxy - name: Pull etcd system container command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }} @@ -17,6 +19,7 @@ {{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }}, {%- endif -%} {% endfor -%} + when: etcd_initial_cluster is undefined - name: Check etcd system container package command: > @@ -51,36 +54,13 @@ - name: Systemd reload configuration systemd: name=etcd_container daemon_reload=yes -- name: Check for previous etcd data store - stat: - path: "{{ l_etcd_src_data_dir }}/member/" - register: src_datastore - -- name: Check for etcd system container data store - stat: - path: "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member" - register: dest_datastore - -- name: Ensure that etcd system container data dirs exist - file: path="{{ item }}" state=directory - with_items: - - "{{ r_etcd_common_system_container_host_dir }}/etc" - - "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd" - -- name: Copy etcd data store - command: > - cp -a {{ l_etcd_src_data_dir }}/member - {{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member - when: - - src_datastore.stat.exists - - not dest_datastore.stat.exists - - name: Install or Update Etcd system container package oc_atomic_container: name: etcd image: "{{ openshift.etcd.etcd_image }}" state: latest values: + - ETCD_DATA_DIR=/var/lib/etcd - ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }} - ETCD_NAME={{ etcd_hostname }} - ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }} @@ -89,11 +69,21 @@ - ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }} - ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }} - ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} - - ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt - - ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt - - ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key - - ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt - - ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt - - ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key - - ETCD_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt - - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt + - ETCD_CA_FILE={{ etcd_ca_file }} + - ETCD_CERT_FILE={{ etcd_cert_file }} + - ETCD_KEY_FILE={{ etcd_key_file }} + - ETCD_PEER_CA_FILE={{ etcd_peer_ca_file }} + - ETCD_PEER_CERT_FILE={{ etcd_peer_cert_file }} + - ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }} + - ETCD_TRUSTED_CA_FILE={{ etcd_ca_file }} + - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_peer_ca_file }} + - 
'ADDTL_MOUNTS=,{"type":"bind","source":"/etc/","destination":"/etc/","options":["rbind","rw","rslave"]},{"type":"bind","source":"/var/lib/etcd","destination":"/var/lib/etcd/","options":["rbind","rw","rslave"]}' + +- name: Ensure etcd datadir ownership for the system container + file: + path: "{{ etcd_data_dir }}" + state: directory + mode: 0700 + owner: root + group: root + recurse: True diff --git a/roles/etcd_upgrade/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade/upgrade_image.yml index 136ec1142..24071f9ad 100644 --- a/roles/etcd_upgrade/tasks/upgrade_image.yml +++ b/roles/etcd/tasks/upgrade/upgrade_image.yml @@ -20,6 +20,11 @@ regexp: "{{ current_image.stdout }}$" replace: "{{ new_etcd_image }}" +- lineinfile: + destfile: "{{ etcd_conf_file }}" + regexp: '^ETCD_QUOTA_BACKEND_BYTES=' + line: "ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}" + - name: Restart etcd_container systemd: name: "{{ etcd_service }}" @@ -29,8 +34,15 @@ ## TODO: probably should just move this into the backup playbooks, also this ## will fail on atomic host. We need to revisit how to do etcd backups there as ## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1) +- name: Detecting Atomic Host Operating System + stat: + path: /run/ostree-booted + register: l_ostree_booted + - name: Upgrade etcd for etcdctl when not atomic - package: name=etcd state=latest + package: + name: etcd + state: latest when: not l_ostree_booted.stat.exists | bool - name: Verify cluster is healthy diff --git a/roles/etcd_upgrade/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade/upgrade_rpm.yml index 324b69605..505e28afb 100644 --- a/roles/etcd_upgrade/tasks/upgrade_rpm.yml +++ b/roles/etcd/tasks/upgrade/upgrade_rpm.yml @@ -19,6 +19,11 @@ name: "{{ l_etcd_target_package }}" state: latest +- lineinfile: + destfile: "{{ etcd_conf_file }}" + regexp: '^ETCD_QUOTA_BACKEND_BYTES=' + line: "ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}" + - name: Restart etcd service: name: "{{ etcd_service }}" diff --git a/roles/etcd/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade_image.yml new file mode 100644 index 000000000..9e69027eb --- /dev/null +++ b/roles/etcd/tasks/upgrade_image.yml @@ -0,0 +1,2 @@ +--- +- include: upgrade/upgrade_image.yml diff --git a/roles/etcd/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade_rpm.yml new file mode 100644 index 000000000..29603d2b6 --- /dev/null +++ b/roles/etcd/tasks/upgrade_rpm.yml @@ -0,0 +1,2 @@ +--- +- include: upgrade/upgrade_rpm.yml diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2 index ce362c743..3027a9447 100644 --- a/roles/etcd/templates/etcd.conf.j2 +++ b/roles/etcd/templates/etcd.conf.j2 @@ -11,7 +11,8 @@ ETCD_NAME={{ etcd_hostname }} ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }} ETCD_DATA_DIR={{ etcd_data_dir }} -#ETCD_SNAPSHOT_COUNTER=10000 +#ETCD_WAL_DIR="" +#ETCD_SNAPSHOT_COUNT=10000 ETCD_HEARTBEAT_INTERVAL=500 ETCD_ELECTION_TIMEOUT=2500 ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }} @@ -28,8 +29,8 @@ ETCD_INITIAL_CLUSTER={{ etcd_hostname}}={{ etcd_initial_advertise_peer_urls }} ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }} ETCD_INITIAL_CLUSTER_TOKEN=thirdparty-etcd-cluster-1 {% else %} -{% if initial_etcd_cluster is defined and initial_etcd_cluster %} -ETCD_INITIAL_CLUSTER={{ initial_etcd_cluster }} +{% if etcd_initial_cluster is defined and etcd_initial_cluster %} +ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }} {% else %} ETCD_INITIAL_CLUSTER={{ initial_cluster() }} {% endif %} @@ 
-41,24 +42,44 @@ ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }} #ETCD_DISCOVERY_PROXY= {% endif %} ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} +#ETCD_STRICT_RECONFIG_CHECK="false" +#ETCD_AUTO_COMPACTION_RETENTION="0" +#ETCD_ENABLE_V2="true" +ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }} #[proxy] #ETCD_PROXY=off +#ETCD_PROXY_FAILURE_WAIT="5000" +#ETCD_PROXY_REFRESH_INTERVAL="30000" +#ETCD_PROXY_DIAL_TIMEOUT="1000" +#ETCD_PROXY_WRITE_TIMEOUT="5000" +#ETCD_PROXY_READ_TIMEOUT="0" #[security] {% if etcd_url_scheme == 'https' -%} -ETCD_CA_FILE={{ etcd_ca_file }} +ETCD_TRUSTED_CA_FILE={{ etcd_ca_file }} +ETCD_CLIENT_CERT_AUTH="true" ETCD_CERT_FILE={{ etcd_cert_file }} ETCD_KEY_FILE={{ etcd_key_file }} {% endif -%} +#ETCD_AUTO_TLS="false" {% if etcd_peer_url_scheme == 'https' -%} -ETCD_PEER_CA_FILE={{ etcd_peer_ca_file }} +ETCD_PEER_TRUSTED_CA_FILE={{ etcd_peer_ca_file }} +ETCD_PEER_CLIENT_CERT_AUTH="true" ETCD_PEER_CERT_FILE={{ etcd_peer_cert_file }} ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }} {% endif -%} +#ETCD_PEER_AUTO_TLS="false" #[logging] ETCD_DEBUG="{{ etcd_debug | default(false) | bool | string }}" {% if etcd_log_package_levels is defined %} ETCD_LOG_PACKAGE_LEVELS="{{ etcd_log_package_levels }}" {% endif %} + +#[profiling] +#ETCD_ENABLE_PPROF="false" +#ETCD_METRICS="basic" +# +#[auth] +#ETCD_AUTH_TOKEN="simple" diff --git a/roles/etcd_common/templates/etcdctl.sh.j2 b/roles/etcd/templates/etcdctl.sh.j2 index ac7d9c72f..ac7d9c72f 100644 --- a/roles/etcd_common/templates/etcdctl.sh.j2 +++ b/roles/etcd/templates/etcdctl.sh.j2 diff --git a/roles/etcd_ca/templates/openssl_append.j2 b/roles/etcd/templates/openssl_append.j2 index f28316fc2..f28316fc2 100644 --- a/roles/etcd_ca/templates/openssl_append.j2 +++ b/roles/etcd/templates/openssl_append.j2 diff --git a/roles/etcd_ca/README.md b/roles/etcd_ca/README.md deleted file mode 100644 index 60a880e30..000000000 --- a/roles/etcd_ca/README.md +++ /dev/null @@ -1,34 +0,0 @@ -etcd_ca -======================== - -TODO - -Requirements ------------- - -TODO - -Role Variables --------------- - -TODO - -Dependencies ------------- - -TODO - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Scott Dodson (sdodson@redhat.com) diff --git a/roles/etcd_client_certificates/README.md b/roles/etcd_client_certificates/README.md deleted file mode 100644 index 269d5296d..000000000 --- a/roles/etcd_client_certificates/README.md +++ /dev/null @@ -1,34 +0,0 @@ -OpenShift Etcd Certificates -=========================== - -TODO - -Requirements ------------- - -TODO - -Role Variables --------------- - -TODO - -Dependencies ------------- - -TODO - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Scott Dodson (sdodson@redhat.com) diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md deleted file mode 100644 index d1c3a6602..000000000 --- a/roles/etcd_common/README.md +++ /dev/null @@ -1,53 +0,0 @@ -etcd_common -======================== - -Common resources for dependent etcd roles. E.g. default variables for: -* config directories -* certificates -* ports -* other settings - -Or `delegated_serial_command` ansible module for executing a command on a remote node. E.g. 
- -```yaml -- delegated_serial_command: - command: /usr/bin/make_database.sh arg1 arg2 - creates: /path/to/database -``` - -Or etcdctl.yml playbook for installation of `etcdctl` aliases on a node (see example). - -Dependencies ------------- - -openshift-repos - -Example Playbook ----------------- - -**Drop etcdctl aliases** - -```yaml -- include_role: - name: etcd_common - tasks_from: etcdctl -``` - -**Get access to common variables** - -```yaml -# meta.yml of etcd -... -dependencies: -- { role: etcd_common } -``` - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml deleted file mode 100644 index 89993f7ea..000000000 --- a/roles/etcd_common/defaults/main.yml +++ /dev/null @@ -1,75 +0,0 @@ ---- -# Default action when calling this role -r_etcd_common_action: noop -r_etcd_common_backup_tag: '' -r_etcd_common_backup_sufix_name: '' - -# runc, docker, host -r_etcd_common_etcd_runtime: "docker" -r_etcd_common_embedded_etcd: false - -# etcd run on a host => use etcdctl command directly -# etcd run as a docker container => use docker exec -# etcd run as a runc container => use runc exec -r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}" - -# etcd server vars -etcd_conf_dir: '/etc/etcd' -r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd -etcd_system_container_conf_dir: /var/lib/etcd/etc -etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf" -etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt" -etcd_cert_file: "{{ etcd_conf_dir }}/server.crt" -etcd_key_file: "{{ etcd_conf_dir }}/server.key" -etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt" -etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt" -etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key" - -# etcd ca vars -etcd_ca_dir: "{{ etcd_conf_dir}}/ca" -etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs" -etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt" -etcd_ca_key: "{{ etcd_ca_dir }}/ca.key" -etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf" -etcd_ca_name: etcd_ca -etcd_req_ext: etcd_v3_req -etcd_ca_exts_peer: etcd_v3_ca_peer -etcd_ca_exts_server: etcd_v3_ca_server -etcd_ca_exts_self: etcd_v3_ca_self -etcd_ca_exts_client: etcd_v3_ca_client -etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl" -etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs" -etcd_ca_db: "{{ etcd_ca_dir }}/index.txt" -etcd_ca_serial: "{{ etcd_ca_dir }}/serial" -etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber" -etcd_ca_default_days: 1825 - -r_etcd_common_master_peer_cert_file: /etc/origin/master/master.etcd-client.crt -r_etcd_common_master_peer_key_file: /etc/origin/master/master.etcd-client.key -r_etcd_common_master_peer_ca_file: /etc/origin/master/master.etcd-ca.crt - -# etcd server & certificate vars -etcd_hostname: "{{ inventory_hostname }}" -etcd_ip: "{{ ansible_default_ipv4.address }}" -etcd_is_atomic: False -etcd_is_containerized: False -etcd_is_thirdparty: False - -# etcd dir vars -etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if openshift.common.etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}" - -# etcd ports and protocols -etcd_client_port: 2379 -etcd_peer_port: 2380 -etcd_url_scheme: http -etcd_peer_url_scheme: http - -etcd_initial_cluster_state: new 
-etcd_initial_cluster_token: etcd-cluster-1 - -etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}" -etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}" -etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" -etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" - -etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d" diff --git a/roles/etcd_common/meta/main.yml b/roles/etcd_common/meta/main.yml deleted file mode 100644 index dfb1c7a2c..000000000 --- a/roles/etcd_common/meta/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 1.9 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: [] diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml deleted file mode 100644 index f5bcd03ee..000000000 --- a/roles/etcd_common/tasks/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Fail if invalid r_etcd_common_action provided - fail: - msg: "etcd_common role can only be called with 'noop' or 'backup' or 'drop_etcdctl'" - when: r_etcd_common_action not in ['noop', 'backup', 'drop_etcdctl'] - -- name: Include main action task file - include: "{{ r_etcd_common_action }}.yml" - when: '"noop" not in r_etcd_common_action' diff --git a/roles/etcd_common/vars/main.yml b/roles/etcd_common/vars/main.yml deleted file mode 100644 index 00d697776..000000000 --- a/roles/etcd_common/vars/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}" -# Location of the service file is fixed and not meant to be changed -etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service" diff --git a/roles/etcd_migrate/README.md b/roles/etcd_migrate/README.md deleted file mode 100644 index 369e78ff2..000000000 --- a/roles/etcd_migrate/README.md +++ /dev/null @@ -1,53 +0,0 @@ -Role Name -========= - -Offline etcd migration of data from v2 to v3 - -Requirements ------------- - -It is expected all consumers of the etcd data are not accessing the data. -Otherwise the migrated data can be out-of-sync with the v2 and can result in unhealthy etcd cluster. - -The role itself is responsible for: -- checking etcd cluster health and raft status before the migration -- checking of presence of any v3 data (in that case the migration is stopped) -- migration of v2 data to v3 data (including attaching leases of keys prefixed with "/kubernetes.io/events" and "/kubernetes.io/masterleases" string) -- validation of migrated data (all v2 keys and in v3 keys and are set to the identical value) - -The migration itself requires an etcd member to be down in the process. Once the migration is done, the etcd member is started. 
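Since `etcd_migrate` is absorbed into the unified `etcd` role (see the `roles/etcd/tasks/migrate*.yml` entry points added above), the playbook from this deleted README translates to an `include_role` with `tasks_from`; a hedged sketch of the equivalent invocation, reusing the host group and `etcd_peer` value from the old example:

```yaml
- name: Migrate etcd data from v2 to v3
  hosts: oo_etcd_to_config
  gather_facts: no
  tasks:
    - include_role:
        name: etcd
        tasks_from: migrate
      vars:
        etcd_peer: "{{ ansible_default_ipv4.address }}"
```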
- -Role Variables --------------- - -TBD - -Dependencies ------------- - -- etcd_common -- lib_utils - -Example Playbook ----------------- - -```yaml -- name: Migrate etcd data from v2 to v3 - hosts: oo_etcd_to_config - gather_facts: no - tasks: - - include_role: - name: openshift_etcd_migrate - vars: - etcd_peer: "{{ ansible_default_ipv4.address }}" -``` - -License -------- - -Apache License, Version 2.0 - -Author Information ------------------- - -Jan Chaloupka (jchaloup@redhat.com) diff --git a/roles/etcd_migrate/defaults/main.yml b/roles/etcd_migrate/defaults/main.yml deleted file mode 100644 index 05cf41fbb..000000000 --- a/roles/etcd_migrate/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# Default action when calling this role, choices: check, migrate, configure -r_etcd_migrate_action: migrate diff --git a/roles/etcd_migrate/tasks/main.yml b/roles/etcd_migrate/tasks/main.yml deleted file mode 100644 index e82f6a6b4..000000000 --- a/roles/etcd_migrate/tasks/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Fail if invalid r_etcd_migrate_action provided - fail: - msg: "etcd_migrate role can only be called with 'check', 'migrate', 'configure', 'add_ttls', or 'clean_data'" - when: r_etcd_migrate_action not in ['check', 'migrate', 'configure', 'add_ttls', 'clean_data'] - -- name: Include main action task file - include: "{{ r_etcd_migrate_action }}.yml" - -# 2. migrate v2 datadir into v3: -# ETCDCTL_API=3 ./etcdctl migrate --data-dir=${data_dir} --no-ttl -# backup the etcd datadir first -# Provide a way for an operator to specify transformer - -# 3. re-configure OpenShift master at /etc/origin/master/master-config.yml -# set storage-backend to “etcd3” -# 4. we could leave the master restart to current logic (there is already the code ready (single vs. HA master)) - -# Run -# etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt --endpoint https://172.16.186.45:2379 cluster-health -# to check the cluster health (from the etcdctl.sh aliases file) - -# Another assumption: -# - in order to migrate all etcd v2 data into v3, we need to shut down the cluster (let's verify that on Wednesday meeting) -# - diff --git a/roles/etcd_server_certificates/README.md b/roles/etcd_server_certificates/README.md deleted file mode 100644 index 269d5296d..000000000 --- a/roles/etcd_server_certificates/README.md +++ /dev/null @@ -1,34 +0,0 @@ -OpenShift Etcd Certificates -=========================== - -TODO - -Requirements ------------- - -TODO - -Role Variables --------------- - -TODO - -Dependencies ------------- - -TODO - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Scott Dodson (sdodson@redhat.com) diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml deleted file mode 100644 index 4b6013a49..000000000 --- a/roles/etcd_server_certificates/meta/main.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: Etcd Server Certificates - company: Red Hat, Inc. 
- license: Apache License, Version 2.0 - min_ansible_version: 2.1 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: -- role: etcd_ca - when: (etcd_ca_setup | default(True) | bool) diff --git a/roles/etcd_upgrade/defaults/main.yml b/roles/etcd_upgrade/defaults/main.yml deleted file mode 100644 index 61bbba225..000000000 --- a/roles/etcd_upgrade/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -r_etcd_upgrade_action: upgrade -r_etcd_upgrade_mechanism: rpm diff --git a/roles/etcd_upgrade/meta/main.yml b/roles/etcd_upgrade/meta/main.yml deleted file mode 100644 index afdb0267f..000000000 --- a/roles/etcd_upgrade/meta/main.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -galaxy_info: - author: Jan Chaloupka - description: - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 1.9 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: -- role: etcd_common - r_etcd_common_embedded_etcd: "{{ r_etcd_upgrade_embedded_etcd }}" diff --git a/roles/etcd_upgrade/tasks/main.yml b/roles/etcd_upgrade/tasks/main.yml deleted file mode 100644 index 129c69d6b..000000000 --- a/roles/etcd_upgrade/tasks/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# INPUT r_etcd_upgrade_action -- name: Fail if invalid etcd_upgrade_action provided - fail: - msg: "etcd_upgrade role can only be called with 'upgrade'" - when: - - r_etcd_upgrade_action not in ['upgrade'] - -- name: Detecting Atomic Host Operating System - stat: - path: /run/ostree-booted - register: l_ostree_booted - -- include: "{{ r_etcd_upgrade_action }}.yml" diff --git a/roles/etcd_upgrade/tasks/upgrade.yml b/roles/etcd_upgrade/tasks/upgrade.yml deleted file mode 100644 index 420c9638e..000000000 --- a/roles/etcd_upgrade/tasks/upgrade.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# INPUT r_etcd_upgrade_version -# INPUT r_etcd_upgrade_mechanism -- name: Failt if r_etcd_upgrade_mechanism is not set during upgrade - fail: - msg: "r_etcd_upgrade_mechanism can be only set to 'rpm' or 'image'" - when: - - r_etcd_upgrade_mechanism not in ['rpm', 'image'] - -- name: "Upgrade {{ r_etcd_upgrade_mechanism }} based etcd" - include: upgrade_{{ r_etcd_upgrade_mechanism }}.yml diff --git a/roles/etcd_upgrade/vars/main.yml b/roles/etcd_upgrade/vars/main.yml deleted file mode 100644 index 5ed919d42..000000000 --- a/roles/etcd_upgrade/vars/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# EXPECTS etcd_peer -etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}" diff --git a/roles/flannel/README.md b/roles/flannel/README.md index 0c7347603..b9e15e6e0 100644 --- a/roles/flannel/README.md +++ b/roles/flannel/README.md @@ -27,8 +27,6 @@ Role Variables Dependencies ------------ -openshift_facts - Example Playbook ---------------- diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml index 02f5a5f64..889069485 100644 --- a/roles/flannel/handlers/main.yml +++ b/roles/flannel/handlers/main.yml @@ -12,3 +12,12 @@ until: not l_docker_restart_docker_in_flannel_result | failed retries: 3 delay: 30 + +- name: restart node + systemd: + name: "{{ openshift.common.service_type }}-node" + state: restarted + register: l_restart_node_result + until: not l_restart_node_result | failed + retries: 3 + delay: 30 diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml index 35f825586..51128dba6 100644 --- a/roles/flannel/meta/main.yml +++ 
b/roles/flannel/meta/main.yml @@ -12,7 +12,4 @@ galaxy_info: categories: - cloud - system -dependencies: -- role: openshift_facts -- role: openshift_etcd_client_certificates - etcd_cert_prefix: flannel.etcd- +dependencies: [] diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml index ddf8230ec..1d0f5df6a 100644 --- a/roles/flannel_register/defaults/main.yaml +++ b/roles/flannel_register/defaults/main.yaml @@ -1,7 +1,6 @@ --- -flannel_network: "{{ openshift.common.portal_net | default('172.30.0.0/16', true) }}" -flannel_min_network: 172.30.5.0 -flannel_subnet_len: 24 +flannel_network: "{{ openshift.master.sdn_cluster_network_cidr }}" +flannel_subnet_len: "{{ 32 - (openshift.master.sdn_host_subnet_length | int) }}" flannel_etcd_key: /openshift.com/network etcd_hosts: "{{ etcd_urls }}" etcd_conf_dir: "{{ openshift.common.config_base }}/master" diff --git a/roles/flannel_register/templates/flannel-config.json b/roles/flannel_register/templates/flannel-config.json index 89ce4c30b..bba3729fa 100644 --- a/roles/flannel_register/templates/flannel-config.json +++ b/roles/flannel_register/templates/flannel-config.json @@ -1,7 +1,6 @@ { "Network": "{{ flannel_network }}", "SubnetLen": {{ flannel_subnet_len }}, - "SubnetMin": "{{ flannel_min_network }}", "Backend": { "Type": "host-gw" } diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md new file mode 100644 index 000000000..83e00e504 --- /dev/null +++ b/roles/installer_checkpoint/README.md @@ -0,0 +1,176 @@ +OpenShift-Ansible Installer Checkpoint +====================================== + +A complete OpenShift cluster installation is comprised of many different +components which can take 30 minutes to several hours to complete. If the +installation should fail, it could be confusing to understand at which component +the failure occurred. Additionally, it may be desired to re-run only the +component which failed instead of starting over from the beginning. Components +which came after the failed component would also need to be run individually. + +Design +------ + +The Installer Checkpoint implements an Ansible callback plugin to allow +displaying and logging of the installer status at the end of a playbook run. + +To ensure the callback plugin is loaded, regardless of ansible.cfg file +configuration, the plugin has been placed inside the installer_checkpoint role +which must be called early in playbook execution. The `std_include.yml` playbook +is run first for all entry point playbooks, therefore, the initialization of the +checkpoint plugin has been placed at the beginning of that file. + +Playbooks use the [set_stats][set_stats] Ansible module to set a custom stats +variable indicating the status of the phase being executed. + +The installer_checkpoint.py callback plugin extends the Ansible +`v2_playbook_on_stats` method, which is called at the end of a playbook run, to +display the status of each phase which was run. The INSTALLER STATUS report is +displayed immediately following the PLAY RECAP. + +Phases of cluster installation are mapped to the steps in the +[common/openshift-cluster/config.yml][openshift_cluster_config] playbook. + +To correctly display the order of the installer phases, the `installer_phases` +variable defines the phase or component order. 
+ +```python + # Set the order of the installer phases + installer_phases = [ + 'installer_phase_initialize', + 'installer_phase_etcd', + 'installer_phase_nfs', + 'installer_phase_loadbalancer', + 'installer_phase_master', + 'installer_phase_master_additional', + 'installer_phase_node', + 'installer_phase_glusterfs', + 'installer_phase_hosted', + 'installer_phase_metrics', + 'installer_phase_logging', + 'installer_phase_servicecatalog', + ] +``` + +Additional attributes, such as display title and component playbook, of each +phase are stored in the `phase_attributes` variable. + +```python + # Define the attributes of the installer phases + phase_attributes = { + 'installer_phase_initialize': { + 'title': 'Initialization', + 'playbook': '' + }, + 'installer_phase_etcd': { + 'title': 'etcd Install', + 'playbook': 'playbooks/byo/openshift-etcd/config.yml' + }, + 'installer_phase_nfs': { + 'title': 'NFS Install', + 'playbook': 'playbooks/byo/openshift-nfs/config.yml' + }, + #... + } +``` + +Usage +----- + +In order to indicate the beginning of a component installation, a play must be +added to the beginning of the main playbook for the component to set the phase +status to "In Progress". Additionally, a play must be added after the last play +for that component to set the phase status to "Complete". + +The following example shows the first play of the 'installer phase' loading the +`installer_checkpoint` role, as well as the `set_stats` task for setting +`installer_phase_initialize` to "In Progress". Various plays are run for the +phase/component and then a final play is run for setting `installer_phase_initialize` to +"Complete". + +```yaml +# common/openshift-cluster/std_include.yml +--- +- name: Initialization Checkpoint Start + hosts: oo_all_hosts + gather_facts: false + roles: + - installer_checkpoint + tasks: + - name: Set install initialization 'In Progress' + set_stats: + data: + installer_phase_initialize: "In Progress" + aggregate: false + +#... +# Various plays here +#... + +- name: Initialization Checkpoint End + hosts: localhost + connection: local + gather_facts: false + tasks: + - name: Set install initialization 'Complete' + set_stats: + data: + installer_phase_initialize: "Complete" + aggregate: false +``` + +Each phase or component of the installer will follow a similar pattern, with the +exception that the `installer_checkpoint` role does not need to be called since +it was already loaded by the play in `std_include.yml`. It is important to +place the 'In Progress' and 'Complete' plays as the first and last plays of the +phase or component. 
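The same wrapper generalizes to any later phase; a sketch for the node component, using the `installer_phase_node` key from the plugin's phase list (the playbook path in the comment is illustrative):

```yaml
# e.g. the node component playbook (path illustrative)
- name: Node Install Checkpoint Start
  hosts: oo_all_hosts
  gather_facts: false
  tasks:
    - name: Set Node install 'In Progress'
      set_stats:
        data:
          installer_phase_node: "In Progress"
        aggregate: false

# ... node configuration plays here ...

- name: Node Install Checkpoint End
  hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    - name: Set Node install 'Complete'
      set_stats:
        data:
          installer_phase_node: "Complete"
        aggregate: false
```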
+ +Examples +-------- + +Example display of a successful playbook run: + +``` +PLAY RECAP ********************************************************************* +master01.example.com : ok=158 changed=16 unreachable=0 failed=0 +node01.example.com : ok=469 changed=74 unreachable=0 failed=0 +node02.example.com : ok=157 changed=17 unreachable=0 failed=0 +localhost : ok=24 changed=0 unreachable=0 failed=0 + + +INSTALLER STATUS *************************************************************** +Initialization : Complete +etcd Install : Complete +NFS Install : Not Started +Load balancer Install : Not Started +Master Install : Complete +Master Additional Install : Complete +Node Install : Complete +GlusterFS Install : Not Started +Hosted Install : Complete +Metrics Install : Not Started +Logging Install : Not Started +Service Catalog Install : Not Started +``` + +Example display if a failure occurs during execution: + +``` +INSTALLER STATUS *************************************************************** +Initialization : Complete +etcd Install : Complete +NFS Install : Not Started +Load balancer Install : Not Started +Master Install : In Progress + This phase can be restarted by running: playbooks/byo/openshift-master/config.yml +Master Additional Install : Not Started +Node Install : Not Started +GlusterFS Install : Not Started +Hosted Install : Not Started +Metrics Install : Not Started +Logging Install : Not Started +Service Catalog Install : Not Started +``` + +[set_stats]: http://docs.ansible.com/ansible/latest/set_stats_module.html +[openshift_cluster_config]: https://github.com/openshift/openshift-ansible/blob/master/playbooks/common/openshift-cluster/config.yml diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py new file mode 100644 index 000000000..b17358882 --- /dev/null +++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -0,0 +1,194 @@ +"""Ansible callback plugin to print a summary completion status of installation +phases. 
+""" +from ansible.plugins.callback import CallbackBase +from ansible import constants as C + +DOCUMENTATION = ''' + +''' + +EXAMPLES = ''' +--------------------------------------------- +Example display of a successful playbook run: + +PLAY RECAP ********************************************************************* +master01.example.com : ok=158 changed=16 unreachable=0 failed=0 +node01.example.com : ok=469 changed=74 unreachable=0 failed=0 +node02.example.com : ok=157 changed=17 unreachable=0 failed=0 +localhost : ok=24 changed=0 unreachable=0 failed=0 + + +INSTALLER STATUS *************************************************************** +Initialization : Complete +etcd Install : Complete +NFS Install : Not Started +Load balancer Install : Not Started +Master Install : Complete +Master Additional Install : Complete +Node Install : Complete +GlusterFS Install : Not Started +Hosted Install : Complete +Metrics Install : Not Started +Logging Install : Not Started +Prometheus Install : Not Started +Service Catalog Install : Not Started + +----------------------------------------------------- +Example display if a failure occurs during execution: + +INSTALLER STATUS *************************************************************** +Initialization : Complete +etcd Install : Complete +NFS Install : Not Started +Load balancer Install : Not Started +Master Install : In Progress + This phase can be restarted by running: playbooks/byo/openshift-master/config.yml +Master Additional Install : Not Started +Node Install : Not Started +GlusterFS Install : Not Started +Hosted Install : Not Started +Metrics Install : Not Started +Logging Install : Not Started +Prometheus Install : Not Started +Service Catalog Install : Not Started + +''' + + +class CallbackModule(CallbackBase): + """This callback summarizes installation phase status.""" + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'installer_checkpoint' + CALLBACK_NEEDS_WHITELIST = False + + def __init__(self): + super(CallbackModule, self).__init__() + + def v2_playbook_on_stats(self, stats): + + # Set the order of the installer phases + installer_phases = [ + 'installer_phase_initialize', + 'installer_phase_health', + 'installer_phase_etcd', + 'installer_phase_nfs', + 'installer_phase_loadbalancer', + 'installer_phase_master', + 'installer_phase_master_additional', + 'installer_phase_node', + 'installer_phase_glusterfs', + 'installer_phase_hosted', + 'installer_phase_metrics', + 'installer_phase_logging', + 'installer_phase_prometheus', + 'installer_phase_servicecatalog', + 'installer_phase_management', + ] + + # Define the attributes of the installer phases + phase_attributes = { + 'installer_phase_initialize': { + 'title': 'Initialization', + 'playbook': '' + }, + 'installer_phase_health': { + 'title': 'Health Check', + 'playbook': 'playbooks/byo/openshift-checks/pre-install.yml' + }, + 'installer_phase_etcd': { + 'title': 'etcd Install', + 'playbook': 'playbooks/byo/openshift-etcd/config.yml' + }, + 'installer_phase_nfs': { + 'title': 'NFS Install', + 'playbook': 'playbooks/byo/openshift-nfs/config.yml' + }, + 'installer_phase_loadbalancer': { + 'title': 'Load balancer Install', + 'playbook': 'playbooks/byo/openshift-loadbalancer/config.yml' + }, + 'installer_phase_master': { + 'title': 'Master Install', + 'playbook': 'playbooks/byo/openshift-master/config.yml' + }, + 'installer_phase_master_additional': { + 'title': 'Master Additional Install', + 'playbook': 'playbooks/byo/openshift-master/additional_config.yml' + }, + 
'installer_phase_node': {
+                'title': 'Node Install',
+                'playbook': 'playbooks/byo/openshift-node/config.yml'
+            },
+            'installer_phase_glusterfs': {
+                'title': 'GlusterFS Install',
+                'playbook': 'playbooks/byo/openshift-glusterfs/config.yml'
+            },
+            'installer_phase_hosted': {
+                'title': 'Hosted Install',
+                'playbook': 'playbooks/byo/openshift-cluster/openshift-hosted.yml'
+            },
+            'installer_phase_metrics': {
+                'title': 'Metrics Install',
+                'playbook': 'playbooks/byo/openshift-cluster/openshift-metrics.yml'
+            },
+            'installer_phase_logging': {
+                'title': 'Logging Install',
+                'playbook': 'playbooks/byo/openshift-cluster/openshift-logging.yml'
+            },
+            'installer_phase_prometheus': {
+                'title': 'Prometheus Install',
+                'playbook': 'playbooks/byo/openshift-cluster/openshift-prometheus.yml'
+            },
+            'installer_phase_servicecatalog': {
+                'title': 'Service Catalog Install',
+                'playbook': 'playbooks/byo/openshift-cluster/service-catalog.yml'
+            },
+            'installer_phase_management': {
+                'title': 'Management Install',
+                'playbook': 'playbooks/byo/openshift-management/config.yml'
+            },
+        }
+
+        # Find the longest phase title
+        max_column = 0
+        for phase in phase_attributes:
+            max_column = max(max_column, len(phase_attributes[phase]['title']))
+
+        if '_run' in stats.custom:
+            self._display.banner('INSTALLER STATUS')
+            for phase in installer_phases:
+                phase_title = phase_attributes[phase]['title']
+                padding = max_column - len(phase_title) + 2
+                if phase in stats.custom['_run']:
+                    phase_status = stats.custom['_run'][phase]
+                    self._display.display(
+                        '{}{}: {}'.format(phase_title, ' ' * padding, phase_status),
+                        color=self.phase_color(phase_status))
+                    if phase_status == 'In Progress' and phase != 'installer_phase_initialize':
+                        self._display.display(
+                            '\tThis phase can be restarted by running: {}'.format(
+                                phase_attributes[phase]['playbook']))
+
+        self._display.display("", screen_only=True)
+
+    def phase_color(self, status):
+        """ Return color code for installer phase"""
+        valid_status = [
+            'In Progress',
+            'Complete',
+        ]
+
+        if status not in valid_status:
+            self._display.warning('Invalid phase status defined: {}'.format(status))
+
+        if status == 'Complete':
+            phase_color = C.COLOR_OK
+        elif status == 'In Progress':
+            phase_color = C.COLOR_ERROR
+        else:
+            phase_color = C.COLOR_WARN
+
+        return phase_color
diff --git a/roles/kuryr/README.md b/roles/kuryr/README.md
new file mode 100644
index 000000000..7b618f902
--- /dev/null
+++ b/roles/kuryr/README.md
@@ -0,0 +1,38 @@
+## OpenStack Kuryr
+
+Installs the Kuryr CNI components (kuryr-controller, kuryr-cni) on master and
+worker nodes. Kuryr uses the OpenStack Networking service (Neutron) to provide
+networking for pods, which allows interconnectivity between pods and OpenStack
+VMs.
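+
+For quick orientation, a minimal Ansible inventory sketch enabling Kuryr might
+look like the following. This is an assumption-laden example, not taken from
+this repository: it presumes the usual `[OSEv3:vars]` inventory group, uses
+placeholder OpenStack values, and shows only a subset of the parameters listed
+under "Key Ansible inventory Kuryr master configuration parameters" below:
+
+```
+[OSEv3:vars]
+openshift_use_kuryr=True
+openshift_use_openshift_sdn=False
+openshift_sdn_network_plugin_name='cni'
+kuryr_cni_link_interface=eth0
+# Placeholder values; real deployments use their own Keystone URL and UUIDs
+kuryr_openstack_auth_url=http://keystone.example.com:5000/v3
+kuryr_openstack_username=kuryr
+kuryr_openstack_password=kuryr_pass
+kuryr_openstack_pod_subnet_id=pod_subnet_uuid
+kuryr_openstack_worker_nodes_subnet_id=worker_nodes_subnet_uuid
+```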
+
+## Requirements
+
+* Ansible 2.2+
+* CentOS/RHEL 7.3+
+
+## Current Kuryr restrictions when used with OpenShift
+
+* OpenShift Origin only
+* OpenShift on OpenStack Newton or newer (only with Trunk ports)
+
+## Key Ansible inventory Kuryr master configuration parameters
+
+* ``openshift_use_kuryr=True``
+* ``openshift_use_openshift_sdn=False``
+* ``openshift_sdn_network_plugin_name='cni'``
+* ``kuryr_cni_link_interface=eth0``
+* ``kuryr_openstack_auth_url=keystone_url``
+* ``kuryr_openstack_user_domain_name=Default``
+* ``kuryr_openstack_user_project_name=Default``
+* ``kuryr_openstack_project_id=project_uuid``
+* ``kuryr_openstack_username=kuryr``
+* ``kuryr_openstack_password=kuryr_pass``
+* ``kuryr_openstack_pod_sg_id=pod_security_group_uuid``
+* ``kuryr_openstack_pod_subnet_id=pod_subnet_uuid``
+* ``kuryr_openstack_pod_service_id=service_subnet_uuid``
+* ``kuryr_openstack_pod_project_id=pod_project_uuid``
+* ``kuryr_openstack_worker_nodes_subnet_id=worker_nodes_subnet_uuid``
+
+## Kuryr resources
+
+* [Kuryr documentation](https://docs.openstack.org/kuryr-kubernetes/latest/)
+* [Installing Kuryr containerized](https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html)
diff --git a/roles/kuryr/defaults/main.yaml b/roles/kuryr/defaults/main.yaml
new file mode 100644
index 000000000..af05d80df
--- /dev/null
+++ b/roles/kuryr/defaults/main.yaml
@@ -0,0 +1,72 @@
+---
+# Kuryr conf directory
+kuryr_config_dir: /etc/kuryr
+
+# Kuryr username
+kuryr_openstack_username: kuryr
+
+# Kuryr domain name or id containing user
+kuryr_openstack_user_domain_name: default
+
+# Kuryr domain name or id containing project
+kuryr_openstack_project_domain_name: default
+
+# Kuryr OpenShift namespace
+kuryr_namespace: kube-system
+
+# Whether to run the cni plugin in debug mode
+kuryr_cni_debug: "false"
+
+# The version of cni binaries
+cni_version: v0.5.2
+
+# Path to bin dir (where kuryr execs get installed)
+bin_dir: /usr/bin
+
+# Path to the cni binaries
+cni_bin_dir: /opt/cni/bin
+
+# URL for cni binaries
+cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
+cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tgz"
+cni_bin_checksum: "71f411080245aa14d0cc06f6824e8039607dd9e9"
+
+# Kuryr ClusterRole definition
+kuryr_clusterrole:
+  name: kuryrctl
+  state: present
+  rules:
+    - apiGroups:
+        - ""
+      attributeRestrictions: null
+      verbs:
+        - get
+        - list
+        - watch
+      resources:
+        - daemonsets
+        - deployments
+        - deploymentconfigs
+        - endpoints
+        - ingress
+        - nodes
+        - namespaces
+        - pods
+        - projects
+        - routes
+        - services
+    - apiGroups:
+        - ""
+      attributeRestrictions: null
+      verbs:
+        - update
+        - patch
+      resources:
+        - endpoints
+        - ingress
+        - pods
+        - namespaces
+        - nodes
+        - services
+        - services/status
+        - routes
diff --git a/roles/etcd_migrate/meta/main.yml b/roles/kuryr/meta/main.yml
index f3cabbef6..7fd5adf41 100644
--- a/roles/etcd_migrate/meta/main.yml
+++ b/roles/kuryr/meta/main.yml
@@ -1,10 +1,10 @@
 ---
 galaxy_info:
-  author: Jan Chaloupka
-  description: Etcd migration
-  company: Red Hat, Inc.
+  author: Red Hat
+  description: Kuryr networking
+  company: Red Hat
   license: Apache License, Version 2.0
-  min_ansible_version: 2.1
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:
@@ -13,5 +13,5 @@ galaxy_info:
   - cloud
   - system
 dependencies:
-- { role: etcd_common }
-- { role: lib_utils }
+- { role: lib_openshift }
+- { role: openshift_facts }
diff --git a/roles/kuryr/tasks/master.yaml b/roles/kuryr/tasks/master.yaml
new file mode 100644
index 000000000..55ab16f74
--- /dev/null
+++ b/roles/kuryr/tasks/master.yaml
@@ -0,0 +1,52 @@
+---
+- name: Perform OpenShift ServiceAccount config
+  include: serviceaccount.yaml
+
+- name: Create kuryr manifests tempdir
+  command: mktemp -d
+  register: manifests_tmpdir
+
+- name: Create kuryr ConfigMap manifest
+  become: yes
+  template:
+    src: configmap.yaml.j2
+    dest: "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+
+- name: Create kuryr-controller Deployment manifest
+  become: yes
+  template:
+    src: controller-deployment.yaml.j2
+    dest: "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+
+- name: Create kuryr-cni DaemonSet manifest
+  become: yes
+  template:
+    src: cni-daemonset.yaml.j2
+    dest: "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
+
+- name: Apply ConfigMap manifest
+  oc_obj:
+    state: present
+    kind: ConfigMap
+    name: "kuryr-config"
+    namespace: "{{ kuryr_namespace }}"
+    files:
+    - "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+
+- name: Apply Controller Deployment manifest
+  oc_obj:
+    state: present
+    kind: Deployment
+    name: "kuryr-controller"
+    namespace: "{{ kuryr_namespace }}"
+    files:
+    - "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+
+- name: Apply kuryr-cni DaemonSet manifest
+  oc_obj:
+    state: present
+    kind: DaemonSet
+    name: "kuryr-cni-ds"
+    namespace: "{{ kuryr_namespace }}"
+    files:
+    - "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
new file mode 100644
index 000000000..ffe814713
--- /dev/null
+++ b/roles/kuryr/tasks/node.yaml
@@ -0,0 +1,48 @@
+---
+- name: Create CNI bin directory
+  file:
+    state: directory
+    path: "{{ cni_bin_dir }}"
+    mode: 0755
+    owner: root
+    group: root
+    recurse: yes
+
+- name: Create CNI extraction tempdir
+  command: mktemp -d
+  register: cni_tmpdir
+
+- name: Download CNI
+  get_url:
+    url: "{{ cni_bin_url }}"
+    checksum: "sha1:{{ cni_bin_checksum }}"
+    mode: 0644
+    dest: "{{ cni_tmpdir.stdout }}"
+  register: downloaded_tarball
+
+- name: Extract CNI
+  become: yes
+  unarchive:
+    remote_src: True
+    src: "{{ downloaded_tarball.dest }}"
+    dest: "{{ cni_bin_dir }}"
+  when: downloaded_tarball.changed
+
+- name: Ensure CNI net.d exists
+  file:
+    path: /etc/cni/net.d
+    recurse: yes
+    state: directory
+
+- name: Configure OpenShift node with disabled service proxy
+  lineinfile:
+    dest: "/etc/sysconfig/{{ openshift.common.service_type }}-node"
+    regexp: '^OPTIONS="?(.*?)"?$'
+    backrefs: yes
+    backup: yes
+    line: 'OPTIONS="\1 --disable dns,proxy,plugins"'
+
+- name: force node restart to disable the proxy
+  service:
+    name: "{{ openshift.common.service_type }}-node"
+    state: restarted
diff --git a/roles/kuryr/tasks/serviceaccount.yaml b/roles/kuryr/tasks/serviceaccount.yaml
new file mode 100644
index 000000000..088f13091
--- /dev/null
+++ b/roles/kuryr/tasks/serviceaccount.yaml
@@ -0,0 +1,31 @@
+---
+- name: Create Controller service account
+  oc_serviceaccount:
+    name: kuryr-controller
+    namespace: "{{ kuryr_namespace }}"
+  register: saout
+
+- name: Create a role for Kuryr
+  oc_clusterrole: "{{
kuryr_clusterrole }}" + +- name: Fetch the created Kuryr controller cluster role + oc_clusterrole: + name: kuryrctl + state: list + register: crout + +- name: Grant Kuryr the privileged security context constraints + oc_adm_policy_user: + user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}" + namespace: "{{ kuryr_namespace }}" + resource_kind: scc + resource_name: privileged + state: present + +- name: Assign role to Kuryr service account + oc_adm_policy_user: + user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}" + namespace: "{{ kuryr_namespace }}" + resource_kind: cluster-role + resource_name: "{{ crout.results.results.metadata.name }}" + state: present diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2 new file mode 100644 index 000000000..39348ae90 --- /dev/null +++ b/roles/kuryr/templates/cni-daemonset.yaml.j2 @@ -0,0 +1,53 @@ +# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes + +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: kuryr-cni-ds + namespace: {{ kuryr_namespace }} + labels: + tier: node + app: kuryr +spec: + template: + metadata: + labels: + tier: node + app: kuryr + spec: + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccountName: kuryr-controller + containers: + - name: kuryr-cni + image: kuryr/cni:latest + imagePullPolicy: IfNotPresent + command: [ "cni_ds_init" ] + securityContext: + privileged: true + volumeMounts: + - name: bin + mountPath: /opt/cni/bin + - name: net-conf + mountPath: /etc/cni/net.d + - name: config-volume + mountPath: /tmp/kuryr/kuryr.conf + subPath: kuryr-cni.conf + - name: etc + mountPath: /etc + volumes: + - name: bin + hostPath: + path: {{ cni_bin_dir }} + - name: net-conf + hostPath: + path: /etc/cni/net.d + - name: config-volume + configMap: + name: kuryr-config + - name: etc + hostPath: + path: /etc
\ No newline at end of file diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2 new file mode 100644 index 000000000..e874d6c25 --- /dev/null +++ b/roles/kuryr/templates/configmap.yaml.j2 @@ -0,0 +1,343 @@ +# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes + +apiVersion: v1 +kind: ConfigMap +metadata: + name: kuryr-config + namespace: {{ kuryr_namespace }} +data: + kuryr.conf: |+ + [DEFAULT] + + # + # From kuryr_kubernetes + # + + # Directory for Kuryr vif binding executables. (string value) + #bindir = /usr/libexec/kuryr + + # If set to true, the logging level will be set to DEBUG instead of the default + # INFO level. (boolean value) + # Note: This option can be changed without restarting. + #debug = false + + # DEPRECATED: If set to false, the logging level will be set to WARNING instead + # of the default INFO level. (boolean value) + # This option is deprecated for removal. + # Its value may be silently ignored in the future. + #verbose = true + + # The name of a logging configuration file. This file is appended to any + # existing logging configuration files. For details about logging configuration + # files, see the Python logging module documentation. Note that when logging + # configuration files are used then all logging configuration is set in the + # configuration file and other logging configuration options are ignored (for + # example, logging_context_format_string). (string value) + # Note: This option can be changed without restarting. + # Deprecated group/name - [DEFAULT]/log_config + #log_config_append = <None> + + # Defines the format string for %%(asctime)s in log records. Default: + # %(default)s . This option is ignored if log_config_append is set. (string + # value) + #log_date_format = %Y-%m-%d %H:%M:%S + + # (Optional) Name of log file to send logging output to. If no default is set, + # logging will go to stderr as defined by use_stderr. This option is ignored if + # log_config_append is set. (string value) + # Deprecated group/name - [DEFAULT]/logfile + #log_file = /var/log/kuryr/kuryr-controller.log + + # (Optional) The base directory used for relative log_file paths. This option + # is ignored if log_config_append is set. (string value) + # Deprecated group/name - [DEFAULT]/logdir + #log_dir = <None> + + # Uses logging handler designed to watch file system. When log file is moved or + # removed this handler will open a new log file with specified path + # instantaneously. It makes sense only if log_file option is specified and + # Linux platform is used. This option is ignored if log_config_append is set. + # (boolean value) + #watch_log_file = false + + # Use syslog for logging. Existing syslog format is DEPRECATED and will be + # changed later to honor RFC5424. This option is ignored if log_config_append + # is set. (boolean value) + #use_syslog = false + + # Syslog facility to receive log lines. This option is ignored if + # log_config_append is set. (string value) + #syslog_log_facility = LOG_USER + + # Log output to standard error. This option is ignored if log_config_append is + # set. (boolean value) + #use_stderr = true + + # Format string to use for log messages with context. 
(string value) + #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + + # Format string to use for log messages when context is undefined. (string + # value) + #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + + # Additional data to append to log message when logging level for the message + # is DEBUG. (string value) + #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + + # Prefix each line of exception output with this format. (string value) + #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + + # Defines the format string for %(user_identity)s that is used in + # logging_context_format_string. (string value) + #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s + + # List of package logging levels in logger=LEVEL pairs. This option is ignored + # if log_config_append is set. (list value) + #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO + + # Enables or disables publication of error events. (boolean value) + #publish_errors = false + + # The format for an instance that is passed with the log message. (string + # value) + #instance_format = "[instance: %(uuid)s] " + + # The format for an instance UUID that is passed with the log message. (string + # value) + #instance_uuid_format = "[instance: %(uuid)s] " + + # Enables or disables fatal status of deprecations. 
(boolean value)
+    #fatal_deprecations = false
+
+
+    [binding]
+
+    driver = kuryr.lib.binding.drivers.vlan
+    link_iface = eth0
+
+    [kubernetes]
+
+    #
+    # From kuryr_kubernetes
+    #
+
+    # The root URL of the Kubernetes API (string value)
+    api_root = {{ openshift.master.api_url }}
+
+    # Absolute path to client cert to connect to HTTPS K8S_API (string value)
+    # ssl_client_crt_file = /etc/kuryr/controller.crt
+
+    # Absolute path client key file to connect to HTTPS K8S_API (string value)
+    # ssl_client_key_file = /etc/kuryr/controller.key
+
+    # Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
+    ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+
+    # The token to talk to the k8s API
+    token_file = /var/run/secrets/kubernetes.io/serviceaccount/token
+
+    # HTTPS K8S_API server identity verification (boolean value)
+    # TODO (apuimedo): Make configurable
+    ssl_verify_server_crt = True
+
+    # The driver to determine OpenStack project for pod ports (string value)
+    pod_project_driver = default
+
+    # The driver to determine OpenStack project for services (string value)
+    service_project_driver = default
+
+    # The driver to determine Neutron subnets for pod ports (string value)
+    pod_subnets_driver = default
+
+    # The driver to determine Neutron subnets for services (string value)
+    service_subnets_driver = default
+
+    # The driver to determine Neutron security groups for pods (string value)
+    pod_security_groups_driver = default
+
+    # The driver to determine Neutron security groups for services (string value)
+    service_security_groups_driver = default
+
+    # The driver that provides VIFs for Kubernetes Pods. (string value)
+    pod_vif_driver = nested-vlan
+
+
+    [neutron]
+    # Configuration options for OpenStack Neutron
+
+    #
+    # From kuryr_kubernetes
+    #
+
+    # Authentication URL (string value)
+    auth_url = {{ kuryr_openstack_auth_url }}
+
+    # Authentication type to load (string value)
+    # Deprecated group/name - [neutron]/auth_plugin
+    auth_type = password
+
+    # User's domain name (string value)
+    user_domain_name = {{ kuryr_openstack_user_domain_name }}
+
+    # User's password (string value)
+    password = {{ kuryr_openstack_password }}
+
+    # Domain name containing project (string value)
+    project_domain_name = {{ kuryr_openstack_project_domain_name }}
+
+    # Project ID to scope to (string value)
+    # Deprecated group/name - [neutron]/tenant-id
+    project_id = {{ kuryr_openstack_project_id }}
+
+    # Token (string value)
+    #token = <None>
+
+    # Trust ID (string value)
+    #trust_id = <None>
+
+    # User's domain id (string value)
+    #user_domain_id = <None>
+
+    # User id (string value)
+    #user_id = <None>
+
+    # Username (string value)
+    # Deprecated group/name - [neutron]/user-name
+    username = {{ kuryr_openstack_username }}
+
+    # Whether a plugging operation is failed if the port to plug does not become
+    # active (boolean value)
+    #vif_plugging_is_fatal = false
+
+    # Seconds to wait for port to become active (integer value)
+    #vif_plugging_timeout = 0
+
+    [neutron_defaults]
+
+    pod_security_groups = {{ kuryr_openstack_pod_sg_id }}
+    pod_subnet = {{ kuryr_openstack_pod_subnet_id }}
+    service_subnet = {{ kuryr_openstack_service_subnet_id }}
+    project = {{ kuryr_openstack_pod_project_id }}
+    # TODO (apuimedo): Remove the duplicated line just after this one once the
+    # RDO packaging contains the upstream patch
+    worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+    [pod_vif_nested]
+    worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
kuryr-cni.conf: |+ + [DEFAULT] + + # + # From kuryr_kubernetes + # + # If set to true, the logging level will be set to DEBUG instead of the default + # INFO level. (boolean value) + # Note: This option can be changed without restarting. + #debug = false + + # The name of a logging configuration file. This file is appended to any + # existing logging configuration files. For details about logging configuration + # files, see the Python logging module documentation. Note that when logging + # configuration files are used then all logging configuration is set in the + # configuration file and other logging configuration options are ignored (for + # example, logging_context_format_string). (string value) + # Note: This option can be changed without restarting. + # Deprecated group/name - [DEFAULT]/log_config + #log_config_append = <None> + + # Defines the format string for %%(asctime)s in log records. Default: + # %(default)s . This option is ignored if log_config_append is set. (string + # value) + #log_date_format = %Y-%m-%d %H:%M:%S + + # (Optional) Name of log file to send logging output to. If no default is set, + # logging will go to stderr as defined by use_stderr. This option is ignored if + # log_config_append is set. (string value) + # Deprecated group/name - [DEFAULT]/logfile + #log_file = /var/log/kuryr/cni.log + + # (Optional) The base directory used for relative log_file paths. This option + # is ignored if log_config_append is set. (string value) + # Deprecated group/name - [DEFAULT]/logdir + #log_dir = <None> + + # Uses logging handler designed to watch file system. When log file is moved or + # removed this handler will open a new log file with specified path + # instantaneously. It makes sense only if log_file option is specified and + # Linux platform is used. This option is ignored if log_config_append is set. + # (boolean value) + #watch_log_file = false + + # Use syslog for logging. Existing syslog format is DEPRECATED and will be + # changed later to honor RFC5424. This option is ignored if log_config_append + # is set. (boolean value) + #use_syslog = false + + # Syslog facility to receive log lines. This option is ignored if + # log_config_append is set. (string value) + #syslog_log_facility = LOG_USER + + # Log output to standard error. This option is ignored if log_config_append is + # set. (boolean value) + use_stderr = true + + # Format string to use for log messages with context. (string value) + #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + + # Format string to use for log messages when context is undefined. (string + # value) + #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + + # Additional data to append to log message when logging level for the message + # is DEBUG. (string value) + #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + + # Prefix each line of exception output with this format. (string value) + #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + + # Defines the format string for %(user_identity)s that is used in + # logging_context_format_string. (string value) + #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s + + # List of package logging levels in logger=LEVEL pairs. This option is ignored + # if log_config_append is set. 
(list value) + #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO + + # Enables or disables publication of error events. (boolean value) + #publish_errors = false + + # The format for an instance that is passed with the log message. (string + # value) + #instance_format = "[instance: %(uuid)s] " + + # The format for an instance UUID that is passed with the log message. (string + # value) + #instance_uuid_format = "[instance: %(uuid)s] " + + # Enables or disables fatal status of deprecations. (boolean value) + #fatal_deprecations = false + + + [binding] + + driver = kuryr.lib.binding.drivers.vlan + link_iface = {{ kuryr_cni_link_interface }} + + [kubernetes] + + # + # From kuryr_kubernetes + # + + # The root URL of the Kubernetes API (string value) + api_root = {{ openshift.master.api_url }} + + # The token to talk to the k8s API + token_file = /etc/kuryr/token + + # Absolute path to ca cert file to connect to HTTPS K8S_API (string value) + ssl_ca_crt_file = /etc/kuryr/ca.crt + + # HTTPS K8S_API server identity verification (boolean value) + # TODO (apuimedo): Make configurable + ssl_verify_server_crt = True diff --git a/roles/kuryr/templates/controller-deployment.yaml.j2 b/roles/kuryr/templates/controller-deployment.yaml.j2 new file mode 100644 index 000000000..d970270b5 --- /dev/null +++ b/roles/kuryr/templates/controller-deployment.yaml.j2 @@ -0,0 +1,40 @@ +# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes + +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + labels: + name: kuryr-controller + name: kuryr-controller + namespace: {{ kuryr_namespace }} +spec: + replicas: 1 + template: + metadata: + labels: + name: kuryr-controller + name: kuryr-controller + spec: + serviceAccountName: kuryr-controller + automountServiceAccountToken: true + hostNetwork: true + containers: + - image: kuryr/controller:latest + imagePullPolicy: IfNotPresent + name: controller + terminationMessagePath: "/dev/termination-log" + # FIXME(dulek): This shouldn't be required, but without it selinux is + # complaining about access to kuryr.conf. 
+ securityContext: + privileged: true + runAsUser: 0 + volumeMounts: + - name: config-volume + mountPath: "/etc/kuryr/kuryr.conf" + subPath: kuryr.conf + volumes: + - name: config-volume + configMap: + name: kuryr-config + defaultMode: 0666 + restartPolicy: Always diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 45d7444a4..05b2763d5 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -745,7 +745,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1421,7 +1421,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py index 231857cca..324f52689 100644 --- a/roles/lib_openshift/library/oc_adm_csr.py +++ b/roles/lib_openshift/library/oc_adm_csr.py @@ -723,7 +723,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1399,7 +1399,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -1478,11 +1478,23 @@ class OCcsr(OpenShiftCLI): return False + def get_csr_request(self, request): + '''base64 decode the request object and call openssl to determine the + subject and specifically the CN: from the request + + Output: + (0, '... 
+ Subject: O=system:nodes, CN=system:node:ip-172-31-54-54.ec2.internal + ...') + ''' + import base64 + return self._run(['openssl', 'req', '-noout', '-text'], base64.b64decode(request))[1] + def match_node(self, csr): '''match an inc csr to a node in self.nodes''' for node in self.nodes: - # we have a match - if node['name'] in csr['metadata']['name']: + # we need to match based upon the csr's request certificate's CN + if node['name'] in self.get_csr_request(csr['spec']['request']): node['csrs'][csr['metadata']['name']] = csr # check that the username is the node and type is 'Approved' diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py index 44f3f57d8..152f270ab 100644 --- a/roles/lib_openshift/library/oc_adm_manage_node.py +++ b/roles/lib_openshift/library/oc_adm_manage_node.py @@ -731,7 +731,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1407,7 +1407,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index 687cff579..3082f5890 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -717,7 +717,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1393,7 +1393,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index ddf5d90b7..1ceaf5d0d 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -717,7 +717,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1393,7 +1393,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index c00eee381..0771aa5a5 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py 
@@ -835,7 +835,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1511,7 +1511,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -1886,13 +1886,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -1909,6 +1911,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 0c925ab0b..146f71f68 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -860,7 +860,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1536,7 +1536,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -2230,13 +2230,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -2253,6 +2255,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): diff --git a/roles/lib_openshift/library/oc_atomic_container.py b/roles/lib_openshift/library/oc_atomic_container.py index 955c6313e..79bd08f4e 100644 --- a/roles/lib_openshift/library/oc_atomic_container.py +++ b/roles/lib_openshift/library/oc_atomic_container.py @@ -83,7 +83,7 @@ def _install(module, container, image, values_list): if rc != 0: return rc, out, err, False else: - changed = "Extracting" in out + changed = "Extracting" in out or "Copying blob" in out return rc, out, err, changed def _uninstall(module, name): @@ -127,7 +127,7 @@ def do_update(module, container, old_image, image, values_list): if rc != 0: module.fail_json(rc=rc, msg=err) else: - changed = "Extracting" in out + changed = 
"Extracting" in out or "Copying blob" in out module.exit_json(msg=out, changed=changed) diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index 567ecfd4e..9761b4b4e 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -709,7 +709,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1385,7 +1385,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py index 9515de569..047edffbb 100644 --- a/roles/lib_openshift/library/oc_configmap.py +++ b/roles/lib_openshift/library/oc_configmap.py @@ -715,7 +715,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1391,7 +1391,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index d461e5ae9..0b6a8436b 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -759,7 +759,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1435,7 +1435,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index 22ad58725..1f52fba40 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -726,7 +726,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1402,7 +1402,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in 
sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index b6c6e47d9..1b63a6c13 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -699,7 +699,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1375,7 +1375,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py index f7fc286e0..94b08d9ce 100644 --- a/roles/lib_openshift/library/oc_image.py +++ b/roles/lib_openshift/library/oc_image.py @@ -718,7 +718,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1394,7 +1394,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index 2206878a4..ad837fdb5 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -735,7 +735,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1411,7 +1411,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 126d7a617..892546e56 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -738,7 +738,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1414,7 +1414,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_objectvalidator.py 
b/roles/lib_openshift/library/oc_objectvalidator.py index d20904d0d..38df585f0 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -670,7 +670,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1346,7 +1346,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 91199d093..70632f86d 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -727,7 +727,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1403,7 +1403,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index f9b2d81fa..4eee748d7 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -724,7 +724,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1400,7 +1400,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index 895322ba5..2e73a7645 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -731,7 +731,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1407,7 +1407,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index 8f8e46e1e..e003770d8 
100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -90,6 +90,12 @@ options:
     required: false
     default: str
     aliases: []
+  labels:
+    description:
+    - The labels to apply to the route
+    required: false
+    default: None
+    aliases: []
   tls_termination:
     description:
     - The options for termination. e.g. reencrypt
@@ -769,7 +775,7 @@ class Yedit(object):  # pragma: no cover
             yamlfile.yaml_dict = content
 
         if params['key']:
-            rval = yamlfile.get(params['key']) or {}
+            rval = yamlfile.get(params['key'])
 
         return {'changed': False, 'result': rval, 'state': state}
 
@@ -1445,7 +1451,7 @@ class OpenShiftCLIConfig(object):
         for key in sorted(self.config_options.keys()):
             data = self.config_options[key]
             if data['include'] \
-               and (data['value'] or isinstance(data['value'], int)):
+               and (data['value'] is not None or isinstance(data['value'], int)):
                 if key == ascommalist:
                     val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
                 else:
@@ -1469,6 +1475,7 @@ class RouteConfig(object):
                  sname,
                  namespace,
                  kubeconfig,
+                 labels=None,
                  destcacert=None,
                  cacert=None,
                  cert=None,
@@ -1483,6 +1490,7 @@ class RouteConfig(object):
         self.kubeconfig = kubeconfig
         self.name = sname
         self.namespace = namespace
+        self.labels = labels
         self.host = host
         self.tls_termination = tls_termination
         self.destcacert = destcacert
@@ -1508,6 +1516,8 @@ class RouteConfig(object):
         self.data['metadata'] = {}
         self.data['metadata']['name'] = self.name
         self.data['metadata']['namespace'] = self.namespace
+        if self.labels:
+            self.data['metadata']['labels'] = self.labels
         self.data['spec'] = {}
 
         self.data['spec']['host'] = self.host
@@ -1715,6 +1725,7 @@ class OCRoute(OpenShiftCLI):
         rconfig = RouteConfig(params['name'],
                               params['namespace'],
                               params['kubeconfig'],
+                              params['labels'],
                               files['destcacert']['value'],
                               files['cacert']['value'],
                               files['cert']['value'],
@@ -1819,6 +1830,7 @@ def main():
             state=dict(default='present', type='str',
                        choices=['present', 'absent', 'list']),
             debug=dict(default=False, type='bool'),
+            labels=dict(default=None, type='dict'),
            name=dict(default=None, required=True, type='str'),
             namespace=dict(default=None, required=True, type='str'),
             tls_termination=dict(default=None, type='str'),
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 7130cc5fc..c142f1f43 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -713,7 +713,7 @@ class Yedit(object):  # pragma: no cover
             yamlfile.yaml_dict = content
 
         if params['key']:
-            rval = yamlfile.get(params['key']) or {}
+            rval = yamlfile.get(params['key'])
 
         return {'changed': False, 'result': rval, 'state': state}
 
@@ -1389,7 +1389,7 @@ class OpenShiftCLIConfig(object):
         for key in sorted(self.config_options.keys()):
             data = self.config_options[key]
             if data['include'] \
-               and (data['value'] or isinstance(data['value'], int)):
+               and (data['value'] is not None or isinstance(data['value'], int)):
                 if key == ascommalist:
                     val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
                 else:
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 0c4b99e30..62bda33ad 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -90,6 +90,12 @@ options:
     required: false
     default: default
     aliases: []
+  annotations:
+    description:
+    - Annotations to apply to the object
+    required: false
+    default: None
+    aliases: []
   files:
     description:
     - A list of
files provided for secrets @@ -765,7 +771,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1441,7 +1447,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -1464,13 +1470,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -1487,6 +1495,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): @@ -1623,7 +1633,7 @@ class OCSecret(OpenShiftCLI): This receives a list of file names and converts it into a secret. The secret is then written to disk and passed into the `oc replace` command. ''' - secret = self.prep_secret(files, force) + secret = self.prep_secret(files, force=force) if secret['returncode'] != 0: return secret @@ -1698,8 +1708,7 @@ class OCSecret(OpenShiftCLI): elif params['contents']: files = Utils.create_tmp_files_from_contents(params['contents']) else: - return {'failed': True, - 'msg': 'Either specify files or contents.'} + files = [{'name': 'null', 'path': os.devnull}] ######## # Create @@ -1783,6 +1792,7 @@ def main(): debug=dict(default=False, type='bool'), namespace=dict(default='default', type='str'), name=dict(default=None, type='str'), + annotations=dict(default=None, type='dict'), type=dict(default=None, type='str'), files=dict(default=None, type='list'), delete_after=dict(default=False, type='bool'), diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index 7ab139e85..3e8aea4f1 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -772,7 +772,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1448,7 +1448,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 5d539ced4..646a39224 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -711,7 +711,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + 
rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1387,7 +1387,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index 97e213f46..99a8e8f3d 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -711,7 +711,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1387,7 +1387,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py index 9339a85e5..e88f3ae8d 100644 --- a/roles/lib_openshift/library/oc_storageclass.py +++ b/roles/lib_openshift/library/oc_storageclass.py @@ -729,7 +729,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1405,7 +1405,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py index 2fa349547..7bbe38819 100644 --- a/roles/lib_openshift/library/oc_user.py +++ b/roles/lib_openshift/library/oc_user.py @@ -771,7 +771,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1447,7 +1447,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index 55e1054e7..63adbd6ac 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -683,7 +683,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = 
yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1359,7 +1359,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py index 63bad57b4..3c07f8d4b 100644 --- a/roles/lib_openshift/library/oc_volume.py +++ b/roles/lib_openshift/library/oc_volume.py @@ -760,7 +760,7 @@ class Yedit(object): # pragma: no cover yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} @@ -1436,7 +1436,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/src/ansible/oc_atomic_container.py b/roles/lib_openshift/src/ansible/oc_atomic_container.py index 7b81760df..454d7c4b2 100644 --- a/roles/lib_openshift/src/ansible/oc_atomic_container.py +++ b/roles/lib_openshift/src/ansible/oc_atomic_container.py @@ -19,7 +19,7 @@ def _install(module, container, image, values_list): if rc != 0: return rc, out, err, False else: - changed = "Extracting" in out + changed = "Extracting" in out or "Copying blob" in out return rc, out, err, changed def _uninstall(module, name): @@ -63,7 +63,7 @@ def do_update(module, container, old_image, image, values_list): if rc != 0: module.fail_json(rc=rc, msg=err) else: - changed = "Extracting" in out + changed = "Extracting" in out or "Copying blob" in out module.exit_json(msg=out, changed=changed) diff --git a/roles/lib_openshift/src/ansible/oc_route.py b/roles/lib_openshift/src/ansible/oc_route.py index f2f5c5095..969cf8bcd 100644 --- a/roles/lib_openshift/src/ansible/oc_route.py +++ b/roles/lib_openshift/src/ansible/oc_route.py @@ -13,6 +13,7 @@ def main(): state=dict(default='present', type='str', choices=['present', 'absent', 'list']), debug=dict(default=False, type='bool'), + labels=dict(default=None, type='dict'), name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), tls_termination=dict(default=None, type='str'), diff --git a/roles/lib_openshift/src/ansible/oc_secret.py b/roles/lib_openshift/src/ansible/oc_secret.py index faa7c1772..ee2827e69 100644 --- a/roles/lib_openshift/src/ansible/oc_secret.py +++ b/roles/lib_openshift/src/ansible/oc_secret.py @@ -15,6 +15,7 @@ def main(): debug=dict(default=False, type='bool'), namespace=dict(default='default', type='str'), name=dict(default=None, type='str'), + annotations=dict(default=None, type='dict'), type=dict(default=None, type='str'), files=dict(default=None, type='list'), delete_after=dict(default=False, type='bool'), diff --git a/roles/lib_openshift/src/class/oc_adm_csr.py b/roles/lib_openshift/src/class/oc_adm_csr.py index ea11c6ca9..22b8f9165 100644 --- a/roles/lib_openshift/src/class/oc_adm_csr.py +++ b/roles/lib_openshift/src/class/oc_adm_csr.py @@ 
-66,11 +66,23 @@ class OCcsr(OpenShiftCLI): return False + def get_csr_request(self, request): + '''base64 decode the request object and call openssl to determine the + subject and specifically the CN: from the request + + Output: + (0, '... + Subject: O=system:nodes, CN=system:node:ip-172-31-54-54.ec2.internal + ...') + ''' + import base64 + return self._run(['openssl', 'req', '-noout', '-text'], base64.b64decode(request))[1] + def match_node(self, csr): '''match an inc csr to a node in self.nodes''' for node in self.nodes: - # we have a match - if node['name'] in csr['metadata']['name']: + # we need to match based upon the csr's request certificate's CN + if node['name'] in self.get_csr_request(csr['spec']['request']): node['csrs'][csr['metadata']['name']] = csr # check that the username is the node and type is 'Approved' diff --git a/roles/lib_openshift/src/class/oc_route.py b/roles/lib_openshift/src/class/oc_route.py index 3a1bd732f..dc2f7977b 100644 --- a/roles/lib_openshift/src/class/oc_route.py +++ b/roles/lib_openshift/src/class/oc_route.py @@ -118,6 +118,7 @@ class OCRoute(OpenShiftCLI): rconfig = RouteConfig(params['name'], params['namespace'], params['kubeconfig'], + params['labels'], files['destcacert']['value'], files['cacert']['value'], files['cert']['value'], diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py index 4ee6443e9..89e70b6b2 100644 --- a/roles/lib_openshift/src/class/oc_secret.py +++ b/roles/lib_openshift/src/class/oc_secret.py @@ -67,7 +67,7 @@ class OCSecret(OpenShiftCLI): This receives a list of file names and converts it into a secret. The secret is then written to disk and passed into the `oc replace` command. ''' - secret = self.prep_secret(files, force) + secret = self.prep_secret(files, force=force) if secret['returncode'] != 0: return secret @@ -142,8 +142,7 @@ class OCSecret(OpenShiftCLI): elif params['contents']: files = Utils.create_tmp_files_from_contents(params['contents']) else: - return {'failed': True, - 'msg': 'Either specify files or contents.'} + files = [{'name': 'null', 'path': os.devnull}] ######## # Create diff --git a/roles/lib_openshift/src/doc/route b/roles/lib_openshift/src/doc/route index a12999c9e..f0d38ab5f 100644 --- a/roles/lib_openshift/src/doc/route +++ b/roles/lib_openshift/src/doc/route @@ -39,6 +39,12 @@ options: required: false default: str aliases: [] + labels: + description: + - The labels to apply on the route + required: false + default: None + aliases: [] tls_termination: description: - The options for termination. e.g. 
reencrypt diff --git a/roles/lib_openshift/src/doc/secret b/roles/lib_openshift/src/doc/secret index 76b147f6f..a27f90f38 100644 --- a/roles/lib_openshift/src/doc/secret +++ b/roles/lib_openshift/src/doc/secret @@ -39,6 +39,12 @@ options: required: false default: default aliases: [] + annotations: + description: + - Annotations to apply to the object + required: false + default: None + aliases: [] files: description: - A list of files provided for secrets diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py index 5a307cdb3..1fb32164e 100644 --- a/roles/lib_openshift/src/lib/base.py +++ b/roles/lib_openshift/src/lib/base.py @@ -597,7 +597,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/src/lib/route.py b/roles/lib_openshift/src/lib/route.py index 3b54a24fb..b106866cb 100644 --- a/roles/lib_openshift/src/lib/route.py +++ b/roles/lib_openshift/src/lib/route.py @@ -11,6 +11,7 @@ class RouteConfig(object): sname, namespace, kubeconfig, + labels=None, destcacert=None, cacert=None, cert=None, @@ -25,6 +26,7 @@ class RouteConfig(object): self.kubeconfig = kubeconfig self.name = sname self.namespace = namespace + self.labels = labels self.host = host self.tls_termination = tls_termination self.destcacert = destcacert @@ -50,6 +52,8 @@ class RouteConfig(object): self.data['metadata'] = {} self.data['metadata']['name'] = self.name self.data['metadata']['namespace'] = self.namespace + if self.labels: + self.data['metadata']['labels'] = self.labels self.data['spec'] = {} self.data['spec']['host'] = self.host diff --git a/roles/lib_openshift/src/lib/secret.py b/roles/lib_openshift/src/lib/secret.py index a1c202442..ad4b6aa36 100644 --- a/roles/lib_openshift/src/lib/secret.py +++ b/roles/lib_openshift/src/lib/secret.py @@ -10,13 +10,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -33,6 +35,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): diff --git a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py b/roles/lib_openshift/src/test/integration/filter_plugins/test_filters.py index f350bd25d..f350bd25d 100644 --- a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py +++ b/roles/lib_openshift/src/test/integration/filter_plugins/test_filters.py diff --git a/roles/lib_openshift/src/test/integration/oc_configmap.yml b/roles/lib_openshift/src/test/integration/oc_configmap.yml index c0d200e73..6a452ccec 100755 --- a/roles/lib_openshift/src/test/integration/oc_configmap.yml +++ b/roles/lib_openshift/src/test/integration/oc_configmap.yml @@ -55,7 +55,7 @@ config: "{{ filename }}" from_literal: foo: notbar - deployment_type: online + deployment_type: openshift-enterprise - name: 
fetch the updated configmap oc_configmap: @@ -70,7 +70,7 @@ assert: that: - cmout.results.results[0].metadata.name == 'configmaptest' - - cmout.results.results[0].data.deployment_type == 'online' + - cmout.results.results[0].data.deployment_type == 'openshift-enterprise' - cmout.results.results[0].data.foo == 'notbar' ###### end update test ########### diff --git a/roles/lib_openshift/src/test/unit/test_oc_configmap.py b/roles/lib_openshift/src/test/unit/test_oc_configmap.py index 318fd6167..27042c64b 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_configmap.py +++ b/roles/lib_openshift/src/test/unit/test_oc_configmap.py @@ -79,7 +79,7 @@ class OCConfigMapTest(unittest.TestCase): ''' Testing a configmap create ''' params = copy.deepcopy(OCConfigMapTest.params) params['from_file'] = {'test': '/root/file'} - params['from_literal'] = {'foo': 'bar', 'deployment_type': 'online'} + params['from_literal'] = {'foo': 'bar', 'deployment_type': 'openshift-enterprise'} configmap = '''{ "apiVersion": "v1", @@ -100,7 +100,7 @@ class OCConfigMapTest(unittest.TestCase): "apiVersion": "v1", "data": { "foo": "bar", - "deployment_type": "online", + "deployment_type": "openshift-enterprise", "test": "this is a file\\n" }, "kind": "ConfigMap", @@ -128,7 +128,7 @@ class OCConfigMapTest(unittest.TestCase): self.assertTrue(results['changed']) self.assertEqual(results['results']['results'][0]['metadata']['name'], 'configmap') - self.assertEqual(results['results']['results'][0]['data']['deployment_type'], 'online') + self.assertEqual(results['results']['results'][0]['data']['deployment_type'], 'openshift-enterprise') @unittest.skipIf(six.PY3, 'py2 test only') @mock.patch('os.path.exists') diff --git a/roles/lib_openshift/src/test/unit/test_oc_route.py b/roles/lib_openshift/src/test/unit/test_oc_route.py index afdb5e4dc..5699f123b 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_route.py +++ b/roles/lib_openshift/src/test/unit/test_oc_route.py @@ -39,6 +39,7 @@ class OCRouteTest(unittest.TestCase): 'debug': False, 'name': 'test', 'namespace': 'default', + 'labels': {'route': 'route'}, 'tls_termination': 'passthrough', 'dest_cacert_path': None, 'cacert_path': None, @@ -64,7 +65,10 @@ class OCRouteTest(unittest.TestCase): "selfLink": "/oapi/v1/namespaces/default/routes/test", "uid": "1b127c67-ecd9-11e6-96eb-0e0d9bdacd26", "resourceVersion": "439182", - "creationTimestamp": "2017-02-07T01:59:48Z" + "creationTimestamp": "2017-02-07T01:59:48Z", + "labels": { + "route": "route" + } }, "spec": { "host": "test.example", @@ -141,6 +145,7 @@ class OCRouteTest(unittest.TestCase): 'debug': False, 'name': 'test', 'namespace': 'default', + 'labels': {'route': 'route'}, 'tls_termination': 'edge', 'dest_cacert_path': None, 'cacert_path': None, @@ -166,7 +171,8 @@ class OCRouteTest(unittest.TestCase): "namespace": "default", "resourceVersion": "517745", "selfLink": "/oapi/v1/namespaces/default/routes/test", - "uid": "b6f25898-ed77-11e6-9755-0e737db1e63a" + "uid": "b6f25898-ed77-11e6-9755-0e737db1e63a", + "labels": {"route": "route"} }, "spec": { "host": "test.openshift.com", @@ -250,6 +256,7 @@ metadata: self.assertTrue(results['changed']) self.assertEqual(results['state'], 'present') self.assertEqual(results['results']['results'][0]['metadata']['name'], 'test') + self.assertEqual(results['results']['results'][0]['metadata']['labels']['route'], 'route') # Making sure our mock was called as we expected mock_cmd.assert_has_calls([ diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py 
index 95a305b58..e5ac1f74f 100644 --- a/roles/lib_utils/library/repoquery.py +++ b/roles/lib_utils/library/repoquery.py @@ -35,6 +35,7 @@ import os # noqa: F401 import re # noqa: F401 import shutil # noqa: F401 import tempfile # noqa: F401 +import time # noqa: F401 try: import ruamel.yaml as yaml # noqa: F401 @@ -618,17 +619,22 @@ def main(): show_duplicates=dict(default=False, required=False, type='bool'), match_version=dict(default=None, required=False, type='str'), ignore_excluders=dict(default=False, required=False, type='bool'), + retries=dict(default=4, required=False, type='int'), + retry_interval=dict(default=5, required=False, type='int'), ), supports_check_mode=False, required_if=[('show_duplicates', True, ['name'])], ) - rval = Repoquery.run_ansible(module.params, module.check_mode) - - if 'failed' in rval: - module.fail_json(**rval) - - module.exit_json(**rval) + tries = 1 + while True: + rval = Repoquery.run_ansible(module.params, module.check_mode) + if 'failed' not in rval: + module.exit_json(**rval) + elif tries > module.params['retries']: + module.fail_json(**rval) + tries += 1 + time.sleep(module.params['retry_interval']) if __name__ == "__main__": diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py index baf72fe47..cf5c2e423 100644 --- a/roles/lib_utils/library/yedit.py +++ b/roles/lib_utils/library/yedit.py @@ -35,6 +35,7 @@ import os # noqa: F401 import re # noqa: F401 import shutil # noqa: F401 import tempfile # noqa: F401 +import time # noqa: F401 try: import ruamel.yaml as yaml # noqa: F401 @@ -792,7 +793,7 @@ class Yedit(object): yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_utils/src/ansible/repoquery.py b/roles/lib_utils/src/ansible/repoquery.py index 40773b1c1..5f5b93639 100644 --- a/roles/lib_utils/src/ansible/repoquery.py +++ b/roles/lib_utils/src/ansible/repoquery.py @@ -19,17 +19,22 @@ def main(): show_duplicates=dict(default=False, required=False, type='bool'), match_version=dict(default=None, required=False, type='str'), ignore_excluders=dict(default=False, required=False, type='bool'), + retries=dict(default=4, required=False, type='int'), + retry_interval=dict(default=5, required=False, type='int'), ), supports_check_mode=False, required_if=[('show_duplicates', True, ['name'])], ) - rval = Repoquery.run_ansible(module.params, module.check_mode) - - if 'failed' in rval: - module.fail_json(**rval) - - module.exit_json(**rval) + tries = 1 + while True: + rval = Repoquery.run_ansible(module.params, module.check_mode) + if 'failed' not in rval: + module.exit_json(**rval) + elif tries > module.params['retries']: + module.fail_json(**rval) + tries += 1 + time.sleep(module.params['retry_interval']) if __name__ == "__main__": diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py index 957c35a06..0a4fbe07a 100644 --- a/roles/lib_utils/src/class/yedit.py +++ b/roles/lib_utils/src/class/yedit.py @@ -590,7 +590,7 @@ class Yedit(object): yamlfile.yaml_dict = content if params['key']: - rval = yamlfile.get(params['key']) or {} + rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py index 567f8c9e0..07a04b7ae 100644 --- a/roles/lib_utils/src/lib/import.py +++ b/roles/lib_utils/src/lib/import.py @@ -10,6 +10,7 @@ import os # noqa: 
F401 import re # noqa: F401 import shutil # noqa: F401 import tempfile # noqa: F401 +import time # noqa: F401 try: import ruamel.yaml as yaml # noqa: F401 diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml index 3da340c85..e2f7af5ad 100644 --- a/roles/nuage_master/meta/main.yml +++ b/roles/nuage_master/meta/main.yml @@ -13,8 +13,5 @@ galaxy_info: - cloud - system dependencies: -- role: nuage_ca -- role: nuage_common -- role: openshift_etcd_client_certificates - role: lib_openshift - role: lib_os_firewall diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2 index 612d689c2..7be5d6743 100755 --- a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 +++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2 @@ -62,16 +62,14 @@ spec: selector: matchLabels: k8s-app: nuage-master-config + updateStrategy: + type: RollingUpdate template: metadata: labels: k8s-app: nuage-master-config spec: hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - operator: Exists nodeSelector: install-monitor: "true" containers: diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2 index 02e9a1563..6a1267d94 100755 --- a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 +++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2 @@ -23,7 +23,7 @@ data: # IP address and port number of master API server masterApiServer: {{ api_server_url }} # REST server URL - nuageMonRestServer: {{ nuage_mon_rest_server_url }} + nuageMonRestServer: https://{{ openshift_master_cluster_hostname }}:{{ nuage_mon_rest_server_port }} # Bridge name for the docker bridge dockerBridgeName: docker0 # Certificate for connecting to the openshift monitor REST api @@ -32,11 +32,6 @@ data: nuageMonClientKey: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonClient.key # CA certificate for verifying the master's rest server nuageMonServerCA: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonCA.crt - # Nuage vport mtu size - interfaceMTU: {{ nuage_vport_mtu }} - # Logging level for the plugin - # allowed options are: "dbg", "info", "warn", "err", "emer", "off" - logLevel: 3 # This will generate the required Nuage CNI yaml configuration cni_yaml_config: | @@ -72,10 +67,6 @@ spec: k8s-app: nuage-cni-ds spec: hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - operator: Exists containers: # This container installs Nuage CNI binaries # and CNI network config file on each node. 
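
The recurring one-line change across the generated modules above — dropping `or {}` from `yamlfile.get(params['key'])` and switching `OpenShiftCLIConfig` to test `data['value'] is not None` — guards against losing falsy-but-valid values. A minimal sketch of the difference, with a plain dict standing in for the parsed YAML (not repo code):

```python
# `get(key) or {}` silently turned falsy-but-valid values -- '', 0,
# False, [] -- into {}, so callers could not tell "key set to an
# empty value" apart from "key absent".
def get_value(data, key, legacy=False):
    value = data.get(key)
    if legacy:
        return value or {}  # old behavior: 0 and '' collapse to {}
    return value            # new behavior: falsy values pass through

doc = {'replicas': 0, 'suffix': ''}
assert get_value(doc, 'replicas', legacy=True) == {}  # value lost
assert get_value(doc, 'replicas') == 0                # value kept
assert get_value(doc, 'missing') is None              # absent key
```

Under the old truthiness test, an explicit `0` or empty string in a manifest was indistinguishable from a missing key, which is presumably why the `isinstance(..., int)` special case existed in the first place.
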
@@ -157,10 +148,6 @@ spec: k8s-app: nuage-vrs-ds spec: hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - operator: Exists containers: # This container installs Nuage VRS running as a # container on each worker node diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml index d8bfca62a..fdf01b7c2 100644 --- a/roles/nuage_node/vars/main.yaml +++ b/roles/nuage_node/vars/main.yaml @@ -24,4 +24,4 @@ cni_bin_dir: "/opt/cni/bin/" nuage_plugin_crt_dir: /usr/share/vsp-openshift openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node -nuage_atomic_docker_additional_mounts: "DOCKER_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d" +nuage_atomic_docker_additional_mounts: "NUAGE_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d" diff --git a/roles/openshift_ami_prep/defaults/main.yml b/roles/openshift_ami_prep/defaults/main.yml deleted file mode 100644 index 2ba6d8eae..000000000 --- a/roles/openshift_ami_prep/defaults/main.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- - - -r_openshift_ami_prep_packages: -- atomic-openshift-master -- atomic-openshift-node -- atomic-openshift-docker-excluder -- atomic-openshift-sdn-ovs -- openvswitch -- docker -- etcd -#- pcs -- haproxy -- dnsmasq -- ntp -- logrotate -- httpd-tools -- bind -- firewalld -- libselinux-python -- conntrack-tools -- openssl -- cloud-init -- iproute -- python-dbus -- PyYAML -- yum-utils -- python2-boto -- python2-boto3 -- cloud-utils-growpart -# gluster -- glusterfs-fuse -- heketi-client -# nfs -- nfs-utils -- flannel -- bash-completion -# cockpit -- cockpit-ws -- cockpit-system -- cockpit-bridge -- cockpit-docker -# iscsi -- iscsi-initiator-utils -# ceph -- ceph-common -# systemcontainer -# - runc -# - container-selinux -# - atomic diff --git a/roles/openshift_ami_prep/tasks/main.yml b/roles/openshift_ami_prep/tasks/main.yml deleted file mode 100644 index 98f7bc0e2..000000000 --- a/roles/openshift_ami_prep/tasks/main.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -- name: install repositories - include: yum_repos.yml - static: yes - -- name: install needed rpm(s) - package: - name: "{{ item }}" - state: present - with_items: "{{ r_openshift_ami_prep_packages }}" - -- name: create the directory for node - file: - state: directory - path: "/etc/systemd/system/{{ r_openshift_ami_prep_node }}.service.d" - -- name: laydown systemd override - copy: - dest: "/etc/systemd/system/{{ r_openshift_ami_prep_node }}.service.d/override.conf" - content: | - [Unit] - After=cloud-init.service - -- name: update the sysconfig to have KUBECONFIG - lineinfile: - dest: "/etc/sysconfig/{{ r_openshift_ami_prep_node }}" - line: "KUBECONFIG=/root/csr_kubeconfig" - regexp: "^KUBECONFIG=.*" - -- name: update the ExecStart to have bootstrap - lineinfile: - dest: "/usr/lib/systemd/system/{{ r_openshift_ami_prep_node }}.service" - line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}" - regexp: "^ExecStart=.*" - -- name: systemctl enable origin-node - systemd: - name: "{{ item }}" - enabled: no - with_items: - - "{{ r_openshift_ami_prep_node }}.service" - - "{{ r_openshift_ami_prep_master }}.service" diff --git 
a/roles/openshift_ami_prep/tasks/yum_repos.yml b/roles/openshift_ami_prep/tasks/yum_repos.yml deleted file mode 100644 index c48c67ac2..000000000 --- a/roles/openshift_ami_prep/tasks/yum_repos.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Create our install repository - yum_repository: - description: "{{ item.description | default(omit) }}" - name: "{{ item.name }}" - baseurl: "{{ item.baseurl }}" - gpgkey: "{{ item.gpgkey | default(omit)}}" - gpgcheck: "{{ item.gpgcheck | default(1) }}" - sslverify: "{{ item.sslverify | default(1) }}" - sslclientkey: "{{ item.sslclientkey | default(omit) }}" - sslclientcert: "{{ item.sslclientcert | default(omit) }}" - file: "{{ item.file }}" - enabled: "{{ item.enabled }}" - with_items: "{{ r_openshift_ami_prep_yum_repositories }}" diff --git a/roles/openshift_atomic/README.md b/roles/openshift_atomic/README.md new file mode 100644 index 000000000..8c10c9991 --- /dev/null +++ b/roles/openshift_atomic/README.md @@ -0,0 +1,28 @@ +OpenShift Atomic +================ + +This role houses atomic specific tasks. + +Requirements +------------ + +Role Variables +-------------- + +Dependencies +------------ + +Example Playbook +---------------- + +``` +- name: Ensure atomic proxies are defined + hosts: localhost + roles: + - role: openshift_atomic +``` + +License +------- + +Apache License Version 2.0 diff --git a/roles/openshift_aws_iam_kms/meta/main.yml b/roles/openshift_atomic/meta/main.yml index e29aaf96b..ea129f514 100644 --- a/roles/openshift_aws_iam_kms/meta/main.yml +++ b/roles/openshift_atomic/meta/main.yml @@ -1,13 +1,13 @@ --- galaxy_info: author: OpenShift - description: AWS IAM KMS setup and management + description: Atomic related tasks company: Red Hat, Inc license: ASL 2.0 - min_ansible_version: 1.2 + min_ansible_version: 2.2 platforms: - name: EL versions: - 7 dependencies: -- lib_utils +- role: lib_openshift diff --git a/roles/openshift_atomic/tasks/proxy.yml b/roles/openshift_atomic/tasks/proxy.yml new file mode 100644 index 000000000..dde099984 --- /dev/null +++ b/roles/openshift_atomic/tasks/proxy.yml @@ -0,0 +1,32 @@ +--- +# Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf +# regexp: the line starts with or without #, followed by the string +# http_proxy, then either : or = +- block: + + - name: Add http_proxy to /etc/atomic.conf + lineinfile: + dest: /etc/atomic.conf + regexp: "^#?http_proxy[:=]{1}" + line: "http_proxy: {{ openshift.common.http_proxy | default('') }}" + when: + - openshift.common.http_proxy is defined + - openshift.common.http_proxy != '' + + - name: Add https_proxy to /etc/atomic.conf + lineinfile: + dest: /etc/atomic.conf + regexp: "^#?https_proxy[:=]{1}" + line: "https_proxy: {{ openshift.common.https_proxy | default('') }}" + when: + - openshift.common.https_proxy is defined + - openshift.common.https_proxy != '' + + - name: Add no_proxy to /etc/atomic.conf + lineinfile: + dest: /etc/atomic.conf + regexp: "^#?no_proxy[:=]{1}" + line: "no_proxy: {{ openshift.common.no_proxy | default('') }}" + when: + - openshift.common.no_proxy is defined + - openshift.common.no_proxy != '' diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md new file mode 100644 index 000000000..4aca5c7a8 --- /dev/null +++ b/roles/openshift_aws/README.md @@ -0,0 +1,57 @@ +openshift_aws +================================== + +Provision AWS infrastructure and instances. 
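
The `^#?...[:=]{1}` pattern in the openshift_atomic proxy tasks above is what lets a single `lineinfile` rule update `/etc/atomic.conf` whether the setting is commented out or active, and whether it uses `:` or `=` as the separator. A quick illustrative check (the sample lines are made up):

```python
# Demonstrates which /etc/atomic.conf lines the proxy.yml regexp
# would replace in place and which it would leave alone.
import re

pattern = re.compile(r"^#?http_proxy[:=]{1}")
candidates = [
    "#http_proxy: http://old:3128",  # commented default -> replaced
    "http_proxy=http://old:3128",    # key=value form    -> replaced
    "http_proxy: http://old:3128",   # key: value form   -> replaced
    "xhttp_proxy: http://old:3128",  # different key     -> left alone
]
for line in candidates:
    print(line, "->", bool(pattern.match(line)))
```
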
+ +This role contains many task-areas to provision resources and perform actions +against an AWS account for the purposes of dynamically building an openshift +cluster. + +This role is primarily intended to be used with "include_role" and "tasks_from". + +include_role can be called from the tasks section in a play. See example +playbook below for reference. + +These task-areas are: + +* provision a vpc: vpc.yml +* provision elastic load balancers: elb.yml +* upload IAM ssl certificates to use with load balancers: iam_cert.yml +* provision an S3 bucket: s3.yml +* provision an instance to build an AMI: provision_instance.yml +* provision a security group in AWS: security_group.yml +* provision ssh keys and users in AWS: ssh_keys.yml +* provision an AMI in AWS: seal_ami.yml +* provision scale groups: scale_group.yml +* provision launch configs: launch_config.yml + +Requirements +------------ + +* Ansible 2.3 +* Boto + +Appropriate AWS credentials and permissions are required. + + + + +Example Playbook +---------------- + +```yaml +- include_role: + name: openshift_aws + tasks_from: vpc.yml + vars: + openshift_aws_clusterid: test + openshift_aws_region: us-east-1 +``` + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml new file mode 100644 index 000000000..5371588cf --- /dev/null +++ b/roles/openshift_aws/defaults/main.yml @@ -0,0 +1,233 @@ +--- +openshift_aws_create_s3: True +openshift_aws_create_iam_cert: True +openshift_aws_create_security_groups: True +openshift_aws_create_launch_config: True +openshift_aws_create_scale_group: True +openshift_aws_node_group_type: master + +openshift_aws_wait_for_ssh: True + +openshift_aws_clusterid: default +openshift_aws_region: us-east-1 +openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}" +openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}" +openshift_aws_kubernetes_cluster_status: "{{ openshift_aws_clusterid }}" + +openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external" +openshift_aws_iam_cert_path: '' +openshift_aws_iam_cert_key_path: '' +openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}" + +openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms" +openshift_aws_ami: '' +openshift_aws_ami_copy_wait: False +openshift_aws_ami_encrypt: False +openshift_aws_ami_copy_src_region: "{{ openshift_aws_region }}" +openshift_aws_ami_name: openshift-gi +openshift_aws_base_ami_name: ami_base + +openshift_aws_launch_config_bootstrap_token: '' +openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-{{ ansible_date_time.epoch }}" + +openshift_aws_users: [] + +openshift_aws_ami_tags: + bootstrap: "true" + openshift-created: "true" + clusterid: "{{ openshift_aws_clusterid }}" + +openshift_aws_s3_mode: create +openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry" + +openshift_aws_elb_health_check: + ping_protocol: tcp + ping_port: 443 + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 2 + +openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}" +openshift_aws_elb_idle_timout: 400 +openshift_aws_elb_scheme: internet-facing +openshift_aws_elb_cert_arn: '' + +openshift_aws_elb_listeners: + master: + external: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: ssl + instance_port: 443 + - 
protocol: ssl + load_balancer_port: 443 + instance_protocol: ssl + instance_port: 443 + # ssl certificate required for https or ssl + ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}" + internal: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: tcp + instance_port: 80 + - protocol: tcp + load_balancer_port: 443 + instance_protocol: tcp + instance_port: 443 + +openshift_aws_node_group_config_master_volumes: +- device_name: /dev/sdb + volume_size: 100 + device_type: gp2 + delete_on_termination: False + +openshift_aws_node_group_config_node_volumes: +- device_name: /dev/sdb + volume_size: 100 + device_type: gp2 + delete_on_termination: True + +openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}" +openshift_aws_node_group_termination_policy: Default +openshift_aws_node_group_replace_instances: [] +openshift_aws_node_group_replace_all_instances: False +openshift_aws_node_group_config_extra_labels: {} + +openshift_aws_node_group_config: + tags: "{{ openshift_aws_node_group_config_tags }}" + master: + instance_type: m4.xlarge + ami: "{{ openshift_aws_ami }}" + volumes: "{{ openshift_aws_node_group_config_master_volumes }}" + health_check: + period: 60 + type: EC2 + min_size: 3 + max_size: 3 + desired_size: 3 + tags: + host-type: master + sub-host-type: default + labels: + type: master + wait_for_instances: True + termination_policy: "{{ openshift_aws_node_group_termination_policy }}" + replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" + compute: + instance_type: m4.xlarge + ami: "{{ openshift_aws_ami }}" + volumes: "{{ openshift_aws_node_group_config_node_volumes }}" + health_check: + period: 60 + type: EC2 + min_size: 3 + max_size: 100 + desired_size: 3 + tags: + host-type: node + sub-host-type: compute + labels: + type: compute + termination_policy: "{{ openshift_aws_node_group_termination_policy }}" + replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" + infra: + instance_type: m4.xlarge + ami: "{{ openshift_aws_ami }}" + volumes: "{{ openshift_aws_node_group_config_node_volumes }}" + health_check: + period: 60 + type: EC2 + min_size: 2 + max_size: 20 + desired_size: 2 + tags: + host-type: node + sub-host-type: infra + labels: + type: infra + termination_policy: "{{ openshift_aws_node_group_termination_policy }}" + replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" + +openshift_aws_elb_security_groups: +- "{{ openshift_aws_clusterid }}" +- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" + +openshift_aws_elb_instance_filter: + "tag:clusterid": "{{ openshift_aws_clusterid }}" + "tag:host-type": "{{ openshift_aws_node_group_type }}" + instance-state-name: running + +openshift_aws_launch_config_security_groups: +- "{{ openshift_aws_clusterid }}" # default sg +- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg +- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s + +openshift_aws_node_security_groups: + default: + name: "{{ openshift_aws_clusterid }}" + desc: "{{ openshift_aws_clusterid }} default" + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + - proto: all + from_port: all + to_port: all + group_name: "{{ openshift_aws_clusterid }}" + master: + name: "{{ openshift_aws_clusterid }}_master" + desc: "{{ openshift_aws_clusterid }} master instances" + rules: + - proto: tcp + from_port: 80 + to_port: 80 
+ cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 443 + to_port: 443 + cidr_ip: 0.0.0.0/0 + compute: + name: "{{ openshift_aws_clusterid }}_compute" + desc: "{{ openshift_aws_clusterid }} compute node instances" + infra: + name: "{{ openshift_aws_clusterid }}_infra" + desc: "{{ openshift_aws_clusterid }} infra node instances" + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 443 + to_port: 443 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 30000 + to_port: 32000 + cidr_ip: 0.0.0.0/0 + etcd: + name: "{{ openshift_aws_clusterid }}_etcd" + desc: "{{ openshift_aws_clusterid }} etcd instances" + +openshift_aws_vpc_tags: + Name: "{{ openshift_aws_vpc_name }}" + +openshift_aws_subnet_name: us-east-1c + +openshift_aws_vpc: + name: "{{ openshift_aws_vpc_name }}" + cidr: 172.31.0.0/16 + subnets: + us-east-1: + - cidr: 172.31.48.0/20 + az: "us-east-1c" + - cidr: 172.31.32.0/20 + az: "us-east-1e" + - cidr: 172.31.16.0/20 + az: "us-east-1a" + +openshift_aws_node_run_bootstrap_startup: True +openshift_aws_node_user_data: '' +openshift_aws_node_config_namespace: openshift-node diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py new file mode 100644 index 000000000..06e1f9602 --- /dev/null +++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py @@ -0,0 +1,28 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' +Custom filters for use in openshift_aws +''' + + +class FilterModule(object): + ''' Custom ansible filters for use by openshift_aws role''' + + @staticmethod + def build_instance_tags(clusterid, status='owned'): + ''' This function will return a dictionary of the instance tags. + + The main desire to have this inside of a filter_plugin is that we + need to build the following key. 
+ + {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'} + + ''' + tags = {'clusterid': clusterid, + 'kubernetes.io/cluster/{}'.format(clusterid): status} + + return tags + + def filters(self): + ''' returns a mapping of filters to methods ''' + return {'build_instance_tags': self.build_instance_tags} diff --git a/roles/openshift_aws/meta/main.yml b/roles/openshift_aws/meta/main.yml new file mode 100644 index 000000000..875efcb8f --- /dev/null +++ b/roles/openshift_aws/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- lib_utils diff --git a/roles/openshift_aws/tasks/ami_copy.yml b/roles/openshift_aws/tasks/ami_copy.yml new file mode 100644 index 000000000..07020dd62 --- /dev/null +++ b/roles/openshift_aws/tasks/ami_copy.yml @@ -0,0 +1,34 @@ +--- +- fail: + msg: "{{ item }} needs to be defined" + when: item is not defined + with_items: + - openshift_aws_ami_copy_src_ami + - openshift_aws_ami_copy_name + +- name: Create IAM KMS key with alias + oo_iam_kms: + state: present + alias: "{{ openshift_aws_iam_kms_alias }}" + region: "{{ openshift_aws_region }}" + register: created_kms + +- debug: var=created_kms.results + +- name: "Create copied AMI image and wait: {{ openshift_aws_ami_copy_wait }}" + ec2_ami_copy: + name: "{{ openshift_aws_ami_copy_name }}" + region: "{{ openshift_aws_region }}" + source_region: "{{ openshift_aws_ami_copy_src_region }}" + source_image_id: "{{ openshift_aws_ami_copy_src_ami }}" + encrypted: "{{ openshift_aws_ami_encrypt | bool }}" + kms_key_id: "{{ created_kms.results.KeyArn | default(omit) }}" + wait: "{{ openshift_aws_ami_copy_wait | default(omit) }}" + tags: "{{ openshift_aws_ami_tags }}" + register: copy_result + +- debug: var=copy_result + +- name: return AMI ID with setfact + set_fact: + openshift_aws_ami_copy_custom_ami: "{{ copy_result.image_id }}" diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml new file mode 100644 index 000000000..0dac1c23d --- /dev/null +++ b/roles/openshift_aws/tasks/build_node_group.yml @@ -0,0 +1,34 @@ +--- +# When openshift_aws_use_custom_ami is '' then +# we retrieve the latest build AMI. +# Then set openshift_aws_ami to the ami. 
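
The filter is easiest to verify outside Ansible; a usage sketch that mirrors its body (the `demo` cluster id is illustrative):

```python
# Mirrors build_instance_tags above so the resulting tag shape is
# visible without running Ansible: a clusterid tag plus the
# kubernetes.io/cluster/<clusterid> ownership tag.
def build_instance_tags(clusterid, status='owned'):
    return {'clusterid': clusterid,
            'kubernetes.io/cluster/{}'.format(clusterid): status}

assert build_instance_tags('demo') == {
    'clusterid': 'demo',
    'kubernetes.io/cluster/demo': 'owned',
}
```
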
+- when: openshift_aws_ami == '' + block: + - name: fetch recently created AMI + ec2_ami_find: + region: "{{ openshift_aws_region }}" + sort: creationDate + sort_order: descending + name: "{{ openshift_aws_ami_name }}*" + ami_tags: "{{ openshift_aws_ami_tags }}" + no_result_action: fail + register: amiout + + - name: Set the openshift_aws_ami + set_fact: + openshift_aws_ami: "{{ amiout.results[0].ami_id }}" + when: + - "'results' in amiout" + - amiout.results|length > 0 + +- when: openshift_aws_create_security_groups + name: "Create {{ openshift_aws_node_group_type }} security groups" + include: security_group.yml + +- when: openshift_aws_create_launch_config + name: "Create {{ openshift_aws_node_group_type }} launch config" + include: launch_config.yml + +- when: openshift_aws_create_scale_group + name: "Create {{ openshift_aws_node_group_type }} node group" + include: scale_group.yml diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml new file mode 100644 index 000000000..7bc3184df --- /dev/null +++ b/roles/openshift_aws/tasks/elb.yml @@ -0,0 +1,68 @@ +--- +- name: query vpc + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + 'tag:Name': "{{ openshift_aws_vpc_name }}" + register: vpcout + +- name: debug + debug: var=vpcout + +- name: fetch the remote instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: "{{ openshift_aws_elb_instance_filter }}" + register: instancesout + +- name: fetch the default subnet id + ec2_vpc_subnet_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_subnet_name }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" + register: subnetout + +- name: + debug: + msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction] + if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type + else openshift_aws_elb_listeners }}" + +- name: "Create ELB {{ l_openshift_aws_elb_name }}" + ec2_elb_lb: + name: "{{ l_openshift_aws_elb_name }}" + state: present + security_group_names: "{{ openshift_aws_elb_security_groups }}" + idle_timeout: "{{ openshift_aws_elb_idle_timout }}" + region: "{{ openshift_aws_region }}" + subnets: + - "{{ subnetout.subnets[0].id }}" + health_check: "{{ openshift_aws_elb_health_check }}" + listeners: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction] + if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type + else openshift_aws_elb_listeners }}" + scheme: "{{ openshift_aws_elb_scheme }}" + tags: + KubernetesCluster: "{{ openshift_aws_clusterid }}" + register: new_elb + +# It is necessary to ignore_errors here because the instances are not in 'ready' +# state when first added to ELB +- name: "Add instances to ELB {{ l_openshift_aws_elb_name }}" + ec2_elb: + instance_id: "{{ item.id }}" + ec2_elbs: "{{ l_openshift_aws_elb_name }}" + state: present + region: "{{ openshift_aws_region }}" + wait: False + with_items: "{{ instancesout.instances }}" + ignore_errors: True + retries: 10 + register: elb_call + until: elb_call|succeeded + +- debug: + msg: "{{ item }}" + with_items: + - "{{ new_elb }}" diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml new file mode 100644 index 000000000..f74a62b8b --- /dev/null +++ b/roles/openshift_aws/tasks/iam_cert.yml @@ -0,0 +1,35 @@ +--- +- name: upload certificates to AWS IAM + iam_cert23: + state: present + name: "{{ openshift_aws_iam_cert_name 
}}" + cert: "{{ openshift_aws_iam_cert_path }}" + key: "{{ openshift_aws_iam_cert_key_path }}" + cert_chain: "{{ openshift_aws_iam_cert_chain_path | default(omit) }}" + register: elb_cert_chain + failed_when: + - "'failed' in elb_cert_chain" + - elb_cert_chain.failed + - "'msg' in elb_cert_chain" + - "'already exists and has a different certificate body' in elb_cert_chain.msg or 'BotoServerError' in elb_cert_chain.msg or 'Traceback' in elb_cert_chain.msg.module_stderr" + when: + - openshift_aws_create_iam_cert | bool + - openshift_aws_iam_cert_path != '' + - openshift_aws_iam_cert_key_path != '' + - openshift_aws_elb_cert_arn == '' + +- debug: msg="{{ elb_cert_chain }}" + +- name: set_fact openshift_aws_elb_cert_arn + set_fact: + openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}" + when: + - openshift_aws_create_iam_cert | bool + - openshift_aws_iam_cert_path != '' + - openshift_aws_iam_cert_key_path != '' + - openshift_aws_elb_cert_arn == '' + +- name: wait for cert to propagate + pause: + seconds: 5 + when: elb_cert_chain.changed diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml new file mode 100644 index 000000000..8b7b02a0e --- /dev/null +++ b/roles/openshift_aws/tasks/launch_config.yml @@ -0,0 +1,39 @@ +--- +- fail: + msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image." + when: + - openshift_aws_ami is undefined + +- fail: + msg: "Ensure that openshift_deployment_type is defined." + when: + - openshift_deployment_type is undefined + +- name: query vpc + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + 'tag:Name': "{{ openshift_aws_vpc_name }}" + register: vpcout + +- name: fetch the security groups for launch config + ec2_group_facts: + filters: + group-name: "{{ openshift_aws_launch_config_security_groups }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" + region: "{{ openshift_aws_region }}" + register: ec2sgs + +# Create the scale group config +- name: Create the node scale group launch config + ec2_lc: + name: "{{ openshift_aws_launch_config_name }}" + region: "{{ openshift_aws_region }}" + image_id: "{{ openshift_aws_ami }}" + instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}" + security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}" + user_data: "{{ lookup('template', 'user_data.j2') }}" + key_name: "{{ openshift_aws_ssh_key_name }}" + ebs_optimized: False + volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}" + assign_public_ip: True diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml new file mode 100644 index 000000000..737cfc7a6 --- /dev/null +++ b/roles/openshift_aws/tasks/master_facts.yml @@ -0,0 +1,22 @@ +--- +- name: fetch elbs + ec2_elb_facts: + region: "{{ openshift_aws_region }}" + names: + - "{{ item }}" + with_items: + - "{{ openshift_aws_elb_name }}-external" + - "{{ openshift_aws_elb_name }}-internal" + delegate_to: localhost + register: elbs + +- debug: var=elbs + +- name: set fact + set_fact: + openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}" + osm_custom_cors_origins: + - "{{ elbs.results[1].elbs[0].dns_name }}" + - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com" + - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com" + with_items: "{{ groups['masters'] 
}}" diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml new file mode 100644 index 000000000..a8518d43a --- /dev/null +++ b/roles/openshift_aws/tasks/provision.yml @@ -0,0 +1,46 @@ +--- +- when: openshift_aws_create_iam_cert | bool + name: create the iam_cert for elb certificate + include: iam_cert.yml + +- when: openshift_aws_create_s3 | bool + name: create s3 bucket for registry + include: s3.yml + +- name: include scale group creation for master + include: build_node_group.yml + +- name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:clusterid": "{{ openshift_aws_clusterid }}" + "tag:host-type": "{{ openshift_aws_node_group_type }}" + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + +- name: create our master internal load balancers + include: elb.yml + vars: + openshift_aws_elb_direction: internal + l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-internal" + openshift_aws_elb_scheme: internal + +- name: create our master external load balancers + include: elb.yml + vars: + openshift_aws_elb_direction: external + l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-external" + openshift_aws_elb_scheme: internet-facing + +- name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ item.public_ip_address }}" + timeout: 300 + search_regex: OpenSSH + with_items: "{{ instancesout.instances }}" + when: openshift_aws_wait_for_ssh | bool diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml new file mode 100644 index 000000000..25ae6ce1c --- /dev/null +++ b/roles/openshift_aws/tasks/provision_instance.yml @@ -0,0 +1,63 @@ +--- +- name: set openshift_node_bootstrap to True when building AMI + set_fact: + openshift_node_bootstrap: True + +- name: query vpc + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + 'tag:Name': "{{ openshift_aws_vpc_name }}" + register: vpcout + +- name: fetch the default subnet id + ec2_vpc_subnet_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_subnet_name }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" + register: subnetout + +- name: create instance for ami creation + ec2: + assign_public_ip: yes + region: "{{ openshift_aws_region }}" + key_name: "{{ openshift_aws_ssh_key_name }}" + group: "{{ openshift_aws_build_ami_group }}" + instance_type: m4.xlarge + vpc_subnet_id: "{{ openshift_aws_subnet_id | default(subnetout.subnets[0].id) }}" + image: "{{ openshift_aws_base_ami }}" + volumes: + - device_name: /dev/sdb + volume_type: gp2 + volume_size: 100 + delete_on_termination: true + wait: yes + exact_count: 1 + count_tag: + Name: "{{ openshift_aws_base_ami_name }}" + instance_tags: + Name: "{{ openshift_aws_base_ami_name }}" + +- name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_base_ami_name }}" + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + +- name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ instancesout.instances[0].public_ip_address }}" + timeout: 300 + search_regex: OpenSSH + +- name: add host to nodes + add_host: + groups: nodes + name: "{{ instancesout.instances[0].public_dns_name }}" diff --git a/roles/openshift_aws/tasks/provision_nodes.yml 
b/roles/openshift_aws/tasks/provision_nodes.yml new file mode 100644 index 000000000..fc4996c68 --- /dev/null +++ b/roles/openshift_aws/tasks/provision_nodes.yml @@ -0,0 +1,66 @@ +--- +# Get bootstrap config token +# bootstrap should be created on first master +# need to fetch it and shove it into cloud data +- name: fetch master instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:clusterid": "{{ openshift_aws_clusterid }}" + "tag:host-type": master + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + +- name: slurp down the bootstrap.kubeconfig + slurp: + src: /etc/origin/master/bootstrap.kubeconfig + delegate_to: "{{ instancesout.instances[0].public_ip_address }}" + remote_user: root + register: bootstrap + +- name: set_fact for kubeconfig token + set_fact: + openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}" + +- name: include build node group for infra + include: build_node_group.yml + vars: + openshift_aws_node_group_type: infra + openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift infra" + openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-infra-{{ ansible_date_time.epoch }}" + +- name: include build node group for compute + include: build_node_group.yml + vars: + openshift_aws_node_group_type: compute + openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift compute" + openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-compute-{{ ansible_date_time.epoch }}" + +- when: openshift_aws_wait_for_ssh | bool + block: + - name: pause and allow for instances to scale before we query them + pause: + seconds: 10 + + - name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:clusterid": "{{ openshift_aws_clusterid }}" + "tag:host-type": node + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + + - name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ item.public_ip_address }}" + timeout: 300 + search_regex: OpenSSH + with_items: "{{ instancesout.instances }}" diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml new file mode 100644 index 000000000..9cf37c840 --- /dev/null +++ b/roles/openshift_aws/tasks/s3.yml @@ -0,0 +1,7 @@ +--- +- name: Create an s3 bucket + s3: + bucket: "{{ openshift_aws_s3_bucket_name }}" + mode: "{{ openshift_aws_s3_mode }}" + region: "{{ openshift_aws_region }}" + when: openshift_aws_create_s3 | bool diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml new file mode 100644 index 000000000..eb31636e7 --- /dev/null +++ b/roles/openshift_aws/tasks/scale_group.yml @@ -0,0 +1,34 @@ +--- +- name: query vpc + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + 'tag:Name': "{{ openshift_aws_vpc_name }}" + register: vpcout + +- name: fetch the subnet to use in scale group + ec2_vpc_subnet_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_subnet_name }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" + register: subnetout + +- name: Create the scale group + ec2_asg: + name: "{{ openshift_aws_scale_group_name }}" + launch_config_name: "{{ openshift_aws_launch_config_name }}" + health_check_period: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.period }}" + health_check_type: "{{ 
openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.type }}" + min_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].min_size }}" + max_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].max_size }}" + desired_capacity: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].desired_size }}" + region: "{{ openshift_aws_region }}" + termination_policies: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].termination_policy if 'termination_policy' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}" + load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}" + wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}" + vpc_zone_identifier: "{{ subnetout.subnets[0].id }}" + replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}" + replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (openshift_aws_node_group_config[openshift_aws_node_group_type].replace_all_instances | default(omit)) }}" + tags: + - "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}" diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml new file mode 100644 index 000000000..0cb749dcc --- /dev/null +++ b/roles/openshift_aws/tasks/seal_ami.yml @@ -0,0 +1,49 @@ +--- +- name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_base_ami_name }}" + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + +- name: bundle ami + ec2_ami: + instance_id: "{{ instancesout.instances.0.id }}" + region: "{{ openshift_aws_region }}" + state: present + description: "This was provisioned {{ ansible_date_time.iso8601 }}" + name: "{{ openshift_aws_ami_name }}" + tags: "{{ openshift_aws_ami_tags }}" + wait: yes + register: amioutput + +- debug: var=amioutput + +- when: openshift_aws_ami_encrypt | bool + block: + - name: augment the encrypted ami tags with source-ami + set_fact: + source_tag: + source-ami: "{{ amioutput.image_id }}" + + - name: copy the ami for encrypted disks + include: ami_copy.yml + vars: + openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted" + openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}" + # TODO: How does the kms alias get passed to ec2_ami_copy + openshift_aws_ami_copy_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms" + openshift_aws_ami_copy_tags: "{{ source_tag | combine(openshift_aws_ami_tags) }}" + # this option currently fails due to boto waiters + # when supported this need to be reapplied + #openshift_aws_ami_copy_wait: True + +- name: terminate temporary instance + ec2: + state: absent + region: "{{ openshift_aws_region }}" + instance_ids: "{{ instancesout.instances.0.id }}" diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml new file mode 100644 index 000000000..161e72fb4 --- /dev/null +++ b/roles/openshift_aws/tasks/security_group.yml @@ -0,0 +1,45 @@ +--- +- name: Fetch the VPC for the vpc.id + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + 
filters: + "tag:Name": "{{ openshift_aws_clusterid }}" + register: vpcout + +- name: Create default security group for cluster + ec2_group: + name: "{{ openshift_aws_node_security_groups.default.name }}" + description: "{{ openshift_aws_node_security_groups.default.desc }}" + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + rules: "{{ openshift_aws_node_security_groups.default.rules | default(omit, True)}}" + register: sg_default_created + +- name: create the node group sgs + ec2_group: + name: "{{ item.name}}" + description: "{{ item.desc }}" + rules: "{{ item.rules if 'rules' in item else [] }}" + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + register: sg_create + with_items: + - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}" + +- name: create the k8s sgs for the node group + ec2_group: + name: "{{ item.name }}_k8s" + description: "{{ item.desc }} for k8s" + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + register: k8s_sg_create + with_items: + - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}" + +- name: tag sg groups with proper tags + ec2_tag: + tags: + KubernetesCluster: "{{ openshift_aws_clusterid }}" + resource: "{{ item.group_id }}" + region: "{{ openshift_aws_region }}" + with_items: "{{ k8s_sg_create.results }}" diff --git a/roles/openshift_aws/tasks/setup_master_group.yml b/roles/openshift_aws/tasks/setup_master_group.yml new file mode 100644 index 000000000..166f3b938 --- /dev/null +++ b/roles/openshift_aws/tasks/setup_master_group.yml @@ -0,0 +1,35 @@ +--- +- name: Alert user to variables needed - clusterid + debug: + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid }}" + +- name: Alert user to variables needed - region + debug: + msg: "openshift_aws_region={{ openshift_aws_region }}" + +- name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:clusterid": "{{ openshift_aws_clusterid }}" + "tag:host-type": master + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + +- name: add new master to masters group + add_host: + groups: "masters,etcd,nodes" + name: "{{ item.public_dns_name }}" + hostname: "{{ openshift_aws_clusterid }}-master-{{ item.id[:-5] }}" + with_items: "{{ instancesout.instances }}" + +- name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ item.public_dns_name }}" + timeout: 300 + search_regex: OpenSSH + with_items: "{{ instancesout.instances }}" diff --git a/roles/openshift_aws_ssh_keys/tasks/main.yml b/roles/openshift_aws/tasks/ssh_keys.yml index 232cf20ed..f439ce74e 100644 --- a/roles/openshift_aws_ssh_keys/tasks/main.yml +++ b/roles/openshift_aws/tasks/ssh_keys.yml @@ -3,6 +3,6 @@ ec2_key: name: "{{ item.key_name }}" key_material: "{{ item.pub_key }}" - region: "{{ r_openshift_aws_ssh_keys_region }}" - with_items: "{{ r_openshift_aws_ssh_keys_users }}" + region: "{{ openshift_aws_region }}" + with_items: "{{ openshift_aws_users }}" no_log: True diff --git a/roles/openshift_aws_vpc/tasks/main.yml b/roles/openshift_aws/tasks/vpc.yml index cfe08dae5..ce2c8eac5 100644 --- a/roles/openshift_aws_vpc/tasks/main.yml +++ b/roles/openshift_aws/tasks/vpc.yml @@ -2,13 +2,12 @@ - name: Create AWS VPC ec2_vpc_net: state: present - cidr_block: "{{ r_openshift_aws_vpc_cidr }}" + cidr_block: "{{ openshift_aws_vpc.cidr }}" dns_support: True dns_hostnames: True - region: "{{ 
r_openshift_aws_vpc_region }}" - name: "{{ r_openshift_aws_vpc_clusterid }}" - tags: - Name: "{{ r_openshift_aws_vpc_clusterid }}" + region: "{{ openshift_aws_region }}" + name: "{{ openshift_aws_clusterid }}" + tags: "{{ openshift_aws_vpc_tags }}" register: vpc - name: Sleep to avoid a race condition when creating the vpc @@ -18,23 +17,23 @@ - name: assign the vpc igw ec2_vpc_igw: - region: "{{ r_openshift_aws_vpc_region }}" + region: "{{ openshift_aws_region }}" vpc_id: "{{ vpc.vpc.id }}" register: igw - name: assign the vpc subnets ec2_vpc_subnet: - region: "{{ r_openshift_aws_vpc_region }}" + region: "{{ openshift_aws_region }}" vpc_id: "{{ vpc.vpc.id }}" cidr: "{{ item.cidr }}" az: "{{ item.az }}" resource_tags: Name: "{{ item.az }}" - with_items: "{{ r_openshift_aws_vpc_subnets[r_openshift_aws_vpc_region] }}" + with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}" - name: Grab the route tables from our VPC ec2_vpc_route_table_facts: - region: "{{ r_openshift_aws_vpc_region }}" + region: "{{ openshift_aws_region }}" filters: vpc-id: "{{ vpc.vpc.id }}" register: route_table @@ -44,9 +43,9 @@ lookup: id route_table_id: "{{ route_table.route_tables[0].id }}" vpc_id: "{{ vpc.vpc.id }}" - region: "{{ r_openshift_aws_vpc_region }}" + region: "{{ openshift_aws_region }}" tags: - Name: "{{ r_openshift_aws_vpc_name }}" + Name: "{{ openshift_aws_vpc_name }}" routes: - dest: 0.0.0.0/0 gateway_id: igw diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2 new file mode 100644 index 000000000..76aebdcea --- /dev/null +++ b/roles/openshift_aws/templates/user_data.j2 @@ -0,0 +1,26 @@ +{% if openshift_aws_node_user_data is defined and openshift_aws_node_user_data != '' %} +{{ openshift_aws_node_user_data }} +{% else %} +#cloud-config +write_files: +- path: /root/openshift_bootstrap/openshift_settings.yaml + owner: 'root:root' + permissions: '0640' + content: | + openshift_group_type: {{ openshift_aws_node_group_type }} +{% if openshift_aws_node_group_type != 'master' %} +- path: /etc/origin/node/bootstrap.kubeconfig + owner: 'root:root' + permissions: '0640' + encoding: b64 + content: {{ openshift_aws_launch_config_bootstrap_token | b64encode }} +{% endif %} +runcmd: +{% if openshift_aws_node_run_bootstrap_startup %} +- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml] +{% endif %} +{% if openshift_aws_node_group_type != 'master' %} +- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] +- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] +{% endif %} +{% endif %} diff --git a/roles/openshift_aws_ami_copy/README.md b/roles/openshift_aws_ami_copy/README.md deleted file mode 100644 index 111818451..000000000 --- a/roles/openshift_aws_ami_copy/README.md +++ /dev/null @@ -1,50 +0,0 @@ -openshift_aws_ami_perms -========= - -Ansible role for copying an AMI - -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- - -- openshift_aws_ami_copy_src_ami: source AMI id to copy from -- openshift_aws_ami_copy_region: region where the AMI is found -- openshift_aws_ami_copy_name: name to assign to new AMI -- openshift_aws_ami_copy_kms_arn: AWS IAM KMS arn of the key to use for encryption -- openshift_aws_ami_copy_tags: dict with desired tags -- openshift_aws_ami_copy_wait: wait for the ami copy to achieve available status. This fails due to boto waiters. 
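For orientation, these variables map more or less one-to-one onto Ansible's `ec2_ami_copy` module; a minimal sketch with placeholder values (the role's task file, which appears a little further down in this diff, makes essentially this call):

```yaml
- name: copy an AMI, optionally encrypting it (illustrative sketch)
  ec2_ami_copy:
    region: us-east-1               # openshift_aws_ami_copy_region
    source_region: us-east-1        # this role copies within the same region
    name: myami-copy                # openshift_aws_ami_copy_name
    source_image_id: ami-1234       # openshift_aws_ami_copy_src_ami
    encrypted: True
    kms_key_id: "arn:xxxx"          # openshift_aws_ami_copy_kms_arn (placeholder)
    tags: {}                        # openshift_aws_ami_copy_tags
  register: copy_result
```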
- -Dependencies ------------- - - -Example Playbook ----------------- -```yaml - - name: copy the ami for encrypted disks - include_role: - name: openshift_aws_ami_copy - vars: - r_openshift_aws_ami_copy_region: us-east-1 - r_openshift_aws_ami_copy_name: myami - r_openshift_aws_ami_copy_src_ami: ami-1234 - r_openshift_aws_ami_copy_kms_arn: arn:xxxx - r_openshift_aws_ami_copy_tags: {} - r_openshift_aws_ami_copy_encrypt: False - -``` - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_ami_copy/tasks/main.yml b/roles/openshift_aws_ami_copy/tasks/main.yml deleted file mode 100644 index bcccd4042..000000000 --- a/roles/openshift_aws_ami_copy/tasks/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- fail: - msg: "{{ item }} needs to be defined" - when: item is not defined - with_items: - - r_openshift_aws_ami_copy_src_ami - - r_openshift_aws_ami_copy_name - - r_openshift_aws_ami_copy_region - -- name: "Create copied AMI image and wait: {{ r_openshift_aws_ami_copy_wait | default(False) }}" - ec2_ami_copy: - region: "{{ r_openshift_aws_ami_copy_region }}" - source_region: "{{ r_openshift_aws_ami_copy_region }}" - name: "{{ r_openshift_aws_ami_copy_name }}" - source_image_id: "{{ r_openshift_aws_ami_copy_src_ami }}" - encrypted: "{{ r_openshift_aws_ami_copy_encrypt | default(False) }}" - kms_key_id: "{{ r_openshift_aws_ami_copy_kms_arn | default(omit) }}" - wait: "{{ r_openshift_aws_ami_copy_wait | default(omit) }}" - tags: "{{ r_openshift_aws_ami_copy_tags }}" - register: copy_result - -- debug: var=copy_result - -- name: return AMI ID with setfact - openshift_aws_ami_copy_retval_custom_ami - set_fact: - r_openshift_aws_ami_copy_retval_custom_ami: "{{ copy_result.image_id }}" diff --git a/roles/openshift_aws_elb/README.md b/roles/openshift_aws_elb/README.md deleted file mode 100644 index ecc45fa14..000000000 --- a/roles/openshift_aws_elb/README.md +++ /dev/null @@ -1,75 +0,0 @@ -openshift_aws_elb -========= - -Ansible role to provision and manage AWS ELB's for Openshift. - -Requirements ------------- - -Ansible Modules: - -- ec2_elb -- ec2_elb_lb - -python package: - -python-boto - -Role Variables --------------- - -- r_openshift_aws_elb_instances: instances to put in ELB -- r_openshift_aws_elb_elb_name: name of elb -- r_openshift_aws_elb_security_group_names: list of SGs (by name) that the ELB will belong to -- r_openshift_aws_elb_region: AWS Region -- r_openshift_aws_elb_health_check: definition of the ELB health check. See ansible docs for ec2_elb -```yaml - ping_protocol: tcp - ping_port: 443 - response_timeout: 5 - interval: 30 - unhealthy_threshold: 2 - healthy_threshold: 2 -``` -- r_openshift_aws_elb_listeners: definition of the ELB listeners. 
See ansible docs for ec2_elb -```yaml -- protocol: tcp - load_balancer_port: 80 - instance_protocol: ssl - instance_port: 443 -- protocol: ssl - load_balancer_port: 443 - instance_protocol: ssl - instance_port: 443 - # ssl certificate required for https or ssl - ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}" -``` - -Dependencies ------------- - - -Example Playbook ----------------- -```yaml -- include_role: - name: openshift_aws_elb - vars: - r_openshift_aws_elb_instances: aws_instances_to_put_in_elb - r_openshift_aws_elb_elb_name: elb_name - r_openshift_aws_elb_security_groups: security_group_names - r_openshift_aws_elb_region: aws_region - r_openshift_aws_elb_health_check: "{{ elb_health_check_definition }}" - r_openshift_aws_elb_listeners: "{{ elb_listeners_definition }}" -``` - - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_elb/defaults/main.yml b/roles/openshift_aws_elb/defaults/main.yml deleted file mode 100644 index ed5d38079..000000000 --- a/roles/openshift_aws_elb/defaults/main.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -r_openshift_aws_elb_health_check: - ping_protocol: tcp - ping_port: 443 - response_timeout: 5 - interval: 30 - unhealthy_threshold: 2 - healthy_threshold: 2 - -r_openshift_aws_elb_cert_arn: '' - -r_openshift_aws_elb_listeners: - master: - external: - - protocol: tcp - load_balancer_port: 80 - instance_protocol: ssl - instance_port: 443 - - protocol: ssl - load_balancer_port: 443 - instance_protocol: ssl - instance_port: 443 - # ssl certificate required for https or ssl - ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}" - internal: - - protocol: tcp - load_balancer_port: 80 - instance_protocol: tcp - instance_port: 80 - - protocol: tcp - load_balancer_port: 443 - instance_protocol: tcp - instance_port: 443 diff --git a/roles/openshift_aws_elb/meta/main.yml b/roles/openshift_aws_elb/meta/main.yml deleted file mode 100644 index 58be652a5..000000000 --- a/roles/openshift_aws_elb/meta/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -galaxy_info: - author: OpenShift - description: Openshift ELB provisioning - company: Red Hat, Inc - license: ASL 2.0 - min_ansible_version: 1.2 - platforms: - - name: EL - versions: - - 7 -dependencies: [] diff --git a/roles/openshift_aws_elb/tasks/main.yml b/roles/openshift_aws_elb/tasks/main.yml deleted file mode 100644 index 64ec18545..000000000 --- a/roles/openshift_aws_elb/tasks/main.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- name: fetch the default subnet id - ec2_remote_facts: - region: "{{ r_openshift_aws_elb_region }}" - filters: "{{ r_openshift_aws_elb_instance_filter }}" - register: instancesout - -- name: fetch the default subnet id - ec2_vpc_subnet_facts: - region: "{{ r_openshift_aws_elb_region }}" - filters: - "tag:Name": "{{ r_openshift_aws_elb_subnet_name }}" - register: subnetout - -- name: - debug: - msg: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction] - if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type - else r_openshift_aws_elb_listeners }}" - -- name: "Create ELB {{ r_openshift_aws_elb_name }}" - ec2_elb_lb: - name: "{{ r_openshift_aws_elb_name }}" - state: present - security_group_names: "{{ r_openshift_aws_elb_security_groups }}" - idle_timeout: "{{ r_openshift_aws_elb_idle_timout }}" - region: "{{ r_openshift_aws_elb_region }}" - subnets: - - "{{ subnetout.subnets[0].id }}" - health_check: "{{ r_openshift_aws_elb_health_check }}" - listeners: "{{ 
r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction] - if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type - else r_openshift_aws_elb_listeners }}" - scheme: "{{ r_openshift_aws_elb_scheme }}" - tags: - KubernetesCluster: "{{ r_openshift_aws_elb_clusterid }}" - register: new_elb - -# It is necessary to ignore_errors here because the instances are not in 'ready' -# state when first added to ELB -- name: "Add instances to ELB {{ r_openshift_aws_elb_name }}" - ec2_elb: - instance_id: "{{ item.id }}" - ec2_elbs: "{{ r_openshift_aws_elb_name }}" - state: present - region: "{{ r_openshift_aws_elb_region }}" - wait: False - with_items: "{{ instancesout.instances }}" - ignore_errors: True - retries: 10 - register: elb_call - until: elb_call|succeeded - -- debug: - msg: "{{ item }}" - with_items: - - "{{ new_elb }}" diff --git a/roles/openshift_aws_iam_kms/README.md b/roles/openshift_aws_iam_kms/README.md deleted file mode 100644 index 9468e785c..000000000 --- a/roles/openshift_aws_iam_kms/README.md +++ /dev/null @@ -1,43 +0,0 @@ -openshift_aws_iam_kms -========= - -Ansible role to create AWS IAM KMS keys for encryption - -Requirements ------------- - -Ansible Modules: - -oo_iam_kms - -Role Variables --------------- - -- r_openshift_aws_iam_kms_region: AWS region to create KMS key -- r_openshift_aws_iam_kms_alias: Alias name to assign to created KMS key - -Dependencies ------------- - -lib_utils - -Example Playbook ----------------- -```yaml -- include_role: - name: openshift_aws_iam_kms - vars: - r_openshift_aws_iam_kms_region: 'us-east-1' - r_openshift_aws_iam_kms_alias: 'alias/clusterABC_kms' -``` - - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_iam_kms/tasks/main.yml b/roles/openshift_aws_iam_kms/tasks/main.yml deleted file mode 100644 index 32aac2666..000000000 --- a/roles/openshift_aws_iam_kms/tasks/main.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- fail: - msg: "{{ item.name }} needs to be defined." - when: item.cond | bool - with_items: - - name: "{{ r_openshift_aws_iam_kms_alias }}" - cond: "{{ r_openshift_aws_iam_kms_alias is undefined }}" - - name: "{{ r_openshift_aws_iam_kms_region }}" - cond: "{{ r_openshift_aws_iam_kms_region is undefined }}" - -- name: Create IAM KMS key with alias - oo_iam_kms: - state: present - alias: "{{ r_openshift_aws_iam_kms_alias }}" - region: "{{ r_openshift_aws_iam_kms_region }}" - register: created_kms - -- debug: var=created_kms.results diff --git a/roles/openshift_aws_launch_config/README.md b/roles/openshift_aws_launch_config/README.md deleted file mode 100644 index 52b7e83b6..000000000 --- a/roles/openshift_aws_launch_config/README.md +++ /dev/null @@ -1,72 +0,0 @@ -openshift_aws_launch_config -========= - -Ansible role to create an AWS launch config for a scale group. - -This includes the AMI, volumes, user_data, etc. 
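For orientation, the heart of such a role is a single `ec2_lc` call that ties the AMI, volumes, and user_data together; a minimal sketch with placeholder values (the deleted task file below shows the real invocation, including the security-group lookup):

```yaml
- name: create a launch configuration (illustrative sketch)
  ec2_lc:
    name: mycluster-compute-1234567890
    region: us-east-1
    image_id: ami-1234                # the AMI, possibly an encrypted copy
    instance_type: m4.xlarge
    security_groups: [sg-1234]        # default, node-type, and _k8s groups
    user_data: "{{ lookup('template', 'user_data.j2') }}"   # rendered cloud-config
    volumes:
    - device_name: /dev/sdb
      volume_size: 100
      device_type: gp2
      delete_on_termination: False
    assign_public_ip: True
```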
- -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- -- r_openshift_aws_launch_config_name: "{{ launch_config_name }}" -- r_openshift_aws_launch_config_clusterid: "{{ clusterid }}" -- r_openshift_aws_launch_config_region: "{{ region }}" -- r_openshift_aws_launch_config: "{{ node_group_config }}" -```yaml - master: - instance_type: m4.xlarge - ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: False - health_check: - period: 60 - type: EC2 - min_size: 3 - max_size: 3 - desired_size: 3 - tags: - host-type: master - sub-host-type: default - wait_for_instances: True -``` -- r_openshift_aws_launch_config_type: compute -- r_openshift_aws_launch_config_custom_image: ami-xxxxx -- r_openshift_aws_launch_config_bootstrap_token: <string of kubeconfig> - -Dependencies ------------- - - -Example Playbook ----------------- -```yaml - - name: create compute nodes config - include_role: - name: openshift_aws_launch_config - vars: - r_openshift_aws_launch_config_name: "{{ launch_config_name }}" - r_openshift_aws_launch_config_clusterid: "{{ clusterid }}" - r_openshift_aws_launch_config_region: "{{ region }}" - r_openshift_aws_launch_config: "{{ node_group_config }}" - r_openshift_aws_launch_config_type: compute - r_openshift_aws_launch_config_custom_image: ami-1234 - r_openshift_aws_launch_config_bootstrap_token: abcd -``` - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_launch_config/defaults/main.yml b/roles/openshift_aws_launch_config/defaults/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_aws_launch_config/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_aws_launch_config/meta/main.yml b/roles/openshift_aws_launch_config/meta/main.yml deleted file mode 100644 index e61670cc2..000000000 --- a/roles/openshift_aws_launch_config/meta/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -galaxy_info: - author: OpenShift - description: Openshift AWS VPC creation - company: Red Hat, Inc - license: ASL 2.0 - min_ansible_version: 2.3 - platforms: - - name: EL - versions: - - 7 -dependencies: [] diff --git a/roles/openshift_aws_launch_config/tasks/main.yml b/roles/openshift_aws_launch_config/tasks/main.yml deleted file mode 100644 index 437cf1f71..000000000 --- a/roles/openshift_aws_launch_config/tasks/main.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -- name: fail when params are not set - fail: - msg: Please specify the role parameters. 
- when: - - r_openshift_aws_launch_config_cluseterid is undefined - - r_openshift_aws_launch_config_type is undefined - - r_openshift_aws_launch_config_region is undefined - - r_openshift_aws_launch_config is undefined - -- name: fetch the security groups for launch config - ec2_group_facts: - filters: - group-name: - - "{{ r_openshift_aws_launch_config_clusterid }}" # default sg - - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}" # node type sg - - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}_k8s" # node type sg k8s - region: "{{ r_openshift_aws_launch_config_region }}" - register: ec2sgs - -# Create the scale group config -- name: Create the node scale group config - ec2_lc: - name: "{{ r_openshift_aws_launch_config_name }}" - region: "{{ r_openshift_aws_launch_config_region }}" - image_id: "{{ r_openshift_aws_launch_config_custom_image if 'ami-' in r_openshift_aws_launch_config_custom_image else r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].ami }}" - instance_type: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].instance_type }}" - security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}" - user_data: |- - #cloud-config - {% if r_openshift_aws_launch_config_type != 'master' %} - write_files: - - path: /root/csr_kubeconfig - owner: root:root - permissions: '0640' - content: {{ r_openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }} - - path: /root/openshift_settings - owner: root:root - permissions: '0640' - content: - openshift_type: "{{ r_openshift_aws_launch_config_type }}" - runcmd: - - [ systemctl, enable, atomic-openshift-node] - - [ systemctl, start, atomic-openshift-node] - {% endif %} - key_name: "{{ r_openshift_aws_launch_config.ssh_key_name }}" - ebs_optimized: False - volumes: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].volumes }}" - assign_public_ip: True - register: test diff --git a/roles/openshift_aws_launch_config/templates/cloud-init.j2 b/roles/openshift_aws_launch_config/templates/cloud-init.j2 deleted file mode 100644 index 1a1e29550..000000000 --- a/roles/openshift_aws_launch_config/templates/cloud-init.j2 +++ /dev/null @@ -1,9 +0,0 @@ -{% if r_openshift_aws_launch_config_bootstrap_token is defined and r_openshift_aws_launch_config_bootstrap_token is not '' %} -#cloud-config -write_files: -- path: /root/csr_kubeconfig - owner: root:root - permissions: '0640' - content: |- - {{ r_openshift_aws_launch_config_bootstrap_token }} -{% endif %} diff --git a/roles/openshift_aws_node_group/README.md b/roles/openshift_aws_node_group/README.md deleted file mode 100644 index c32c57bc5..000000000 --- a/roles/openshift_aws_node_group/README.md +++ /dev/null @@ -1,77 +0,0 @@ -openshift_aws_node_group -========= - -Ansible role to create an aws node group. - -This includes the security group, launch config, and scale group. 
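For orientation, the scale-group half of this boils down to one `ec2_asg` task; a minimal sketch with placeholder values (both the deleted task file below and the new consolidated `scale_group.yml` earlier in this diff do the same thing, driven by the node group config dict):

```yaml
- name: create an auto scaling group (illustrative sketch)
  ec2_asg:
    name: mycluster openshift compute
    launch_config_name: mycluster-compute-1234567890
    region: us-east-1
    min_size: 3
    max_size: 100
    desired_capacity: 3
    health_check_period: 60
    health_check_type: EC2
    vpc_zone_identifier: subnet-1234
    tags:
    - host-type: node
      sub-host-type: compute
```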
- -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- -```yaml -- r_openshift_aws_node_group_name: myscalegroup -- r_openshift_aws_node_group_clusterid: myclusterid -- r_openshift_aws_node_group_region: us-east-1 -- r_openshift_aws_node_group_lc_name: launch_config -- r_openshift_aws_node_group_type: master|infra|compute -- r_openshift_aws_node_group_config: "{{ node_group_config }}" -```yaml -master: - instance_type: m4.xlarge - ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: False - health_check: - period: 60 - type: EC2 - min_size: 3 - max_size: 3 - desired_size: 3 - tags: - host-type: master - sub-host-type: default - wait_for_instances: True -``` -- r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}" - -```yaml -us-east-1a # name of subnet -``` - -Dependencies ------------- - - -Example Playbook ----------------- -```yaml - - name: "create {{ openshift_build_node_type }} node groups" - include_role: - name: openshift_aws_node_group - vars: - r_openshift_aws_node_group_name: "{{ clusterid }} openshift compute" - r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}" - r_openshift_aws_node_group_clusterid: "{{ clusterid }}" - r_openshift_aws_node_group_region: "{{ region }}" - r_openshift_aws_node_group_config: "{{ node_group_config }}" - r_openshift_aws_node_group_type: compute - r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}" -``` - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_node_group/defaults/main.yml b/roles/openshift_aws_node_group/defaults/main.yml deleted file mode 100644 index 44c5116a1..000000000 --- a/roles/openshift_aws_node_group/defaults/main.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -r_openshift_aws_node_group_type: master - -r_openshift_aws_node_group_config: - tags: - clusterid: "{{ r_openshift_aws_node_group_clusterid }}" - master: - instance_type: m4.xlarge - ami: "{{ r_openshift_aws_node_group_ami }}" - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: False - health_check: - period: 60 - type: EC2 - min_size: 3 - max_size: 3 - desired_size: 3 - tags: - host-type: master - sub-host-type: default - wait_for_instances: True - compute: - instance_type: m4.xlarge - ami: "{{ r_openshift_aws_node_group_ami }}" - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: True - health_check: - period: 60 - type: EC2 - min_size: 3 - max_size: 100 - desired_size: 3 - tags: - host-type: node - sub-host-type: compute - infra: - instance_type: m4.xlarge - ami: "{{ r_openshift_aws_node_group_ami }}" - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: True - health_check: - period: 60 - type: EC2 - min_size: 2 - max_size: 20 - desired_size: 2 - tags: - host-type: node - sub-host-type: infra diff --git a/roles/openshift_aws_node_group/tasks/main.yml b/roles/openshift_aws_node_group/tasks/main.yml deleted file mode 100644 index 6f5364b03..000000000 --- a/roles/openshift_aws_node_group/tasks/main.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -- name: validate role inputs - fail: - msg: Please pass in the required role variables - when: - - r_openshift_aws_node_group_clusterid is not defined - - r_openshift_aws_node_group_region is not defined - - r_openshift_aws_node_group_subnet_name is not defined - -- 
name: fetch the subnet to use in scale group - ec2_vpc_subnet_facts: - region: "{{ r_openshift_aws_node_group_region }}" - filters: - "tag:Name": "{{ r_openshift_aws_node_group_subnet_name }}" - register: subnetout - -- name: Create the scale group - ec2_asg: - name: "{{ r_openshift_aws_node_group_name }}" - launch_config_name: "{{ r_openshift_aws_node_group_lc_name }}" - health_check_period: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.period }}" - health_check_type: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.type }}" - min_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].min_size }}" - max_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].max_size }}" - desired_capacity: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].desired_size }}" - region: "{{ r_openshift_aws_node_group_region }}" - termination_policies: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].termination_policy if 'termination_policy' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}" - load_balancers: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].elbs if 'elbs' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}" - wait_for_instances: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].wait_for_instances | default(False)}}" - vpc_zone_identifier: "{{ subnetout.subnets[0].id }}" - tags: - - "{{ r_openshift_aws_node_group_config.tags | combine(r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].tags) }}" diff --git a/roles/openshift_aws_s3/README.md b/roles/openshift_aws_s3/README.md deleted file mode 100644 index afafe61cf..000000000 --- a/roles/openshift_aws_s3/README.md +++ /dev/null @@ -1,43 +0,0 @@ -openshift_aws_s3 -========= - -Ansible role to create an s3 bucket - -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- - -- r_openshift_aws_s3_clusterid: myclusterid -- r_openshift_aws_s3_region: us-east-1 -- r_openshift_aws_s3_mode: create|delete - -Dependencies ------------- - - -Example Playbook ----------------- -```yaml -- name: create an s3 bucket - include_role: - name: openshift_aws_s3 - vars: - r_openshift_aws_s3_clusterid: mycluster - r_openshift_aws_s3_region: us-east-1 - r_openshift_aws_s3_mode: create -``` - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_s3/tasks/main.yml b/roles/openshift_aws_s3/tasks/main.yml deleted file mode 100644 index 46bd781bd..000000000 --- a/roles/openshift_aws_s3/tasks/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Create an s3 bucket - s3: - bucket: "{{ r_openshift_aws_s3_clusterid }}" - mode: "{{ r_openshift_aws_s3_mode }}" - region: "{{ r_openshift_aws_s3_region }}" diff --git a/roles/openshift_aws_sg/README.md b/roles/openshift_aws_sg/README.md deleted file mode 100644 index eeb76bbb6..000000000 --- a/roles/openshift_aws_sg/README.md +++ /dev/null @@ -1,59 +0,0 @@ -openshift_aws_sg -========= - -Ansible role to create an aws security groups - -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- - -- r_openshift_aws_sg_clusterid: myclusterid -- r_openshift_aws_sg_region: us-east-1 -- r_openshift_aws_sg_type: master|infra|compute -```yaml -# defaults/main.yml - default: - name: "{{ r_openshift_aws_sg_clusterid }}" - desc: "{{ 
r_openshift_aws_sg_clusterid }} default" - rules: - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 0.0.0.0/0 - - proto: all - from_port: all - to_port: all - group_name: "{{ r_openshift_aws_sg_clusterid }}" -``` - - -Dependencies ------------- - - -Example Playbook ----------------- -```yaml -- name: create security groups for master - include_role: - name: openshift_aws_sg - vars: - r_openshift_aws_sg_clusterid: mycluster - r_openshift_aws_sg_region: us-east-1 - r_openshift_aws_sg_type: master -``` - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_sg/defaults/main.yml b/roles/openshift_aws_sg/defaults/main.yml deleted file mode 100644 index 9c480d337..000000000 --- a/roles/openshift_aws_sg/defaults/main.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -r_openshift_aws_sg_sg: - default: - name: "{{ r_openshift_aws_sg_clusterid }}" - desc: "{{ r_openshift_aws_sg_clusterid }} default" - rules: - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 0.0.0.0/0 - - proto: all - from_port: all - to_port: all - group_name: "{{ r_openshift_aws_sg_clusterid }}" - master: - name: "{{ r_openshift_aws_sg_clusterid }}_master" - desc: "{{ r_openshift_aws_sg_clusterid }} master instances" - rules: - - proto: tcp - from_port: 80 - to_port: 80 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 443 - to_port: 443 - cidr_ip: 0.0.0.0/0 - compute: - name: "{{ r_openshift_aws_sg_clusterid }}_compute" - desc: "{{ r_openshift_aws_sg_clusterid }} compute node instances" - infra: - name: "{{ r_openshift_aws_sg_clusterid }}_infra" - desc: "{{ r_openshift_aws_sg_clusterid }} infra node instances" - rules: - - proto: tcp - from_port: 80 - to_port: 80 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 443 - to_port: 443 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 30000 - to_port: 32000 - cidr_ip: 0.0.0.0/0 - etcd: - name: "{{ r_openshift_aws_sg_clusterid }}_etcd" - desc: "{{ r_openshift_aws_sg_clusterid }} etcd instances" diff --git a/roles/openshift_aws_sg/tasks/main.yml b/roles/openshift_aws_sg/tasks/main.yml deleted file mode 100644 index 2294fdcc9..000000000 --- a/roles/openshift_aws_sg/tasks/main.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -- name: Validate role inputs - fail: - msg: Please ensure to pass the correct variables - when: - - r_openshift_aws_sg_region is undefined - - r_openshift_aws_sg_region is undefined - - -- name: Fetch the VPC for vpc.id - ec2_vpc_net_facts: - region: "{{ r_openshift_aws_sg_region }}" - filters: - "tag:Name": "{{ r_openshift_aws_sg_clusterid }}" - register: vpcout - -- name: Create default security group for cluster - ec2_group: - name: "{{ r_openshift_aws_sg_sg.default.name }}" - description: "{{ r_openshift_aws_sg_sg.default.desc }}" - region: "{{ r_openshift_aws_sg_region }}" - vpc_id: "{{ vpcout.vpcs[0].id }}" - rules: "{{ r_openshift_aws_sg_sg.default.rules | default(omit, True)}}" - register: sg_default_created - -- name: create the node group sgs - ec2_group: - name: "{{ item.name}}" - description: "{{ item.desc }}" - rules: "{{ item.rules if 'rules' in item else [] }}" - region: "{{ r_openshift_aws_sg_region }}" - vpc_id: "{{ vpcout.vpcs[0].id }}" - register: sg_create - with_items: - - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type]}}" - -- name: create the k8s sgs for the node group - ec2_group: - name: "{{ item.name }}_k8s" - description: "{{ item.desc }} for k8s" - region: "{{ r_openshift_aws_sg_region }}" - vpc_id: "{{ vpcout.vpcs[0].id }}" - register: k8s_sg_create - with_items: - - "{{ 
r_openshift_aws_sg_sg[r_openshift_aws_sg_type] }}" - -- name: tag sg groups with proper tags - ec2_tag: - tags: - KubernetesCluster: "{{ r_openshift_aws_sg_clusterid }}" - resource: "{{ item.group_id }}" - region: "{{ r_openshift_aws_sg_region }}" - with_items: "{{ k8s_sg_create.results }}" diff --git a/roles/openshift_aws_ssh_keys/README.md b/roles/openshift_aws_ssh_keys/README.md deleted file mode 100644 index 4f8667918..000000000 --- a/roles/openshift_aws_ssh_keys/README.md +++ /dev/null @@ -1,49 +0,0 @@ -openshift_aws_ssh_keys -========= - -Ansible role for sshind SSH keys - -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- - -- r_openshift_aws_ssh_keys_users: list of dicts of users -- r_openshift_aws_ssh_keys_region: ec2_region to install the keys - -Dependencies ------------- - - -Example Playbook ----------------- -```yaml -users: -- username: user1 - pub_key: <user1 ssh public key> -- username: user2 - pub_key: <user2 ssh public key> - -region: us-east-1 - -- include_role: - name: openshift_aws_ssh_keys - vars: - r_openshift_aws_ssh_keys_users: "{{ users }}" - r_openshift_aws_ssh_keys_region: "{{ region }}" -``` - - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_vpc/README.md b/roles/openshift_aws_vpc/README.md deleted file mode 100644 index d88cf0581..000000000 --- a/roles/openshift_aws_vpc/README.md +++ /dev/null @@ -1,62 +0,0 @@ -openshift_aws_vpc -========= - -Ansible role to create a default AWS VPC - -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- - -- r_openshift_aws_vpc_clusterid: "{{ clusterid }}" -- r_openshift_aws_vpc_cidr: 172.31.48.0/20 -- r_openshift_aws_vpc_subnets: "{{ subnets }}" -```yaml - subnets: - us-east-1: # These are us-east-1 region defaults. 
Ensure this matches your region - - cidr: 172.31.48.0/20 - az: "us-east-1c" - - cidr: 172.31.32.0/20 - az: "us-east-1e" - - cidr: 172.31.16.0/20 - az: "us-east-1a" -``` -- r_openshift_aws_vpc_region: "{{ region }}" -- r_openshift_aws_vpc_tags: dict of tags to apply to vpc -- r_openshift_aws_vpc_name: "{{ vpc_name | default(clusterid) }}" - -Dependencies ------------- - - -Example Playbook ----------------- - -```yaml - - name: create default vpc - include_role: - name: openshift_aws_vpc - vars: - r_openshift_aws_vpc_clusterid: mycluster - r_openshift_aws_vpc_cidr: 172.31.48.0/20 - r_openshift_aws_vpc_subnets: "{{ subnets }}" - r_openshift_aws_vpc_region: us-east-1 - r_openshift_aws_vpc_tags: {} - r_openshift_aws_vpc_name: mycluster - -``` - - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_vpc/defaults/main.yml b/roles/openshift_aws_vpc/defaults/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_aws_vpc/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_ca/defaults/main.yml b/roles/openshift_ca/defaults/main.yml index ecfcc88b3..742b15df4 100644 --- a/roles/openshift_ca/defaults/main.yml +++ b/roles/openshift_ca/defaults/main.yml @@ -1,3 +1,11 @@ --- openshift_ca_cert_expire_days: 1825 openshift_master_cert_expire_days: 730 + +openshift_ca_config_dir: "{{ openshift.common.config_base }}/master" +openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt" +openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key" +openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt" +openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig" + +openshift_version: "{{ openshift_pkg_version | default('') }}" diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml index dfbdf0cc7..f8b784a63 100644 --- a/roles/openshift_ca/meta/main.yml +++ b/roles/openshift_ca/meta/main.yml @@ -14,4 +14,3 @@ galaxy_info: - system dependencies: - role: openshift_cli -- role: openshift_named_certificates diff --git a/roles/openshift_ca/vars/main.yml b/roles/openshift_ca/vars/main.yml index d04c1766d..4d80bf921 100644 --- a/roles/openshift_ca/vars/main.yml +++ b/roles/openshift_ca/vars/main.yml @@ -1,9 +1,2 @@ --- -openshift_ca_config_dir: "{{ openshift.common.config_base }}/master" -openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt" -openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key" -openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt" -openshift_version: "{{ openshift_pkg_version | default('') }}" - -openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig" loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}" diff --git a/roles/openshift_cfme/README.md b/roles/openshift_cfme/README.md deleted file mode 100644 index 8283afed6..000000000 --- a/roles/openshift_cfme/README.md +++ /dev/null @@ -1,404 +0,0 @@ -# OpenShift-Ansible - CFME Role - -# PROOF OF CONCEPT - Alpha Version - -This role is based on the work in the upstream -[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods) -project. For additional literature on configuration specific to -ManageIQ (optional post-installation tasks), visit the project's -[upstream documentation page](http://manageiq.org/docs/get-started/basic-configuration). 
- -Please submit a -[new issue](https://github.com/openshift/openshift-ansible/issues/new) -if you run into bugs with this role or wish to request enhancements. - -# Important Notes - -This is an early *proof of concept* role to install the Cloud Forms -Management Engine (ManageIQ) on OpenShift Container Platform (OCP). - -* This role is still in **ALPHA STATUS** -* Many options are hard-coded still (ex: NFS setup) -* Not many configurable options yet -* **Should** be ran on a dedicated cluster -* **Will not run** on undersized infra -* The terms *CFME* and *MIQ* / *ManageIQ* are interchangeable - -## Requirements - -**NOTE:** These requirements are copied from the upstream -[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods) -project. - -### Prerequisites: - -* - [OpenShift Origin 1.5](https://docs.openshift.com/container-platform/3.5/welcome/index.html) - or - [higher](https://docs.openshift.com/container-platform/latest/welcome/index.html) - provisioned -* NFS or other compatible volume provider -* A cluster-admin user (created by role if required) - -### Cluster Sizing - -In order to avoid random deployment failures due to resource -starvation, we recommend a minimum cluster size for a **test** -environment. - -| Type | Size | CPUs | Memory | -|----------------|---------|----------|----------| -| Masters | `1+` | `8` | `12GB` | -| Nodes | `2+` | `4` | `8GB` | -| PV Storage | `25GB` | `N/A` | `N/A` | - - -![Basic CFME Deployment](img/CFMEBasicDeployment.png) - -**CFME has hard-requirements for memory. CFME will NOT install if your - infrastructure does not meet or exceed the requirements given - above. Do not run this playbook if you do not have the required - memory, you will just waste your time.** - - -### Other sizing considerations - -* Recommendations assume MIQ will be the **only application running** - on this cluster. -* Alternatively, you can provision an infrastructure node to run - registry/metrics/router/logging pods. -* Each MIQ application pod will consume at least `3GB` of RAM on initial - deployment (blank deployment without providers). -* RAM consumption will ramp up higher depending on appliance use, once - providers are added expect higher resource consumption. - - -### Assumptions - -1) You meet/exceed the [cluster sizing](#cluster-sizing) requirements -1) Your NFS server is on your master host -1) Your PV backing NFS storage volume is mounted on `/exports/` - -Required directories that NFS will export to back the PVs: - -* `/exports/miq-pv0[123]` - -If the required directories are not present at install-time, they will -be created using the recommended permissions per the -[upstream documentation](https://github.com/ManageIQ/manageiq-pods#make-persistent-volumes-to-host-the-miq-database-and-application-data): - -* UID/GID: `root`/`root` -* Mode: `0775` - -**IMPORTANT:** If you are using a separate volume (`/dev/vdX`) for NFS - storage, **ensure** it is mounted on `/exports/` **before** running - this role. - - - -## Role Variables - -Core variables in this role: - -| Name | Default value | Description | -|-------------------------------|---------------|---------------| -| `openshift_cfme_install_app` | `False` | `True`: Install everything and create a new CFME app, `False`: Just install all of the templates and scaffolding | - - -Variables you may override have defaults defined in -[defaults/main.yml](defaults/main.yml). - - -# Important Notes - -This is a **tech preview** status role presently. 
Use it with the same -caution you would give any other pre-release software. - -**Most importantly** follow this one rule: don't re-run the entrypoint -playbook multiple times in a row without cleaning up after previous -runs if some of the CFME steps have ran. This is a known -flake. Cleanup instructions are provided at the bottom of this README. - - -# Usage - -This section describes the basic usage of this role. All parameters -will use their [default values](defaults/main.yml). - -## Pre-flight Checks - -**IMPORTANT:** As documented above in [the prerequisites](#prerequisites), - you **must already** have your OCP cluster up and running. - -**Optional:** The ManageIQ pod is fairly large (about 1.7 GB) so to -save some spin-up time post-deployment, you can begin pre-pulling the -docker image to each of your nodes now: - -``` -root@node0x # docker pull docker.io/manageiq/manageiq-pods:app-latest-fine -``` - -## Getting Started - -1) The *entry point playbook* to install CFME is located in -[the BYO playbooks](../../playbooks/byo/openshift-cfme/config.yml) -directory - -2) Update your existing `hosts` inventory file and ensure the -parameter `openshift_cfme_install_app` is set to `True` under the -`[OSEv3:vars]` block. - -2) Using your existing `hosts` inventory file, run `ansible-playbook` -with the entry point playbook: - -``` -$ ansible-playbook -v -i <INVENTORY_FILE> playbooks/byo/openshift-cfme/config.yml -``` - -## Next Steps - -Once complete, the playbook will let you know: - - -``` -TASK [openshift_cfme : Status update] ********************************************************* -ok: [ho.st.na.me] => { - "msg": "CFME has been deployed. Note that there will be a delay before it is fully initialized.\n" -} -``` - -This will take several minutes (*possibly 10 or more*, depending on -your network connection). However, you can get some insight into the -deployment process during initialization. - -### oc describe pod manageiq-0 - -*Some useful information about the output you will see if you run the -`oc describe pod manageiq-0` command* - -**Readiness probe**s - These will take a while to become -`Healthy`. The initial health probes won't even happen for at least 8 -minutes depending on how long it takes you to pull down the large -images. ManageIQ is a large application so it may take a considerable -amount of time for it to deploy and be marked as `Healthy`. - -If you go to the node you know the application is running on (check -for `Successfully assigned manageiq-0 to <HOST|IP>` in the `describe` -output) you can run a `docker pull` command to monitor the progress of -the image pull: - -``` -[root@cfme-node ~]# docker pull docker.io/manageiq/manageiq-pods:app-latest-fine -Trying to pull repository docker.io/manageiq/manageiq-pods ... -sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a: Pulling from docker.io/manageiq/manageiq-pods -Digest: sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a -Status: Image is up to date for docker.io/manageiq/manageiq-pods:app-latest-fine -``` - -The example above demonstrates the case where the image has been -successfully pulled already. - -If the image isn't completely pulled already then you will see -multiple progress bars detailing each image layer download status. 
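If you would rather not watch `docker` directly, a simple alternative (assuming the default pod name) is to watch the pod from the master until the **READY** column flips to `1/1`:

```
[root@cfme-master01 ~]# oc get pod manageiq-0 -w
```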
- - -### rsh - -*Useful inspection/progress monitoring techniques with the `oc rsh` -command.* - - -On your master node, switch to the `cfme` project (or whatever you -named it if you overrode the `openshift_cfme_project` variable) and -check on the pod states: - -``` -[root@cfme-master01 ~]# oc project cfme -Now using project "cfme" on server "https://10.10.0.100:8443". - -[root@cfme-master01 ~]# oc get pod -NAME READY STATUS RESTARTS AGE -manageiq-0 0/1 Running 0 14m -memcached-1-3lk7g 1/1 Running 0 14m -postgresql-1-12slb 1/1 Running 0 14m -``` - -Note how the `manageiq-0` pod says `0/1` under the **READY** -column. After some time (depending on your network connection) you'll -be able to `rsh` into the pod to find out more of what's happening in -real time. First, the easy-mode command, run this once `rsh` is -available and then watch until it says `Started Initialize Appliance -Database`: - -``` -[root@cfme-master01 ~]# oc rsh manageiq-0 journalctl -f -u appliance-initialize.service -``` - -For the full explanation of what this means, and more interactive -inspection techniques, keep reading on. - -To obtain a shell on our `manageiq` pod we use this command: - -``` -[root@cfme-master01 ~]# oc rsh manageiq-0 bash -l -``` - -The `rsh` command opens a shell in your pod for you. In this case it's -the pod called `manageiq-0`. `systemd` is managing the services in -this pod so we can use the `list-units` command to see what is running -currently: `# systemctl list-units | grep appliance`. - -If you see the `appliance-initialize` service running, this indicates -that basic setup is still in progress. We can monitor the process with -the `journalctl` command like so: - - -``` -[root@manageiq-0 vmdb]# journalctl -f -u appliance-initialize.service -Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking deployment status == -Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: No pre-existing EVM configuration found on region PV -Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking for existing data on server PV == -Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Starting New Deployment == -Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Applying memcached config == -Jun 14 14:55:53 manageiq-0 appliance-initialize.sh[58]: == Initializing Appliance == -Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: create encryption key -Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: configuring external database -Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: Checking for connections to the database... 
-Jun 14 14:56:09 manageiq-0 appliance-initialize.sh[58]: Create region starting -Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: Create region complete -Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data == -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data backup == -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sending incremental file list -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: created directory /persistent/server-deploy/backup/backup_2017_06_14_145816 -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/REGION -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/v2_key -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/database.yml -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/ -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/GUID -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sent 1330 bytes received 136 bytes 2932.00 bytes/sec -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: total size is 770 speedup is 0.53 -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Restoring PV data symlinks == -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/REGION symlink is already in place, skipping -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/config/database.yml symlink is already in place, skipping -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/certs/v2_key symlink is already in place, skipping -Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/log symlink is already in place, skipping -Jun 14 14:58:28 manageiq-0 systemctl[304]: Removed symlink /etc/systemd/system/multi-user.target.wants/appliance-initialize.service. -Jun 14 14:58:29 manageiq-0 systemd[1]: Started Initialize Appliance Database. -``` - -Most of what we see here (above) is the initial database seeding -process. This process isn't very quick, so be patient. - -At the bottom of the log there is a special line from the `systemctl` -service, `Removed symlink -/etc/systemd/system/multi-user.target.wants/appliance-initialize.service`. The -`appliance-initialize` service is no longer marked as enabled. This -indicates that the base application initialization is complete now. - -We're not done yet though, there are other ancillary services which -run in this pod to support the application. 
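Before inspecting those ancillary services, you can double-check that the one-shot initialization really is finished; inside the same `rsh` session, `systemctl is-enabled` should now report `disabled` for the unit whose symlink was removed above:

```
[root@manageiq-0 vmdb]# systemctl is-enabled appliance-initialize.service
```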
*Still in the rsh shell*, -Use the `ps` command to monitor for the `httpd` processes -starting. You will see output similar to the following when that stage -has completed: - -``` -[root@manageiq-0 vmdb]# ps aux | grep http -root 1941 0.0 0.1 249820 7640 ? Ss 15:02 0:00 /usr/sbin/httpd -DFOREGROUND -apache 1942 0.0 0.0 250752 6012 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND -apache 1943 0.0 0.0 250472 5952 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND -apache 1944 0.0 0.0 250472 5916 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND -apache 1945 0.0 0.0 250360 5764 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND -``` - -Furthermore, you can find other related processes by just looking for -ones with `MIQ` in their name: - -``` -[root@manageiq-0 vmdb]# ps aux | grep miq -root 333 27.7 4.2 555884 315916 ? Sl 14:58 3:59 MIQ Server -root 1976 0.6 4.0 507224 303740 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 1, queue: generic -root 1984 0.6 4.0 507224 304312 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 2, queue: generic -root 1992 0.9 4.0 508252 304888 ? SNl 15:02 0:05 MIQ: MiqPriorityWorker id: 3, queue: generic -root 2000 0.7 4.0 510308 304696 ? SNl 15:02 0:04 MIQ: MiqPriorityWorker id: 4, queue: generic -root 2008 1.2 4.0 514000 303612 ? SNl 15:02 0:07 MIQ: MiqScheduleWorker id: 5 -root 2026 0.2 4.0 517504 303644 ? SNl 15:02 0:01 MIQ: MiqEventHandler id: 6, queue: ems -root 2036 0.2 4.0 518532 303768 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 7, queue: reporting -root 2044 0.2 4.0 519560 303812 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 8, queue: reporting -root 2059 0.2 4.0 528372 303956 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:5000) [MIQ: Web Server Worker] -root 2067 0.9 4.0 529664 305716 ? SNl 15:02 0:05 puma 3.3.0 (tcp://127.0.0.1:3000) [MIQ: Web Server Worker] -root 2075 0.2 4.0 529408 304056 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:4000) [MIQ: Web Server Worker] -root 2329 0.0 0.0 10640 972 ? S+ 15:13 0:00 grep --color=auto -i miq -``` - -Finally, *still in the rsh shell*, to test if the application is -running correctly, we can request the application homepage. If the -page is available the page title will be `ManageIQ: Login`: - -``` -[root@manageiq-0 vmdb]# curl -s -k https://localhost | grep -A2 '<title>' -<title> -ManageIQ: Login -</title> -``` - -**Note:** The `-s` flag makes `curl` operations silent and the `-k` -flag to ignore errors about untrusted certificates. - - - -# Additional Upstream Resources - -Below are some useful resources from the upstream project -documentation. You may find these of value. - -* [Verify Setup Was Successful](https://github.com/ManageIQ/manageiq-pods#verifying-the-setup-was-successful) -* [POD Access And Routes](https://github.com/ManageIQ/manageiq-pods#pod-access-and-routes) -* [Troubleshooting](https://github.com/ManageIQ/manageiq-pods#troubleshooting) - - -# Manual Cleanup - -At this time uninstallation/cleanup is still a manual process. You -will have to follow a few steps to fully remove CFME from your -cluster. - -Delete the project: - -* `oc delete project cfme` - -Delete the PVs: - -* `oc delete pv miq-pv01` -* `oc delete pv miq-pv02` -* `oc delete pv miq-pv03` - -Clean out the old PV data: - -* `cd /exports/` -* `find miq* -type f -delete` -* `find miq* -type d -delete` - -Remove the NFS exports: - -* `rm /etc/exports.d/openshift_cfme.exports` -* `exportfs -ar` - -Delete the user: - -* `oc delete user cfme` - -**NOTE:** The `oc delete project cfme` command will return quickly -however it will continue to operate in the background. 
Continue -running `oc get project` after you've completed the other steps to -monitor the pods and final project termination progress. diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml deleted file mode 100644 index 393bee1f3..000000000 --- a/roles/openshift_cfme/defaults/main.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -# Namespace for the CFME project (Note: changed post-3.6 to use -# reserved 'openshift-' namespace prefix) -openshift_cfme_project: openshift-cfme -# Namespace/project description -openshift_cfme_project_description: ManageIQ - CloudForms Management Engine -# Basic user assigned the `admin` role for the project -openshift_cfme_user: cfme -# Project system account for enabling privileged pods -openshift_cfme_service_account: "system:serviceaccount:{{ openshift_cfme_project }}:default" -# All the required exports -openshift_cfme_pv_exports: - - miq-pv01 - - miq-pv02 - - miq-pv03 -# PV template files and their created object names -openshift_cfme_pv_data: - - pv_name: miq-pv01 - pv_template: miq-pv-db.yaml - pv_label: CFME DB PV - - pv_name: miq-pv02 - pv_template: miq-pv-region.yaml - pv_label: CFME Region PV - - pv_name: miq-pv03 - pv_template: miq-pv-server.yaml - pv_label: CFME Server PV - -# Tuning parameter to use more than 5 images at once from an ImageStream -openshift_cfme_maxImagesBulkImportedPerRepository: 100 -# Hostname/IP of the NFS server. Currently defaults to first master -openshift_cfme_nfs_server: "{{ groups.nfs.0 }}" -# TODO: Refactor '_install_app' variable. This is just for testing but -# maybe in the future it should control the entire yes/no for CFME. -# -# Whether or not the manageiq app should be initialized ('oc new-app -# --template=manageiq). If False everything UP TO 'new-app' is ran. 
-openshift_cfme_install_app: False -# Docker image to pull -openshift_cfme_application_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-app' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}" -openshift_cfme_postgresql_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}" -openshift_cfme_memcached_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-memcached' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}" -openshift_cfme_application_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'app-latest-fine' }}" -openshift_cfme_memcached_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'memcached-latest-fine' }}" -openshift_cfme_postgresql_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'postgresql-latest-fine' }}" diff --git a/roles/openshift_cfme/files/miq-template.yaml b/roles/openshift_cfme/files/miq-template.yaml deleted file mode 100644 index 8f0d2af38..000000000 --- a/roles/openshift_cfme/files/miq-template.yaml +++ /dev/null @@ -1,566 +0,0 @@ ---- -path: /tmp/miq-template-out -data: - apiVersion: v1 - kind: Template - labels: - template: manageiq - metadata: - name: manageiq - annotations: - description: "ManageIQ appliance with persistent storage" - tags: "instant-app,manageiq,miq" - iconClass: "icon-rails" - objects: - - apiVersion: v1 - kind: Secret - metadata: - name: "${NAME}-secrets" - stringData: - pg-password: "${DATABASE_PASSWORD}" - - apiVersion: v1 - kind: Service - metadata: - annotations: - description: "Exposes and load balances ManageIQ pods" - service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' - name: ${NAME} - spec: - clusterIP: None - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - name: ${NAME} - - apiVersion: v1 - kind: Route - metadata: - name: ${NAME} - spec: - host: ${APPLICATION_DOMAIN} - port: - targetPort: https - tls: - termination: passthrough - to: - kind: Service - name: ${NAME} - - apiVersion: v1 - kind: ImageStream - metadata: - name: miq-app - annotations: - description: "Keeps track of the ManageIQ image changes" - spec: - dockerImageRepository: "${APPLICATION_IMG_NAME}" - - apiVersion: v1 - kind: ImageStream - metadata: - name: miq-postgresql - annotations: - description: "Keeps track of the PostgreSQL image changes" - spec: - dockerImageRepository: "${POSTGRESQL_IMG_NAME}" - - apiVersion: v1 - kind: ImageStream - metadata: - name: miq-memcached - annotations: - description: "Keeps track of the Memcached image changes" - spec: - dockerImageRepository: "${MEMCACHED_IMG_NAME}" - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: "${NAME}-${DATABASE_SERVICE_NAME}" - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: ${DATABASE_VOLUME_CAPACITY} - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: "${NAME}-region" - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: ${APPLICATION_REGION_VOLUME_CAPACITY} - - apiVersion: apps/v1beta1 - kind: "StatefulSet" - metadata: - name: ${NAME} - annotations: - 
description: "Defines how to deploy the ManageIQ appliance" - spec: - serviceName: "${NAME}" - replicas: "${APPLICATION_REPLICA_COUNT}" - template: - metadata: - labels: - name: ${NAME} - name: ${NAME} - spec: - containers: - - name: manageiq - image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}" - livenessProbe: - tcpSocket: - port: 443 - initialDelaySeconds: 480 - timeoutSeconds: 3 - readinessProbe: - httpGet: - path: / - port: 443 - scheme: HTTPS - initialDelaySeconds: 200 - timeoutSeconds: 3 - ports: - - containerPort: 80 - protocol: TCP - - containerPort: 443 - protocol: TCP - securityContext: - privileged: true - volumeMounts: - - - name: "${NAME}-server" - mountPath: "/persistent" - - - name: "${NAME}-region" - mountPath: "/persistent-region" - env: - - - name: "APPLICATION_INIT_DELAY" - value: "${APPLICATION_INIT_DELAY}" - - - name: "DATABASE_SERVICE_NAME" - value: "${DATABASE_SERVICE_NAME}" - - - name: "DATABASE_REGION" - value: "${DATABASE_REGION}" - - - name: "MEMCACHED_SERVICE_NAME" - value: "${MEMCACHED_SERVICE_NAME}" - - - name: "POSTGRESQL_USER" - value: "${DATABASE_USER}" - - - name: "POSTGRESQL_PASSWORD" - valueFrom: - secretKeyRef: - name: "${NAME}-secrets" - key: "pg-password" - - - name: "POSTGRESQL_DATABASE" - value: "${DATABASE_NAME}" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - value: "${POSTGRESQL_MAX_CONNECTIONS}" - - - name: "POSTGRESQL_SHARED_BUFFERS" - value: "${POSTGRESQL_SHARED_BUFFERS}" - resources: - requests: - memory: "${APPLICATION_MEM_REQ}" - cpu: "${APPLICATION_CPU_REQ}" - limits: - memory: "${APPLICATION_MEM_LIMIT}" - lifecycle: - preStop: - exec: - command: - - /opt/manageiq/container-scripts/sync-pv-data - volumes: - - - name: "${NAME}-region" - persistentVolumeClaim: - claimName: ${NAME}-region - volumeClaimTemplates: - - metadata: - name: "${NAME}-server" - annotations: - # Uncomment this if using dynamic volume provisioning. 
- # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html - # volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ ReadWriteOnce ] - resources: - requests: - storage: "${APPLICATION_VOLUME_CAPACITY}" - - apiVersion: v1 - kind: "Service" - metadata: - name: "${MEMCACHED_SERVICE_NAME}" - annotations: - description: "Exposes the memcached server" - spec: - ports: - - - name: "memcached" - port: 11211 - targetPort: 11211 - selector: - name: "${MEMCACHED_SERVICE_NAME}" - - apiVersion: v1 - kind: "DeploymentConfig" - metadata: - name: "${MEMCACHED_SERVICE_NAME}" - annotations: - description: "Defines how to deploy memcached" - spec: - strategy: - type: "Recreate" - triggers: - - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "memcached" - from: - kind: "ImageStreamTag" - name: "miq-memcached:${MEMCACHED_IMG_TAG}" - - - type: "ConfigChange" - replicas: 1 - selector: - name: "${MEMCACHED_SERVICE_NAME}" - template: - metadata: - name: "${MEMCACHED_SERVICE_NAME}" - labels: - name: "${MEMCACHED_SERVICE_NAME}" - spec: - volumes: [] - containers: - - - name: "memcached" - image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" - ports: - - - containerPort: 11211 - readinessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 5 - tcpSocket: - port: 11211 - livenessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 30 - tcpSocket: - port: 11211 - volumeMounts: [] - env: - - - name: "MEMCACHED_MAX_MEMORY" - value: "${MEMCACHED_MAX_MEMORY}" - - - name: "MEMCACHED_MAX_CONNECTIONS" - value: "${MEMCACHED_MAX_CONNECTIONS}" - - - name: "MEMCACHED_SLAB_PAGE_SIZE" - value: "${MEMCACHED_SLAB_PAGE_SIZE}" - resources: - requests: - memory: "${MEMCACHED_MEM_REQ}" - cpu: "${MEMCACHED_CPU_REQ}" - limits: - memory: "${MEMCACHED_MEM_LIMIT}" - - apiVersion: v1 - kind: "Service" - metadata: - name: "${DATABASE_SERVICE_NAME}" - annotations: - description: "Exposes the database server" - spec: - ports: - - - name: "postgresql" - port: 5432 - targetPort: 5432 - selector: - name: "${DATABASE_SERVICE_NAME}" - - apiVersion: v1 - kind: "DeploymentConfig" - metadata: - name: "${DATABASE_SERVICE_NAME}" - annotations: - description: "Defines how to deploy the database" - spec: - strategy: - type: "Recreate" - triggers: - - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "postgresql" - from: - kind: "ImageStreamTag" - name: "miq-postgresql:${POSTGRESQL_IMG_TAG}" - - - type: "ConfigChange" - replicas: 1 - selector: - name: "${DATABASE_SERVICE_NAME}" - template: - metadata: - name: "${DATABASE_SERVICE_NAME}" - labels: - name: "${DATABASE_SERVICE_NAME}" - spec: - volumes: - - - name: "miq-pgdb-volume" - persistentVolumeClaim: - claimName: "${NAME}-${DATABASE_SERVICE_NAME}" - containers: - - - name: "postgresql" - image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" - ports: - - - containerPort: 5432 - readinessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 15 - exec: - command: - - "/bin/sh" - - "-i" - - "-c" - - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'" - livenessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 60 - tcpSocket: - port: 5432 - volumeMounts: - - - name: "miq-pgdb-volume" - mountPath: "/var/lib/pgsql/data" - env: - - - name: "POSTGRESQL_USER" - value: "${DATABASE_USER}" - - - name: "POSTGRESQL_PASSWORD" - valueFrom: - secretKeyRef: - name: "${NAME}-secrets" - key: "pg-password" - - - name: "POSTGRESQL_DATABASE" - value: "${DATABASE_NAME}" - - - 
name: "POSTGRESQL_MAX_CONNECTIONS" - value: "${POSTGRESQL_MAX_CONNECTIONS}" - - - name: "POSTGRESQL_SHARED_BUFFERS" - value: "${POSTGRESQL_SHARED_BUFFERS}" - resources: - requests: - memory: "${POSTGRESQL_MEM_REQ}" - cpu: "${POSTGRESQL_CPU_REQ}" - limits: - memory: "${POSTGRESQL_MEM_LIMIT}" - - parameters: - - - name: "NAME" - displayName: Name - required: true - description: "The name assigned to all of the frontend objects defined in this template." - value: manageiq - - - name: "DATABASE_SERVICE_NAME" - displayName: "PostgreSQL Service Name" - required: true - description: "The name of the OpenShift Service exposed for the PostgreSQL container." - value: "postgresql" - - - name: "DATABASE_USER" - displayName: "PostgreSQL User" - required: true - description: "PostgreSQL user that will access the database." - value: "root" - - - name: "DATABASE_PASSWORD" - displayName: "PostgreSQL Password" - required: true - description: "Password for the PostgreSQL user." - from: "[a-zA-Z0-9]{8}" - generate: expression - - - name: "DATABASE_NAME" - required: true - displayName: "PostgreSQL Database Name" - description: "Name of the PostgreSQL database accessed." - value: "vmdb_production" - - - name: "DATABASE_REGION" - required: true - displayName: "Application Database Region" - description: "Database region that will be used for application." - value: "0" - - - name: "MEMCACHED_SERVICE_NAME" - required: true - displayName: "Memcached Service Name" - description: "The name of the OpenShift Service exposed for the Memcached container." - value: "memcached" - - - name: "MEMCACHED_MAX_MEMORY" - displayName: "Memcached Max Memory" - description: "Memcached maximum memory for memcached object storage in MB." - value: "64" - - - name: "MEMCACHED_MAX_CONNECTIONS" - displayName: "Memcached Max Connections" - description: "Memcached maximum number of connections allowed." - value: "1024" - - - name: "MEMCACHED_SLAB_PAGE_SIZE" - displayName: "Memcached Slab Page Size" - description: "Memcached size of each slab page." - value: "1m" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - displayName: "PostgreSQL Max Connections" - description: "PostgreSQL maximum number of database connections allowed." - value: "100" - - - name: "POSTGRESQL_SHARED_BUFFERS" - displayName: "PostgreSQL Shared Buffer Amount" - description: "Amount of memory dedicated for PostgreSQL shared memory buffers." - value: "256MB" - - - name: "APPLICATION_CPU_REQ" - displayName: "Application Min CPU Requested" - required: true - description: "Minimum amount of CPU time the Application container will need (expressed in millicores)." - value: "1000m" - - - name: "POSTGRESQL_CPU_REQ" - displayName: "PostgreSQL Min CPU Requested" - required: true - description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)." - value: "500m" - - - name: "MEMCACHED_CPU_REQ" - displayName: "Memcached Min CPU Requested" - required: true - description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)." - value: "200m" - - - name: "APPLICATION_MEM_REQ" - displayName: "Application Min RAM Requested" - required: true - description: "Minimum amount of memory the Application container will need." - value: "6144Mi" - - - name: "POSTGRESQL_MEM_REQ" - displayName: "PostgreSQL Min RAM Requested" - required: true - description: "Minimum amount of memory the PostgreSQL container will need." 
- value: "1024Mi" - - - name: "MEMCACHED_MEM_REQ" - displayName: "Memcached Min RAM Requested" - required: true - description: "Minimum amount of memory the Memcached container will need." - value: "64Mi" - - - name: "APPLICATION_MEM_LIMIT" - displayName: "Application Max RAM Limit" - required: true - description: "Maximum amount of memory the Application container can consume." - value: "16384Mi" - - - name: "POSTGRESQL_MEM_LIMIT" - displayName: "PostgreSQL Max RAM Limit" - required: true - description: "Maximum amount of memory the PostgreSQL container can consume." - value: "8192Mi" - - - name: "MEMCACHED_MEM_LIMIT" - displayName: "Memcached Max RAM Limit" - required: true - description: "Maximum amount of memory the Memcached container can consume." - value: "256Mi" - - - name: "POSTGRESQL_IMG_NAME" - displayName: "PostgreSQL Image Name" - description: "This is the PostgreSQL image name requested to deploy." - value: "docker.io/manageiq/manageiq-pods" - - - name: "POSTGRESQL_IMG_TAG" - displayName: "PostgreSQL Image Tag" - description: "This is the PostgreSQL image tag/version requested to deploy." - value: "postgresql-latest-fine" - - - name: "MEMCACHED_IMG_NAME" - displayName: "Memcached Image Name" - description: "This is the Memcached image name requested to deploy." - value: "docker.io/manageiq/manageiq-pods" - - - name: "MEMCACHED_IMG_TAG" - displayName: "Memcached Image Tag" - description: "This is the Memcached image tag/version requested to deploy." - value: "memcached-latest-fine" - - - name: "APPLICATION_IMG_NAME" - displayName: "Application Image Name" - description: "This is the Application image name requested to deploy." - value: "docker.io/manageiq/manageiq-pods" - - - name: "APPLICATION_IMG_TAG" - displayName: "Application Image Tag" - description: "This is the Application image tag/version requested to deploy." - value: "app-latest-fine" - - - name: "APPLICATION_DOMAIN" - displayName: "Application Hostname" - description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted." - value: "" - - - name: "APPLICATION_REPLICA_COUNT" - displayName: "Application Replica Count" - description: "This is the number of Application replicas requested to deploy." - value: "1" - - - name: "APPLICATION_INIT_DELAY" - displayName: "Application Init Delay" - required: true - description: "Delay in seconds before we attempt to initialize the application." - value: "15" - - - name: "APPLICATION_VOLUME_CAPACITY" - displayName: "Application Volume Capacity" - required: true - description: "Volume space available for application data." - value: "5Gi" - - - name: "APPLICATION_REGION_VOLUME_CAPACITY" - displayName: "Application Region Volume Capacity" - required: true - description: "Volume space available for region application data." - value: "5Gi" - - - name: "DATABASE_VOLUME_CAPACITY" - displayName: "Database Volume Capacity" - required: true - description: "Volume space available for database." 
- value: "15Gi" diff --git a/roles/openshift_cfme/files/openshift_cfme.exports b/roles/openshift_cfme/files/openshift_cfme.exports deleted file mode 100644 index 5457d41fc..000000000 --- a/roles/openshift_cfme/files/openshift_cfme.exports +++ /dev/null @@ -1,3 +0,0 @@ -/exports/miq-pv01 *(rw,no_root_squash,no_wdelay) -/exports/miq-pv02 *(rw,no_root_squash,no_wdelay) -/exports/miq-pv03 *(rw,no_root_squash,no_wdelay) diff --git a/roles/openshift_cfme/handlers/main.yml b/roles/openshift_cfme/handlers/main.yml deleted file mode 100644 index 7e90b09a4..000000000 --- a/roles/openshift_cfme/handlers/main.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -###################################################################### -# NOTE: These are duplicated from roles/openshift_master/handlers/main.yml -# -# TODO: Use the consolidated 'openshift_handlers' role once it's ready -# See: https://github.com/openshift/openshift-ansible/pull/4041#discussion_r118770782 -###################################################################### - -- name: restart master api - systemd: name={{ openshift.common.service_type }}-master-api state=restarted - when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' - notify: Verify API Server - -- name: restart master controllers - systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted - when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' - -- name: Verify API Server - # Using curl here since the uri module requires python-httplib2 and - # wait_for port doesn't provide health information. - command: > - curl --silent --tlsv1.2 - {% if openshift.common.version_gte_3_2_or_1_2 | bool %} - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {% else %} - --cacert {{ openshift.common.config_base }}/master/ca.crt - {% endif %} - {{ openshift.master.api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false diff --git a/roles/openshift_cfme/img/CFMEBasicDeployment.png b/roles/openshift_cfme/img/CFMEBasicDeployment.png Binary files differdeleted file mode 100644 index a89c1e325..000000000 --- a/roles/openshift_cfme/img/CFMEBasicDeployment.png +++ /dev/null diff --git a/roles/openshift_cfme/tasks/create_pvs.yml b/roles/openshift_cfme/tasks/create_pvs.yml deleted file mode 100644 index 7fa7d3997..000000000 --- a/roles/openshift_cfme/tasks/create_pvs.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -# Check for existance and then conditionally: -# - evaluate templates -# - PVs -# -# These tasks idempotently create required CFME PV objects. Do not -# call this file directly. This file is intended to be ran as an -# include that has a 'with_items' attached to it. 
Hence the use below -# of variables like "{{ item.pv_label }}" - -- name: "Check if the {{ item.pv_label }} template has been created already" - oc_obj: - namespace: "{{ openshift_cfme_project }}" - state: list - kind: pv - name: "{{ item.pv_name }}" - register: miq_pv_check - -# Skip all of this if the PV already exists -- block: - - name: "Ensure the {{ item.pv_label }} template is evaluated" - template: - src: "{{ item.pv_template }}.j2" - dest: "{{ template_dir }}/{{ item.pv_template }}" - - - name: "Ensure {{ item.pv_label }} is created" - oc_obj: - namespace: "{{ openshift_cfme_project }}" - kind: pv - name: "{{ item.pv_name }}" - state: present - delete_after: True - files: - - "{{ template_dir }}/{{ item.pv_template }}" - when: - - not miq_pv_check.results.results.0 diff --git a/roles/openshift_cfme/tasks/main.yml b/roles/openshift_cfme/tasks/main.yml deleted file mode 100644 index 74ae16d91..000000000 --- a/roles/openshift_cfme/tasks/main.yml +++ /dev/null @@ -1,117 +0,0 @@ ---- -###################################################################### -# Users, projects, and privileges - -- name: Ensure the CFME user exists - oc_user: - state: present - username: "{{ openshift_cfme_user }}" - -- name: Ensure the CFME namespace exists with CFME user as admin - oc_project: - state: present - name: "{{ openshift_cfme_project }}" - display_name: "{{ openshift_cfme_project_description }}" - admin: "{{ openshift_cfme_user }}" - -- name: Ensure the CFME namespace service account is privileged - oc_adm_policy_user: - namespace: "{{ openshift_cfme_project }}" - user: "{{ openshift_cfme_service_account }}" - resource_kind: scc - resource_name: privileged - state: present - -###################################################################### -# NFS -# In the case that we are not running on a cloud provider, volumes must be statically provisioned - -- include: nfs.yml - when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')) - -###################################################################### -# CFME App Template -# -# Note, this is different from the create_pvs.yml tasks in that the -# application template does not require any jinja2 evaluation. -# -# TODO: Handle the case where the server template is updated in -# openshift-ansible and the change needs to be landed on the managed -# cluster. 
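(Editor's sketch of the check-then-create idempotence pattern used in create_pvs.yml above, generalized with illustrative names; it assumes the lib_openshift oc_obj module used throughout these roles, where a "state: list" lookup registers a result whose results.results.0 is empty when the object is absent:)

    - name: Check whether the object exists yet
      oc_obj:
        namespace: example-project        # illustrative
        state: list
        kind: pv
        name: example-pv                  # illustrative
      register: obj_check

    - name: Create it only when the lookup came back empty
      oc_obj:
        namespace: example-project
        kind: pv
        name: example-pv
        state: present
        files:
          - /tmp/example-pv.yaml          # illustrative rendered template
      when: not obj_check.results.results.0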
- -- name: Check if the CFME Server template has been created already - oc_obj: - namespace: "{{ openshift_cfme_project }}" - state: list - kind: template - name: manageiq - register: miq_server_check - -- name: Copy over CFME Server template - copy: - src: miq-template.yaml - dest: "{{ template_dir }}/miq-template.yaml" - -- name: Ensure the server template was read from disk - debug: - var=r_openshift_cfme_miq_template_content - -- name: Ensure CFME Server Template exists - oc_obj: - namespace: "{{ openshift_cfme_project }}" - kind: template - name: "manageiq" - state: present - content: "{{ r_openshift_cfme_miq_template_content }}" - -###################################################################### -# Let's do this - -- name: Ensure the CFME Server is created - oc_process: - namespace: "{{ openshift_cfme_project }}" - template_name: manageiq - create: True - params: - APPLICATION_IMG_NAME: "{{ openshift_cfme_application_img_name }}" - POSTGRESQL_IMG_NAME: "{{ openshift_cfme_postgresql_img_name }}" - MEMCACHED_IMG_NAME: "{{ openshift_cfme_memcached_img_name }}" - APPLICATION_IMG_TAG: "{{ openshift_cfme_application_img_tag }}" - POSTGRESQL_IMG_TAG: "{{ openshift_cfme_postgresql_img_tag }}" - MEMCACHED_IMG_TAG: "{{ openshift_cfme_memcached_img_tag }}" - register: cfme_new_app_process - run_once: True - when: - # User said to install CFME in their inventory - - openshift_cfme_install_app | bool - # # The server app doesn't exist already - # - not miq_server_check.results.results.0 - -- debug: - var: cfme_new_app_process - -###################################################################### -# Various cleanup steps - -# TODO: Not sure what to do about this right now. Might be able to -# just delete it? This currently warns about "Unable to find -# '<TEMP_DIR>' in expected paths." -- name: Ensure the temporary PV/App templates are erased - file: - path: "{{ item }}" - state: absent - with_fileglob: - - "{{ template_dir }}/*.yaml" - -- name: Ensure the temporary PV/app template directory is erased - file: - path: "{{ template_dir }}" - state: absent - -###################################################################### - -- name: Status update - debug: - msg: > - CFME has been deployed. Note that there will be a delay before - it is fully initialized. diff --git a/roles/openshift_cfme/tasks/nfs.yml b/roles/openshift_cfme/tasks/nfs.yml deleted file mode 100644 index 8db45492e..000000000 --- a/roles/openshift_cfme/tasks/nfs.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -# Tasks to statically provision NFS volumes -# Include if not using dynamic volume provisioning -- name: Ensure the /exports/ directory exists - file: - path: /exports/ - state: directory - mode: 0755 - owner: root - group: root - -- name: Ensure the miq-pv0X export directories exist - file: - path: "/exports/{{ item }}" - state: directory - mode: 0775 - owner: root - group: root - with_items: "{{ openshift_cfme_pv_exports }}" - -- name: Ensure the NFS exports for CFME PVs exist - copy: - src: openshift_cfme.exports - dest: /etc/exports.d/openshift_cfme.exports - register: nfs_exports_updated - -- name: Ensure the NFS export table is refreshed if exports were added - command: exportfs -ar - when: - - nfs_exports_updated.changed - - -###################################################################### -# Create the required CFME PVs. 
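(Editor's sketch: the exportfs refresh in nfs.yml above runs only when the copied exports file actually changed; the same register/when pairing in isolation, with illustrative file names:)

    - name: Install an exports file
      copy:
        src: example.exports                    # illustrative
        dest: /etc/exports.d/example.exports
      register: exports_updated

    - name: Re-export only when the file changed
      command: exportfs -ar
      when: exports_updated.changed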
Check out these online docs if you -# need a refresher on includes looping with items: -# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0 -# * http://stackoverflow.com/a/35128533 -# -# TODO: Handle the case where a PV template is updated in -# openshift-ansible and the change needs to be landed on the managed -# cluster. - -- include: create_pvs.yml - with_items: "{{ openshift_cfme_pv_data }}" diff --git a/roles/openshift_cfme/tasks/tune_masters.yml b/roles/openshift_cfme/tasks/tune_masters.yml deleted file mode 100644 index 02b0f10bf..000000000 --- a/roles/openshift_cfme/tasks/tune_masters.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Ensure bulk image import limit is tuned - yedit: - src: /etc/origin/master/master-config.yaml - key: 'imagePolicyConfig.maxImagesBulkImportedPerRepository' - value: "{{ openshift_cfme_maxImagesBulkImportedPerRepository | int() }}" - state: present - backup: True - notify: - - restart master - -- meta: flush_handlers diff --git a/roles/openshift_cfme/tasks/uninstall.yml b/roles/openshift_cfme/tasks/uninstall.yml deleted file mode 100644 index 406b59364..000000000 --- a/roles/openshift_cfme/tasks/uninstall.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- include_role: - name: lib_openshift - -- name: Uninstall CFME - ManageIQ - debug: - msg: Uninstalling Cloudforms Management Engine - ManageIQ - -- name: Ensure the CFME project is removed - oc_project: - state: absent - name: "{{ openshift_cfme_project }}" - -- name: Ensure the CFME template is removed - oc_obj: - namespace: "{{ openshift_cfme_project }}" - state: absent - kind: template - name: manageiq - -- name: Ensure the CFME PVs are removed - oc_obj: - state: absent - all_namespaces: True - kind: pv - name: "{{ item }}" - with_items: "{{ openshift_cfme_pv_exports }}" - when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')) - -- name: Ensure the CFME user is removed - oc_user: - state: absent - username: "{{ openshift_cfme_user }}" - -- name: Ensure the CFME NFS Exports are removed - file: - path: /etc/exports.d/openshift_cfme.exports - state: absent - register: nfs_exports_removed - when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')) - -- name: Ensure the NFS export table is refreshed if exports were removed - command: exportfs -ar - when: - - nfs_exports_removed.changed - - not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')) diff --git a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2 deleted file mode 100644 index 7218773f0..000000000 --- a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2 +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: miq-pv02 -spec: - capacity: - storage: 5Gi - accessModes: - - ReadWriteOnce - nfs: - path: /exports/miq-pv02 - server: {{ openshift_cfme_nfs_server }} - persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml index 04a1ce873..29ed82783 100644 --- a/roles/openshift_cli/meta/main.yml +++ b/roles/openshift_cli/meta/main.yml @@ -14,5 +14,4 @@ galaxy_info: dependencies: - role: openshift_docker when: not skip_docker_role | default(False) | bool -- role: openshift_common - role: openshift_facts diff --git 
a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml index 9e61805f9..14d8a3325 100644 --- a/roles/openshift_cli/tasks/main.yml +++ b/roles/openshift_cli/tasks/main.yml @@ -1,6 +1,9 @@ --- - set_fact: - l_use_crio: "{{ openshift_use_crio | default(false) }}" + l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}" + l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool }}" +- set_fact: + l_use_cli_atomic_image: "{{ l_use_crio_only or l_is_system_container_image }}" - name: Install clients package: name={{ openshift.common.service_type }}-clients state=present @@ -20,23 +23,23 @@ backend: "docker" when: - openshift.common.is_containerized | bool - - not l_use_crio + - not l_use_cli_atomic_image | bool - block: - name: Pull CLI Image command: > - atomic pull --storage ostree {{ openshift.common.system_images_registry }}/{{ openshift.common.cli_image }}:{{ openshift_image_tag }} + atomic pull --storage ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}:{{ openshift_image_tag }} register: pull_result changed_when: "'Pulling layer' in pull_result.stdout" - name: Copy client binaries/symlinks out of CLI image for use on the host openshift_container_binary_sync: - image: "{{ openshift.common.system_images_registry }}/{{ openshift.common.cli_image }}" + image: "{{ '' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}" tag: "{{ openshift_image_tag }}" backend: "atomic" when: - openshift.common.is_containerized | bool - - l_use_crio + - l_use_cli_atomic_image | bool - name: Reload facts to pick up installed OpenShift version openshift_facts: diff --git a/roles/openshift_clock/defaults/main.yml b/roles/openshift_clock/defaults/main.yml new file mode 100644 index 000000000..a94f67199 --- /dev/null +++ b/roles/openshift_clock/defaults/main.yml @@ -0,0 +1,2 @@ +--- +openshift_clock_enabled: True diff --git a/roles/openshift_clock/meta/main.yml b/roles/openshift_clock/meta/main.yml index 3e175beb0..d1e86d826 100644 --- a/roles/openshift_clock/meta/main.yml +++ b/roles/openshift_clock/meta/main.yml @@ -11,5 +11,4 @@ galaxy_info: - 7 categories: - cloud -dependencies: -- { role: openshift_facts } +dependencies: [] diff --git a/roles/openshift_clock/tasks/main.yaml b/roles/openshift_clock/tasks/main.yaml index 3911201ea..f8b02524a 100644 --- a/roles/openshift_clock/tasks/main.yaml +++ b/roles/openshift_clock/tasks/main.yaml @@ -1,14 +1,15 @@ --- -- name: Set clock facts - openshift_facts: - role: clock - local_facts: - enabled: "{{ openshift_clock_enabled | default(None) }}" +- name: Determine if chrony is installed + command: rpm -q chrony + failed_when: false + register: chrony_installed - name: Install ntp package package: name=ntp state=present - when: openshift.clock.enabled | bool and not openshift.clock.chrony_installed | bool + when: + - openshift_clock_enabled | bool + - chrony_installed.rc != 0 - name: Start and enable ntpd/chronyd - shell: timedatectl set-ntp true - when: openshift.clock.enabled | bool + command: timedatectl set-ntp true + when: openshift_clock_enabled | bool diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md deleted file mode 100644 index 2a271854b..000000000 --- a/roles/openshift_common/README.md +++ /dev/null @@ -1,45 +0,0 @@ 
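(Editor's sketch: the openshift_clock change above replaces a cached fact with a direct rpm query; the probe-then-branch pairing in isolation looks like this -- changed_when is an editor addition to keep the query from reporting a change, everything else follows the tasks shown above:)

    - name: Determine if chrony is installed
      command: rpm -q chrony
      failed_when: false
      changed_when: false        # editor addition: query only, never a change
      register: chrony_installed

    - name: Install ntp only when chrony is absent
      package:
        name: ntp
        state: present
      when: chrony_installed.rc != 0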
-OpenShift/Atomic Enterprise Common -=================================== - -OpenShift/Atomic Enterprise common installation and configuration tasks. - -Requirements ------------- - -A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms, -rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos. - -Role Variables --------------- - -| Name | Default value | | -|---------------------------|-------------------|---------------------------------------------| -| openshift_cluster_id | default | Cluster name if multiple OpenShift clusters | -| openshift_debug_level | 2 | Global openshift debug log verbosity | -| openshift_hostname | UNDEF | Internal hostname to use for this host (this value will set the hostname on the system) | -| openshift_ip | UNDEF | Internal IP address to use for this host | -| openshift_public_hostname | UNDEF | Public hostname to use for this host | -| openshift_public_ip | UNDEF | Public IP address to use for this host | -| openshift_portal_net | UNDEF | Service IP CIDR | - -Dependencies ------------- - -os_firewall -openshift_facts -openshift_repos - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License, Version 2.0 - -Author Information ------------------- - -Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml deleted file mode 100644 index 267c03605..000000000 --- a/roles/openshift_common/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -openshift_cluster_id: 'default' -openshift_debug_level: 2 diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml deleted file mode 100644 index 7cc95d8fa..000000000 --- a/roles/openshift_common/meta/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: OpenShift Common - company: Red Hat, Inc. 
- license: Apache License, Version 2.0 - min_ansible_version: 1.7 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud -dependencies: -- role: openshift_facts diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml deleted file mode 100644 index a0bd6c860..000000000 --- a/roles/openshift_common/tasks/main.yml +++ /dev/null @@ -1,78 +0,0 @@ ---- -- fail: - msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool - -- fail: - msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool - -- fail: - msg: Nuage sdn can not be used with flannel - when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool - -- fail: - msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool - -- fail: - msg: Contiv can not be used with flannel - when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool - -- fail: - msg: Contiv can not be used with nuage - when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool - -- fail: - msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool - -- fail: - msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both. - when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool - -- fail: - msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both - when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool - -- fail: - msg: Calico can not be used with Contiv in Openshift. 
Set either openshift_use_calico or openshift_use_contiv, but not both - when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool - -- fail: - msg: openshift_hostname must be 63 characters or less - when: openshift_hostname is defined and openshift_hostname | length > 63 - -- name: Set common Cluster facts - openshift_facts: - role: common - local_facts: - install_examples: "{{ openshift_install_examples | default(True) }}" - use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}" - sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}" - use_flannel: "{{ openshift_use_flannel | default(None) }}" - use_calico: "{{openshift_use_calico | default(None) }}" - use_nuage: "{{ openshift_use_nuage | default(None) }}" - use_contiv: "{{ openshift_use_contiv | default(None) }}" - use_manageiq: "{{ openshift_use_manageiq | default(None) }}" - data_dir: "{{ openshift_data_dir | default(None) }}" - use_dnsmasq: "{{ openshift_use_dnsmasq | default(None) }}" - -- name: Install the base package for versioning - package: - name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" - state: present - when: not openshift.common.is_containerized | bool - -- name: Set version facts - openshift_facts: - -# For enterprise versions < 3.1 and origin versions < 1.1 we want to set the -# hostname by default. -- set_fact: - set_hostname_default: "{{ not openshift.common.version_gte_3_1_or_1_1 }}" - -- name: Set hostname - command: > - hostnamectl set-hostname {{ openshift.common.hostname }} - when: openshift_set_hostname | default(set_hostname_default) | bool diff --git a/roles/openshift_default_storage_class/README.md b/roles/openshift_default_storage_class/README.md index 198163127..57e732f37 100644 --- a/roles/openshift_default_storage_class/README.md +++ b/roles/openshift_default_storage_class/README.md @@ -1,7 +1,7 @@ openshift_master_storage_class ========= -A role that deploys configuratons for Openshift StorageClass +A role that deploys configurations for Openshift StorageClass Requirements ------------ diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml index bdece7640..014c06641 100644 --- a/roles/openshift_default_storage_class/defaults/main.yml +++ b/roles/openshift_default_storage_class/defaults/main.yml @@ -13,6 +13,12 @@ openshift_storageclass_defaults: parameters: type: pd-standard + openstack: + name: standard + provisioner: cinder + parameters: + fstype: xfs + openshift_storageclass_default: "true" openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}" openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}" diff --git a/roles/openshift_default_storage_class/tasks/main.yml b/roles/openshift_default_storage_class/tasks/main.yml index 172e2ac25..281ec8ed5 100644 --- a/roles/openshift_default_storage_class/tasks/main.yml +++ b/roles/openshift_default_storage_class/tasks/main.yml @@ -1,5 +1,5 @@ --- -# Install default storage classes in GCE & AWS +# Install default storage classes in GCE & AWS & OPENSTACK - name: Ensure storageclass object oc_storageclass: name: "{{ openshift_storageclass_name }}" diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml index 334150f63..5a3e50678 100644 --- 
a/roles/openshift_docker_facts/tasks/main.yml +++ b/roles/openshift_docker_facts/tasks/main.yml @@ -6,9 +6,6 @@ with_items: - role: docker local_facts: - additional_registries: "{{ openshift_docker_additional_registries | default(None) }}" - blocked_registries: "{{ openshift_docker_blocked_registries | default(None) }}" - insecure_registries: "{{ openshift_docker_insecure_registries | default(None) }}" selinux_enabled: "{{ openshift_docker_selinux_enabled | default(None) }}" log_driver: "{{ openshift_docker_log_driver | default(None) }}" log_options: "{{ openshift_docker_log_options | default(None) }}" @@ -23,12 +20,6 @@ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" - set_fact: - docker_additional_registries: "{{ openshift.docker.additional_registries - | default(omit) }}" - docker_blocked_registries: "{{ openshift.docker.blocked_registries - | default(omit) }}" - docker_insecure_registries: "{{ openshift.docker.insecure_registries - | default(omit) }}" docker_selinux_enabled: "{{ openshift.docker.selinux_enabled | default(omit) }}" docker_log_driver: "{{ openshift.docker.log_driver | default(omit) }}" docker_log_options: "{{ openshift.docker.log_options | default(omit) }}" diff --git a/roles/openshift_etcd_client_certificates/meta/main.yml b/roles/openshift_etcd_client_certificates/meta/main.yml index 3268c390f..fbc72c8a3 100644 --- a/roles/openshift_etcd_client_certificates/meta/main.yml +++ b/roles/openshift_etcd_client_certificates/meta/main.yml @@ -11,6 +11,4 @@ galaxy_info: - 7 categories: - cloud -dependencies: -- role: openshift_etcd_facts -- role: etcd_client_certificates +dependencies: [] diff --git a/roles/openshift_etcd_client_certificates/tasks/main.yml b/roles/openshift_etcd_client_certificates/tasks/main.yml new file mode 100644 index 000000000..7f8b667f0 --- /dev/null +++ b/roles/openshift_etcd_client_certificates/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- include_role: + name: etcd + tasks_from: client_certificates diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml index b3ecd57a6..0c072b64a 100644 --- a/roles/openshift_etcd_facts/vars/main.yml +++ b/roles/openshift_etcd_facts/vars/main.yml @@ -6,6 +6,5 @@ etcd_ip: "{{ openshift.common.ip }}" etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}" etcd_cert_prefix: etcd_cert_config_dir: "/etc/etcd" -etcd_system_container_cert_config_dir: /var/lib/etcd/etcd.etcd/etc etcd_peer_url_scheme: https etcd_url_scheme: https diff --git a/roles/openshift_etcd_server_certificates/meta/main.yml b/roles/openshift_etcd_server_certificates/meta/main.yml deleted file mode 100644 index 7750f14af..000000000 --- a/roles/openshift_etcd_server_certificates/meta/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: OpenShift Etcd Server Certificates - company: Red Hat, Inc. 
- license: Apache License, Version 2.0 - min_ansible_version: 2.1 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud -dependencies: -- role: openshift_etcd_facts -- role: etcd_server_certificates diff --git a/roles/openshift_examples/README.md b/roles/openshift_examples/README.md index 8cc479c73..014cef111 100644 --- a/roles/openshift_examples/README.md +++ b/roles/openshift_examples/README.md @@ -21,13 +21,13 @@ Facts Role Variables -------------- -| Name | Default value | | -|-------------------------------------|-----------------------------------------------------|------------------------------------------| -| openshift_examples_load_centos | true when openshift_deployment_typenot 'enterprise' | Load centos image streams | -| openshift_examples_load_rhel | true if openshift_deployment_type is 'enterprise' | Load rhel image streams | -| openshift_examples_load_db_templates| true | Loads database templates | -| openshift_examples_load_quickstarts | true | Loads quickstarts ie: nodejs, rails, etc | -| openshift_examples_load_xpaas | false | Loads xpass streams and templates | +| Name | Default value | | +|-------------------------------------|----------------------------------------------------------------|------------------------------------------| +| openshift_examples_load_centos | true when openshift_deployment_type not 'openshift-enterprise' | Load centos image streams | +| openshift_examples_load_rhel | true if openshift_deployment_type is 'openshift-enterprise' | Load rhel image streams | +| openshift_examples_load_db_templates| true | Loads database templates | +| openshift_examples_load_quickstarts | true | Loads quickstarts ie: nodejs, rails, etc | +| openshift_examples_load_xpaas | false | Loads xpass streams and templates | Dependencies diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh index ca3f219d8..1a14c32f5 100755 --- a/roles/openshift_examples/examples-sync.sh +++ b/roles/openshift_examples/examples-sync.sh @@ -5,7 +5,7 @@ # # This script should be run from openshift-ansible/roles/openshift_examples -XPAAS_VERSION=ose-v1.4.1 +XPAAS_VERSION=ose-v1.4.5 ORIGIN_VERSION=${1:-v3.7} RHAMP_TAG=2.0.0.GA EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION} diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json index 6500ed0d3..5e7585eeb 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json @@ -8,10 +8,10 @@ "description": "MariaDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-mariadb", "tags": "database,mariadb", - "template.openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. 
The database name, username, and password are chosen via parameters when provisioning this service.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.", diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json index 4378fa4a0..217ef11dd 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json @@ -8,10 +8,10 @@ "description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mariadb", "tags": "database,mariadb", - "template.openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is stored on persistent storage. 
The database name, username, and password are chosen via parameters when provisioning this service.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.", diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json index 7271a2c69..10f202c59 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json @@ -8,10 +8,10 @@ "description": "MongoDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-mongodb", "tags": "database,mongodb", - "template.openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. 
The database name, username, and password are chosen via parameters when provisioning this service.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.", diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json index d70d2263f..97e4128a4 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json @@ -8,10 +8,10 @@ "description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mongodb", "tags": "database,mongodb", - "template.openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is stored on persistent storage. 
The database name, username, and password are chosen via parameters when provisioning this service.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.", diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json index 54785993c..c0946416d 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json @@ -8,10 +8,10 @@ "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-mysql-database", "tags": "database,mysql", - "template.openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. 
The database name, username, and password are chosen via parameters when provisioning this service.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.", diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json index 2bd84b106..48ac114fd 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json @@ -8,10 +8,10 @@ "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mysql-database", "tags": "database,mysql", - "template.openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.", diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json index 849c9d83f..7c419f1ae 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json @@ -8,10 +8,10 @@ "description": "PostgreSQL database service, without persistent storage. 
For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-postgresql", "tags": "database,postgresql", - "template.openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.", diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json index b622baa01..8a2d23907 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json @@ -8,10 +8,10 @@ "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-postgresql", "tags": "database,postgresql", - "template.openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is stored on persistent storage. 
The database name, username, and password are chosen via parameters when provisioning this service.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.", diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json index 15bdd079b..ee60af9db 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json @@ -8,10 +8,10 @@ "description": "Redis in-memory data structure store, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-redis", "tags": "database,redis", - "template.openshift.io/long-description": "This template provides a standalone Redis server. The data is not stored on persistent storage, so any restart of the service will result in all data being lost.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template provides a standalone Redis server. The data is not stored on persistent storage, so any restart of the service will result in all data being lost.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.", diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json index 1e31b02e0..e0e0a88d5 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json @@ -8,10 +8,10 @@ "description": "Redis in-memory data structure store, with persistent storage. 
For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-redis", "tags": "database,redis", - "template.openshift.io/long-description": "This template provides a standalone Redis server. The data is stored on persistent storage.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template provides a standalone Redis server. The data is stored on persistent storage.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.", diff --git a/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-centos7.json index 6cef21945..e7af160d9 100644 --- a/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-centos7.json +++ b/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-centos7.json @@ -9,7 +9,7 @@ "metadata": { "name": "httpd", "annotations": { - "openshift.io/display-name": "Httpd" + "openshift.io/display-name": "Apache HTTP Server (httpd)" } }, "spec": { @@ -17,8 +17,9 @@ { "name": "latest", "annotations": { - "openshift.io/display-name": "Httpd (Latest)", - "description": "Build and serve static content via Httpd on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.", + "openshift.io/display-name": "Apache HTTP Server (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Apache HTTP Server (httpd) on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.", "iconClass": "icon-apache", "tags": "builder,httpd", "supports":"httpd", @@ -32,8 +33,9 @@ { "name": "2.4", "annotations": { - "openshift.io/display-name": "Httpd 2.4", - "description": "Build and serve static content via Httpd on CentOS 7. 
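Two repeating edits run through the CentOS 7 image-stream hunks that follow: the httpd stream and its tags are re-titled from "Httpd" to "Apache HTTP Server (httpd)", and every tag's annotation block gains an openshift.io/provider-display-name entry. A condensed sketch of one affected tag entry after the patch (description shortened; the "from" reference is omitted because it sits outside the hunk context shown here):

    {
      "name": "2.4",
      "annotations": {
        "openshift.io/display-name": "Apache HTTP Server 2.4",
        "openshift.io/provider-display-name": "Red Hat, Inc.",
        "description": "Build and serve static content via Apache HTTP Server (httpd) 2.4 on CentOS 7. ...",
        "iconClass": "icon-apache",
        "tags": "builder,httpd",
        "supports": "httpd"
      }
    }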
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.", + "openshift.io/display-name": "Apache HTTP Server 2.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Apache HTTP Server (httpd) 2.4 on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.", "iconClass": "icon-apache", "tags": "builder,httpd", "supports":"httpd", @@ -63,6 +65,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Ruby (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Ruby applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.3/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", "iconClass": "icon-ruby", "tags": "builder,ruby", @@ -71,13 +74,14 @@ }, "from": { "kind": "ImageStreamTag", - "name": "2.3" + "name": "2.4" } }, { "name": "2.0", "annotations": { "openshift.io/display-name": "Ruby 2.0", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Ruby 2.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.", "iconClass": "icon-ruby", "tags": "hidden,builder,ruby", @@ -94,6 +98,7 @@ "name": "2.2", "annotations": { "openshift.io/display-name": "Ruby 2.2", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Ruby 2.2 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.2/README.md.", "iconClass": "icon-ruby", "tags": "builder,ruby", @@ -110,6 +115,7 @@ "name": "2.3", "annotations": { "openshift.io/display-name": "Ruby 2.3", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Ruby 2.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.3/README.md.", "iconClass": "icon-ruby", "tags": "builder,ruby", @@ -121,6 +127,23 @@ "kind": "DockerImage", "name": "centos/ruby-23-centos7:latest" } + }, + { + "name": "2.4", + "annotations": { + "openshift.io/display-name": "Ruby 2.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Ruby 2.4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.4/README.md.", + "iconClass": "icon-ruby", + "tags": "builder,ruby", + "supports": "ruby:2.4,ruby", + "version": "2.4", + "sampleRepo": "https://github.com/openshift/ruby-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "centos/ruby-24-centos7:latest" + } } ] } @@ -140,6 +163,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Node.js (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Node.js applications on CentOS 7. 
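The Ruby hunks above make a coordinated pair of changes: the "latest" tag's ImageStreamTag reference moves from 2.3 to 2.4, and a new 2.4 tag is added that pulls centos/ruby-24-centos7:latest. The net effect, sketched from the hunk contents with annotations elided:

    { "name": "latest", "from": { "kind": "ImageStreamTag", "name": "2.4" } },
    { "name": "2.4",    "from": { "kind": "DockerImage", "name": "centos/ruby-24-centos7:latest" } }

Anything resolving ruby:latest therefore follows the stream to the new Ruby 2.4 builder image.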
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.", "iconClass": "icon-nodejs", "tags": "builder,nodejs", @@ -155,6 +179,7 @@ "name": "0.10", "annotations": { "openshift.io/display-name": "Node.js 0.10", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "DEPRECATED: Build and run Node.js 0.10 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.", "iconClass": "icon-nodejs", "tags": "hidden,nodejs", @@ -171,6 +196,7 @@ "name": "4", "annotations": { "openshift.io/display-name": "Node.js 4", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Node.js 4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.", "iconClass": "icon-nodejs", "tags": "builder,nodejs", @@ -187,6 +213,7 @@ "name": "6", "annotations": { "openshift.io/display-name": "Node.js 6", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Node.js 6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/6/README.md.", "iconClass": "icon-nodejs", "tags": "builder,nodejs", @@ -217,6 +244,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Perl (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Perl applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.", "iconClass": "icon-perl", "tags": "builder,perl", @@ -232,6 +260,7 @@ "name": "5.16", "annotations": { "openshift.io/display-name": "Perl 5.16", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Perl 5.16 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.", "iconClass": "icon-perl", "tags": "hidden,builder,perl", @@ -248,6 +277,7 @@ "name": "5.20", "annotations": { "openshift.io/display-name": "Perl 5.20", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Perl 5.20 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.", "iconClass": "icon-perl", "tags": "builder,perl", @@ -264,6 +294,7 @@ "name": "5.24", "annotations": { "openshift.io/display-name": "Perl 5.24", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Perl 5.24 applications on CentOS 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.24/README.md.", "iconClass": "icon-perl", "tags": "builder,perl", @@ -294,6 +325,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "PHP (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run PHP applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.", "iconClass": "icon-php", "tags": "builder,php", @@ -309,6 +341,7 @@ "name": "5.5", "annotations": { "openshift.io/display-name": "PHP 5.5", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run PHP 5.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.", "iconClass": "icon-php", "tags": "hidden,builder,php", @@ -325,6 +358,7 @@ "name": "5.6", "annotations": { "openshift.io/display-name": "PHP 5.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run PHP 5.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.", "iconClass": "icon-php", "tags": "builder,php", @@ -341,6 +375,7 @@ "name": "7.0", "annotations": { "openshift.io/display-name": "PHP 7.0", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run PHP 7.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.0/README.md.", "iconClass": "icon-php", "tags": "builder,php", @@ -371,6 +406,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Python (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", "iconClass": "icon-python", "tags": "builder,python", @@ -386,6 +422,7 @@ "name": "3.3", "annotations": { "openshift.io/display-name": "Python 3.3", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Python 3.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.", "iconClass": "icon-python", "tags": "hidden,builder,python", @@ -402,6 +439,7 @@ "name": "2.7", "annotations": { "openshift.io/display-name": "Python 2.7", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Python 2.7 applications on CentOS 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/2.7/README.md.", "iconClass": "icon-python", "tags": "builder,python", @@ -418,6 +456,7 @@ "name": "3.4", "annotations": { "openshift.io/display-name": "Python 3.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Python 3.4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.4/README.md.", "iconClass": "icon-python", "tags": "builder,python", @@ -434,6 +473,7 @@ "name": "3.5", "annotations": { "openshift.io/display-name": "Python 3.5", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Python 3.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.", "iconClass": "icon-python", "tags": "builder,python", @@ -464,6 +504,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "WildFly (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run WildFly applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of WildFly available on OpenShift, including major versions updates.", "iconClass": "icon-wildfly", "tags": "builder,wildfly,java", @@ -479,6 +520,7 @@ "name": "8.1", "annotations": { "openshift.io/display-name": "WildFly 8.1", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run WildFly 8.1 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.", "iconClass": "icon-wildfly", "tags": "builder,wildfly,java", @@ -495,6 +537,7 @@ "name": "9.0", "annotations": { "openshift.io/display-name": "WildFly 9.0", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run WildFly 9.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.", "iconClass": "icon-wildfly", "tags": "builder,wildfly,java", @@ -511,6 +554,7 @@ "name": "10.0", "annotations": { "openshift.io/display-name": "WildFly 10.0", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run WildFly 10.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.", "iconClass": "icon-wildfly", "tags": "builder,wildfly,java", @@ -527,6 +571,7 @@ "name": "10.1", "annotations": { "openshift.io/display-name": "WildFly 10.1", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run WildFly 10.1 applications on CentOS 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.", "iconClass": "icon-wildfly", "tags": "builder,wildfly,java", @@ -557,6 +602,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "MySQL (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MySQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.", "iconClass": "icon-mysql-database", "tags": "mysql" @@ -570,6 +616,7 @@ "name": "5.5", "annotations": { "openshift.io/display-name": "MySQL 5.5", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MySQL 5.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.", "iconClass": "icon-mysql-database", "tags": "hidden,mysql", @@ -584,6 +631,7 @@ "name": "5.6", "annotations": { "openshift.io/display-name": "MySQL 5.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MySQL 5.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.", "iconClass": "icon-mysql-database", "tags": "mysql", @@ -598,6 +646,7 @@ "name": "5.7", "annotations": { "openshift.io/display-name": "MySQL 5.7", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MySQL 5.7 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.7/README.md.", "iconClass": "icon-mysql-database", "tags": "mysql", @@ -626,6 +675,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "MariaDB (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MariaDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.", "iconClass": "icon-mariadb", "tags": "mariadb" @@ -639,6 +689,7 @@ "name": "10.1", "annotations": { "openshift.io/display-name": "MariaDB 10.1", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MariaDB 10.1 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.", "iconClass": "icon-mariadb", "tags": "mariadb", @@ -667,6 +718,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "PostgreSQL (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a PostgreSQL database on CentOS 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.", "iconClass": "icon-postgresql", "tags": "postgresql" @@ -680,6 +732,7 @@ "name": "9.2", "annotations": { "openshift.io/display-name": "PostgreSQL 9.2", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a PostgreSQL 9.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.", "iconClass": "icon-postgresql", "tags": "hidden,postgresql", @@ -694,6 +747,7 @@ "name": "9.4", "annotations": { "openshift.io/display-name": "PostgreSQL 9.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a PostgreSQL 9.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.", "iconClass": "icon-postgresql", "tags": "postgresql", @@ -708,6 +762,7 @@ "name": "9.5", "annotations": { "openshift.io/display-name": "PostgreSQL 9.5", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a PostgreSQL 9.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.", "iconClass": "icon-postgresql", "tags": "postgresql", @@ -736,6 +791,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "MongoDB (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.", "iconClass": "icon-mongodb", "tags": "mongodb" @@ -749,6 +805,7 @@ "name": "2.4", "annotations": { "openshift.io/display-name": "MongoDB 2.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 2.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.", "iconClass": "icon-mongodb", "tags": "hidden,mongodb", @@ -763,6 +820,7 @@ "name": "2.6", "annotations": { "openshift.io/display-name": "MongoDB 2.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 2.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.", "iconClass": "icon-mongodb", "tags": "mongodb", @@ -777,6 +835,7 @@ "name": "3.2", "annotations": { "openshift.io/display-name": "MongoDB 3.2", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 3.2 database on CentOS 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.", "iconClass": "icon-mongodb", "tags": "mongodb", @@ -805,6 +864,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Redis (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a Redis database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Redis available on OpenShift, including major versions updates.", "iconClass": "icon-redis", "tags": "redis" @@ -818,6 +878,7 @@ "name": "3.2", "annotations": { "openshift.io/display-name": "Redis 3.2", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a Redis 3.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.", "iconClass": "icon-redis", "tags": "redis", @@ -846,6 +907,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Jenkins (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a Jenkins server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.", "iconClass": "icon-jenkins", "tags": "jenkins" @@ -859,6 +921,7 @@ "name": "1", "annotations": { "openshift.io/display-name": "Jenkins 1.X", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", "iconClass": "icon-jenkins", "tags": "hidden,jenkins", @@ -873,6 +936,7 @@ "name": "2", "annotations": { "openshift.io/display-name": "Jenkins 2.X", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a Jenkins v2.x server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", "iconClass": "icon-jenkins", "tags": "jenkins", diff --git a/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-rhel7.json index abdae01e3..2b082fc75 100644 --- a/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-rhel7.json +++ b/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-rhel7.json @@ -9,7 +9,7 @@ "metadata": { "name": "httpd", "annotations": { - "openshift.io/display-name": "Httpd" + "openshift.io/display-name": "Apache HTTP Server (httpd)" } }, "spec": { @@ -17,8 +17,9 @@ { "name": "latest", "annotations": { - "openshift.io/display-name": "Httpd (Latest)", - "description": "Build and serve static content via Httpd on RHEL 7. 
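image-streams-rhel7.json, whose diff begins here, carries the same series of edits as the CentOS 7 file: the Apache HTTP Server rename, openshift.io/provider-display-name on every tag, and the new Ruby 2.4 tag with the retargeted "latest" reference. Only the image references differ, pointing at the Red Hat registry rather than the CentOS repositories, as in the Ruby 2.4 entry below:

    "from": {
      "kind": "DockerImage",
      "name": "registry.access.redhat.com/rhscl/ruby-24-rhel7:latest"
    }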
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.", + "openshift.io/display-name": "Apache HTTP Server (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Apache HTTP Server (httpd) on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.", "iconClass": "icon-apache", "tags": "builder,httpd", "supports":"httpd", @@ -32,8 +33,9 @@ { "name": "2.4", "annotations": { - "openshift.io/display-name": "Httpd 2.4", - "description": "Build and serve static content via Httpd on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.", + "openshift.io/display-name": "Apache HTTP Server 2.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Apache HTTP Server (httpd) 2.4 on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.", "iconClass": "icon-apache", "tags": "builder,httpd", "supports":"httpd", @@ -63,6 +65,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Ruby (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Ruby applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.3/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", "iconClass": "icon-ruby", "tags": "builder,ruby", @@ -71,13 +74,14 @@ }, "from": { "kind": "ImageStreamTag", - "name": "2.3" + "name": "2.4" } }, { "name": "2.0", "annotations": { "openshift.io/display-name": "Ruby 2.0", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Ruby 2.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.", "iconClass": "icon-ruby", "tags": "hidden,builder,ruby", @@ -94,6 +98,7 @@ "name": "2.2", "annotations": { "openshift.io/display-name": "Ruby 2.2", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Ruby 2.2 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.2/README.md.", "iconClass": "icon-ruby", "tags": "builder,ruby", @@ -110,6 +115,7 @@ "name": "2.3", "annotations": { "openshift.io/display-name": "Ruby 2.3", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Ruby 2.3 applications on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.3/README.md.", "iconClass": "icon-ruby", "tags": "builder,ruby", @@ -121,6 +127,23 @@ "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/ruby-23-rhel7:latest" } + }, + { + "name": "2.4", + "annotations": { + "openshift.io/display-name": "Ruby 2.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Ruby 2.4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.4/README.md.", + "iconClass": "icon-ruby", + "tags": "builder,ruby", + "supports": "ruby:2.4,ruby", + "version": "2.4", + "sampleRepo": "https://github.com/openshift/ruby-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/ruby-24-rhel7:latest" + } } ] } @@ -140,6 +163,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Node.js (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Node.js applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.", "iconClass": "icon-nodejs", "tags": "builder,nodejs", @@ -155,6 +179,7 @@ "name": "0.10", "annotations": { "openshift.io/display-name": "Node.js 0.10", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "DEPRECATED: Build and run Node.js 0.10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.", "iconClass": "icon-nodejs", "tags": "hidden,nodejs", @@ -171,6 +196,7 @@ "name": "4", "annotations": { "openshift.io/display-name": "Node.js 4", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Node.js 4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.", "iconClass": "icon-nodejs", "tags": "builder,nodejs", @@ -187,6 +213,7 @@ "name": "6", "annotations": { "openshift.io/display-name": "Node.js 6", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Node.js 6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container.", "iconClass": "icon-nodejs", "tags": "builder,nodejs", @@ -217,6 +244,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Perl (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Perl applications on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.", "iconClass": "icon-perl", "tags": "builder,perl", @@ -232,6 +260,7 @@ "name": "5.16", "annotations": { "openshift.io/display-name": "Perl 5.16", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Perl 5.16 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.", "iconClass": "icon-perl", "tags": "hidden,builder,perl", @@ -248,6 +277,7 @@ "name": "5.20", "annotations": { "openshift.io/display-name": "Perl 5.20", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Perl 5.20 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.", "iconClass": "icon-perl", "tags": "builder,perl", @@ -264,6 +294,7 @@ "name": "5.24", "annotations": { "openshift.io/display-name": "Perl 5.24", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Perl 5.24 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.24/README.md.", "iconClass": "icon-perl", "tags": "builder,perl", @@ -294,6 +325,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "PHP (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run PHP applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.", "iconClass": "icon-php", "tags": "builder,php", @@ -309,6 +341,7 @@ "name": "5.5", "annotations": { "openshift.io/display-name": "PHP 5.5", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run PHP 5.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.", "iconClass": "icon-php", "tags": "hidden,builder,php", @@ -325,6 +358,7 @@ "name": "5.6", "annotations": { "openshift.io/display-name": "PHP 5.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run PHP 5.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.", "iconClass": "icon-php", "tags": "builder,php", @@ -341,6 +375,7 @@ "name": "7.0", "annotations": { "openshift.io/display-name": "PHP 7.0", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run PHP 7.0 applications on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.0/README.md.", "iconClass": "icon-php", "tags": "builder,php", @@ -371,6 +406,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Python (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", "iconClass": "icon-python", "tags": "builder,python", @@ -386,6 +422,7 @@ "name": "3.3", "annotations": { "openshift.io/display-name": "Python 3.3", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Python 3.3 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.", "iconClass": "icon-python", "tags": "hidden,builder,python", @@ -402,6 +439,7 @@ "name": "2.7", "annotations": { "openshift.io/display-name": "Python 2.7", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Python 2.7 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/2.7/README.md.", "iconClass": "icon-python", "tags": "builder,python", @@ -418,6 +456,7 @@ "name": "3.4", "annotations": { "openshift.io/display-name": "Python 3.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Python 3.4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.4/README.md.", "iconClass": "icon-python", "tags": "builder,python", @@ -434,6 +473,7 @@ "name": "3.5", "annotations": { "openshift.io/display-name": "Python 3.5", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Build and run Python 3.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.", "iconClass": "icon-python", "tags": "builder,python", @@ -464,6 +504,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "MySQL (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MySQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.", "iconClass": "icon-mysql-database", "tags": "mysql" @@ -477,6 +518,7 @@ "name": "5.5", "annotations": { "openshift.io/display-name": "MySQL 5.5", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MySQL 5.5 database on RHEL 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.", "iconClass": "icon-mysql-database", "tags": "hidden,mysql", @@ -491,6 +533,7 @@ "name": "5.6", "annotations": { "openshift.io/display-name": "MySQL 5.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MySQL 5.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.", "iconClass": "icon-mysql-database", "tags": "mysql", @@ -505,6 +548,7 @@ "name": "5.7", "annotations": { "openshift.io/display-name": "MySQL 5.7", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MySQL 5.7 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.7/README.md.", "iconClass": "icon-mysql-database", "tags": "mysql", @@ -533,6 +577,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "MariaDB (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MariaDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.", "iconClass": "icon-mariadb", "tags": "mariadb" @@ -546,6 +591,7 @@ "name": "10.1", "annotations": { "openshift.io/display-name": "MariaDB 10.1", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MariaDB 10.1 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.", "iconClass": "icon-mariadb", "tags": "mariadb", @@ -574,6 +620,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "PostgreSQL (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a PostgreSQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.", "iconClass": "icon-postgresql", "tags": "postgresql" @@ -587,6 +634,7 @@ "name": "9.2", "annotations": { "openshift.io/display-name": "PostgreSQL 9.2", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a PostgreSQL 9.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.", "iconClass": "icon-postgresql", "tags": "hidden,postgresql", @@ -601,6 +649,7 @@ "name": "9.4", "annotations": { "openshift.io/display-name": "PostgreSQL 9.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a PostgreSQL 9.4 database on RHEL 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.", "iconClass": "icon-postgresql", "tags": "postgresql", @@ -615,6 +664,7 @@ "name": "9.5", "annotations": { "openshift.io/display-name": "PostgreSQL 9.5", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a PostgreSQL 9.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.", "iconClass": "icon-postgresql", "tags": "postgresql", @@ -643,6 +693,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "MongoDB (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.", "iconClass": "icon-mongodb", "tags": "mongodb" @@ -656,6 +707,7 @@ "name": "2.4", "annotations": { "openshift.io/display-name": "MongoDB 2.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 2.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.", "iconClass": "icon-mongodb", "tags": "hidden,mongodb", @@ -670,6 +722,7 @@ "name": "2.6", "annotations": { "openshift.io/display-name": "MongoDB 2.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 2.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.", "iconClass": "icon-mongodb", "tags": "mongodb", @@ -684,6 +737,7 @@ "name": "3.2", "annotations": { "openshift.io/display-name": "MongoDB 3.2", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 3.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.", "iconClass": "icon-mongodb", "tags": "mongodb", @@ -712,6 +766,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Redis (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a Redis database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Redis available on OpenShift, including major versions updates.", "iconClass": "icon-redis", "tags": "redis" @@ -725,6 +780,7 @@ "name": "3.2", "annotations": { "openshift.io/display-name": "Redis 3.2", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a Redis 3.2 database on RHEL 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.", "iconClass": "icon-redis", "tags": "redis", @@ -753,6 +809,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Jenkins (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a Jenkins server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.", "iconClass": "icon-jenkins", "tags": "jenkins" @@ -766,6 +823,7 @@ "name": "1", "annotations": { "openshift.io/display-name": "Jenkins 1.X", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", "iconClass": "icon-jenkins", "tags": "hidden,jenkins", @@ -780,6 +838,7 @@ "name": "2", "annotations": { "openshift.io/display-name": "Jenkins 2.X", + "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a Jenkins 2.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", "iconClass": "icon-jenkins", "tags": "jenkins", diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json index 289f809fa..a8b90a493 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json @@ -8,10 +8,10 @@ "description": "An example CakePHP application with a MySQL database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.", "tags": "quickstart,php,cakephp", "iconClass": "icon-php", - "template.openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.", @@ -219,7 +219,7 @@ "timeoutSeconds": 3, "initialDelaySeconds": 30, "httpGet": { - "path": "/", + "path": "/health.php", "port": 8080 } }, @@ -342,7 +342,8 @@ "metadata": { "name": "${DATABASE_SERVICE_NAME}", "annotations": { - "description": "Defines how to deploy the database" + "description": "Defines how to deploy the database", + "template.alpha.openshift.io/wait-for-ready": "true" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json index 0562982b3..e84397394 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json @@ -8,10 +8,10 @@ "description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.", "tags": "quickstart,php,cakephp", "iconClass": "icon-php", - "template.openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration. 
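Beyond the same openshift.io/ annotation rename seen in the db-templates, the quickstart hunks change runtime behavior in two ways: the HTTP probe path moves from "/" to a dedicated health endpoint ("/health.php" for the CakePHP templates), and the database DeploymentConfig gains the template.alpha.openshift.io/wait-for-ready annotation, presumably so template instantiation can wait for the database to become ready before the template reports success (the annotation is alpha, as its prefix indicates). The two fragments as they read after the patch:

    "httpGet": {
      "path": "/health.php",
      "port": 8080
    }

    "annotations": {
      "description": "Defines how to deploy the database",
      "template.alpha.openshift.io/wait-for-ready": "true"
    }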
The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.", @@ -219,7 +219,7 @@ "timeoutSeconds": 3, "initialDelaySeconds": 30, "httpGet": { - "path": "/", + "path": "/health.php", "port": 8080 } }, @@ -325,7 +325,8 @@ "metadata": { "name": "${DATABASE_SERVICE_NAME}", "annotations": { - "description": "Defines how to deploy the database" + "description": "Defines how to deploy the database", + "template.alpha.openshift.io/wait-for-ready": "true" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json index 7a3875d09..96048f200 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json @@ -8,10 +8,10 @@ "description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", "tags": "quickstart,perl,dancer", "iconClass": "icon-perl", - "template.openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/openshift/dancer-ex", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/openshift/dancer-ex", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", @@ -206,7 +206,7 @@ "timeoutSeconds": 3, "initialDelaySeconds": 30, "httpGet": { - "path": "/", + "path": "/health", "port": 8080 } }, @@ -307,7 +307,8 @@ "metadata": { "name": "${DATABASE_SERVICE_NAME}", "annotations": { - "description": "Defines how to deploy the database" + "description": "Defines how to deploy the database", + "template.alpha.openshift.io/wait-for-ready": "true" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json index 399ec72a8..1c87e05f3 100644 --- 
a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json @@ -8,10 +8,10 @@ "description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.", "tags": "quickstart,perl,dancer", "iconClass": "icon-perl", - "template.openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/openshift/dancer-ex", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/openshift/dancer-ex", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", @@ -206,7 +206,7 @@ "timeoutSeconds": 3, "initialDelaySeconds": 30, "httpGet": { - "path": "/", + "path": "/health", "port": 8080 } }, @@ -290,7 +290,8 @@ "metadata": { "name": "${DATABASE_SERVICE_NAME}", "annotations": { - "description": "Defines how to deploy the database" + "description": "Defines how to deploy the database", + "template.alpha.openshift.io/wait-for-ready": "true" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json index e37f7a492..060f45dac 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json @@ -8,10 +8,10 @@ "description": "An example Django application with a PostgreSQL database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", "tags": "quickstart,python,django", "iconClass": "icon-python", - "template.openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/openshift/django-ex", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/openshift/django-ex", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", @@ -311,7 +311,8 @@ "metadata": { "name": "${DATABASE_SERVICE_NAME}", "annotations": { - "description": "Defines how to deploy the database" + "description": "Defines how to deploy the database", + "template.alpha.openshift.io/wait-for-ready": "true" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json index 965c2ebfe..66f2f0ca1 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json @@ -8,10 +8,10 @@ "description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.", "tags": "quickstart,python,django", "iconClass": "icon-python", - "template.openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/openshift/django-ex", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration. 
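[Annotation] A second change repeated across these quickstarts is the new template.alpha.openshift.io/wait-for-ready annotation on each database DeploymentConfig; the template-instance machinery uses it to hold off reporting the template ready until the annotated object has finished rolling out. In YAML, the annotated metadata from the hunks above looks like:

    kind: DeploymentConfig
    apiVersion: v1
    metadata:
      name: ${DATABASE_SERVICE_NAME}
      annotations:
        description: Defines how to deploy the database
        template.alpha.openshift.io/wait-for-ready: "true"  # gate template readiness on this rollout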
The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/openshift/django-ex", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", @@ -294,7 +294,8 @@ "metadata": { "name": "${DATABASE_SERVICE_NAME}", "annotations": { - "description": "Defines how to deploy the database" + "description": "Defines how to deploy the database", + "template.alpha.openshift.io/wait-for-ready": "true" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json index 6cf9d76eb..ebba9ee65 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json @@ -4,14 +4,14 @@ "metadata": { "name": "httpd-example", "annotations": { - "openshift.io/display-name": "Httpd", - "description": "An example Httpd application that serves static content. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.", + "openshift.io/display-name": "Apache HTTP Server", + "description": "An example Apache HTTP Server (httpd) application that serves static content. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.", "tags": "quickstart,httpd", "iconClass": "icon-apache", - "template.openshift.io/long-description": "This template defines resources needed to develop a static application served by httpd, including a build configuration and application deployment configuration.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/openshift/httpd-ex", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template defines resources needed to develop a static application served by Apache HTTP Server (httpd), including a build configuration and application deployment configuration.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/openshift/httpd-ex", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.", diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json index 62f43bc0b..28b4b9d81 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json @@ -8,10 +8,10 @@ "description": "Jenkins service, without persistent storage.\n\nWARNING: Any data stored 
will be lost upon pod destruction. Only use this template for testing.", "iconClass": "icon-jenkins", "tags": "instant-app,jenkins", - "template.openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based oauth login. The Jenkins configuration is stored in non-persistent storage, so this configuration should be used for experimental purposes only.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based oauth login. The Jenkins configuration is stored in non-persistent storage, so this configuration should be used for experimental purposes only.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", @@ -112,10 +112,6 @@ "value": "true" }, { - "name": "OPENSHIFT_JENKINS_JVM_ARCH", - "value": "${JVM_ARCH}" - }, - { "name": "KUBERNETES_MASTER", "value": "https://kubernetes.default:443" }, @@ -124,6 +120,10 @@ "value": "true" }, { + "name": "JENKINS_SERVICE_NAME", + "value": "${JENKINS_SERVICE_NAME}" + }, + { "name": "JNLP_SERVICE_NAME", "value": "${JNLP_SERVICE_NAME}" } @@ -260,12 +260,6 @@ "value": "true" }, { - "name": "JVM_ARCH", - "displayName": "Jenkins JVM Architecture", - "description": "Whether Jenkins runs with a 32 bit (i386) or 64 bit (x86_64) JVM.", - "value": "i386" - }, - { "name": "MEMORY_LIMIT", "displayName": "Memory Limit", "description": "Maximum amount of memory the container can use.", diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json index e9068e455..4915bb12c 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json @@ -8,10 +8,10 @@ "description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-jenkins", "tags": "instant-app,jenkins", - "template.openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based oauth login.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based oauth login.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + 
"openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", @@ -129,10 +129,6 @@ "value": "true" }, { - "name": "OPENSHIFT_JENKINS_JVM_ARCH", - "value": "${JVM_ARCH}" - }, - { "name": "KUBERNETES_MASTER", "value": "https://kubernetes.default:443" }, @@ -141,6 +137,10 @@ "value": "true" }, { + "name": "JENKINS_SERVICE_NAME", + "value": "${JENKINS_SERVICE_NAME}" + }, + { "name": "JNLP_SERVICE_NAME", "value": "${JNLP_SERVICE_NAME}" } @@ -277,12 +277,6 @@ "value": "true" }, { - "name": "JVM_ARCH", - "displayName": "Jenkins JVM Architecture", - "description": "Whether Jenkins runs with a 32 bit (i386) or 64 bit (x86_64) JVM.", - "value": "i386" - }, - { "name": "MEMORY_LIMIT", "displayName": "Memory Limit", "description": "Maximum amount of memory the container can use.", diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json index df3704b9f..9543b5681 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json @@ -8,10 +8,10 @@ "description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", "tags": "quickstart,nodejs", "iconClass": "icon-nodejs", - "template.openshift.io/long-description": "This template defines resources needed to develop a NodeJS application, including a build configuration, application deployment configuration, and database deployment configuration.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template defines resources needed to develop a NodeJS application, including a build configuration, application deployment configuration, and database deployment configuration.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", @@ -309,7 +309,8 @@ "metadata": { "name": "${DATABASE_SERVICE_NAME}", "annotations": { - "description": "Defines how to deploy the database" + "description": "Defines how to deploy the database", + "template.alpha.openshift.io/wait-for-ready": "true" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json index eb6ab33d9..0649d6204 100644 --- 
a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json @@ -8,10 +8,10 @@ "description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.", "tags": "quickstart,nodejs", "iconClass": "icon-nodejs", - "template.openshift.io/long-description": "This template defines resources needed to develop a NodeJS application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template defines resources needed to develop a NodeJS application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", @@ -292,7 +292,8 @@ "metadata": { "name": "${DATABASE_SERVICE_NAME}", "annotations": { - "description": "Defines how to deploy the database" + "description": "Defines how to deploy the database", + "template.alpha.openshift.io/wait-for-ready": "true" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json index 59e2e41ea..3810ef727 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json @@ -8,10 +8,10 @@ "description": "An example Rails application with a PostgreSQL database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", "tags": "quickstart,ruby,rails", "iconClass": "icon-ruby", - "template.openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/openshift/rails-ex", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/openshift/rails-ex", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", @@ -354,7 +354,8 @@ "metadata": { "name": "${DATABASE_SERVICE_NAME}", "annotations": { - "description": "Defines how to deploy the database" + "description": "Defines how to deploy the database", + "template.alpha.openshift.io/wait-for-ready": "true" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json index b3d080a91..3d8336c5a 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json @@ -8,10 +8,10 @@ "description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.", "tags": "quickstart,ruby,rails", "iconClass": "icon-ruby", - "template.openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.", - "template.openshift.io/provider-display-name": "Red Hat, Inc.", - "template.openshift.io/documentation-url": "https://github.com/openshift/rails-ex", - "template.openshift.io/support-url": "https://access.redhat.com" + "openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration. 
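[Annotation] The other change every quickstart here shares is mechanical: the four descriptive annotations lose their template.openshift.io/ prefix in favor of plain openshift.io/, with the values untouched. Sketched in YAML against the rails-ex template just shown (long description elided):

    metadata:
      annotations:
        # formerly template.openshift.io/long-description, .../provider-display-name, etc.
        openshift.io/long-description: This template defines resources needed to develop a Rails application, ...
        openshift.io/provider-display-name: Red Hat, Inc.
        openshift.io/documentation-url: https://github.com/openshift/rails-ex
        openshift.io/support-url: https://access.redhat.com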
The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/openshift/rails-ex", + "openshift.io/support-url": "https://access.redhat.com" } }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", @@ -337,7 +337,8 @@ "metadata": { "name": "${DATABASE_SERVICE_NAME}", "annotations": { - "description": "Defines how to deploy the database" + "description": "Defines how to deploy the database", + "template.alpha.openshift.io/wait-for-ready": "true" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json index 0bb56452b..0aad7fae6 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json @@ -314,6 +314,38 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { + "name": "jboss-eap71-openshift", + "annotations": { + "openshift.io/display-name": "Red Hat JBoss EAP 7.1" + } + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/jboss-eap-7/eap71-openshift", + "tags": [ + { + "name": "1.0-TP", + "annotations": { + "description": "JBoss EAP 7.1 Tech Preview.", + "iconClass": "icon-jboss", + "tags": "builder,eap,javaee,java,jboss,xpaas", + "supports":"eap:7.1,javaee:7,java:8,xpaas:1.0", + "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git", + "sampleContextDir": "kitchensink", + "sampleRef": "7.0.0.GA", + "version": "1.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-7-tech-preview/eap71-openshift:1.0" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { "name": "jboss-decisionserver62-openshift", "annotations": { "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server" @@ -524,6 +556,32 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { + "name": "jboss-datagrid71-openshift", + "annotations": { + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1" + } + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-openshift", + "tags": [ + { + "name": "1.0", + "annotations": { + "description": "JBoss Data Grid 7.1 S2I images.", + "iconClass": "icon-jboss", + "tags": "datagrid,jboss,xpaas", + "supports": "datagrid:7.1,xpaas:1.0", + "version": "1.0", + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { "name": "jboss-datagrid65-client-openshift", "annotations": { "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 Client Modules for EAP" @@ -549,6 +607,31 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { + "name": "jboss-datagrid71-client-openshift", + "annotations": { + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 Client Modules for EAP" + } + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-client-openshift", + "tags": [ + { + "name": "1.0", + "annotations": { + "description": "JBoss Data Grid 7.1 
Client Modules for EAP.", + "iconClass": "icon-jboss", + "tags": "client,jboss,xpaas", + "version": "1.0", + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 Client Modules for EAP" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { "name": "jboss-datavirt63-openshift", "annotations": { "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3" @@ -671,6 +754,16 @@ "supports":"amq:6.2,messaging,xpaas:1.4", "version": "1.4" } + }, + { + "name": "1.5", + "annotations": { + "description": "JBoss A-MQ 6.2 broker image.", + "iconClass": "icon-jboss", + "tags": "messaging,amq,jboss,xpaas", + "supports":"amq:6.2,messaging,xpaas:1.5", + "version": "1.5" + } } ] } @@ -697,6 +790,17 @@ "version": "1.0", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3" } + }, + { + "name": "1.1", + "annotations": { + "description": "JBoss A-MQ 6.3 broker image.", + "iconClass": "icon-jboss", + "tags": "messaging,amq,jboss,xpaas", + "supports": "amq:6.3,messaging,xpaas:1.1", + "version": "1.1", + "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3" + } } ] } diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-basic.json index af20b373a..d219ead67 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-basic.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-basic.json @@ -6,14 +6,14 @@ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.", "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", - "version": "1.4.0", + "version": "1.5.0", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Ephemeral, no SSL)" }, "name": "amq62-basic" }, "labels": { "template": "amq62-basic", - "xpaas": "1.4.0" + "xpaas": "1.5.0" }, "message": "A new messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.", "parameters": [ @@ -215,7 +215,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-62:1.4" + "name": "jboss-amq-62:1.5" } } }, diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent-ssl.json index 5acdbfabf..529a2a8ec 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent-ssl.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent-ssl.json @@ -3,17 +3,17 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.", + "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages, including message migration when the number of pods are reduced. 
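[Annotation] jboss-image-streams.json above grows several streams and tags: a jboss-eap71-openshift tech-preview stream, jboss-datagrid71-openshift plus its client-modules stream, and new broker tags jboss-amq-62:1.5 and jboss-amq-63:1.1. The A-MQ templates in the rest of this patch are then repointed at the new tags through their ImageChange triggers, roughly as below (the container name is assumed here; the excerpt only shows the trigger's "from" block):

    triggers:
    - type: ImageChange
      imageChangeParams:
        automatic: true
        containerNames:
        - ${APPLICATION_NAME}-amq   # assumed main broker container name
        from:
          kind: ImageStreamTag
          namespace: ${IMAGE_STREAM_NAMESPACE}
          name: jboss-amq-62:1.5    # bumped from jboss-amq-62:1.4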
This template supports SSL and requires usage of OpenShift secrets.", "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", - "version": "1.4.0", + "version": "1.5.0", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Persistent with SSL)" }, "name": "amq62-persistent-ssl" }, "labels": { "template": "amq62-persistent-ssl", - "xpaas": "1.4.0" + "xpaas": "1.5.0" }, "message": "A new persistent messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.", "parameters": [ @@ -26,9 +26,9 @@ }, { "displayName": "Split Data?", - "description": "Split the data directory for each node in a mesh.", + "description": "Split the data directory for each node in a mesh, this is now the default behaviour.", "name": "AMQ_SPLIT", - "value": "false", + "value": "true", "required": false }, { @@ -360,7 +360,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-62:1.4" + "name": "jboss-amq-62:1.5" } } }, @@ -546,6 +546,114 @@ } }, { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-drainer", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-drainer" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-amq-62:1.5" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-drainer" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-drainer", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-drainer", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-drainer", + "image": "jboss-amq-62", + "command": [ + "/opt/amq/bin/drain.sh" + ], + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "mountPath": "/opt/amq/data", + "name": "${APPLICATION_NAME}-amq-pvol" + } + ], + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "tcp", + "containerPort": 61616, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "AMQ_USER", + "value": "${MQ_USERNAME}" + }, + { + "name": "AMQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "AMQ_MESH_SERVICE_NAME", + "value": "${APPLICATION_NAME}-amq-tcp" + }, + { + "name": "AMQ_MESH_SERVICE_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-amq-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-amq-claim" + } + } + ] + } + } + } + }, + { "apiVersion": "v1", "kind": "PersistentVolumeClaim", "metadata": { diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent.json index b8089cd6d..b17d23c06 100644 --- 
a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent.json @@ -3,17 +3,17 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.", + "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages, including message migration when the number of pods are reduced. This template doesn't feature SSL support.", "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", - "version": "1.4.0", + "version": "1.5.0", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Persistent, no SSL)" }, "name": "amq62-persistent" }, "labels": { "template": "amq62-persistent", - "xpaas": "1.4.0" + "xpaas": "1.5.0" }, "message": "A new persistent messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.", "parameters": [ @@ -26,9 +26,9 @@ }, { "displayName": "Split Data?", - "description": "Split the data directory for each node in a mesh.", + "description": "Split the data directory for each node in a mesh, this is now the default behaviour.", "name": "AMQ_SPLIT", - "value": "false", + "value": "true", "required": false }, { @@ -229,7 +229,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-62:1.4" + "name": "jboss-amq-62:1.5" } } }, @@ -363,6 +363,114 @@ } }, { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-drainer", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-drainer" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-amq-62:1.5" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-drainer" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-drainer", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-drainer", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-drainer", + "image": "jboss-amq-62", + "command": [ + "/opt/amq/bin/drain.sh" + ], + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "mountPath": "/opt/amq/data", + "name": "${APPLICATION_NAME}-amq-pvol" + } + ], + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "tcp", + "containerPort": 61616, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "AMQ_USER", + "value": "${MQ_USERNAME}" + }, + { + "name": "AMQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "AMQ_MESH_SERVICE_NAME", + "value": "${APPLICATION_NAME}-amq-tcp" + }, + { + "name": "AMQ_MESH_SERVICE_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-amq-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-amq-claim" + } + } + ] + } + } + } + }, 
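[Annotation] The ${APPLICATION_NAME}-drainer DeploymentConfig added above is what backs the new "message migration when the number of pods are reduced" wording: with AMQ_SPLIT now defaulting to true, each mesh node writes to its own slice of the shared data volume, and the single drainer pod runs /opt/amq/bin/drain.sh against that same claim to recover messages stranded by scaled-down brokers. Since the broker pods and the drainer all mount ${APPLICATION_NAME}-amq-claim, the claim plausibly needs a ReadWriteMany-capable volume; a hedged sketch (access mode and size are inferences, not part of this patch):

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: ${APPLICATION_NAME}-amq-claim
    spec:
      accessModes:
      - ReadWriteMany          # shared by every broker pod plus the drainer
      resources:
        requests:
          storage: 1Gi         # illustrative capacity only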
+ { "apiVersion": "v1", "kind": "PersistentVolumeClaim", "metadata": { diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-ssl.json index b52fdbfb0..a4a099e08 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-ssl.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-ssl.json @@ -6,14 +6,14 @@ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.", "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", - "version": "1.4.0", + "version": "1.5.0", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Ephemeral with SSL)" }, "name": "amq62-ssl" }, "labels": { "template": "amq62-ssl", - "xpaas": "1.4.0" + "xpaas": "1.5.0" }, "message": "A new messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.", "parameters": [ @@ -346,7 +346,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-62:1.4" + "name": "jboss-amq-62:1.5" } } }, diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-basic.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-basic.json index d29f6a300..4655d3174 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-basic.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-basic.json @@ -6,14 +6,14 @@ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.", "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", - "version": "1.0", + "version": "1.1", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Ephemeral, no SSL)" }, "name": "amq63-basic" }, "labels": { "template": "amq63-basic", - "xpaas": "1.4.0" + "xpaas": "1.5.0" }, "message": "A new messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.", "parameters": [ @@ -215,7 +215,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-63:1.0" + "name": "jboss-amq-63:1.1" } } }, diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent-ssl.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent-ssl.json index 47f6396dd..0e8b80592 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent-ssl.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent-ssl.json @@ -3,17 +3,17 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.", + "description": "Application template for JBoss A-MQ brokers. 
These are deployed as standalone and use persistent storage for saving messages, including message migration when the number of pods are reduced. This template supports SSL and requires usage of OpenShift secrets.", "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", - "version": "1.0", + "version": "1.1", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Persistent with SSL)" }, "name": "amq63-persistent-ssl" }, "labels": { "template": "amq63-persistent-ssl", - "xpaas": "1.4.0" + "xpaas": "1.5.0" }, "message": "A new persistent messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.", "parameters": [ @@ -26,9 +26,9 @@ }, { "displayName": "Split Data?", - "description": "Split the data directory for each node in a mesh.", + "description": "Split the data directory for each node in a mesh, this is now the default behaviour.", "name": "AMQ_SPLIT", - "value": "false", + "value": "true", "required": false }, { @@ -360,7 +360,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-63:1.0" + "name": "jboss-amq-63:1.1" } } }, @@ -546,6 +546,114 @@ } }, { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-drainer", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-drainer" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-amq-63:1.1" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-drainer" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-drainer", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-drainer", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-drainer", + "image": "jboss-amq-63", + "command": [ + "/opt/amq/bin/drain.sh" + ], + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "mountPath": "/opt/amq/data", + "name": "${APPLICATION_NAME}-amq-pvol" + } + ], + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "tcp", + "containerPort": 61616, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "AMQ_USER", + "value": "${MQ_USERNAME}" + }, + { + "name": "AMQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "AMQ_MESH_SERVICE_NAME", + "value": "${APPLICATION_NAME}-amq-tcp" + }, + { + "name": "AMQ_MESH_SERVICE_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-amq-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-amq-claim" + } + } + ] + } + } + } + }, + { "apiVersion": "v1", "kind": "PersistentVolumeClaim", "metadata": { diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent.json 
b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent.json index 4b64203c4..b94a8bbb8 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent.json @@ -3,17 +3,17 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.", + "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages, including message migration when the number of pods are reduced. This template doesn't feature SSL support.", "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", - "version": "1.0", + "version": "1.1", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Persistent, no SSL)" }, "name": "amq63-persistent" }, "labels": { "template": "amq63-persistent", - "xpaas": "1.4.0" + "xpaas": "1.5.0" }, "message": "A new persistent messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.", "parameters": [ @@ -26,9 +26,9 @@ }, { "displayName": "Split Data?", - "description": "Split the data directory for each node in a mesh.", + "description": "Split the data directory for each node in a mesh, this is now the default behaviour.", "name": "AMQ_SPLIT", - "value": "false", + "value": "true", "required": false }, { @@ -229,7 +229,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-63:1.0" + "name": "jboss-amq-63:1.1" } } }, @@ -363,6 +363,114 @@ } }, { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-drainer", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-drainer" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-amq-63:1.1" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-drainer" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-drainer", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-drainer", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-drainer", + "image": "jboss-amq-63", + "command": [ + "/opt/amq/bin/drain.sh" + ], + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "mountPath": "/opt/amq/data", + "name": "${APPLICATION_NAME}-amq-pvol" + } + ], + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "tcp", + "containerPort": 61616, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "AMQ_USER", + "value": "${MQ_USERNAME}" + }, + { + "name": "AMQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "AMQ_MESH_SERVICE_NAME", + "value": "${APPLICATION_NAME}-amq-tcp" + }, + { + "name": "AMQ_MESH_SERVICE_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + } + ] + } + ], + "volumes": [ + { + "name": 
"${APPLICATION_NAME}-amq-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-amq-claim" + } + } + ] + } + } + } + }, + { "apiVersion": "v1", "kind": "PersistentVolumeClaim", "metadata": { diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-ssl.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-ssl.json index 20ad50016..f2de718c5 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-ssl.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-ssl.json @@ -6,14 +6,14 @@ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.", "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", - "version": "1.0", + "version": "1.1", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Ephemeral with SSL)" }, "name": "amq63-ssl" }, "labels": { "template": "amq63-ssl", - "xpaas": "1.4.0" + "xpaas": "1.5.0" }, "message": "A new messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.", "parameters": [ @@ -346,7 +346,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-63:1.0" + "name": "jboss-amq-63:1.1" } } }, diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-basic.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-basic.json new file mode 100644 index 000000000..200f232ea --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-basic.json @@ -0,0 +1,372 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for JDG 7.1 applications.", + "tags": "datagrid,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 (Ephemeral, no https)" + }, + "name": "datagrid71-basic" + }, + "labels": { + "template": "datagrid71-basic", + "xpaas": "1.4.0" + }, + "message": "A new data grid service has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\".", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "datagrid-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Username", + "description": "User name for JDG user.", + "name": "USERNAME", + "value": "", + "required": false + }, + { + "displayName": "Password", + "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). 
(optional)", + "name": "PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "Infinispan Connectors", + "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')", + "name": "INFINISPAN_CONNECTORS", + "value": "hotrod,memcached,rest", + "required": false + }, + { + "displayName": "Cache Names", + "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.", + "name": "CACHE_NAMES", + "value": "", + "required": false + }, + { + "displayName": "Datavirt Cache Names", + "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.", + "name": "DATAVIRT_CACHE_NAMES", + "value": "", + "required": false + }, + { + "displayName": "Default Cache Type", + "description": "Default cache type for all caches. If empty then distributed will be the default", + "name": "CACHE_TYPE_DEFAULT", + "value": "", + "required": false + }, + { + "displayName": "Encryption Requires SSL Client Authentication?", + "description": "", + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "", + "required": false + }, + { + "displayName": "Memcached Cache Name", + "description": "The name of the cache to expose through this memcached connector (defaults to 'default')", + "name": "MEMCACHED_CACHE", + "value": "default", + "required": false + }, + { + "displayName": "REST Security Domain", + "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint", + "name": "REST_SECURITY_DOMAIN", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11211, + "targetPort": 11211 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-memcached", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Memcached service for clustered applications." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11333, + "targetPort": 11333 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-hotrod", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Hot Rod service for clustered applications." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-datagrid71-openshift:1.0" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "jboss-datagrid71-openshift", + "imagePullPolicy": "Always", + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + }, + { + "name": "memcached", + "containerPort": 11211, + "protocol": "TCP" + }, + { + "name": "hotrod-internal", + "containerPort": 11222, + "protocol": "TCP" + }, + { + "name": "hotrod", + "containerPort": 11333, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "USERNAME", + "value": "${USERNAME}" + }, + { + "name": "PASSWORD", + "value": "${PASSWORD}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "INFINISPAN_CONNECTORS", + "value": "${INFINISPAN_CONNECTORS}" + }, + { + "name": "CACHE_NAMES", + "value": "${CACHE_NAMES}" + }, + { + "name": "DATAVIRT_CACHE_NAMES", + "value": "${DATAVIRT_CACHE_NAMES}" + }, + { + "name": "CACHE_TYPE_DEFAULT", + "value": "${CACHE_TYPE_DEFAULT}" + }, + { + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}" + }, + { + "name": "HOTROD_SERVICE_NAME", + "value": "${APPLICATION_NAME}-hotrod" + }, + { + "name": "MEMCACHED_CACHE", + "value": "${MEMCACHED_CACHE}" + }, + { + "name": "REST_SECURITY_DOMAIN", + "value": "${REST_SECURITY_DOMAIN}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + } + 
] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-https.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-https.json new file mode 100644 index 000000000..b2c385608 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-https.json @@ -0,0 +1,550 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for JDG 7.1 applications.", + "tags": "datagrid,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 (Ephemeral with https)" + }, + "name": "datagrid71-https" + }, + "labels": { + "template": "datagrid71-https", + "xpaas": "1.4.0" + }, + "message": "A new data grid service has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "datagrid-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Username", + "description": "User name for JDG user.", + "name": "USERNAME", + "value": "", + "required": false + }, + { + "displayName": "Password", + "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)", + "name": "PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "datagrid-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "Infinispan Connectors", + "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')", + "name": "INFINISPAN_CONNECTORS", + "value": "hotrod,memcached,rest", + "required": false + }, + { + "displayName": "Cache Names", + "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.", + "name": "CACHE_NAMES", + "value": "", + "required": false + }, + { + "displayName": "Datavirt Cache Names", + "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.", + "name": "DATAVIRT_CACHE_NAMES", + "value": "", + "required": false + }, + { + "displayName": "Default Cache Type", + "description": "Default cache type for all caches. If empty then distributed will be the default", + "name": "CACHE_TYPE_DEFAULT", + "value": "", + "required": false + }, + { + "displayName": "Encryption Requires SSL Client Authentication?", + "description": "", + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "", + "required": false + }, + { + "displayName": "Memcached Cache Name", + "description": "The name of the cache to expose through this memcached connector (defaults to 'default')", + "name": "MEMCACHED_CACHE", + "value": "default", + "required": false + }, + { + "displayName": "REST Security Domain", + "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint", + "name": "REST_SECURITY_DOMAIN", + "value": "", + "required": false + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "datagrid-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTPS port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11211, + "targetPort": 11211 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-memcached", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Memcached service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11333, + "targetPort": 11333 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-hotrod", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Hot Rod service for clustered applications." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTPS service." 
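As the template message notes, the https variants expect the operator to pre-create the "datagrid-service-account" service account plus the keystore secrets named by ${HTTPS_SECRET} and ${JGROUPS_ENCRYPT_SECRET} (both default to datagrid-app-secret). A minimal setup sketch as Ansible tasks, with the project name and keystore paths as hypothetical placeholders:

- name: Create the service account expected by the datagrid71 https templates
  command: oc create serviceaccount datagrid-service-account -n myproject

- name: Create the keystore secret referenced by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET
  # Both parameters default to the same secret name, so a single secret
  # can carry keystore.jks (HTTPS) and jgroups.jceks (JGroups) together.
  command: >
    oc create secret generic datagrid-app-secret
    --from-file=keystore.jks=/path/to/keystore.jks
    --from-file=jgroups.jceks=/path/to/jgroups.jceks
    -n myproject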
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-datagrid71-openshift:1.0" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "datagrid-service-account", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "jboss-datagrid71-openshift", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "datagrid-keystore-volume", + "mountPath": "/etc/datagrid-secret-volume", + "readOnly": true + }, + { + "name": "datagrid-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + }, + { + "name": "memcached", + "containerPort": 11211, + "protocol": "TCP" + }, + { + "name": "hotrod-internal", + "containerPort": 11222, + "protocol": "TCP" + }, + { + "name": "hotrod", + "containerPort": 11333, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "USERNAME", + "value": "${USERNAME}" + }, + { + "name": "PASSWORD", + "value": "${PASSWORD}" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/datagrid-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "INFINISPAN_CONNECTORS", + "value": "${INFINISPAN_CONNECTORS}" + }, + { + "name": "CACHE_NAMES", + "value": "${CACHE_NAMES}" + }, + { + "name": "DATAVIRT_CACHE_NAMES", + "value": "${DATAVIRT_CACHE_NAMES}" + }, + { + "name": "CACHE_TYPE_DEFAULT", + "value": "${CACHE_TYPE_DEFAULT}" + }, + { + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}" + }, + { + "name": "HOTROD_SERVICE_NAME", + "value": "${APPLICATION_NAME}-hotrod" + }, + { + "name": "MEMCACHED_CACHE", + "value": "${MEMCACHED_CACHE}" + }, + { + "name": "REST_SECURITY_DOMAIN", + "value": "${REST_SECURITY_DOMAIN}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + 
"value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "datagrid-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "datagrid-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql-persistent.json new file mode 100644 index 000000000..8512ead55 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql-persistent.json @@ -0,0 +1,852 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for JDG 7.1 and MySQL applications with persistent storage.", + "tags": "datagrid,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 + MySQL (Persistent with https)" + }, + "name": "datagrid71-mysql-persistent" + }, + "labels": { + "template": "datagrid71-mysql-persistent", + "xpaas": "1.4.0" + }, + "message": "A new data grid service (using MySQL with persistent storage) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "datagrid-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Username", + "description": "User name for JDG user.", + "name": "USERNAME", + "value": "", + "required": false + }, + { + "displayName": "Password", + "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). 
(optional)", + "name": "PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "datagrid-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Database JNDI Name", + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql", + "name": "DB_JNDI", + "value": "java:/jboss/datasources/mysql", + "required": false + }, + { + "displayName": "Database Name", + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "displayName": "Database Username", + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Password", + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Datasource Minimum Pool Size", + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Maximum Pool Size", + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Transaction Isolation", + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "displayName": "MySQL Lower Case Table Names", + "description": "Sets how the table names are stored and compared.", + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "required": false + }, + { + "displayName": "MySQL Maximum number of connections", + "description": "The maximum permitted number of simultaneous client connections.", + "name": "MYSQL_MAX_CONNECTIONS", + "required": false + }, + { + "displayName": "MySQL FullText Minimum Word Length", + "description": "The minimum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MIN_WORD_LEN", + "required": false + }, + { + "displayName": "MySQL FullText Maximum Word Length", + "description": "The maximum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MAX_WORD_LEN", + "required": false + }, + { + "displayName": "MySQL AIO", + "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", + "name": "MYSQL_AIO", + "required": false + }, + { + "displayName": "Database Volume Capacity", + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. 
These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "Infinispan Connectors", + "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')", + "name": "INFINISPAN_CONNECTORS", + "value": "hotrod,memcached,rest", + "required": false + }, + { + "displayName": "Cache Names", + "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.", + "name": "CACHE_NAMES", + "value": "", + "required": false + }, + { + "displayName": "Datavirt Cache Names", + "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.", + "name": "DATAVIRT_CACHE_NAMES", + "value": "", + "required": false + }, + { + "displayName": "Default Cache Type", + "description": "Default cache type for all caches. If empty then distributed will be the default", + "name": "CACHE_TYPE_DEFAULT", + "value": "", + "required": false + }, + { + "displayName": "Encryption Requires SSL Client Authentication?", + "description": "", + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "", + "required": false + }, + { + "displayName": "Memcached Cache Name", + "description": "The name of the cache to expose through this memcached connector (defaults to 'default')", + "name": "MEMCACHED_CACHE", + "value": "default", + "required": false + }, + { + "displayName": "REST Security Domain", + "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint", + "name": "REST_SECURITY_DOMAIN", + "value": "", + "required": false + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "datagrid-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "MySQL Image Stream Tag", + "description": "The tag to use for the \"mysql\" image stream. 
Typically, this aligns with the major.minor version of MySQL.", + "name": "MYSQL_IMAGE_STREAM_TAG", + "value": "5.7", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTPS port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11211, + "targetPort": 11211 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-memcached", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Memcached service for clustered applications.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11333, + "targetPort": 11333 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-hotrod", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Hot Rod service for clustered applications.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTPS service." 
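Each application-facing service in the MySQL variants carries a service.alpha.openshift.io/dependencies annotation pointing at the ${APPLICATION_NAME}-mysql service, which the web console uses to group the data grid with its database. Reduced to plain YAML with the template defaults filled in (illustrative only, not part of this diff), the pattern looks like:

apiVersion: v1
kind: Service
metadata:
  name: datagrid-app
  annotations:
    # The console reads this annotation to display datagrid-app-mysql
    # as a dependency of this service.
    service.alpha.openshift.io/dependencies: '[{"name": "datagrid-app-mysql", "kind": "Service"}]'
spec:
  ports:
    - port: 8080
      targetPort: 8080
  selector:
    deploymentConfig: datagrid-app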
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-datagrid71-openshift:1.0" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "datagrid-service-account", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "jboss-datagrid71-openshift", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "datagrid-keystore-volume", + "mountPath": "/etc/datagrid-secret-volume", + "readOnly": true + }, + { + "name": "datagrid-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + }, + { + "name": "memcached", + "containerPort": 11211, + "protocol": "TCP" + }, + { + "name": "hotrod-internal", + "containerPort": 11222, + "protocol": "TCP" + }, + { + "name": "hotrod", + "containerPort": 11333, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "USERNAME", + "value": "${USERNAME}" + }, + { + "name": "PASSWORD", + "value": "${PASSWORD}" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/datagrid-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "DEFAULT_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "DEFAULT_JDBC_STORE_DATASOURCE", + "value": "${DB_JNDI}" + }, + { + "name": "MEMCACHED_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "MEMCACHED_JDBC_STORE_DATASOURCE", + "value": 
"${DB_JNDI}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "INFINISPAN_CONNECTORS", + "value": "${INFINISPAN_CONNECTORS}" + }, + { + "name": "CACHE_NAMES", + "value": "${CACHE_NAMES}" + }, + { + "name": "DATAVIRT_CACHE_NAMES", + "value": "${DATAVIRT_CACHE_NAMES}" + }, + { + "name": "CACHE_TYPE_DEFAULT", + "value": "${CACHE_TYPE_DEFAULT}" + }, + { + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}" + }, + { + "name": "HOTROD_SERVICE_NAME", + "value": "${APPLICATION_NAME}-hotrod" + }, + { + "name": "MEMCACHED_CACHE", + "value": "${MEMCACHED_CACHE}" + }, + { + "name": "REST_SECURITY_DOMAIN", + "value": "${REST_SECURITY_DOMAIN}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "datagrid-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "datagrid-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mysql" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mysql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mysql", + "image": "mysql", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 3306, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/mysql/data", + "name": "${APPLICATION_NAME}-mysql-pvol" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}" + }, + { + "name": "MYSQL_MAX_CONNECTIONS", + "value": "${MYSQL_MAX_CONNECTIONS}" + }, + { + "name": "MYSQL_FT_MIN_WORD_LEN", + "value": "${MYSQL_FT_MIN_WORD_LEN}" + }, + { + "name": "MYSQL_FT_MAX_WORD_LEN", + "value": "${MYSQL_FT_MAX_WORD_LEN}" + }, + { + "name": "MYSQL_AIO", + "value": "${MYSQL_AIO}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-mysql-pvol", + "persistentVolumeClaim": { + "claimName": 
"${APPLICATION_NAME}-mysql-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-mysql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql.json new file mode 100644 index 000000000..9ac76cecc --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql.json @@ -0,0 +1,811 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for JDG 7.1 and MySQL applications.", + "tags": "datagrid,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 + MySQL (Ephemeral with https)" + }, + "name": "datagrid71-mysql" + }, + "labels": { + "template": "datagrid71-mysql", + "xpaas": "1.4.0" + }, + "message": "A new data grid service (using MySQL) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "datagrid-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Username", + "description": "User name for JDG user.", + "name": "USERNAME", + "value": "", + "required": false + }, + { + "displayName": "Password", + "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). 
(optional)", + "name": "PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "datagrid-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Database JNDI Name", + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql", + "name": "DB_JNDI", + "value": "java:/jboss/datasources/mysql", + "required": false + }, + { + "displayName": "Database Name", + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "displayName": "Database Username", + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Password", + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Datasource Minimum Pool Size", + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Maximum Pool Size", + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Transaction Isolation", + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "displayName": "MySQL Lower Case Table Names", + "description": "Sets how the table names are stored and compared.", + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "required": false + }, + { + "displayName": "MySQL Maximum number of connections", + "description": "The maximum permitted number of simultaneous client connections.", + "name": "MYSQL_MAX_CONNECTIONS", + "required": false + }, + { + "displayName": "MySQL FullText Minimum Word Length", + "description": "The minimum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MIN_WORD_LEN", + "required": false + }, + { + "displayName": "MySQL FullText Maximum Word Length", + "description": "The maximum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MAX_WORD_LEN", + "required": false + }, + { + "displayName": "MySQL AIO", + "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", + "name": "MYSQL_AIO", + "required": false + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "Infinispan Connectors", + "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')", + "name": "INFINISPAN_CONNECTORS", + "value": "hotrod,memcached,rest", + "required": false + }, + { + "displayName": "Cache Names", + "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.", + "name": "CACHE_NAMES", + "value": "", + "required": false + }, + { + "displayName": "Datavirt Cache Names", + "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.", + "name": "DATAVIRT_CACHE_NAMES", + "value": "", + "required": false + }, + { + "displayName": "Default Cache Type", + "description": "Default cache type for all caches. If empty then distributed will be the default", + "name": "CACHE_TYPE_DEFAULT", + "value": "", + "required": false + }, + { + "displayName": "Encryption Requires SSL Client Authentication?", + "description": "", + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "", + "required": false + }, + { + "displayName": "Memcached Cache Name", + "description": "The name of the cache to expose through this memcached connector (defaults to 'default')", + "name": "MEMCACHED_CACHE", + "value": "default", + "required": false + }, + { + "displayName": "REST Security Domain", + "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint", + "name": "REST_SECURITY_DOMAIN", + "value": "", + "required": false + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "datagrid-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "MySQL Image Stream Tag", + "description": "The tag to use for the \"mysql\" image stream. 
Typically, this aligns with the major.minor version of MySQL.", + "name": "MYSQL_IMAGE_STREAM_TAG", + "value": "5.7", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTPS port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11211, + "targetPort": 11211 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-memcached", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Memcached service for clustered applications.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11333, + "targetPort": 11333 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-hotrod", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Hot Rod service for clustered applications.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTPS service." 
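The MySQL variants declare DB_USERNAME and DB_PASSWORD with "generate": "expression", so oc process fills them from the "from" patterns (user[a-zA-Z0-9]{3} and [a-zA-Z0-9]{8}) unless the caller supplies values; the JDG container then wires them to the datasource through DB_SERVICE_PREFIX_MAPPING. A minimal sketch of pinning explicit credentials at processing time, assuming the template has been loaded into the openshift namespace and using a hypothetical target project and credentials:

- name: Instantiate datagrid71-mysql with pinned database credentials
  # Passing -p suppresses the generated defaults declared with
  # "generate": "expression" in the parameter list above.
  shell: >
    oc process -n openshift datagrid71-mysql
    -p DB_USERNAME=jdguser
    -p DB_PASSWORD=ChangeMe123
    | oc create -n myproject -f -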
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-datagrid71-openshift:1.0" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "datagrid-service-account", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "jboss-datagrid71-openshift", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "datagrid-keystore-volume", + "mountPath": "/etc/datagrid-secret-volume", + "readOnly": true + }, + { + "name": "datagrid-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + }, + { + "name": "memcached", + "containerPort": 11211, + "protocol": "TCP" + }, + { + "name": "hotrod-internal", + "containerPort": 11222, + "protocol": "TCP" + }, + { + "name": "hotrod", + "containerPort": 11333, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "USERNAME", + "value": "${USERNAME}" + }, + { + "name": "PASSWORD", + "value": "${PASSWORD}" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/datagrid-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "DEFAULT_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "DEFAULT_JDBC_STORE_DATASOURCE", + "value": "${DB_JNDI}" + }, + { + "name": "MEMCACHED_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "MEMCACHED_JDBC_STORE_DATASOURCE", + "value": 
"${DB_JNDI}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "INFINISPAN_CONNECTORS", + "value": "${INFINISPAN_CONNECTORS}" + }, + { + "name": "CACHE_NAMES", + "value": "${CACHE_NAMES}" + }, + { + "name": "DATAVIRT_CACHE_NAMES", + "value": "${DATAVIRT_CACHE_NAMES}" + }, + { + "name": "CACHE_TYPE_DEFAULT", + "value": "${CACHE_TYPE_DEFAULT}" + }, + { + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}" + }, + { + "name": "HOTROD_SERVICE_NAME", + "value": "${APPLICATION_NAME}-hotrod" + }, + { + "name": "MEMCACHED_CACHE", + "value": "${MEMCACHED_CACHE}" + }, + { + "name": "REST_SECURITY_DOMAIN", + "value": "${REST_SECURITY_DOMAIN}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "datagrid-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "datagrid-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mysql" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mysql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mysql", + "image": "mysql", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 3306, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}" + }, + { + "name": "MYSQL_MAX_CONNECTIONS", + "value": "${MYSQL_MAX_CONNECTIONS}" + }, + { + "name": "MYSQL_FT_MIN_WORD_LEN", + "value": "${MYSQL_FT_MIN_WORD_LEN}" + }, + { + "name": "MYSQL_FT_MAX_WORD_LEN", + "value": "${MYSQL_FT_MAX_WORD_LEN}" + }, + { + "name": "MYSQL_AIO", + "value": "${MYSQL_AIO}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql-persistent.json new file mode 
100644 index 000000000..8f7180ff6 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql-persistent.json @@ -0,0 +1,824 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for JDG 7.1 and PostgreSQL applications with persistent storage.", + "tags": "datagrid,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 + PostgreSQL (Persistent with https)" + }, + "name": "datagrid71-postgresql-persistent" + }, + "labels": { + "template": "datagrid71-postgresql-persistent", + "xpaas": "1.4.0" + }, + "message": "A new data grid service (using PostgreSQL with persistent storage) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "datagrid-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Username", + "description": "User name for JDG user.", + "name": "USERNAME", + "value": "", + "required": false + }, + { + "displayName": "Password", + "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)", + "name": "PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "datagrid-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Database JNDI Name", + "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:jboss/datasources/postgresql", + "name": "DB_JNDI", + "value": "java:jboss/datasources/postgresql", + "required": false + }, + { + "displayName": "Database Name", + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "displayName": "Database Username", + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Password", + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Datasource Minimum Pool Size", + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Maximum Pool Size", + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Transaction Isolation", + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "displayName": "PostgreSQL Maximum number of connections", + "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.", + "name": "POSTGRESQL_MAX_CONNECTIONS", + "required": false + }, + { + "displayName": "PostgreSQL Shared Buffers", + "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", + "name": "POSTGRESQL_SHARED_BUFFERS", + "required": false + }, + { + "displayName": "Database Volume Capacity", + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "Infinispan Connectors", + "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')", + "name": "INFINISPAN_CONNECTORS", + "value": "hotrod,memcached,rest", + "required": false + }, + { + "displayName": "Cache Names", + "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.", + "name": "CACHE_NAMES", + "value": "", + "required": false + }, + { + "displayName": "Datavirt Cache Names", + "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.", + "name": "DATAVIRT_CACHE_NAMES", + "value": "", + "required": false + }, + { + "displayName": "Default Cache Type", + "description": "Default cache type for all caches. 
If empty then distributed will be the default", + "name": "CACHE_TYPE_DEFAULT", + "value": "", + "required": false + }, + { + "displayName": "Encryption Requires SSL Client Authentication?", + "description": "", + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "", + "required": false + }, + { + "displayName": "Memcached Cache Name", + "description": "The name of the cache to expose through this memcached connector (defaults to 'default')", + "name": "MEMCACHED_CACHE", + "value": "default", + "required": false + }, + { + "displayName": "REST Security Domain", + "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint", + "name": "REST_SECURITY_DOMAIN", + "value": "", + "required": false + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "datagrid-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "PostgreSQL Image Stream Tag", + "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.", + "name": "POSTGRESQL_IMAGE_STREAM_TAG", + "value": "9.5", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTPS port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11211, + "targetPort": 11211 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-memcached", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Memcached service for clustered applications." 
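The template message above asks the operator to pre-create the "datagrid-service-account" service account and the keystore secrets before instantiation. A minimal sketch of those prerequisites with oc and keytool, assuming an illustrative project named myproject and throwaway self-signed keystores (production deployments would use real certificates; the file names match the template defaults keystore.jks and jgroups.jceks):

    oc project myproject
    oc create serviceaccount datagrid-service-account
    # Illustrative self-signed stores; CN and passwords are placeholders, not template requirements
    keytool -genkeypair -alias jboss -keyalg RSA -keystore keystore.jks -storepass password -keypass password -dname "CN=datagrid-app"
    keytool -genseckey -alias jgroups -storetype JCEKS -keystore jgroups.jceks -storepass password
    oc create secret generic datagrid-app-secret --from-file=keystore.jks --from-file=jgroups.jceks
    oc secrets link datagrid-service-account datagrid-app-secret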
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11333, + "targetPort": 11333 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-hotrod", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Hot Rod service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTPS service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-datagrid71-openshift:1.0" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "datagrid-service-account", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "jboss-datagrid71-openshift", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "datagrid-keystore-volume", + "mountPath": "/etc/datagrid-secret-volume", + "readOnly": true + }, + { + "name": "datagrid-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + }, + { + "name": "memcached", + "containerPort": 11211, + "protocol": "TCP" + }, + { + "name": 
"hotrod-internal", + "containerPort": 11222, + "protocol": "TCP" + }, + { + "name": "hotrod", + "containerPort": 11333, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "USERNAME", + "value": "${USERNAME}" + }, + { + "name": "PASSWORD", + "value": "${PASSWORD}" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/datagrid-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "DEFAULT_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "DEFAULT_JDBC_STORE_DATASOURCE", + "value": "${DB_JNDI}" + }, + { + "name": "MEMCACHED_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "MEMCACHED_JDBC_STORE_DATASOURCE", + "value": "${DB_JNDI}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "INFINISPAN_CONNECTORS", + "value": "${INFINISPAN_CONNECTORS}" + }, + { + "name": "CACHE_NAMES", + "value": "${CACHE_NAMES}" + }, + { + "name": "DATAVIRT_CACHE_NAMES", + "value": "${DATAVIRT_CACHE_NAMES}" + }, + { + "name": "CACHE_TYPE_DEFAULT", + "value": "${CACHE_TYPE_DEFAULT}" + }, + { + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}" + }, + { + "name": "HOTROD_SERVICE_NAME", + "value": "${APPLICATION_NAME}-hotrod" + }, + { + "name": "MEMCACHED_CACHE", + "value": "${MEMCACHED_CACHE}" + }, + { + "name": "REST_SECURITY_DOMAIN", + "value": "${REST_SECURITY_DOMAIN}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "datagrid-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "datagrid-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-postgresql" + ], + "from": { + "kind": 
"ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-postgresql", + "image": "postgresql", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 5432, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/pgsql/data", + "name": "${APPLICATION_NAME}-postgresql-pvol" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-postgresql-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-postgresql-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql.json new file mode 100644 index 000000000..0e9fea735 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql.json @@ -0,0 +1,783 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for JDG 7.1 and PostgreSQL applications built using.", + "tags": "datagrid,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 + PostgreSQL (Ephemeral with https)" + }, + "name": "datagrid71-postgresql" + }, + "labels": { + "template": "datagrid71-postgresql", + "xpaas": "1.4.0" + }, + "message": "A new data grid service (using PostgreSQL) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "datagrid-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. 
Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Username", + "description": "User name for JDG user.", + "name": "USERNAME", + "value": "", + "required": false + }, + { + "displayName": "Password", + "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)", + "name": "PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "datagrid-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Database JNDI Name", + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql", + "name": "DB_JNDI", + "value": "java:jboss/datasources/postgresql", + "required": false + }, + { + "displayName": "Database Name", + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "displayName": "Database Username", + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Password", + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Datasource Minimum Pool Size", + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Maximum Pool Size", + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Transaction Isolation", + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "displayName": "PostgreSQL Maximum number of connections", + "description": "The maximum number of client connections allowed. 
This also sets the maximum number of prepared transactions.", + "name": "POSTGRESQL_MAX_CONNECTIONS", + "required": false + }, + { + "displayName": "PostgreSQL Shared Buffers", + "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", + "name": "POSTGRESQL_SHARED_BUFFERS", + "required": false + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "Infinispan Connectors", + "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')", + "name": "INFINISPAN_CONNECTORS", + "value": "hotrod,memcached,rest", + "required": false + }, + { + "displayName": "Cache Names", + "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.", + "name": "CACHE_NAMES", + "value": "", + "required": false + }, + { + "displayName": "Datavirt Cache Names", + "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.", + "name": "DATAVIRT_CACHE_NAMES", + "value": "", + "required": false + }, + { + "displayName": "Default Cache Type", + "description": "Default cache type for all caches. If empty then distributed will be the default", + "name": "CACHE_TYPE_DEFAULT", + "value": "", + "required": false + }, + { + "displayName": "Encryption Requires SSL Client Authentication?", + "description": "", + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "", + "required": false + }, + { + "displayName": "Memcached Cache Name", + "description": "The name of the cache to expose through this memcached connector (defaults to 'default')", + "name": "MEMCACHED_CACHE", + "value": "default", + "required": false + }, + { + "displayName": "REST Security Domain", + "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint", + "name": "REST_SECURITY_DOMAIN", + "value": "", + "required": false + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "datagrid-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "PostgreSQL Image Stream
Tag", + "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.", + "name": "POSTGRESQL_IMAGE_STREAM_TAG", + "value": "9.5", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTPS port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11211, + "targetPort": 11211 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-memcached", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Memcached service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11333, + "targetPort": 11333 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-hotrod", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Hot Rod service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTPS service." 
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-datagrid71-openshift:1.0" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "datagrid-service-account", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "jboss-datagrid71-openshift", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "datagrid-keystore-volume", + "mountPath": "/etc/datagrid-secret-volume", + "readOnly": true + }, + { + "name": "datagrid-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + }, + { + "name": "memcached", + "containerPort": 11211, + "protocol": "TCP" + }, + { + "name": "hotrod-internal", + "containerPort": 11222, + "protocol": "TCP" + }, + { + "name": "hotrod", + "containerPort": 11333, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "USERNAME", + "value": "${USERNAME}" + }, + { + "name": "PASSWORD", + "value": "${PASSWORD}" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/datagrid-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "DEFAULT_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "DEFAULT_JDBC_STORE_DATASOURCE", + "value": "${DB_JNDI}" + }, + { + "name": "MEMCACHED_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "MEMCACHED_JDBC_STORE_DATASOURCE", + 
"value": "${DB_JNDI}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "INFINISPAN_CONNECTORS", + "value": "${INFINISPAN_CONNECTORS}" + }, + { + "name": "CACHE_NAMES", + "value": "${CACHE_NAMES}" + }, + { + "name": "DATAVIRT_CACHE_NAMES", + "value": "${DATAVIRT_CACHE_NAMES}" + }, + { + "name": "CACHE_TYPE_DEFAULT", + "value": "${CACHE_TYPE_DEFAULT}" + }, + { + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}" + }, + { + "name": "HOTROD_SERVICE_NAME", + "value": "${APPLICATION_NAME}-hotrod" + }, + { + "name": "MEMCACHED_CACHE", + "value": "${MEMCACHED_CACHE}" + }, + { + "name": "REST_SECURITY_DOMAIN", + "value": "${REST_SECURITY_DOMAIN}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "datagrid-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "datagrid-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-postgresql", + "image": "postgresql", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 5432, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-persistent-s2i.json new file mode 100644 index 000000000..4917dbe23 --- /dev/null +++ 
b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-persistent-s2i.json @@ -0,0 +1,872 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 7 A-MQ applications with persistent storage built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + A-MQ (Persistent with https)" + }, + "name": "eap71-amq-persistent-s2i" + }, + "labels": { + "template": "eap71-amq-persistent-s2i", + "xpaas": "1.4.0" + }, + "message": "A new EAP 7 and A-MQ persistent based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Git Repository URL", + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "required": true + }, + { + "displayName": "Git Reference", + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "1.3", + "required": false + }, + { + "displayName": "Context Directory", + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "helloworld-mdb", + "required": false + }, + { + "displayName": "A-MQ Volume Size", + "description": "Size of the volume used by A-MQ for persisting messages.", + "name": "VOLUME_CAPACITY", + "value": "512Mi", + "required": true + }, + { + "displayName": "JMS Connection Factory JNDI Name", + "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory", + "name": "MQ_JNDI", + "value": "java:/ConnectionFactory", + "required": false + }, + { + "displayName": "Split Data?", + "description": "Split the data directory for each node in a mesh.", + "name": "AMQ_SPLIT", + "value": "false", + "required": false + }, + { + "displayName": "A-MQ Protocols", + "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.", + "name": "MQ_PROTOCOL", + "value": "openwire", + "required": false + }, + { + "displayName": "Queues", + "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. 
Also, they will be made accessible as JNDI resources in EAP.", + "name": "MQ_QUEUES", + "value": "HELLOWORLDMDBQueue", + "required": false + }, + { + "displayName": "Topics", + "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.", + "name": "MQ_TOPICS", + "value": "HELLOWORLDMDBTopic", + "required": false + }, + { + "displayName": "A-MQ Serializable Packages", + "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html", + "name": "MQ_SERIALIZABLE_PACKAGES", + "value": "", + "required": false + }, + { + "displayName": "Service Account Name", + "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.", + "name": "SERVICE_ACCOUNT_NAME", + "value": "eap7-service-account", + "required": true + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "eap7-app-secret", + "required": false + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Keystore Type", + "description": "The type of the keystore file (JKS or JCEKS)", + "name": "HTTPS_KEYSTORE_TYPE", + "value": "", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "A-MQ Username", + "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", + "name": "MQ_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": false + }, + { + "displayName": "A-MQ Password", + "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", + "name": "MQ_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": false + }, + { + "displayName": "A-MQ Mesh Discovery Type", + "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints.
If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.", + "name": "AMQ_MESH_DISCOVERY_TYPE", + "value": "kube", + "required": false + }, + { + "displayName": "A-MQ Storage Limit", + "description": "The A-MQ storage usage limit", + "name": "AMQ_STORAGE_USAGE_LIMIT", + "value": "100 gb", + "required": false + }, + { + "displayName": "Github Webhook Secret", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Generic Webhook Secret", + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap7-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Deploy Exploded Archives", + "description": "Controls whether exploded deployment content should be automatically deployed", + "name": "AUTO_DEPLOY_EXPLODED", + "value": "false", + "required": false + }, + { + "displayName": "Maven mirror URL", + "description": "Maven mirror to use for S2I builds", + "name": "MAVEN_MIRROR_URL", + "value": "", + "required": false + }, + { + "description": "List of directories from which archives will be copied into the deployment folder. 
If unspecified, all archives in /target will be copied.", + "name": "ARTIFACT_DIR", + "value": "", + "required": false + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTPS port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61616, + "targetPort": 61616 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-tcp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's OpenWire port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTPS service." 
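AMQ_MESH_DISCOVERY_TYPE defaults to 'kube' above, and its description notes that the pod's service account then needs the 'view' role. A sketch of the related prerequisites for this template, assuming an illustrative project named myproject (the role grant is exactly the command quoted in the parameter description; the A-MQ pods run under the default service account, hence 'default'):

    oc create serviceaccount eap7-service-account -n myproject
    oc policy add-role-to-user view system:serviceaccount:myproject:default -n myproject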
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "MAVEN_MIRROR_URL", + "value": "${MAVEN_MIRROR_URL}" + }, + { + "name": "ARTIFACT_DIR", + "value": "${ARTIFACT_DIR}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap71-openshift:1.0-TP" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "${SERVICE_ACCOUNT_NAME}", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MQ_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-amq=MQ" + }, + { + "name": "MQ_JNDI", + "value": "${MQ_JNDI}" + }, + { + "name": "MQ_USERNAME", + "value": "${MQ_USERNAME}" + }, + { + "name": "MQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "MQ_PROTOCOL", + "value": "tcp" + }, + { + "name": "MQ_QUEUES", + "value": 
"${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "MQ_SERIALIZABLE_PACKAGES", + "value": "${MQ_SERIALIZABLE_PACKAGES}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_KEYSTORE_TYPE", + "value": "${HTTPS_KEYSTORE_TYPE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + }, + { + "name": "AUTO_DEPLOY_EXPLODED", + "value": "${AUTO_DEPLOY_EXPLODED}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Rolling", + "rollingParams": { + "maxSurge": 0 + } + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-amq" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-amq-62:1.4" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-amq", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-amq", + "image": "jboss-amq-62", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/amq/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "amqp", + "containerPort": 5672, + "protocol": "TCP" + }, + { + "name": "amqp-ssl", + "containerPort": 5671, + "protocol": "TCP" + }, + { + "name": "mqtt", + "containerPort": 1883, + "protocol": "TCP" + }, + { + "name": "stomp", + "containerPort": 61613, + "protocol": "TCP" + }, + { + "name": "stomp-ssl", + "containerPort": 61612, + "protocol": "TCP" + }, + { + "name": "tcp", + "containerPort": 61616, + "protocol": "TCP" + }, + { + "name": "tcp-ssl", + "containerPort": 61617, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/opt/amq/data/kahadb", + "name": "${APPLICATION_NAME}-amq-pvol" + } + ], + "env": [ + { + "name": "AMQ_USER", + "value": "${MQ_USERNAME}" + }, + { + "name": "AMQ_PASSWORD", 
+ "value": "${MQ_PASSWORD}" + }, + { + "name": "AMQ_TRANSPORTS", + "value": "${MQ_PROTOCOL}" + }, + { + "name": "AMQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "AMQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "MQ_SERIALIZABLE_PACKAGES", + "value": "${MQ_SERIALIZABLE_PACKAGES}" + }, + { + "name": "AMQ_SPLIT", + "value": "${AMQ_SPLIT}" + }, + { + "name": "AMQ_MESH_DISCOVERY_TYPE", + "value": "${AMQ_MESH_DISCOVERY_TYPE}" + }, + { + "name": "AMQ_MESH_SERVICE_NAME", + "value": "${APPLICATION_NAME}-amq-tcp" + }, + { + "name": "AMQ_MESH_SERVICE_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "AMQ_STORAGE_USAGE_LIMIT", + "value": "${AMQ_STORAGE_USAGE_LIMIT}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-amq-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-amq-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-amq-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-s2i.json new file mode 100644 index 000000000..8344be836 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-s2i.json @@ -0,0 +1,817 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 7 A-MQ applications built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + A-MQ (with https)" + }, + "name": "eap71-amq-s2i" + }, + "labels": { + "template": "eap71-amq-s2i", + "xpaas": "1.4.0" + }, + "message": "A new EAP 7 and A-MQ based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. 
Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Git Repository URL", + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "required": true + }, + { + "displayName": "Git Reference", + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "1.3", + "required": false + }, + { + "displayName": "Context Directory", + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "helloworld-mdb", + "required": false + }, + { + "displayName": "JMS Connection Factory JNDI Name", + "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory", + "name": "MQ_JNDI", + "value": "java:/ConnectionFactory", + "required": false + }, + { + "displayName": "A-MQ Protocols", + "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.", + "name": "MQ_PROTOCOL", + "value": "openwire", + "required": false + }, + { + "displayName": "Queues", + "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.", + "name": "MQ_QUEUES", + "value": "HELLOWORLDMDBQueue", + "required": false + }, + { + "displayName": "Topics", + "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.", + "name": "MQ_TOPICS", + "value": "HELLOWORLDMDBTopic", + "required": false + }, + { + "displayName": "A-MQ Serializable Packages", + "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html", + "name": "MQ_SERIALIZABLE_PACKAGES", + "value": "", + "required": false + }, + { + "displayName": "Service Account Name", + "description": "The name of the service account to use for the deployment. 
The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.", + "name": "SERVICE_ACCOUNT_NAME", + "value": "eap7-service-account", + "required": true + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "eap7-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Keystore Type", + "description": "The type of the keystore file (JKS or JCEKS)", + "name": "HTTPS_KEYSTORE_TYPE", + "value": "", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "A-MQ Username", + "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", + "name": "MQ_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": false + }, + { + "displayName": "A-MQ Password", + "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", + "name": "MQ_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": false + }, + { + "displayName": "A-MQ Mesh Discovery Type", + "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.", + "name": "AMQ_MESH_DISCOVERY_TYPE", + "value": "kube", + "required": false + }, + { + "displayName": "A-MQ Storage Limit", + "description": "The A-MQ storage usage limit", + "name": "AMQ_STORAGE_USAGE_LIMIT", + "value": "100 gb", + "required": false + }, + { + "displayName": "Github Webhook Secret", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Generic Webhook Secret", + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace.
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap7-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Deploy Exploded Archives", + "description": "Controls whether exploded deployment content should be automatically deployed", + "name": "AUTO_DEPLOY_EXPLODED", + "value": "false", + "required": false + }, + { + "displayName": "Maven mirror URL", + "description": "Maven mirror to use for S2I builds", + "name": "MAVEN_MIRROR_URL", + "value": "", + "required": false + }, + { + "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.", + "name": "ARTIFACT_DIR", + "value": "", + "required": false + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTPS port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61616, + "targetPort": 61616 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-tcp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's OpenWire port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." 
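The AMQ_MESH_DISCOVERY_TYPE parameter above defaults to 'kube', which only works once the pod's service account has the 'view' role, using exactly the 'oc policy' command quoted in that parameter's description. A minimal Ansible sketch of that step; the project name myproject is a hypothetical placeholder, and the default service account is the one the parameter description refers to:

- name: grant the 'view' role required for kube-based A-MQ mesh discovery
  command: >
    oc policy add-role-to-user view
    system:serviceaccount:myproject:default
    -n myproject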
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTPS service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "MAVEN_MIRROR_URL", + "value": "${MAVEN_MIRROR_URL}" + }, + { + "name": "ARTIFACT_DIR", + "value": "${ARTIFACT_DIR}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap71-openshift:1.0-TP" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "${SERVICE_ACCOUNT_NAME}", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + 
} + ], + "env": [ + { + "name": "MQ_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-amq=MQ" + }, + { + "name": "MQ_JNDI", + "value": "${MQ_JNDI}" + }, + { + "name": "MQ_USERNAME", + "value": "${MQ_USERNAME}" + }, + { + "name": "MQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "MQ_PROTOCOL", + "value": "tcp" + }, + { + "name": "MQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "MQ_SERIALIZABLE_PACKAGES", + "value": "${MQ_SERIALIZABLE_PACKAGES}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_KEYSTORE_TYPE", + "value": "${HTTPS_KEYSTORE_TYPE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + }, + { + "name": "AUTO_DEPLOY_EXPLODED", + "value": "${AUTO_DEPLOY_EXPLODED}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-amq" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-amq-62:1.4" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-amq", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-amq", + "image": "jboss-amq-62", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/amq/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "amqp", + "containerPort": 5672, + "protocol": "TCP" + }, + { + "name": "amqp-ssl", + "containerPort": 5671, + "protocol": "TCP" + }, + { + "name": "mqtt", + "containerPort": 1883, + "protocol": "TCP" + }, + { + "name": "stomp", + "containerPort": 61613, + "protocol": "TCP" + }, + { + "name": "stomp-ssl", + "containerPort": 61612, + "protocol": "TCP" + }, + { + "name": "tcp", + 
"containerPort": 61616, + "protocol": "TCP" + }, + { + "name": "tcp-ssl", + "containerPort": 61617, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "AMQ_USER", + "value": "${MQ_USERNAME}" + }, + { + "name": "AMQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "AMQ_TRANSPORTS", + "value": "${MQ_PROTOCOL}" + }, + { + "name": "AMQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "AMQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "MQ_SERIALIZABLE_PACKAGES", + "value": "${MQ_SERIALIZABLE_PACKAGES}" + }, + { + "name": "AMQ_MESH_DISCOVERY_TYPE", + "value": "${AMQ_MESH_DISCOVERY_TYPE}" + }, + { + "name": "AMQ_MESH_SERVICE_NAME", + "value": "${APPLICATION_NAME}-amq-tcp" + }, + { + "name": "AMQ_MESH_SERVICE_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "AMQ_STORAGE_USAGE_LIMIT", + "value": "${AMQ_STORAGE_USAGE_LIMIT}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-basic-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-basic-s2i.json new file mode 100644 index 000000000..751bc5650 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-basic-s2i.json @@ -0,0 +1,389 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for EAP 7 applications built using S2I.", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss EAP 7.1 (no https)" + }, + "name": "eap71-basic-s2i" + }, + "labels": { + "template": "eap71-basic-s2i", + "xpaas": "1.4.0" + }, + "message": "A new EAP 7 based application has been created in your project.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. 
Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Git Repository URL", + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-developer/jboss-eap-quickstarts", + "required": true + }, + { + "displayName": "Git Reference", + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "7.0.0.GA", + "required": false + }, + { + "displayName": "Context Directory", + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "kitchensink", + "required": false + }, + { + "displayName": "Queues", + "description": "Queue names", + "name": "MQ_QUEUES", + "value": "", + "required": false + }, + { + "displayName": "Topics", + "description": "Topic names", + "name": "MQ_TOPICS", + "value": "", + "required": false + }, + { + "displayName": "A-MQ cluster password", + "description": "A-MQ cluster admin password", + "name": "MQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Github Webhook Secret", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Generic Webhook Secret", + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Deploy Exploded Archives", + "description": "Controls whether exploded deployment content should be automatically deployed", + "name": "AUTO_DEPLOY_EXPLODED", + "value": "false", + "required": false + }, + { + "displayName": "Maven mirror URL", + "description": "Maven mirror to use for S2I builds", + "name": "MAVEN_MIRROR_URL", + "value": "", + "required": false + }, + { + "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.", + "name": "ARTIFACT_DIR", + "value": "", + "required": false + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." 
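The parameters above with "generate": "expression" (the webhook secrets, cluster passwords) are filled in automatically at instantiation time, so only the application-specific values need to be supplied. A minimal sketch of instantiating this template, assuming it has already been imported into the cluster and using a hypothetical project myproject; the parameter values shown are the template's own quickstart defaults:

- name: instantiate the eap71-basic-s2i template with its quickstart defaults
  command: >
    oc new-app --template=eap71-basic-s2i
    -p APPLICATION_NAME=eap-app
    -p SOURCE_REPOSITORY_URL=https://github.com/jboss-developer/jboss-eap-quickstarts
    -p SOURCE_REPOSITORY_REF=7.0.0.GA
    -p CONTEXT_DIR=kitchensink
    -n myproject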
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "MAVEN_MIRROR_URL", + "value": "${MAVEN_MIRROR_URL}" + }, + { + "name": "ARTIFACT_DIR", + "value": "${ARTIFACT_DIR}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap71-openshift:1.0-TP" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 75, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "lifecycle": { + "preStop": { + "exec": { + "command": [ + "/opt/eap/bin/jboss-cli.sh", + "-c", + ":shutdown(timeout=60)" + ] + } + } + }, + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "MQ_CLUSTER_PASSWORD", + "value": "${MQ_CLUSTER_PASSWORD}" + }, + { + "name": "MQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + }, + { + "name": "AUTO_DEPLOY_EXPLODED", + "value": "${AUTO_DEPLOY_EXPLODED}" + } + ] + } + ] + } + } + } + } + ] +} diff --git 
a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-https-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-https-s2i.json new file mode 100644 index 000000000..06ae63685 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-https-s2i.json @@ -0,0 +1,585 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for EAP 7 applications built using S2I.", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss EAP 7.1 (with https)" + }, + "name": "eap71-https-s2i" + }, + "labels": { + "template": "eap71-https-s2i", + "xpaas": "1.4.0" + }, + "message": "A new EAP 7 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Git Repository URL", + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-developer/jboss-eap-quickstarts", + "required": true + }, + { + "displayName": "Git Reference", + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "7.0.0.GA", + "required": false + }, + { + "displayName": "Context Directory", + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "kitchensink", + "required": false + }, + { + "displayName": "Queues", + "description": "Queue names", + "name": "MQ_QUEUES", + "value": "", + "required": false + }, + { + "displayName": "Topics", + "description": "Topic names", + "name": "MQ_TOPICS", + "value": "", + "required": false + }, + { + "displayName": "Service Account Name", + "description": "The name of the service account to use for the deployment. 
The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.", + "name": "SERVICE_ACCOUNT_NAME", + "value": "eap7-service-account", + "required": true + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "eap7-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Keystore Type", + "description": "The type of the keystore file (JKS or JCEKS)", + "name": "HTTPS_KEYSTORE_TYPE", + "value": "", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "A-MQ cluster password", + "description": "A-MQ cluster admin password", + "name": "MQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Github Webhook Secret", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Generic Webhook Secret", + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace.
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap7-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Deploy Exploded Archives", + "description": "Controls whether exploded deployment content should be automatically deployed", + "name": "AUTO_DEPLOY_EXPLODED", + "value": "false", + "required": false + }, + { + "displayName": "Maven mirror URL", + "description": "Maven mirror to use for S2I builds", + "name": "MAVEN_MIRROR_URL", + "value": "", + "required": false + }, + { + "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.", + "name": "ARTIFACT_DIR", + "value": "", + "required": false + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
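The eap71-https-s2i message above asks the user to create the "${SERVICE_ACCOUNT_NAME}" service account and the keystore secrets before deploying. A sketch of those prerequisites using the template's default parameter values (eap7-service-account, eap7-app-secret, keystore.jks); the keystore generated here is a throwaway self-signed one, and the project name, CN, and passwords are all illustrative assumptions:

- name: generate a self-signed keystore for HTTPS_SECRET (illustrative only)
  command: >
    keytool -genkeypair -alias eap-https -keyalg RSA
    -keystore keystore.jks -storepass changeit -keypass changeit
    -dname "CN=eap-app.example.com"
  args:
    creates: keystore.jks

- name: create the service account the deployment runs as
  command: oc create serviceaccount eap7-service-account -n myproject

- name: store the keystore in the secret named by HTTPS_SECRET
  command: oc create secret generic eap7-app-secret --from-file=keystore.jks -n myproject

- name: let the service account mount the secret
  command: oc secrets link eap7-service-account eap7-app-secret -n myproject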
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "MAVEN_MIRROR_URL", + "value": "${MAVEN_MIRROR_URL}" + }, + { + "name": "ARTIFACT_DIR", + "value": "${ARTIFACT_DIR}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap71-openshift:1.0-TP" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "${SERVICE_ACCOUNT_NAME}", + "terminationGracePeriodSeconds": 75, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "lifecycle": { + "preStop": { + "exec": { + "command": [ + "/opt/eap/bin/jboss-cli.sh", + "-c", + ":shutdown(timeout=60)" + ] + } + } + }, + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + 
}, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_KEYSTORE_TYPE", + "value": "${HTTPS_KEYSTORE_TYPE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "MQ_CLUSTER_PASSWORD", + "value": "${MQ_CLUSTER_PASSWORD}" + }, + { + "name": "MQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + }, + { + "name": "AUTO_DEPLOY_EXPLODED", + "value": "${AUTO_DEPLOY_EXPLODED}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-persistent-s2i.json new file mode 100644 index 000000000..8f10754ce --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-persistent-s2i.json @@ -0,0 +1,862 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 7 MongoDB applications with persistent storage built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + MongoDB (Persistent with https)" + }, + "name": "eap71-mongodb-persistent-s2i" + }, + "labels": { + "template": "eap71-mongodb-persistent-s2i", + "xpaas": "1.4.0" + }, + "message": "A new EAP 7 and MongoDB persistent based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. 
Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Git Repository URL", + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true + }, + { + "displayName": "Git Reference", + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "1.3", + "required": false + }, + { + "displayName": "Context Directory", + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "todolist/todolist-mongodb", + "required": false + }, + { + "displayName": "Database JNDI Name", + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "", + "required": false + }, + { + "displayName": "Database Name", + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "displayName": "Database Volume Capacity", + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi", + "required": true + }, + { + "displayName": "Queues", + "description": "Queue names", + "name": "MQ_QUEUES", + "value": "", + "required": false + }, + { + "displayName": "Topics", + "description": "Topic names", + "name": "MQ_TOPICS", + "value": "", + "required": false + }, + { + "displayName": "Service Account Name", + "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.", + "name": "SERVICE_ACCOUNT_NAME", + "value": "eap7-service-account", + "required": true + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "eap7-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Keystore Type", + "description": "The type of the keystore file (JKS or JCEKS)", + "name": "HTTPS_KEYSTORE_TYPE", + "value": "", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Datasource Minimum Pool Size", + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Maximum Pool Size", + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Transaction Isolation", + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "displayName": "MongoDB No Preallocation", + "description": "Disable data file preallocation.", + "name":
"MONGODB_NOPREALLOC", + "required": false + }, + { + "displayName": "MongoDB Small Files", + "description": "Set MongoDB to use a smaller default data file size.", + "name": "MONGODB_SMALLFILES", + "required": false + }, + { + "displayName": "MongoDB Quiet", + "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", + "name": "MONGODB_QUIET", + "required": false + }, + { + "displayName": "A-MQ cluster password", + "description": "A-MQ cluster admin password", + "name": "MQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Username", + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Password", + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database admin password", + "description": "Database admin password", + "name": "DB_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Github Webhook Secret", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Generic Webhook Secret", + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap7-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Deploy Exploded Archives", + "description": "Controls whether exploded deployment content should be automatically deployed", + "name": "AUTO_DEPLOY_EXPLODED", + "value": "false", + "required": false + }, + { + "displayName": "Maven mirror URL", + "description": "Maven mirror to use for S2I builds", + "name": "MAVEN_MIRROR_URL", + "value": "", + "required": false + }, + { + "description": "List of directories from which archives will be copied into the deployment folder. 
If unspecified, all archives in /target will be copied.", + "name": "ARTIFACT_DIR", + "value": "", + "required": false + }, + { + "displayName": "MongoDB Image Stream Tag", + "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.", + "name": "MONGODB_IMAGE_STREAM_TAG", + "value": "3.2", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 27017, + "targetPort": 27017 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
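For the persistent variant above, DB_JNDI follows the example given in its own description (java:/jboss/datasources/mongodb) and VOLUME_CAPACITY sizes the PersistentVolumeClaim defined at the end of the template. A minimal instantiation sketch; the project name and the DB_DATABASE override are hypothetical, and the remaining generated passwords are left to the template's "generate": "expression" defaults:

- name: instantiate the persistent EAP 7.1 + MongoDB template (illustrative values)
  command: >
    oc new-app --template=eap71-mongodb-persistent-s2i
    -p APPLICATION_NAME=eap-app
    -p DB_JNDI=java:/jboss/datasources/mongodb
    -p DB_DATABASE=todolist
    -p VOLUME_CAPACITY=1Gi
    -p MONGODB_IMAGE_STREAM_TAG=3.2
    -n myproject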
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "MAVEN_MIRROR_URL", + "value": "${MAVEN_MIRROR_URL}" + }, + { + "name": "ARTIFACT_DIR", + "value": "${ARTIFACT_DIR}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap71-openshift:1.0-TP" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "${SERVICE_ACCOUNT_NAME}", + "terminationGracePeriodSeconds": 75, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "lifecycle": { + "preStop": { + "exec": { + "command": [ + "/opt/eap/bin/jboss-cli.sh", + "-c", + ":shutdown(timeout=60)" + ] + } + } + }, + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mongodb=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + 
"name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "DB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_KEYSTORE_TYPE", + "value": "${HTTPS_KEYSTORE_TYPE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "MQ_CLUSTER_PASSWORD", + "value": "${MQ_CLUSTER_PASSWORD}" + }, + { + "name": "MQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + }, + { + "name": "AUTO_DEPLOY_EXPLODED", + "value": "${AUTO_DEPLOY_EXPLODED}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mongodb" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-mongodb", + "image": "mongodb", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 27017, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/mongodb/data", + "name": "${APPLICATION_NAME}-mongodb-pvol" + } + ], + "env": [ + { + "name": "MONGODB_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "MONGODB_NOPREALLOC", 
+ "value": "${MONGODB_NOPREALLOC}" + }, + { + "name": "MONGODB_SMALLFILES", + "value": "${MONGODB_SMALLFILES}" + }, + { + "name": "MONGODB_QUIET", + "value": "${MONGODB_QUIET}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-mongodb-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-mongodb-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-s2i.json new file mode 100644 index 000000000..a1d0ac4b4 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-s2i.json @@ -0,0 +1,821 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 7 MongoDB applications built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + MongoDB (Ephemeral with https)" + }, + "name": "eap71-mongodb-s2i" + }, + "labels": { + "template": "eap71-mongodb-s2i", + "xpaas": "1.4.0" + }, + "message": "A new EAP 7 and MongoDB based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. 
Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Git Repository URL", + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true + }, + { + "displayName": "Git Reference", + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "1.3", + "required": false + }, + { + "displayName": "Context Directory", + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "todolist/todolist-mongodb", + "required": false + }, + { + "displayName": "Database JNDI Name", + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "name": "DB_JNDI", + "value": "", + "required": false + }, + { + "displayName": "Database Name", + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "displayName": "Queues", + "description": "Queue names", + "name": "MQ_QUEUES", + "value": "", + "required": false + }, + { + "displayName": "Topics", + "description": "Topic names", + "name": "MQ_TOPICS", + "value": "", + "required": false + }, + { + "displayName": "Service Account Name", + "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.", + "name": "SERVICE_ACCOUNT_NAME", + "value": "eap7-service-account", + "required": true + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "eap7-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Keystore Type", + "description": "The type of the keystore file (JKS or JCEKS)", + "name": "HTTPS_KEYSTORE_TYPE", + "value": "", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Datasource Minimum Pool Size", + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Maximum Pool Size", + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Transaction Isolation", + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "displayName": "MongoDB No Preallocation", + "description": "Disable data file preallocation.", + "name": "MONGODB_NOPREALLOC", + "required": false + }, + { + "displayName": "MongoDB Small Files", + "description": "Set MongoDB to use a smaller default data file size.", + "name": "MONGODB_SMALLFILES",
+ "required": false + }, + { + "displayName": "MongoDB Quiet", + "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", + "name": "MONGODB_QUIET", + "required": false + }, + { + "displayName": "A-MQ cluster password", + "description": "A-MQ cluster admin password", + "name": "MQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Username", + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Password", + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database admin password", + "description": "Database admin password", + "name": "DB_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Github Webhook Secret", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Generic Webhook Secret", + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap7-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Deploy Exploded Archives", + "description": "Controls whether exploded deployment content should be automatically deployed", + "name": "AUTO_DEPLOY_EXPLODED", + "value": "false", + "required": false + }, + { + "displayName": "Maven mirror URL", + "description": "Maven mirror to use for S2I builds", + "name": "MAVEN_MIRROR_URL", + "value": "", + "required": false + }, + { + "description": "List of directories from which archives will be copied into the deployment folder. 
If unspecified, all archives in /target will be copied.", + "name": "ARTIFACT_DIR", + "value": "", + "required": false + }, + { + "displayName": "MongoDB Image Stream Tag", + "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.", + "name": "MONGODB_IMAGE_STREAM_TAG", + "value": "3.2", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 27017, + "targetPort": 27017 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "MAVEN_MIRROR_URL", + "value": "${MAVEN_MIRROR_URL}" + }, + { + "name": "ARTIFACT_DIR", + "value": "${ARTIFACT_DIR}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap71-openshift:1.0-TP" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "${SERVICE_ACCOUNT_NAME}", + "terminationGracePeriodSeconds": 75, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "lifecycle": { + "preStop": { + "exec": { + "command": [ + "/opt/eap/bin/jboss-cli.sh", + "-c", + ":shutdown(timeout=60)" + ] + } + } + }, + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mongodb=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + 
"name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "DB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_KEYSTORE_TYPE", + "value": "${HTTPS_KEYSTORE_TYPE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "MQ_CLUSTER_PASSWORD", + "value": "${MQ_CLUSTER_PASSWORD}" + }, + { + "name": "MQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + }, + { + "name": "AUTO_DEPLOY_EXPLODED", + "value": "${AUTO_DEPLOY_EXPLODED}" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mongodb" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mongodb", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mongodb", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-mongodb", + "image": "mongodb", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 27017, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MONGODB_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${DB_ADMIN_PASSWORD}" + }, + { + "name": "MONGODB_NOPREALLOC", + "value": "${MONGODB_NOPREALLOC}" + }, + { + "name": "MONGODB_SMALLFILES", + "value": "${MONGODB_SMALLFILES}" + }, + 
{ + "name": "MONGODB_QUIET", + "value": "${MONGODB_QUIET}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-persistent-s2i.json new file mode 100644 index 000000000..6faabd01e --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-persistent-s2i.json @@ -0,0 +1,878 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 7 MySQL applications with persistent storage built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + MySQL (Persistent with https)" + }, + "name": "eap71-mysql-persistent-s2i" + }, + "labels": { + "template": "eap71-mysql-persistent-s2i", + "xpaas": "1.4.0" + }, + "message": "A new EAP 7 and MySQL persistent based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Git Repository URL", + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true + }, + { + "displayName": "Git Reference", + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "1.3", + "required": false + }, + { + "displayName": "Context Directory", + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false + }, + { + "displayName": "Database JNDI Name", + "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mysql", + "name": "DB_JNDI", + "value": "java:jboss/datasources/TodoListDS", + "required": false + }, + { + "displayName": "Database Name", + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "displayName": "Database Volume Capacity", + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi", + "required": true + }, + { + "displayName": "Queues", + "description": "Queue names", + "name": "MQ_QUEUES", + "value": "", + "required": false + }, + { + "displayName": "Topics", + "description": "Topic names", + "name": "MQ_TOPICS", + "value": "", + "required": false + }, + { + "displayName": "Service Account Name", + "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.", + "name": "SERVICE_ACCOUNT_NAME", + "value": "eap7-service-account", + "required": true + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "eap7-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Keystore Type", + "description": "The type of the keystore file (JKS or JCEKS)", + "name": "HTTPS_KEYSTORE_TYPE", + "value": "", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Datasource Minimum Pool Size", + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Maximum Pool Size", + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Transaction Isolation", + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "displayName": "MySQL Lower Case Table Names", + "description": "Sets how the table names are stored and compared.", + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "required": false + }, + { + "displayName": "MySQL Maximum number of connections", + "description": "The maximum permitted number of simultaneous client connections.", + "name": "MYSQL_MAX_CONNECTIONS", + "required": false + }, + { + "displayName": "MySQL FullText Minimum Word Length", + "description": "The minimum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MIN_WORD_LEN", + "required": false + }, + { + "displayName": "MySQL FullText Maximum Word Length", + "description": "The maximum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MAX_WORD_LEN", + "required": false + }, + { + "displayName": "MySQL AIO", + "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", + "name": "MYSQL_AIO", + "required": false + }, + {
"displayName": "A-MQ cluster password", + "description": "A-MQ cluster admin password", + "name": "MQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Username", + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Password", + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Github Webhook Secret", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Generic Webhook Secret", + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap7-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Deploy Exploded Archives", + "description": "Controls whether exploded deployment content should be automatically deployed", + "name": "AUTO_DEPLOY_EXPLODED", + "value": "false", + "required": false + }, + { + "displayName": "Maven mirror URL", + "description": "Maven mirror to use for S2I builds", + "name": "MAVEN_MIRROR_URL", + "value": "", + "required": false + }, + { + "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.", + "name": "ARTIFACT_DIR", + "value": "", + "required": false + }, + { + "displayName": "MySQL Image Stream Tag", + "description": "The tag to use for the \"mysql\" image stream. 
Typically, this aligns with the major.minor version of MySQL.", + "name": "MYSQL_IMAGE_STREAM_TAG", + "value": "5.7", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "MAVEN_MIRROR_URL", + "value": "${MAVEN_MIRROR_URL}" + }, + { + "name": "ARTIFACT_DIR", + "value": "${ARTIFACT_DIR}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap71-openshift:1.0-TP" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "${SERVICE_ACCOUNT_NAME}", + "terminationGracePeriodSeconds": 75, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "lifecycle": { + "preStop": { + "exec": { + "command": [ + "/opt/eap/bin/jboss-cli.sh", + "-c", + ":shutdown(timeout=60)" + ] + } + } + }, + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": 
"DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_KEYSTORE_TYPE", + "value": "${HTTPS_KEYSTORE_TYPE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "MQ_CLUSTER_PASSWORD", + "value": "${MQ_CLUSTER_PASSWORD}" + }, + { + "name": "MQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + }, + { + "name": "AUTO_DEPLOY_EXPLODED", + "value": "${AUTO_DEPLOY_EXPLODED}" + }, + { + "name": "DEFAULT_JOB_REPOSITORY", + "value": "${APPLICATION_NAME}-mysql" + }, + { + "name": "TIMER_SERVICE_DATA_STORE", + "value": "${APPLICATION_NAME}-mysql" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mysql" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mysql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-mysql", + "image": "mysql", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 3306, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/mysql/data", + "name": "${APPLICATION_NAME}-mysql-pvol" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": 
"MYSQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}" + }, + { + "name": "MYSQL_MAX_CONNECTIONS", + "value": "${MYSQL_MAX_CONNECTIONS}" + }, + { + "name": "MYSQL_FT_MIN_WORD_LEN", + "value": "${MYSQL_FT_MIN_WORD_LEN}" + }, + { + "name": "MYSQL_FT_MAX_WORD_LEN", + "value": "${MYSQL_FT_MAX_WORD_LEN}" + }, + { + "name": "MYSQL_AIO", + "value": "${MYSQL_AIO}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-mysql-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-mysql-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-mysql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-s2i.json new file mode 100644 index 000000000..9f1c260b3 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-s2i.json @@ -0,0 +1,837 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 7 MySQL applications built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + MySQL (Ephemeral with https)" + }, + "name": "eap71-mysql-s2i" + }, + "labels": { + "template": "eap71-mysql-s2i", + "xpaas": "1.4.0" + }, + "message": "A new EAP 7 and MySQL based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. 
Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Git Repository URL", + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true + }, + { + "displayName": "Git Reference", + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "1.3", + "required": false + }, + { + "displayName": "Context Directory", + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false + }, + { + "displayName": "Database JNDI Name", + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql", + "name": "DB_JNDI", + "value": "java:jboss/datasources/TodoListDS", + "required": false + }, + { + "displayName": "Database Name", + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "displayName": "Queues", + "description": "Queue names", + "name": "MQ_QUEUES", + "value": "", + "required": false + }, + { + "displayName": "Topics", + "description": "Topic names", + "name": "MQ_TOPICS", + "value": "", + "required": false + }, + { + "displayName": "Service Account Name", + "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.", + "name": "SERVICE_ACCOUNT_NAME", + "value": "eap7-service-account", + "required": true + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "eap7-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Keystore Type", + "description": "The type of the keystore file (JKS or JCEKS)", + "name": "HTTPS_KEYSTORE_TYPE", + "value": "", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Datasource Minimum Pool Size", + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Maximum Pool Size", + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Transaction Isolation", + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "displayName": "MySQL Lower Case Table Names", + "description": "Sets how the table names are stored and compared.", + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "required": false + }, + { + "displayName": "MySQL Maximum number of connections", + "description": "The maximum
permitted number of simultaneous client connections.", + "name": "MYSQL_MAX_CONNECTIONS", + "required": false + }, + { + "displayName": "MySQL FullText Minimum Word Length", + "description": "The minimum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MIN_WORD_LEN", + "required": false + }, + { + "displayName": "MySQL FullText Maximum Word Length", + "description": "The maximum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MAX_WORD_LEN", + "required": false + }, + { + "displayName": "MySQL AIO", + "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", + "name": "MYSQL_AIO", + "required": false + }, + { + "displayName": "A-MQ cluster password", + "description": "A-MQ cluster admin password", + "name": "MQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Username", + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Password", + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "GitHub Webhook Secret", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Generic Webhook Secret", + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace.
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap7-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Deploy Exploded Archives", + "description": "Controls whether exploded deployment content should be automatically deployed", + "name": "AUTO_DEPLOY_EXPLODED", + "value": "false", + "required": false + }, + { + "displayName": "Maven mirror URL", + "description": "Maven mirror to use for S2I builds", + "name": "MAVEN_MIRROR_URL", + "value": "", + "required": false + }, + { + "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.", + "name": "ARTIFACT_DIR", + "value": "", + "required": false + }, + { + "displayName": "MySQL Image Stream Tag", + "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.", + "name": "MYSQL_IMAGE_STREAM_TAG", + "value": "5.7", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." 
+ } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "MAVEN_MIRROR_URL", + "value": "${MAVEN_MIRROR_URL}" + }, + { + "name": "ARTIFACT_DIR", + "value": "${ARTIFACT_DIR}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap71-openshift:1.0-TP" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "${SERVICE_ACCOUNT_NAME}", + "terminationGracePeriodSeconds": 75, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "lifecycle": { + "preStop": { + "exec": { + "command": [ + "/opt/eap/bin/jboss-cli.sh", + "-c", + ":shutdown(timeout=60)" + ] + } + } + }, + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + 
"readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_KEYSTORE_TYPE", + "value": "${HTTPS_KEYSTORE_TYPE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "MQ_CLUSTER_PASSWORD", + "value": "${MQ_CLUSTER_PASSWORD}" + }, + { + "name": "MQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + }, + { + "name": "AUTO_DEPLOY_EXPLODED", + "value": "${AUTO_DEPLOY_EXPLODED}" + }, + { + "name": "DEFAULT_JOB_REPOSITORY", + "value": "${APPLICATION_NAME}-mysql" + }, + { + "name": "TIMER_SERVICE_DATA_STORE", + "value": "${APPLICATION_NAME}-mysql" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mysql" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + }, + "template": { + "metadata": { + "name": 
"${APPLICATION_NAME}-mysql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mysql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-mysql", + "image": "mysql", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 3306, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}" + }, + { + "name": "MYSQL_MAX_CONNECTIONS", + "value": "${MYSQL_MAX_CONNECTIONS}" + }, + { + "name": "MYSQL_FT_MIN_WORD_LEN", + "value": "${MYSQL_FT_MIN_WORD_LEN}" + }, + { + "name": "MYSQL_FT_MAX_WORD_LEN", + "value": "${MYSQL_FT_MAX_WORD_LEN}" + }, + { + "name": "MYSQL_AIO", + "value": "${MYSQL_AIO}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-persistent-s2i.json new file mode 100644 index 000000000..2aff9d795 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-persistent-s2i.json @@ -0,0 +1,852 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 7 PostgreSQL applications with persistent storage built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + PostgreSQL (Persistent with https)" + }, + "name": "eap71-postgresql-persistent-s2i" + }, + "labels": { + "template": "eap71-postgresql-persistent-s2i", + "xpaas": "1.4.0" + }, + "message": "A new EAP 7 and PostgreSQL persistent based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. 
Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Git Repository URL", + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true + }, + { + "displayName": "Git Reference", + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "1.3", + "required": false + }, + { + "displayName": "Context Directory", + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false + }, + { + "displayName": "Database JNDI Name", + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql", + "name": "DB_JNDI", + "value": "java:jboss/datasources/TodoListDS", + "required": false + }, + { + "displayName": "Database Name", + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "displayName": "Database Volume Capacity", + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi", + "required": true + }, + { + "displayName": "Queues", + "description": "Queue names", + "name": "MQ_QUEUES", + "value": "", + "required": false + }, + { + "displayName": "Topics", + "description": "Topic names", + "name": "MQ_TOPICS", + "value": "", + "required": false + }, + { + "displayName": "Service Account Name", + "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.", + "name": "SERVICE_ACCOUNT_NAME", + "value": "eap7-service-account", + "required": true + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "eap7-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Keystore Type", + "description": "The type of the keystore file (JKS or JCEKS)", + "name": "HTTPS_KEYSTORE_TYPE", + "value": "", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Datasource Minimum Pool Size", + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Maximum Pool Size", + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Transaction Isolation", + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "displayName": "PostgreSQL Maximum number of connections", + "description":
"The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.", + "name": "POSTGRESQL_MAX_CONNECTIONS", + "required": false + }, + { + "displayName": "PostgreSQL Shared Buffers", + "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", + "name": "POSTGRESQL_SHARED_BUFFERS", + "required": false + }, + { + "displayName": "A-MQ cluster password", + "description": "A-MQ cluster admin password", + "name": "MQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Username", + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Password", + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Github Webhook Secret", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Generic Webhook Secret", + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap7-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Deploy Exploded Archives", + "description": "Controls whether exploded deployment content should be automatically deployed", + "name": "AUTO_DEPLOY_EXPLODED", + "value": "false", + "required": false + }, + { + "displayName": "Maven mirror URL", + "description": "Maven mirror to use for S2I builds", + "name": "MAVEN_MIRROR_URL", + "value": "", + "required": false + }, + { + "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.", + "name": "ARTIFACT_DIR", + "value": "", + "required": false + }, + { + "displayName": "PostgreSQL Image Stream Tag", + "description": "The tag to use for the \"postgresql\" image stream. 
Typically, this aligns with the major.minor version of PostgreSQL.", + "name": "POSTGRESQL_IMAGE_STREAM_TAG", + "value": "9.5", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
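
For context: once this template has been imported (for example with oc create -f into the openshift namespace), it can be instantiated from an Ansible play. A minimal sketch, assuming the template file is on the local filesystem and that the project name myproject and the parameter values are illustrative only; DB_USERNAME, DB_PASSWORD and the webhook secrets are generated from the generate: expression parameters above when left unset:

  - name: instantiate the EAP 7.1 + PostgreSQL persistent template
    shell: >
      oc process -f eap71-postgresql-persistent-s2i.json
      -p APPLICATION_NAME=eap-app
      -p VOLUME_CAPACITY=1Gi
      | oc create -n myproject -f -
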
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "MAVEN_MIRROR_URL", + "value": "${MAVEN_MIRROR_URL}" + }, + { + "name": "ARTIFACT_DIR", + "value": "${ARTIFACT_DIR}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap71-openshift:1.0-TP" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "${SERVICE_ACCOUNT_NAME}", + "terminationGracePeriodSeconds": 75, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "lifecycle": { + "preStop": { + "exec": { + "command": [ + "/opt/eap/bin/jboss-cli.sh", + "-c", + ":shutdown(timeout=60)" + ] + } + } + }, + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + 
"name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_KEYSTORE_TYPE", + "value": "${HTTPS_KEYSTORE_TYPE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "MQ_CLUSTER_PASSWORD", + "value": "${MQ_CLUSTER_PASSWORD}" + }, + { + "name": "MQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + }, + { + "name": "AUTO_DEPLOY_EXPLODED", + "value": "${AUTO_DEPLOY_EXPLODED}" + }, + { + "name": "DEFAULT_JOB_REPOSITORY", + "value": "${APPLICATION_NAME}-postgresql" + }, + { + "name": "TIMER_SERVICE_DATA_STORE", + "value": "${APPLICATION_NAME}-postgresql" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-postgresql", + "image": "postgresql", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 5432, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/pgsql/data", + "name": "${APPLICATION_NAME}-postgresql-pvol" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": 
"POSTGRESQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-postgresql-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-postgresql-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-s2i.json new file mode 100644 index 000000000..5bb6aaffb --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-s2i.json @@ -0,0 +1,811 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for EAP 7 PostgreSQL applications built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + PostgreSQL (Ephemeral with https)" + }, + "name": "eap71-postgresql-s2i" + }, + "labels": { + "template": "eap71-postgresql-s2i", + "xpaas": "1.4.0" + }, + "message": "A new EAP 7 and PostgreSQL based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. 
Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Git Repository URL", + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true + }, + { + "displayName": "Git Reference", + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "1.3", + "required": false + }, + { + "displayName": "Context Directory", + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false + }, + { + "displayName": "Database JNDI Name", + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql", + "name": "DB_JNDI", + "value": "java:jboss/datasources/TodoListDS", + "required": false + }, + { + "displayName": "Database Name", + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "displayName": "Queues", + "description": "Queue names", + "name": "MQ_QUEUES", + "value": "", + "required": false + }, + { + "displayName": "Topics", + "description": "Topic names", + "name": "MQ_TOPICS", + "value": "", + "required": false + }, + { + "displayName": "Service Account Name", + "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.", + "name": "SERVICE_ACCOUNT_NAME", + "value": "eap7-service-account", + "required": true + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "eap7-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Keystore Type", + "description": "The type of the keystore file (JKS or JCEKS)", + "name": "HTTPS_KEYSTORE_TYPE", + "value": "", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "Datasource Minimum Pool Size", + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Maximum Pool Size", + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "displayName": "Datasource Transaction Isolation", + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "displayName": "PostgreSQL Maximum number of connections", + "description": "The maximum number of client connections allowed.
This also sets the maximum number of prepared transactions.", + "name": "POSTGRESQL_MAX_CONNECTIONS", + "required": false + }, + { + "displayName": "PostgreSQL Shared Buffers", + "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", + "name": "POSTGRESQL_SHARED_BUFFERS", + "required": false + }, + { + "displayName": "A-MQ cluster password", + "description": "A-MQ cluster admin password", + "name": "MQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Username", + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "displayName": "Database Password", + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Github Webhook Secret", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Generic Webhook Secret", + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap7-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Deploy Exploded Archives", + "description": "Controls whether exploded deployment content should be automatically deployed", + "name": "AUTO_DEPLOY_EXPLODED", + "value": "false", + "required": false + }, + { + "displayName": "Maven mirror URL", + "description": "Maven mirror to use for S2I builds", + "name": "MAVEN_MIRROR_URL", + "value": "", + "required": false + }, + { + "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.", + "name": "ARTIFACT_DIR", + "value": "", + "required": false + }, + { + "displayName": "PostgreSQL Image Stream Tag", + "description": "The tag to use for the \"postgresql\" image stream. 
Typically, this aligns with the major.minor version of PostgreSQL.", + "name": "POSTGRESQL_IMAGE_STREAM_TAG", + "value": "9.5", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port.", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]" + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
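
A note before the secure route below: it uses passthrough TLS termination, so TLS is terminated by EAP itself with the keystore named by HTTPS_KEYSTORE. The template message asks for the service account and keystore secrets to be created up front. A minimal sketch of that preparation, assuming the keystore files already exist locally, that the project is myproject, and that the OpenShift 3.x "oc secrets new" subcommand is available:

  - name: create the service account referenced by SERVICE_ACCOUNT_NAME
    command: oc create serviceaccount eap7-service-account -n myproject

  - name: create the keystore secret referenced by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET
    command: oc secrets new eap7-app-secret keystore.jks jgroups.jceks -n myproject
    # oc secrets new keys each entry by its file name, matching the
    # HTTPS_KEYSTORE and JGROUPS_ENCRYPT_KEYSTORE defaults above
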
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "MAVEN_MIRROR_URL", + "value": "${MAVEN_MIRROR_URL}" + }, + { + "name": "ARTIFACT_DIR", + "value": "${ARTIFACT_DIR}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap71-openshift:1.0-TP" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "${SERVICE_ACCOUNT_NAME}", + "terminationGracePeriodSeconds": 75, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "lifecycle": { + "preStop": { + "exec": { + "command": [ + "/opt/eap/bin/jboss-cli.sh", + "-c", + ":shutdown(timeout=60)" + ] + } + } + }, + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + 
"name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_KEYSTORE_TYPE", + "value": "${HTTPS_KEYSTORE_TYPE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "MQ_CLUSTER_PASSWORD", + "value": "${MQ_CLUSTER_PASSWORD}" + }, + { + "name": "MQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + }, + { + "name": "AUTO_DEPLOY_EXPLODED", + "value": "${AUTO_DEPLOY_EXPLODED}" + }, + { + "name": "DEFAULT_JOB_REPOSITORY", + "value": "${APPLICATION_NAME}-postgresql" + }, + { + "name": "TIMER_SERVICE_DATA_STORE", + "value": "${APPLICATION_NAME}-postgresql" + } + ] + } + ], + "volumes": [ + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-postgresql", + "image": "postgresql", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 5432, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${DB_DATABASE}" + 
}, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-sso-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-sso-s2i.json new file mode 100644 index 000000000..a7a347338 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-sso-s2i.json @@ -0,0 +1,823 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for EAP 7 applications built using S2I, enabled for SSO.", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + Single Sign-On (with https)" + }, + "name": "eap71-sso-s2i" + }, + "labels": { + "template": "eap71-sso-s2i", + "xpaas": "1.4.0" + }, + "message": "A new EAP 7 based application with SSL and SSO support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Hostname for http service route (e.g. eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.", + "name": "HOSTNAME_HTTP", + "value": "", + "required": true + }, + { + "displayName": "Custom https Route Hostname", + "description": "Hostname for https service route (e.g. secure-eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": true + }, + { + "displayName": "Git Repository URL", + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/redhat-developer/redhat-sso-quickstarts", + "required": true + }, + { + "displayName": "Git Reference", + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "7.0.x-ose", + "required": false + }, + { + "displayName": "Context Directory", + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "", + "required": false + }, + { + "displayName": "Queues", + "description": "Queue names", + "name": "HORNETQ_QUEUES", + "value": "", + "required": false + }, + { + "displayName": "Topics", + "description": "Topic names", + "name": "HORNETQ_TOPICS", + "value": "", + "required": false + }, + { + "displayName": "Service Account Name", + "description": "The name of the service account to use for the deployment.
The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.", + "name": "SERVICE_ACCOUNT_NAME", + "value": "eap7-service-account", + "required": true + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "eap7-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Keystore Type", + "description": "The type of the keystore file (JKS or JCEKS)", + "name": "HTTPS_KEYSTORE_TYPE", + "value": "", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate (e.g. jboss)", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate (e.g. mykeystorepass)", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "HornetQ Password", + "description": "HornetQ cluster admin password", + "name": "HORNETQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Github Webhook Secret", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Generic Webhook Secret", + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap7-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the server certificate (e.g. secret-key)", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate (e.g.
password)", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Deploy Exploded Archives", + "description": "Controls whether exploded deployment content should be automatically deployed", + "name": "AUTO_DEPLOY_EXPLODED", + "value": "false", + "required": false + }, + { + "displayName": "URL for SSO", + "description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.", + "name": "SSO_URL", + "value": "", + "required": true + }, + { + "displayName": "URL for SSO (internal service)", + "description": "The URL for the internal SSO service, where secure-sso (the default) is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.", + "name": "SSO_SERVICE_URL", + "value": "https://secure-sso:8443/auth", + "required": false + }, + { + "displayName": "SSO Realm", + "description": "The SSO realm to which the application client(s) should be associated (e.g. demo).", + "name": "SSO_REALM", + "value": "", + "required": true + }, + { + "displayName": "SSO Username", + "description": "The username used to access the SSO service. This is used to create the application client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.", + "name": "SSO_USERNAME", + "value": "", + "required": false + }, + { + "displayName": "SSO Password", + "description": "The password for the SSO service user.", + "name": "SSO_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "SSO Public Key", + "description": "SSO Public Key. Public key is recommended to be passed into the template to avoid a man-in-the-middle security vulnerability", + "name": "SSO_PUBLIC_KEY", + "value": "", + "required": false + }, + { + "displayName": "SSO Bearer Only?", + "description": "SSO Client Access Type", + "name": "SSO_BEARER_ONLY", + "value": "", + "required": false + }, + { + "displayName": "Artifact Directories", + "description": "List of directories from which archives will be copied into the deployment folder.
If unspecified, all archives in /target will be copied.", + "name": "ARTIFACT_DIR", + "value": "app-jee-jsp/target,service-jee-jaxrs/target,app-profile-jee-jsp/target,app-profile-saml-jee-jsp/target", + "required": false + }, + { + "displayName": "SSO SAML Keystore Secret", + "description": "The name of the secret containing the keystore file", + "name": "SSO_SAML_KEYSTORE_SECRET", + "value": "eap7-app-secret", + "required": false + }, + { + "displayName": "SSO SAML Keystore", + "description": "The name of the keystore file within the secret", + "name": "SSO_SAML_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "SSO SAML Certificate Name", + "description": "The name associated with the server certificate", + "name": "SSO_SAML_CERTIFICATE_NAME", + "value": "jboss", + "required": false + }, + { + "displayName": "SSO SAML Keystore Password", + "description": "The password for the keystore and certificate", + "name": "SSO_SAML_KEYSTORE_PASSWORD", + "value": "mykeystorepass", + "required": false + }, + { + "displayName": "SSO Client Secret", + "description": "The SSO Client Secret for Confidential Access", + "name": "SSO_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Enable CORS for SSO?", + "description": "Enable CORS for SSO applications", + "name": "SSO_ENABLE_CORS", + "value": "false", + "required": false + }, + { + "displayName": "SSO SAML Logout Page", + "description": "SSO logout page for SAML applications", + "name": "SSO_SAML_LOGOUT_PAGE", + "value": "/", + "required": false + }, + { + "displayName": "Disable SSL Validation in EAP->SSO communication", + "description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)", + "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION", + "value": "true", + "required": false + }, + { + "displayName": "SSO Trust Store", + "description": "The name of the truststore file within the secret (e.g. truststore.jks)", + "name": "SSO_TRUSTSTORE", + "value": "", + "required": false + }, + { + "displayName": "SSO Trust Store Password", + "description": "The password for the truststore and certificate (e.g. mykeystorepass)", + "name": "SSO_TRUSTSTORE_PASSWORD", + "value": "", + "required": false + }, + { + "displayName": "SSO Trust Store Secret", + "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName", + "name": "SSO_TRUSTSTORE_SECRET", + "value": "eap7-app-secret", + "required": false + }, + { + "displayName": "Maven mirror URL", + "description": "Maven mirror to use for S2I builds", + "name": "MAVEN_MIRROR_URL", + "value": "", + "required": false + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." 
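
Because HOSTNAME_HTTP, HOSTNAME_HTTPS, SSO_URL and SSO_REALM above are marked required with empty defaults, values must be supplied at processing time. A minimal sketch, with all values illustrative; pinning SSO_PUBLIC_KEY to the realm's public key is what the parameter description above recommends to rule out man-in-the-middle interception:

  - name: instantiate the EAP 7.1 SSO template
    shell: >
      oc process -f eap71-sso-s2i.json
      -p APPLICATION_NAME=eap-app
      -p HOSTNAME_HTTP=eap-app-myproject.example.com
      -p HOSTNAME_HTTPS=secure-eap-app-myproject.example.com
      -p SSO_URL=https://secure-sso-myproject.example.com/auth
      -p SSO_REALM=demo
      -p SSO_PUBLIC_KEY='{{ sso_realm_public_key }}'
      | oc create -n myproject -f -
    # sso_realm_public_key is an assumed Ansible variable holding the
    # realm public key copied from the SSO server's admin console
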
+ } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap71-openshift:1.0-TP" + }, + "env": [ + { + "name": "ARTIFACT_DIR", + "value": "${ARTIFACT_DIR}" + }, + { + "name": "MAVEN_ARGS_APPEND", + "value": "" + }, + { + "name": "MAVEN_MIRROR_URL", + "value": "${MAVEN_MIRROR_URL}" + } + ] + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "${SERVICE_ACCOUNT_NAME}", + "terminationGracePeriodSeconds": 75, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "sso-saml-keystore-volume", + "mountPath": "/etc/sso-saml-secret-volume", + "readOnly": true + }, + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + }, + { + "name": "sso-truststore-volume", + "mountPath": "/etc/sso-secret-volume", + "readOnly": true + 
} + ], + "lifecycle": { + "preStop": { + "exec": { + "command": [ + "/opt/eap/bin/jboss-cli.sh", + "-c", + ":shutdown(timeout=60)" + ] + } + } + }, + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "HOSTNAME_HTTP", + "value": "${HOSTNAME_HTTP}" + }, + { + "name": "HOSTNAME_HTTPS", + "value": "${HOSTNAME_HTTPS}" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_KEYSTORE_TYPE", + "value": "${HTTPS_KEYSTORE_TYPE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "HORNETQ_CLUSTER_PASSWORD", + "value": "${HORNETQ_CLUSTER_PASSWORD}" + }, + { + "name": "HORNETQ_QUEUES", + "value": "${HORNETQ_QUEUES}" + }, + { + "name": "HORNETQ_TOPICS", + "value": "${HORNETQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + }, + { + "name": "AUTO_DEPLOY_EXPLODED", + "value": "${AUTO_DEPLOY_EXPLODED}" + }, + { + "name": "SSO_URL", + "value": "${SSO_URL}" + }, + { + "name": "SSO_SERVICE_URL", + "value": "${SSO_SERVICE_URL}" + }, + { + "name": "SSO_REALM", + "value": "${SSO_REALM}" + }, + { + "name": "SSO_USERNAME", + "value": "${SSO_USERNAME}" + }, + { + "name": "SSO_PASSWORD", + "value": "${SSO_PASSWORD}" + }, + { + "name": "SSO_PUBLIC_KEY", + "value": "${SSO_PUBLIC_KEY}" + }, + { + "name": "SSO_BEARER_ONLY", + "value": "${SSO_BEARER_ONLY}" + }, + { + "name": "SSO_SAML_KEYSTORE_SECRET", + "value": "${SSO_SAML_KEYSTORE_SECRET}" + }, + { + "name": "SSO_SAML_KEYSTORE", + "value": "${SSO_SAML_KEYSTORE}" + }, + { + "name": "SSO_SAML_KEYSTORE_DIR", + "value": "/etc/sso-saml-secret-volume" + }, + { + "name": "SSO_SAML_CERTIFICATE_NAME", + "value": "${SSO_SAML_CERTIFICATE_NAME}" + }, + { + "name": "SSO_SAML_KEYSTORE_PASSWORD", + "value": "${SSO_SAML_KEYSTORE_PASSWORD}" + }, + { + "name": "SSO_SECRET", + "value": "${SSO_SECRET}" + }, + { + "name": "SSO_ENABLE_CORS", + "value": "${SSO_ENABLE_CORS}" + }, + { + "name": "SSO_SAML_LOGOUT_PAGE", + "value": "${SSO_SAML_LOGOUT_PAGE}" + }, + { + "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION", + "value": "${SSO_DISABLE_SSL_CERTIFICATE_VALIDATION}" + }, + { + "name": "SSO_TRUSTSTORE", + "value": "${SSO_TRUSTSTORE}" + }, + { + "name": "SSO_TRUSTSTORE_DIR", + 
"value": "/etc/sso-secret-volume" + }, + { + "name": "SSO_TRUSTSTORE_PASSWORD", + "value": "${SSO_TRUSTSTORE_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "sso-saml-keystore-volume", + "secret": { + "secretName": "${SSO_SAML_KEYSTORE_SECRET}" + } + }, + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + }, + { + "name": "sso-truststore-volume", + "secret": { + "secretName": "${SSO_TRUSTSTORE_SECRET}" + } + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-third-party-db-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-third-party-db-s2i.json new file mode 100644 index 000000000..6dc63c75f --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-third-party-db-s2i.json @@ -0,0 +1,657 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for EAP 7 DB applications built using S2I. Includes support for installing third-party DB drivers.", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.4.0", + "openshift.io/display-name": "Red Hat JBoss EAP 7.1 (with https, supporting third-party DB drivers)" + }, + "name": "eap71-third-party-db-s2i" + }, + "labels": { + "template": "eap71-third-party-db-s2i", + "xpaas": "1.4.0" + }, + "message": "A new EAP 7 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets:\"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed application(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.", + "parameters": [ + { + "displayName": "Application Name", + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "eap-app", + "required": true + }, + { + "displayName": "Configuration Secret Name", + "description": "The name of the secret containing configuration properties for the datasources.", + "name": "CONFIGURATION_NAME", + "value": "eap-app-config", + "required": true + }, + { + "displayName": "Custom http Route Hostname", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "displayName": "Custom https Route Hostname", + "description": "Custom hostname for https service route. 
Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "displayName": "Git Repository URL", + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true + }, + { + "displayName": "Git Reference", + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "master", + "required": false + }, + { + "displayName": "Context Directory", + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "datavirt/hibernate-webapp", + "required": false + }, + { + "displayName": "Drivers ImageStreamTag", + "description": "ImageStreamTag definition for the image containing the drivers and configuration, e.g. jboss-datavirt63-openshift:1.0-driver", + "name": "EXTENSIONS_IMAGE", + "value": "jboss-datavirt63-driver-openshift:1.0", + "required": true + }, + { + "displayName": "Drivers ImageStream Namespace", + "description": "Namespace within which the ImageStream definition for the image containing the drivers and configuration is located.", + "name": "EXTENSIONS_IMAGE_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "Drivers Image Install Directory", + "description": "Full path to the directory within the extensions image where the extensions are located (e.g. install.sh, modules/, etc.)", + "name": "EXTENSIONS_INSTALL_DIR", + "value": "/extensions", + "required": true + }, + { + "displayName": "Queue Names", + "description": "Queue names to preconfigure within Messaging subsystem.", + "name": "MQ_QUEUES", + "value": "", + "required": false + }, + { + "displayName": "Topic Names", + "description": "Topic names to preconfigure within Messaging subsystem.", + "name": "MQ_TOPICS", + "value": "", + "required": false + }, + { + "displayName": "Service Account Name", + "description": "The name of the service account to use for the deployment. 
The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.", + "name": "SERVICE_ACCOUNT_NAME", + "value": "eap-service-account", + "required": true + }, + { + "displayName": "Server Keystore Secret Name", + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "eap-app-secret", + "required": true + }, + { + "displayName": "Server Keystore Filename", + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "displayName": "Server Keystore Type", + "description": "The type of the keystore file (JKS or JCEKS)", + "name": "HTTPS_KEYSTORE_TYPE", + "value": "", + "required": false + }, + { + "displayName": "Server Certificate Name", + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "jboss", + "required": false + }, + { + "displayName": "Server Keystore Password", + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "mykeystorepass", + "required": false + }, + { + "displayName": "Messaging Cluster Admin Password", + "description": "Admin password for Messaging cluster.", + "name": "MQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Github Webhook Secret", + "description": "A secret string used to configure the GitHub webhook.", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Generic Webhook Secret", + "description": "A secret string used to configure the Generic webhook.", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "ImageStream Namespace", + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace.
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "displayName": "JGroups Secret Name", + "description": "The name of the secret containing the keystore to be used for securing JGroups communications.", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap-app-secret", + "required": false + }, + { + "displayName": "JGroups Keystore Filename", + "description": "The name of the keystore file within the JGroups secret.", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "displayName": "JGroups Certificate Name", + "description": "The name associated with the JGroups server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "secret-key", + "required": false + }, + { + "displayName": "JGroups Keystore Password", + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "password", + "required": false + }, + { + "displayName": "JGroups Cluster Password", + "description": "Password used by JGroups to authenticate nodes in the cluster.", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "displayName": "Deploy Exploded Archives", + "description": "Controls whether exploded deployment content should be automatically deployed", + "name": "AUTO_DEPLOY_EXPLODED", + "value": "false", + "required": false + }, + { + "displayName": "Maven mirror URL", + "description": "Maven mirror to use for S2I builds", + "name": "MAVEN_MIRROR_URL", + "value": "", + "required": false + }, + { + "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.", + "name": "ARTIFACT_DIR", + "value": "", + "required": false + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." 
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}", + "images": [ + { + "from": { + "kind": "ImageStreamTag", + "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}", + "name": "${EXTENSIONS_IMAGE}" + }, + "paths": [ + { + "destinationDir": "./${CONTEXT_DIR}/extensions/extras", + "sourcePath": "${EXTENSIONS_INSTALL_DIR}/." + } + ] + } + ] + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "MAVEN_MIRROR_URL", + "value": "${MAVEN_MIRROR_URL}" + }, + { + "name": "CUSTOM_INSTALL_DIRECTORIES", + "value": "extensions/*" + }, + { + "name": "ARTIFACT_DIR", + "value": "${ARTIFACT_DIR}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap71-openshift:1.0-TP" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ImageChange", + "imageChange": { + "from": { + "kind": "ImageStreamTag", + "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}", + "name": "${EXTENSIONS_IMAGE}" + } + } + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccountName": "${SERVICE_ACCOUNT_NAME}", + "terminationGracePeriodSeconds": 75, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "configuration", + "mountPath": "/etc/eap-environment", + "readOnly": true + }, + { + "name": "eap-keystore-volume", + "mountPath": "/etc/eap-secret-volume", + "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "lifecycle": { + "preStop": { + "exec": { + "command": [ + "/opt/eap/bin/jboss-cli.sh", + "-c", + ":shutdown(timeout=60)" + ] + } + } + }, + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + 
"exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "ENV_FILES", + "value": "/etc/eap-environment/*" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/eap-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_KEYSTORE_TYPE", + "value": "${HTTPS_KEYSTORE_TYPE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "MQ_CLUSTER_PASSWORD", + "value": "${MQ_CLUSTER_PASSWORD}" + }, + { + "name": "MQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + }, + { + "name": "AUTO_DEPLOY_EXPLODED", + "value": "${AUTO_DEPLOY_EXPLODED}" + } + ] + } + ], + "volumes": [ + { + "name": "configuration", + "secret": { + "secretName": "${CONFIGURATION_NAME}" + } + }, + { + "name": "eap-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-amq-template.json index cd0bec3c1..aad649f84 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-amq-template.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-amq-template.json @@ -3,7 +3,7 @@ "kind": "Template", "metadata": { "annotations": { - "description": "Camel route using ActiveMQ in Karaf container.", + "description": "Camel route using ActiveMQ in Karaf container. This quickstart shows how to use Camel in a Karaf Container using Blueprint to connect to the A-MQ xPaaS message broker on OpenShift that should already be installed, one simple way to run a A-MQ service is following the documentation of the A-MQ xPaaS image for OpenShift related to the amq62-basic template.", "tags": "quickstart,java,karaf,fis", "iconClass": "icon-jboss", "version": "2.0" @@ -31,7 +31,7 @@ { "name": "GIT_REF", "displayName": "Git Reference", - "value": "karaf2-camel-amq-1.0.0.redhat-000010", + "value": "karaf2-camel-amq-1.0.0.redhat-000019", "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch." 
},
{
@@ -49,7 +49,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000010",
+ "value": "1.0.0.redhat-000019",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-log-template.json
index 2ecce08a9..38b7bc249 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-log-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-log-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "A simple Camel route in Karaf container.",
+ "description": "A simple Camel route in Karaf container. This quickstart shows a simple Apache Camel application that logs a message to the server log every 5 seconds.",
"tags": "quickstart,java,karaf,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "karaf2-camel-log-1.0.0.redhat-000010",
+ "value": "karaf2-camel-log-1.0.0.redhat-000019",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -49,7 +49,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000010",
+ "value": "1.0.0.redhat-000019",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-rest-sql-template.json
index d80939efb..6d9573e5b 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-rest-sql-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-rest-sql-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Camel example using Rest DSL with SQL Database in Karaf container.",
+ "description": "Camel example using Rest DSL with SQL Database in Karaf container. This example demonstrates how to use SQL via JDBC along with Camel's REST DSL to expose a RESTful API. The OpenShift MySQL container image should already be installed and running on your OpenShift installation; one simple way to run a MySQL service is to follow the documentation of the OpenShift MySQL container image related to the mysql-ephemeral template.",
"tags": "quickstart,java,karaf,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "karaf2-camel-rest-sql-1.0.0.redhat-000010",
+ "value": "karaf2-camel-rest-sql-1.0.0.redhat-000019",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -73,7 +73,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000010",
+ "value": "1.0.0.redhat-000019",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-cxf-rest-template.json
index f99099868..fdc0c00e5 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-cxf-rest-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-cxf-rest-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "REST example using CXF in Karaf container.",
+ "description": "REST example using CXF in Karaf container. This quickstart demonstrates how to create a RESTful (JAX-RS) web service using CXF and expose it through the OSGi HTTP Service.",
"tags": "quickstart,java,karaf,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "karaf2-cxf-rest-1.0.0.redhat-000010",
+ "value": "karaf2-cxf-rest-1.0.0.redhat-000019",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -49,7 +49,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000010",
+ "value": "1.0.0.redhat-000019",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-amq-template.json
index 8b3cd6ed0..2c1a73a29 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-amq-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-amq-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Spring Boot, Camel and ActiveMQ QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to an ActiveMQ broker and use JMS messaging between two Camel routes using OpenShift. In this example we will use two containers, one container to run as a ActiveMQ broker, and another as a client to the broker, where the Camel routes are running. This quickstart requires the ActiveMQ broker has been deployed and running first.",
+ "description": "Spring Boot, Camel and ActiveMQ QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to an ActiveMQ broker and use JMS messaging between two Camel routes using OpenShift. In this example we will use two containers, one container to run as an ActiveMQ broker, and another as a client to the broker, where the Camel routes are running. This quickstart requires that the ActiveMQ broker has been deployed and is running first; one simple way to run an A-MQ service is to follow the documentation of the A-MQ xPaaS image for OpenShift related to the amq62-basic template.",
"tags": "quickstart,java,springboot,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-amq-1.0.0.redhat-000055",
+ "value": "spring-boot-camel-amq-1.0.0.redhat-000064",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -60,7 +60,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000055",
+ "value": "1.0.0.redhat-000064",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-config-template.json
index bc5bbad22..b62e768b6 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-config-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-config-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Spring Boot and Camel using ConfigMaps and Secrets. This quickstart demonstrates how to configure a Spring-Boot application using Openshift ConfigMaps and Secrets.",
+ "description": "Spring Boot and Camel using ConfigMaps and Secrets. This quickstart demonstrates how to configure a Spring-Boot application using OpenShift ConfigMaps and Secrets. This example requires that a ConfigMap named camel-config and a Secret named camel-config are present in the namespace before the application is deployed; instructions about how to manually create them can be found here: https://github.com/fabric8-quickstarts/spring-boot-camel-config/blob/fis-2.0.x.redhat/README.redhat.md",
"tags": "quickstart,java,springboot,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-config-1.0.0.redhat-000005",
+ "value": "spring-boot-camel-config-1.0.0.redhat-000014",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -64,7 +64,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000005",
+ "value": "1.0.0.redhat-000014",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-drools-template.json
index e54fa0d59..91081e493 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-drools-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-drools-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Spring-Boot, Camel and JBoss BRMS QuickStart. This example demonstrates how you can use Apache Camel and JBoss BRMS with Spring Boot on OpenShift. DRL files contain simple rules which are used to create knowledge session via Spring configuration file. Camel routes, defined via Spring as well, are then used to e.g. pass (insert) the Body of the message as a POJO to Drools engine for execution.",
+ "description": "Spring-Boot, Camel and JBoss BRMS QuickStart. This example demonstrates how you can use Apache Camel and JBoss BRMS with Spring Boot on OpenShift. DRL files contain simple rules which are used to create knowledge session via Spring configuration file. Camel routes, defined via Spring as well, are then used to e.g. pass (insert) the Body of the message as a POJO to Drools engine for execution.
A Kie Server should be deployed and configured before running the application; more information about how to configure it can be found at https://github.com/fabric8-quickstarts/spring-boot-camel-drools/blob/fis-2.0.x.redhat/README.redhat.md",
"tags": "quickstart,java,springboot,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-drools-1.0.0.redhat-000054",
+ "value": "spring-boot-camel-drools-1.0.0.redhat-000063",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -63,7 +63,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000054",
+ "value": "1.0.0.redhat-000063",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-infinispan-template.json
index 20ba97dac..8d97400ab 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-infinispan-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-infinispan-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Spring Boot, Camel and JBoss Data Grid QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to a JBoss Data Grid (or Infinispan) server using the Hot Rod protocol. It requires that the data grid server (or cluster) has been deployed first.",
+ "description": "Spring Boot, Camel and JBoss Data Grid QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to a JBoss Data Grid (or Infinispan) server using the Hot Rod protocol. It requires that the data grid server (or cluster) has been deployed first; one simple way to run a JDG service is to follow the documentation of the JDG xPaaS image for OpenShift related to the datagrid65-basic template.",
"tags": "quickstart,java,springboot,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-infinispan-1.0.0.redhat-000024",
+ "value": "spring-boot-camel-infinispan-1.0.0.redhat-000033",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -50,7 +50,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000024",
+ "value": "1.0.0.redhat-000033",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-rest-sql-template.json
index 555647fab..bf722844c 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-rest-sql-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-rest-sql-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Spring Boot, Camel REST DSL and MySQL QuickStart. This quickstart demonstrates how to connect a Spring Boot application to a MySQL database and expose a REST API with Camel on OpenShift.
In this example we will use two containers, one container to run as a MySQL server, and another as a client to the database, where the Camel routes are running. This quickstart requires the MySQL server to be deployed and started first.",
+ "description": "Spring Boot, Camel REST DSL and MySQL QuickStart. This quickstart demonstrates how to connect a Spring Boot application to a MySQL database and expose a REST API with Camel on OpenShift. In this example we will use two containers, one container to run as a MySQL server, and another as a client to the database, where the Camel routes are running. This quickstart requires the MySQL server to be deployed and started first; one simple way to run a MySQL service is to follow the documentation of the OpenShift MySQL container image related to the mysql-ephemeral template.",
"tags": "quickstart,java,springboot,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-rest-sql-1.0.0.redhat-000055",
+ "value": "spring-boot-camel-rest-sql-1.0.0.redhat-000064",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -72,7 +72,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000055",
+ "value": "1.0.0.redhat-000064",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-teiid-template.json
index cf9a4e903..856264615 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-teiid-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-teiid-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Spring-Boot, Camel and JBoss Data Virtualization QuickStart. This example demonstrates how to connect Apache Camel to a remote JBoss Data Virtualization (or Teiid) Server using the JDBC protocol.",
+ "description": "Spring-Boot, Camel and JBoss Data Virtualization QuickStart. This example demonstrates how to connect Apache Camel to a remote JBoss Data Virtualization (or Teiid) Server using the JDBC protocol. This quickstart assumes that the JDV server is already running and configured on OpenShift; more information about how to set up a JDV server can be found at https://github.com/fabric8-quickstarts/spring-boot-camel-teiid/blob/fis-2.0.x.redhat/README.redhat.md",
"tags": "quickstart,java,springboot,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-teiid-1.0.0.redhat-000053",
+ "value": "spring-boot-camel-teiid-1.0.0.redhat-000062",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -68,7 +68,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000053",
+ "value": "1.0.0.redhat-000062",
"description": "The application version."
}, { diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-template.json index c78a96f7c..9c0fe287e 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-template.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-template.json @@ -31,7 +31,7 @@ { "name": "GIT_REF", "displayName": "Git Reference", - "value": "spring-boot-camel-1.0.0.redhat-000055", + "value": "spring-boot-camel-1.0.0.redhat-000064", "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch." }, { @@ -43,7 +43,7 @@ { "name": "APP_VERSION", "displayName": "Application Version", - "value": "1.0.0.redhat-000055", + "value": "1.0.0.redhat-000064", "description": "The application version." }, { diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-xml-template.json index 620425902..87c0e347a 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-xml-template.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-xml-template.json @@ -31,7 +31,7 @@ { "name": "GIT_REF", "displayName": "Git Reference", - "value": "spring-boot-camel-xml-1.0.0.redhat-000055", + "value": "spring-boot-camel-xml-1.0.0.redhat-000064", "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch." }, { @@ -43,7 +43,7 @@ { "name": "APP_VERSION", "displayName": "Application Version", - "value": "1.0.0.redhat-000055", + "value": "1.0.0.redhat-000064", "description": "The application version." }, { diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxrs-template.json index 15cfc93fd..8b0261035 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxrs-template.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxrs-template.json @@ -31,10 +31,16 @@ { "name": "GIT_REF", "displayName": "Git Reference", - "value": "spring-boot-cxf-jaxrs-1.0.0.redhat-000005", + "value": "spring-boot-cxf-jaxrs-1.0.0.redhat-000014", "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch." }, { + "name": "SERVICE_NAME", + "displayName": "Service Name", + "value": "cxf-jaxrs", + "description": "Exposed service name." + }, + { "name": "BUILDER_VERSION", "displayName": "Builder version", "value": "2.0", @@ -43,7 +49,7 @@ { "name": "APP_VERSION", "displayName": "Application Version", - "value": "1.0.0.redhat-000005", + "value": "1.0.0.redhat-000014", "description": "The application version." 
}, { @@ -93,6 +99,59 @@ ], "objects": [ { + "apiVersion": "v1", + "kind": "Route", + "metadata": { + "labels": { + "component": "${APP_NAME}", + "provider": "s2i", + "project": "${APP_NAME}", + "version": "${APP_VERSION}", + "group": "quickstarts" + }, + "name": "${SERVICE_NAME}-route" + }, + "spec": { + "to": { + "kind": "Service", + "name": "${SERVICE_NAME}" + } + } + }, + { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "annotations": { + }, + "labels": { + "component": "${APP_NAME}", + "provider": "s2i", + "project": "${APP_NAME}", + "version": "${APP_VERSION}", + "group": "quickstarts" + }, + "name": "${SERVICE_NAME}" + }, + "spec": { + "clusterIP": "None", + "deprecatedPublicIPs": [], + "ports": [ + { + "port": 9413, + "protocol": "TCP", + "targetPort": 8080 + } + ], + "selector": { + "project": "${APP_NAME}", + "component": "${APP_NAME}", + "provider": "s2i", + "group": "quickstarts" + } + } + }, + { "kind": "ImageStream", "apiVersion": "v1", "metadata": { diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxws-template.json index c70ee7726..8b36f5f0b 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxws-template.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxws-template.json @@ -31,10 +31,16 @@ { "name": "GIT_REF", "displayName": "Git Reference", - "value": "spring-boot-cxf-jaxws-1.0.0.redhat-000005", + "value": "spring-boot-cxf-jaxws-1.0.0.redhat-000014", "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch." }, { + "name": "SERVICE_NAME", + "displayName": "Service Name", + "value": "cxf-jaxws", + "description": "Exposed service name." + }, + { "name": "BUILDER_VERSION", "displayName": "Builder version", "value": "2.0", @@ -43,7 +49,7 @@ { "name": "APP_VERSION", "displayName": "Application Version", - "value": "1.0.0.redhat-000005", + "value": "1.0.0.redhat-000014", "description": "The application version." 
}, { @@ -93,6 +99,59 @@ ], "objects": [ { + "apiVersion": "v1", + "kind": "Route", + "metadata": { + "labels": { + "component": "${APP_NAME}", + "provider": "s2i", + "project": "${APP_NAME}", + "version": "${APP_VERSION}", + "group": "quickstarts" + }, + "name": "${SERVICE_NAME}-route" + }, + "spec": { + "to": { + "kind": "Service", + "name": "${SERVICE_NAME}" + } + } + }, + { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "annotations": { + }, + "labels": { + "component": "${APP_NAME}", + "provider": "s2i", + "project": "${APP_NAME}", + "version": "${APP_VERSION}", + "group": "quickstarts" + }, + "name": "${SERVICE_NAME}" + }, + "spec": { + "clusterIP": "None", + "deprecatedPublicIPs": [], + "ports": [ + { + "port": 9414, + "protocol": "TCP", + "targetPort": 8080 + } + ], + "selector": { + "project": "${APP_NAME}", + "component": "${APP_NAME}", + "provider": "s2i", + "group": "quickstarts" + } + } + }, + { "kind": "ImageStream", "apiVersion": "v1", "metadata": { diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml index 5cfda1c89..f3fe2dcbe 100644 --- a/roles/openshift_examples/meta/main.yml +++ b/roles/openshift_examples/meta/main.yml @@ -11,5 +11,4 @@ galaxy_info: - 7 categories: - cloud -dependencies: -- role: openshift_common +dependencies: [] diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml index 3a866cedf..7a5bebf6f 100644 --- a/roles/openshift_excluder/tasks/install.yml +++ b/roles/openshift_excluder/tasks/install.yml @@ -6,19 +6,46 @@ block: - - name: Install docker excluder + - name: Install docker excluder - yum package: name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}" state: "{{ r_openshift_excluder_docker_package_state }}" when: - r_openshift_excluder_enable_docker_excluder | bool + - ansible_pkg_mgr == "yum" - - name: Install openshift excluder + + # For DNF we do not need the "*" and if we add it, it causes an error because + # it's not a valid pkg_spec + # + # https://bugzilla.redhat.com/show_bug.cgi?id=1199432 + - name: Install docker excluder - dnf + package: + name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" + state: "{{ r_openshift_excluder_docker_package_state }}" + when: + - r_openshift_excluder_enable_docker_excluder | bool + - ansible_pkg_mgr == "dnf" + + - name: Install openshift excluder - yum package: name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}" state: "{{ r_openshift_excluder_package_state }}" when: - r_openshift_excluder_enable_openshift_excluder | bool + - ansible_pkg_mgr == "yum" + + # For DNF we do not need the "*" and if we add it, it causes an error because + # it's not a valid pkg_spec + # + # https://bugzilla.redhat.com/show_bug.cgi?id=1199432 + - name: Install openshift excluder - dnf + package: + name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" + state: "{{ r_openshift_excluder_package_state }}" + when: + - r_openshift_excluder_enable_openshift_excluder | bool + - ansible_pkg_mgr == "dnf" - set_fact: r_openshift_excluder_install_ran: True diff --git a/roles/openshift_facts/library/openshift_facts.py 
b/roles/openshift_facts/library/openshift_facts.py index 251d1dfb4..33028fea4 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -55,9 +55,6 @@ def migrate_docker_facts(facts): """ Apply migrations for docker facts """ params = { 'common': ( - 'additional_registries', - 'insecure_registries', - 'blocked_registries', 'options' ), 'node': ( @@ -449,78 +446,6 @@ def normalize_provider_facts(provider, metadata): return facts -def set_flannel_facts_if_unset(facts): - """ Set flannel facts if not already present in facts dict - dict: the facts dict updated with the flannel facts if - missing - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with the flannel - facts if they were not already present - - """ - if 'common' in facts: - if 'use_flannel' not in facts['common']: - use_flannel = False - facts['common']['use_flannel'] = use_flannel - return facts - - -def set_calico_facts_if_unset(facts): - """ Set calico facts if not already present in facts dict - dict: the facts dict updated with the calico facts if - missing - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with the calico - facts if they were not already present - - """ - if 'common' in facts: - if 'use_calico' not in facts['common']: - use_calico = False - facts['common']['use_calico'] = use_calico - return facts - - -def set_nuage_facts_if_unset(facts): - """ Set nuage facts if not already present in facts dict - dict: the facts dict updated with the nuage facts if - missing - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with the nuage - facts if they were not already present - - """ - if 'common' in facts: - if 'use_nuage' not in facts['common']: - use_nuage = False - facts['common']['use_nuage'] = use_nuage - return facts - - -def set_contiv_facts_if_unset(facts): - """ Set contiv facts if not already present in facts dict - dict: the facts dict updated with the contiv facts if - missing - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with the contiv - facts if they were not already present - - """ - if 'common' in facts: - if 'use_contiv' not in facts['common']: - use_contiv = False - facts['common']['use_contiv'] = use_contiv - return facts - - def set_node_schedulability(facts): """ Set schedulable facts if not already present in facts dict Args: @@ -549,11 +474,7 @@ def set_selectors(facts): facts if they were not already present """ - deployment_type = facts['common']['deployment_type'] - if deployment_type == 'online': - selector = "type=infra" - else: - selector = "region=infra" + selector = "region=infra" if 'hosted' not in facts: facts['hosted'] = {} @@ -569,14 +490,28 @@ def set_selectors(facts): facts['hosted']['metrics'] = {} if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']: facts['hosted']['metrics']['selector'] = None - if 'logging' not in facts['hosted']: - facts['hosted']['logging'] = {} - if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']: - facts['hosted']['logging']['selector'] = None + if 'logging' not in facts: + facts['logging'] = {} + if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']: + facts['logging']['selector'] = None if 'etcd' not in facts['hosted']: facts['hosted']['etcd'] = {} if 'selector' not in facts['hosted']['etcd'] or 
facts['hosted']['etcd']['selector'] in [None, 'None']: facts['hosted']['etcd']['selector'] = None + if 'prometheus' not in facts: + facts['prometheus'] = {} + if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']: + facts['prometheus']['selector'] = None + if 'alertmanager' not in facts['prometheus']: + facts['prometheus']['alertmanager'] = {} + # pylint: disable=line-too-long + if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']: + facts['prometheus']['alertmanager']['selector'] = None + if 'alertbuffer' not in facts['prometheus']: + facts['prometheus']['alertbuffer'] = {} + # pylint: disable=line-too-long + if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']: + facts['prometheus']['alertbuffer']['selector'] = None return facts @@ -590,13 +525,8 @@ def set_dnsmasq_facts_if_unset(facts): """ if 'common' in facts: - if 'use_dnsmasq' not in facts['common']: - facts['common']['use_dnsmasq'] = bool(safe_get_bool(facts['common']['version_gte_3_2_or_1_2'])) if 'master' in facts and 'dns_port' not in facts['master']: - if safe_get_bool(facts['common']['use_dnsmasq']): - facts['master']['dns_port'] = 8053 - else: - facts['master']['dns_port'] = 53 + facts['master']['dns_port'] = 8053 return facts @@ -645,7 +575,7 @@ def set_identity_providers_if_unset(facts): name='allow_all', challenge=True, login=True, kind='AllowAllPasswordIdentityProvider' ) - if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']: + if deployment_type == 'openshift-enterprise': identity_provider = dict( name='deny_all', challenge=True, login=True, kind='DenyAllPasswordIdentityProvider' @@ -847,47 +777,28 @@ def set_deployment_facts_if_unset(facts): service_type = 'atomic-openshift' if deployment_type == 'origin': service_type = 'origin' - elif deployment_type in ['enterprise']: - service_type = 'openshift' facts['common']['service_type'] = service_type - if 'docker' in facts: - deployment_type = facts['common']['deployment_type'] - if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']: - addtl_regs = facts['docker'].get('additional_registries', []) - ent_reg = 'registry.access.redhat.com' - if ent_reg not in addtl_regs: - facts['docker']['additional_registries'] = addtl_regs + [ent_reg] - for role in ('master', 'node'): if role in facts: deployment_type = facts['common']['deployment_type'] if 'registry_url' not in facts[role]: registry_url = 'openshift/origin-${component}:${version}' - if deployment_type in ['enterprise', 'online', 'openshift-enterprise']: + if deployment_type == 'openshift-enterprise': registry_url = 'openshift3/ose-${component}:${version}' - elif deployment_type == 'atomic-enterprise': - registry_url = 'aep3_beta/aep-${component}:${version}' facts[role]['registry_url'] = registry_url if 'master' in facts: deployment_type = facts['common']['deployment_type'] openshift_features = ['Builder', 'S2IBuilder', 'WebConsole'] - if 'disabled_features' in facts['master']: - if deployment_type == 'atomic-enterprise': - curr_disabled_features = set(facts['master']['disabled_features']) - facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features)) - else: + if 'disabled_features' not in facts['master']: if facts['common']['deployment_subtype'] == 'registry': facts['master']['disabled_features'] = openshift_features if 'node' in facts: deployment_type 
= facts['common']['deployment_type'] if 'storage_plugin_deps' not in facts['node']: - if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']: - facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi'] - else: - facts['node']['storage_plugin_deps'] = [] + facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi'] return facts @@ -968,27 +879,6 @@ def set_version_facts_if_unset(facts): return facts -def set_manageiq_facts_if_unset(facts): - """ Set manageiq facts. This currently includes common.use_manageiq. - - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with version facts. - Raises: - OpenShiftFactsInternalError: - """ - if 'common' not in facts: - if 'version_gte_3_1_or_1_1' not in facts['common']: - raise OpenShiftFactsInternalError( - "Invalid invocation: The required facts are not set" - ) - if 'use_manageiq' not in facts['common']: - facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1'] - - return facts - - def set_sdn_facts_if_unset(facts, system_facts): """ Set sdn facts if not already present in facts dict @@ -999,15 +889,6 @@ def set_sdn_facts_if_unset(facts, system_facts): dict: the facts dict updated with the generated sdn facts if they were not already present """ - # pylint: disable=too-many-branches - if 'common' in facts: - use_sdn = facts['common']['use_openshift_sdn'] - if not (use_sdn == '' or isinstance(use_sdn, bool)): - use_sdn = safe_get_bool(use_sdn) - facts['common']['use_openshift_sdn'] = use_sdn - if 'sdn_network_plugin_name' not in facts['common']: - plugin = 'redhat/openshift-ovs-subnet' if use_sdn else '' - facts['common']['sdn_network_plugin_name'] = plugin if 'master' in facts: # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length @@ -1709,11 +1590,13 @@ def set_builddefaults_facts(facts): builddefaults['git_no_proxy'] = builddefaults['no_proxy'] # If we're actually defining a builddefaults config then create admission_plugin_config # then merge builddefaults[config] structure into admission_plugin_config + + # 'config' is the 'openshift_builddefaults_json' inventory variable if 'config' in builddefaults: if 'admission_plugin_config' not in facts['master']: - facts['master']['admission_plugin_config'] = dict() + # Scaffold out the full expected datastructure + facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}} facts['master']['admission_plugin_config'].update(builddefaults['config']) - # if the user didn't actually provide proxy values, delete the proxy env variable defaults. 
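+        # Editorial note (assumption, not upstream code): scaffolding the full
+        # {'BuildDefaults': {'configuration': {'env': {}}}} structure above
+        # appears intended to guarantee that the unconditional
+        # delete_empty_keys() call below can always resolve the nested
+        # ['BuildDefaults']['configuration']['env'] path, even when the
+        # user-supplied config omits parts of it.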
delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])

    return facts

@@ -1776,7 +1659,7 @@ def set_container_facts_if_unset(facts):
        facts
    """
    deployment_type = facts['common']['deployment_type']
-   if deployment_type in ['enterprise', 'openshift-enterprise']:
+   if deployment_type == 'openshift-enterprise':
        master_image = 'openshift3/ose'
        cli_image = master_image
        node_image = 'openshift3/node'
@@ -1786,16 +1669,6 @@
        router_image = 'openshift3/ose-haproxy-router'
        registry_image = 'openshift3/ose-docker-registry'
        deployer_image = 'openshift3/ose-deployer'
-   elif deployment_type == 'atomic-enterprise':
-       master_image = 'aep3_beta/aep'
-       cli_image = master_image
-       node_image = 'aep3_beta/node'
-       ovs_image = 'aep3_beta/openvswitch'
-       etcd_image = 'registry.access.redhat.com/rhel7/etcd'
-       pod_image = 'aep3_beta/aep-pod'
-       router_image = 'aep3_beta/aep-haproxy-router'
-       registry_image = 'aep3_beta/aep-docker-registry'
-       deployer_image = 'aep3_beta/aep-deployer'
    else:
        master_image = 'openshift/origin'
        cli_image = master_image
@@ -1810,7 +1683,9 @@
    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
    # If openshift_docker_use_system_container is set and is True ....
    if 'use_system_container' in list(facts['docker'].keys()):
-       if facts['docker']['use_system_container']:
+       # use safe_get_bool as the inventory variable may not be a
+       # valid boolean on its own.
+       if safe_get_bool(facts['docker']['use_system_container']):
            # ... set the service name to container-engine
            facts['docker']['service_name'] = 'container-engine'
@@ -1909,14 +1784,17 @@ class OpenShiftFacts(object):
    """
    known_roles = ['builddefaults',
                   'buildoverrides',
-                  'clock',
                   'cloudprovider',
                   'common',
                   'docker',
                   'etcd',
                   'hosted',
                   'master',
-                  'node']
+                  'node',
+                  'logging',
+                  'loggingops',
+                  'metrics',
+                  'prometheus']
    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
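+    # Editorial note (assumption): every name listed in known_roles is
+    # expected to have a matching entry in get_defaults() below; the new
+    # 'logging', 'loggingops', 'metrics' and 'prometheus' roles correspond
+    # to the top-level storage defaults introduced later in this patch.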
# pylint: disable=too-many-arguments,no-value-for-parameter @@ -1997,10 +1875,6 @@ class OpenShiftFacts(object): facts['current_config'] = get_current_config(facts) facts = set_url_facts_if_unset(facts) facts = set_project_cfg_facts_if_unset(facts) - facts = set_flannel_facts_if_unset(facts) - facts = set_calico_facts_if_unset(facts) - facts = set_nuage_facts_if_unset(facts) - facts = set_contiv_facts_if_unset(facts) facts = set_node_schedulability(facts) facts = set_selectors(facts) facts = set_identity_providers_if_unset(facts) @@ -2012,7 +1886,6 @@ class OpenShiftFacts(object): facts = build_api_server_args(facts) facts = set_version_facts_if_unset(facts) facts = set_dnsmasq_facts_if_unset(facts) - facts = set_manageiq_facts_if_unset(facts) facts = set_aggregate_facts(facts) facts = set_etcd_facts_if_unset(facts) facts = set_proxy_facts(facts) @@ -2038,9 +1911,9 @@ class OpenShiftFacts(object): hostname_f = output.strip() if exit_code == 0 else '' hostname_values = [hostname_f, self.system_facts['ansible_nodename'], self.system_facts['ansible_fqdn']] - hostname = choose_hostname(hostname_values, ip_addr) + hostname = choose_hostname(hostname_values, ip_addr).lower() - defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr, + defaults['common'] = dict(ip=ip_addr, public_ip=ip_addr, deployment_type=deployment_type, deployment_subtype=deployment_subtype, @@ -2049,10 +1922,7 @@ class OpenShiftFacts(object): portal_net='172.30.0.0/16', client_binary='oc', admin_binary='oadm', dns_domain='cluster.local', - install_examples=True, - debug_level=2, - config_base='/etc/origin', - data_dir='/var/lib/origin') + config_base='/etc/origin') if 'master' in roles: defaults['master'] = dict(api_use_ssl=True, api_port='8443', @@ -2099,78 +1969,11 @@ class OpenShiftFacts(object): docker['service_name'] = 'docker' defaults['docker'] = docker - if 'clock' in roles: - exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony']) # noqa: F405 - chrony_installed = bool(exit_code == 0) - defaults['clock'] = dict( - enabled=True, - chrony_installed=chrony_installed) - if 'cloudprovider' in roles: defaults['cloudprovider'] = dict(kind=None) if 'hosted' in roles or self.role == 'hosted': defaults['hosted'] = dict( - metrics=dict( - deploy=False, - duration=7, - resolution='10s', - storage=dict( - kind=None, - volume=dict( - name='metrics', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ), - loggingops=dict( - storage=dict( - kind=None, - volume=dict( - name='logging-es-ops', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ), - logging=dict( - storage=dict( - kind=None, - volume=dict( - name='logging-es', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ), etcd=dict( storage=dict( kind=None, @@ -2217,6 +2020,129 @@ class OpenShiftFacts(object): router=dict() ) + defaults['logging'] = dict( + storage=dict( + kind=None, + volume=dict( + name='logging-es', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + + defaults['loggingops'] = dict( + 
storage=dict( + kind=None, + volume=dict( + name='logging-es-ops', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + + defaults['metrics'] = dict( + deploy=False, + duration=7, + resolution='10s', + storage=dict( + kind=None, + volume=dict( + name='metrics', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + + defaults['prometheus'] = dict( + storage=dict( + kind=None, + volume=dict( + name='prometheus', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + + defaults['prometheus']['alertmanager'] = dict( + storage=dict( + kind=None, + volume=dict( + name='prometheus-alertmanager', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + + defaults['prometheus']['alertbuffer'] = dict( + storage=dict( + kind=None, + volume=dict( + name='prometheus-alertbuffer', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + return defaults def guess_host_provider(self): @@ -2387,19 +2313,6 @@ class OpenShiftFacts(object): protected_facts_to_overwrite) if 'docker' in new_local_facts: - # remove duplicate and empty strings from registry lists, preserving order - for cat in ['additional', 'blocked', 'insecure']: - key = '{0}_registries'.format(cat) - if key in new_local_facts['docker']: - val = new_local_facts['docker'][key] - if isinstance(val, string_types): - val = [x.strip() for x in val.split(',')] - seen = set() - new_local_facts['docker'][key] = list() - for registry in val: - if registry not in seen and registry != '': - seen.add(registry) - new_local_facts['docker'][key].append(registry) # Convert legacy log_options comma sep string to a list if present: if 'log_options' in new_local_facts['docker'] and \ isinstance(new_local_facts['docker']['log_options'], string_types): diff --git a/roles/openshift_gcp/defaults/main.yml b/roles/openshift_gcp/defaults/main.yml new file mode 100644 index 000000000..18fc453b2 --- /dev/null +++ b/roles/openshift_gcp/defaults/main.yml @@ -0,0 +1,58 @@ +--- +openshift_gcp_prefix: '' + +openshift_gcp_create_network: True +openshift_gcp_create_registry_bucket: True +openshift_gcp_kubernetes_cluster_status: owned # or shared +openshift_gcp_node_group_type: master + +openshift_gcp_ssh_private_key: '' + +openshift_gcp_project: '' +openshift_gcp_clusterid: default +openshift_gcp_region: us-central1 +openshift_gcp_zone: us-central1-a + +openshift_gcp_network_name: "{{ openshift_gcp_prefix }}network" + +openshift_gcp_iam_service_account: '' +openshift_gcp_iam_service_account_keyfile: '' + +openshift_gcp_master_lb_timeout: 2m + +openshift_gcp_infra_network_instance_group: ig-i + +openshift_gcp_image: 'rhel-7' +openshift_gcp_base_image: rhel-7 + +openshift_gcp_registry_bucket_keyfile: '' +openshift_gcp_registry_bucket_name: "{{ openshift_gcp_prefix }}-docker-registry" + +openshift_gcp_node_group_config: + - name: master + suffix: m + tags: ocp-master + machine_type: 
n1-standard-2 + boot_disk_size: 150 + scale: 1 + - name: infra + suffix: i + tags: ocp-infra-node ocp-node + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 1 + - name: node + suffix: n + tags: ocp-node + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 3 + - name: node-flex + suffix: nf + tags: ocp-node + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 0 + +openshift_gcp_startup_script_file: '' +openshift_gcp_user_data_file: '' diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yaml new file mode 100644 index 000000000..ad205ba33 --- /dev/null +++ b/roles/openshift_gcp/tasks/main.yaml @@ -0,0 +1,43 @@ +# +# This role relies on gcloud invoked via templated bash in order to +# provide a high performance deployment option. The next logical step +# is to transition to a deployment manager template which is then instantiated. +# TODO: use a formal set of role parameters consistent with openshift_aws +# +--- +- name: Templatize DNS script + template: src=dns.j2.sh dest=/tmp/openshift_gcp_provision_dns.sh mode=u+rx +- name: Templatize provision script + template: src=provision.j2.sh dest=/tmp/openshift_gcp_provision.sh mode=u+rx +- name: Templatize de-provision script + template: src=remove.j2.sh dest=/tmp/openshift_gcp_provision_remove.sh mode=u+rx + when: + - state | default('present') == 'absent' + +- name: Provision GCP DNS domain + command: /tmp/openshift_gcp_provision_dns.sh + args: + chdir: "{{ playbook_dir }}/files" + register: dns_provision + when: + - state | default('present') == 'present' + +- name: Ensure that DNS resolves to the hosted zone + assert: + that: + - "lookup('dig', public_hosted_zone, 'qtype=NS', wantlist=True) | sort | join(',') == dns_provision.stdout" + msg: "The DNS domain {{ public_hosted_zone }} defined in 'public_hosted_zone' must have NS records pointing to the Google nameservers: '{{ dns_provision.stdout }}' instead of '{{ lookup('dig', public_hosted_zone, 'qtype=NS') }}'." + when: + - state | default('present') == 'present' + +- name: Provision GCP resources + command: /tmp/openshift_gcp_provision.sh + args: + chdir: "{{ playbook_dir }}/files" + when: + - state | default('present') == 'present' + +- name: De-provision GCP resources + command: /tmp/openshift_gcp_provision_remove.sh + when: + - state | default('present') == 'absent' diff --git a/roles/openshift_gcp/templates/dns.j2.sh b/roles/openshift_gcp/templates/dns.j2.sh new file mode 100644 index 000000000..a7475aaf5 --- /dev/null +++ b/roles/openshift_gcp/templates/dns.j2.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -euo pipefail + +dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}" + +# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist +if ! 
gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null +fi + +# Always output the expected nameservers as a comma delimited list +gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ',' diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh new file mode 100644 index 000000000..4d150bc74 --- /dev/null +++ b/roles/openshift_gcp/templates/provision.j2.sh @@ -0,0 +1,325 @@ +#!/bin/bash + +set -euo pipefail + +if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then + # Create SSH key for GCE + if [ ! -f "{{ openshift_gcp_ssh_private_key }}" ]; then + ssh-keygen -t rsa -f "{{ openshift_gcp_ssh_private_key }}" -C gce-provision-cloud-user -N '' + ssh-add "{{ openshift_gcp_ssh_private_key }}" || true + fi + + # Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there + pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub") + key_tmp_file='/tmp/ocp-gce-keys' + if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then + if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then + gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file" + fi + echo -n 'cloud-user:' >> "$key_tmp_file" + cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file" + gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}" + rm -f "$key_tmp_file" + fi +fi + +metadata="" +if [[ -n "{{ openshift_gcp_startup_script_file }}" ]]; then + if [[ ! -f "{{ openshift_gcp_startup_script_file }}" ]]; then + echo "Startup script file missing at {{ openshift_gcp_startup_script_file }} from=$(pwd)" + exit 1 + fi + metadata+="--metadata-from-file=startup-script={{ openshift_gcp_startup_script_file }}" +fi +if [[ -n "{{ openshift_gcp_user_data_file }}" ]]; then + if [[ ! -f "{{ openshift_gcp_user_data_file }}" ]]; then + echo "User data file missing at {{ openshift_gcp_user_data_file }}" + exit 1 + fi + if [[ -n "${metadata}" ]]; then + metadata+="," + else + metadata="--metadata-from-file=" + fi + metadata+="user-data={{ openshift_gcp_user_data_file }}" +fi + +# Select image or image family +image="{{ openshift_gcp_image }}" +if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe "${image}" &>/dev/null; then + if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe-from-family "${image}" &>/dev/null; then + echo "No compute image or image-family found, create an image named '{{ openshift_gcp_image }}' to continue'" + exit 1 + fi + image="family/${image}" +fi + +### PROVISION THE INFRASTRUCTURE ### + +dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}" + +# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist and exit after printing NS servers +if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then + echo "DNS zone '${dns_zone}' doesn't exist. 
Must be configured prior to running this script" + exit 1 +fi + +# Create network +if ! gcloud --project "{{ openshift_gcp_project }}" compute networks describe "{{ openshift_gcp_network_name }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute networks create "{{ openshift_gcp_network_name }}" --mode "auto" +else + echo "Network '{{ openshift_gcp_network_name }}' already exists" +fi + +# Firewall rules in a form: +# ['name']='parameters for "gcloud compute firewall-rules create"' +# For all possible parameters see: gcloud compute firewall-rules create --help +range="" +if [[ -n "{{ openshift_node_port_range }}" ]]; then + range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}" +fi +declare -A FW_RULES=( + ['icmp']='--allow icmp' + ['ssh-external']='--allow tcp:22' + ['ssh-internal']='--allow tcp:22 --source-tags bastion' + ['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master" + ['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master" + ['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255 --source-tags ocp --target-tags ocp-node,ocp-infra-node" + ['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node" + ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node" +) +for rule in "${!FW_RULES[@]}"; do + ( if ! gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules create "{{ openshift_gcp_prefix }}$rule" --network "{{ openshift_gcp_network_name }}" ${FW_RULES[$rule]} + else + echo "Firewall rule '{{ openshift_gcp_prefix }}${rule}' already exists" + fi ) & +done + + +# Master IP +( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global +else + echo "IP '{{ openshift_gcp_prefix }}master-ssl-lb-ip' already exists" +fi ) & + +# Internal master IP +( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" +else + echo "IP '{{ openshift_gcp_prefix }}master-network-lb-ip' already exists" +fi ) & + +# Router IP +( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" +else + echo "IP '{{ openshift_gcp_prefix }}router-network-lb-ip' already exists" +fi ) & + + +{% for node_group in openshift_gcp_node_group_config %} +# configure {{ node_group.name }} +( + if ! 
gcloud --project "{{ openshift_gcp_project }}" compute instance-templates describe "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute instance-templates create "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" \ + --machine-type "{{ node_group.machine_type }}" --network "{{ openshift_gcp_network_name }}" \ + --tags "{{ openshift_gcp_prefix }}ocp,ocp,{{ 'ocp-bootstrap,' if (node_group.bootstrap | default(False)) else '' }}{{ node_group.tags }}" \ + --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \ + --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \ + --image "{{ node_group.image | default('${image}') }}" ${metadata} \ + --metadata "bootstrap={{ node_group.bootstrap | default(False) | bool | to_json }},cluster-id={{ openshift_gcp_prefix + openshift_gcp_clusterid }},node-group={{ node_group.name }}" + else + echo "Instance template '{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}' already exists" + fi + + # Create instance group + if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed describe "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed create "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" \ + --zone "{{ openshift_gcp_zone }}" --template "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}" + else + echo "Instance group '{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}' already exists" + fi +) & +{% endfor %} + +for i in `jobs -p`; do wait $i; done + + +# Configure the master external LB rules +( +# Master health check +if ! gcloud --project "{{ openshift_gcp_project }}" compute health-checks describe "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute health-checks create https "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz" +else + echo "Health check '{{ openshift_gcp_prefix }}master-ssl-lb-health-check' already exists" +fi + +gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-named-ports "{{ openshift_gcp_prefix }}ig-m" \ + --zone "{{ openshift_gcp_zone }}" --named-ports "{{ openshift_gcp_prefix }}port-name-master:{{ internal_console_port }}" + +# Master backend service +if ! gcloud --project "{{ openshift_gcp_project }}" compute backend-services describe "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --global &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute backend-services create "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --health-checks "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port-name "{{ openshift_gcp_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ openshift_gcp_master_lb_timeout }}" + gcloud --project "{{ openshift_gcp_project }}" compute backend-services add-backend "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --instance-group "{{ openshift_gcp_prefix }}ig-m" --global --instance-group-zone "{{ openshift_gcp_zone }}" +else + echo "Backend service '{{ openshift_gcp_prefix }}master-ssl-lb-backend' already exists" +fi + +# Master tcp proxy target +if ! 
gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies describe "{{ openshift_gcp_prefix }}master-ssl-lb-target" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies create "{{ openshift_gcp_prefix }}master-ssl-lb-target" --backend-service "{{ openshift_gcp_prefix }}master-ssl-lb-backend" +else + echo "Proxy target '{{ openshift_gcp_prefix }}master-ssl-lb-target' already exists" +fi + +# Master forwarding rule +if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --global &>/dev/null; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)') + gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ openshift_gcp_prefix }}master-ssl-lb-target" +else + echo "Forwarding rule '{{ openshift_gcp_prefix }}master-ssl-lb-rule' already exists" +fi +) & + + +# Configure the master internal LB rules +( +# Internal master health check +if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}master-network-lb-health-check" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz" +else + echo "Health check '{{ openshift_gcp_prefix }}master-network-lb-health-check' already exists" +fi + +# Internal master target pool +if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}master-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}master-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}master-network-lb-health-check" --region "{{ openshift_gcp_region }}" +else + echo "Target pool '{{ openshift_gcp_prefix }}master-network-lb-pool' already exists" +fi + +# Internal master forwarding rule +if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)') + gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}master-network-lb-pool" +else + echo "Forwarding rule '{{ openshift_gcp_prefix }}master-network-lb-rule' already exists" +fi +) & + + +# Configure the infra node rules +( +# Router health check +if ! 
gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}router-network-lb-health-check" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz" +else + echo "Health check '{{ openshift_gcp_prefix }}router-network-lb-health-check' already exists" +fi + +# Router target pool +if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}router-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}router-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}router-network-lb-health-check" --region "{{ openshift_gcp_region }}" +else + echo "Target pool '{{ openshift_gcp_prefix }}router-network-lb-pool' already exists" +fi + +# Router forwarding rule +if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}router-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)') + gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}router-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}router-network-lb-pool" +else + echo "Forwarding rule '{{ openshift_gcp_prefix }}router-network-lb-rule' already exists" +fi +) & + +for i in `jobs -p`; do wait $i; done + +# set the target pools +( +if [[ "ig-m" == "{{ openshift_gcp_infra_network_instance_group }}" ]]; then + gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool,{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}" +else + gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool" --zone "{{ openshift_gcp_zone }}" + gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}{{ openshift_gcp_infra_network_instance_group }}" --target-pools "{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}" +fi +) & + +# configure DNS +( +# Retry DNS changes until they succeed since this may be a shared resource +while true; do + dns="${TMPDIR:-/tmp}/dns.yaml" + rm -f $dns + + # DNS record for master lb + if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)') + if [[ ! 
-f $dns ]]; then + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + fi + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP" + else + echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists" + fi + + # DNS record for internal master lb + if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)') + if [[ ! -f $dns ]]; then + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + fi + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP" + else + echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists" + fi + + # DNS record for router lb + if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)') + if [[ ! -f $dns ]]; then + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + fi + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}." + else + echo "DNS record for '{{ wildcard_zone }}' already exists" + fi + + # Commit all DNS changes, retrying if preconditions are not met + if [[ -f $dns ]]; then + if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then + rc=$? + if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then + continue + fi + exit $rc + fi + fi + break +done +) & + +# Create bucket for registry +( +if ! 
gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then + gsutil mb -p "{{ openshift_gcp_project }}" -l "{{ openshift_gcp_region }}" "gs://{{ openshift_gcp_registry_bucket_name }}" +else + echo "Bucket '{{ openshift_gcp_registry_bucket_name }}' already exists" +fi +) & + +# wait until all node groups are stable +{% for node_group in openshift_gcp_node_group_config %} +{% if node_group.wait_for_stable | default(False) or not (node_group.bootstrap | default(False)) %} +# wait for stable {{ node_group.name }} +( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=600 ) & +{% else %} +# not waiting for {{ node_group.name }} due to bootstrapping +{% endif %} +{% endfor %} + + +for i in `jobs -p`; do wait $i; done diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh new file mode 100644 index 000000000..c9213b800 --- /dev/null +++ b/roles/openshift_gcp/templates/remove.j2.sh @@ -0,0 +1,178 @@ +#!/bin/bash + +set -euo pipefail + +function teardown_cmd() { + a=( $@ ) + local name=$1 + a=( "${a[@]:1}" ) + local flag=0 + local found= + for i in ${a[@]}; do + if [[ "$i" == "--"* ]]; then + found=true + break + fi + flag=$((flag+1)) + done + if [[ -z "${found}" ]]; then + flag=$((flag+1)) + fi + if gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag} + fi +} + +function teardown() { + for i in `seq 1 20`; do + if teardown_cmd $@; then + break + fi + sleep 0.5 + done +} + +# Preemptively spin down the instances +{% for node_group in openshift_gcp_node_group_config %} +# scale down {{ node_group.name }} +( + # performs a delete and scale down as one operation to ensure maximum parallelism + if ! instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' 2>/dev/null ); then + exit 0 + fi + instances="${instances%?}" + if [[ -z "${instances}" ]]; then + echo "warning: No instances in {{ node_group.name }}" 1>&2 + exit 0 + fi + if ! 
gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed delete-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --instances "${instances}"; then + echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2 + exit 0 + fi +) & +{% endfor %} + +# Bucket for registry +( +if gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then + gsutil -m rm -r "gs://{{ openshift_gcp_registry_bucket_name }}" +fi +) & + +# Project metadata prefixed with {{ openshift_gcp_prefix }} +( + for key in $( gcloud --project "{{ openshift_gcp_project }}" compute project-info describe --flatten=commonInstanceMetadata.items[] '--format=value(commonInstanceMetadata.items.key)' ); do + if [[ "${key}" == "{{ openshift_gcp_prefix }}"* ]]; then + gcloud --project "{{ openshift_gcp_project }}" compute project-info remove-metadata "--keys=${key}" + fi + done +) & + +# Instances and disks used for image building +( + teardown "{{ openshift_gcp_prefix }}build-image-instance" compute instances --zone "{{ openshift_gcp_zone }}" + teardown "{{ openshift_gcp_prefix }}build-image-instance" compute disks --zone "{{ openshift_gcp_zone }}" +) & + +# DNS +( +dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}" +if gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then + # Retry DNS changes until they succeed since this may be a shared resource + while true; do + dns="${TMPDIR:-/tmp}/dns.yaml" + rm -f "${dns}" + + # export all dns records that match into a zone format, and turn each line into a set of args for + # record-sets transaction. + gcloud dns record-sets export --project "{{ openshift_gcp_project }}" -z "${dns_zone}" --zone-file-format "${dns}" + if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \ + awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input" + then + rm -f "${dns}" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + cat "${dns}.input" | xargs -L1 gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}" + + # Commit all DNS changes, retrying if preconditions are not met + if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then + rc=$? 
+ if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then + continue + fi + exit $rc + fi + fi + rm "${dns}.input" + break + done +fi +) & + +( +# Router network rules +teardown "{{ openshift_gcp_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}" +teardown "{{ openshift_gcp_prefix }}router-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}" +teardown "{{ openshift_gcp_prefix }}router-network-lb-health-check" compute http-health-checks +teardown "{{ openshift_gcp_prefix }}router-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}" + +# Internal master network rules +teardown "{{ openshift_gcp_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}" +teardown "{{ openshift_gcp_prefix }}master-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}" +teardown "{{ openshift_gcp_prefix }}master-network-lb-health-check" compute http-health-checks +teardown "{{ openshift_gcp_prefix }}master-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}" +) & + +( +# Master SSL network rules +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-rule" compute forwarding-rules --global +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-target" compute target-tcp-proxies +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-ip" compute addresses --global +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-backend" compute backend-services --global +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" compute health-checks +) & + +#Firewall rules +#['name']='parameters for "gcloud compute firewall-rules create"' +#For all possible parameters see: gcloud compute firewall-rules create --help +declare -A FW_RULES=( + ['icmp']="" + ['ssh-external']="" + ['ssh-internal']="" + ['master-internal']="" + ['master-external']="" + ['node-internal']="" + ['infra-node-internal']="" + ['infra-node-external']="" +) +for rule in "${!FW_RULES[@]}"; do + ( if gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then + # retry a few times because this call can be flaky + for i in `seq 1 3`; do + if gcloud -q --project "{{ openshift_gcp_project }}" compute firewall-rules delete "{{ openshift_gcp_prefix }}$rule"; then + break + fi + done + fi ) & +done + +for i in `jobs -p`; do wait $i; done + +{% for node_group in openshift_gcp_node_group_config %} +# teardown {{ node_group.name }} - any load balancers referencing these groups must be removed +( + teardown "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ openshift_gcp_zone }}" + teardown "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" compute instance-templates +) & +{% endfor %} + +for i in `jobs -p`; do wait $i; done + +# Images specifically located under this cluster prefix family +for name in $( gcloud --project "{{ openshift_gcp_project }}" compute images list "--filter=family={{ openshift_gcp_prefix }}images" '--format=value(name)' ); do + ( gcloud --project "{{ openshift_gcp_project }}" compute images delete "${name}" ) & +done + +# Network +( teardown "{{ openshift_gcp_network_name }}" compute networks ) & + +for i in `jobs -p`; do wait $i; done
\ No newline at end of file
diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp_image_prep/files/partition.conf
new file mode 100644
index 000000000..b87e5e0b6
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/files/partition.conf
@@ -0,0 +1,3 @@
+[Service]
+ExecStartPost=-/usr/bin/growpart /dev/sda 1
+ExecStartPost=-/sbin/xfs_growfs /
diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp_image_prep/tasks/main.yaml
new file mode 100644
index 000000000..fee5ab618
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/tasks/main.yaml
@@ -0,0 +1,18 @@
+---
+# GCE instances boot with XFS root filesystems whose fstab entries carry barrier=1, an option that is only valid for extfs.
+- name: Remove barrier=1 from XFS fstab entries
+  lineinfile:
+    path: /etc/fstab
+    regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$'
+    line: '\1xfs\2 \3'
+    backrefs: yes
+
+- name: Ensure the root filesystem has XFS group quota turned on
+  lineinfile:
+    path: /boot/grub2/grub.cfg
+    regexp: '^(.*)linux16 (.*)$'
+    line: '\1linux16 \2 rootflags=gquota'
+    backrefs: yes
+
+- name: Ensure the root partition grows on startup
+  copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 8d35db6b5..3ee3b132c 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -3,7 +3,10 @@ Ansible action plugin to execute health checks in OpenShift clusters.
 """
 import sys
 import os
+import base64
 import traceback
+import errno
+import json
 from collections import defaultdict

 from ansible.plugins.action import ActionBase
@@ -38,8 +41,13 @@ class ActionModule(ActionBase):
         # storing the information we need in the result.
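For orientation before the plugin internals below: the checks this plugin loads are subclasses of OpenShiftCheck, whose base class is extended later in this diff. A hypothetical minimal check, illustrative only and not part of the patch, built from helpers the patch itself introduces (register_log, register_failure, find_ansible_mount):

    from openshift_checks import OpenShiftCheck

    class EtcdVarSpace(OpenShiftCheck):
        """Hypothetical example: fail when /var/lib/etcd has under 1 GB free."""
        name = "etcd_var_space"
        tags = ["health"]

        def run(self):
            # find_ansible_mount returns the ansible_mounts entry covering the path
            mount = self.find_ansible_mount("/var/lib/etcd")
            self.register_log("mount for /var/lib/etcd", mount)
            if mount.get("size_available", 0) < 10**9:
                self.register_failure("less than 1 GB free on " + mount["mount"])
            return {}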
result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context') + # if the user wants to write check results to files, they provide this directory: + output_dir = task_vars.get("openshift_checks_output_dir") + if output_dir: + output_dir = os.path.join(output_dir, task_vars["ansible_host"]) + try: - known_checks = self.load_known_checks(tmp, task_vars) + known_checks = self.load_known_checks(tmp, task_vars, output_dir) args = self._task.args requested_checks = normalize(args.get('checks', [])) @@ -65,21 +73,20 @@ class ActionModule(ActionBase): for name in resolved_checks: display.banner("CHECK [{} : {}]".format(name, task_vars["ansible_host"])) - check = known_checks[name] - check_results[name] = run_check(name, check, user_disabled_checks) - if check.changed: - check_results[name]["changed"] = True + check_results[name] = run_check(name, known_checks[name], user_disabled_checks, output_dir) result["changed"] = any(r.get("changed") for r in check_results.values()) if any(r.get("failed") for r in check_results.values()): result["failed"] = True result["msg"] = "One or more checks failed" + write_result_to_output_dir(output_dir, result) return result - def load_known_checks(self, tmp, task_vars): + def load_known_checks(self, tmp, task_vars, output_dir=None): """Find all existing checks and return a mapping of names to instances.""" load_checks() + want_full_results = bool(output_dir) known_checks = {} for cls in OpenShiftCheck.subclasses(): @@ -90,7 +97,13 @@ class ActionModule(ActionBase): "duplicate check name '{}' in: '{}' and '{}'" "".format(name, full_class_name(cls), full_class_name(other_cls)) ) - known_checks[name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars) + known_checks[name] = cls( + execute_module=self._execute_module, + tmp=tmp, + task_vars=task_vars, + want_full_results=want_full_results, + templar=self._templar + ) return known_checks @@ -185,9 +198,11 @@ def normalize(checks): return [name.strip() for name in checks if name.strip()] -def run_check(name, check, user_disabled_checks): +def run_check(name, check, user_disabled_checks, output_dir=None): """Run a single check if enabled and return a result dict.""" - if name in user_disabled_checks: + + # determine if we're going to run the check (not inactive or disabled) + if name in user_disabled_checks or '*' in user_disabled_checks: return dict(skipped=True, skipped_reason="Disabled by user request") # pylint: disable=broad-except; capturing exceptions broadly is intentional, @@ -201,12 +216,134 @@ def run_check(name, check, user_disabled_checks): if not is_active: return dict(skipped=True, skipped_reason="Not active for this host") + # run the check + result = {} try: - return check.run() + result = check.run() except OpenShiftCheckException as exc: - return dict(failed=True, msg=str(exc)) + check.register_failure(exc) + except Exception as exc: + check.register_failure("\n".join([str(exc), traceback.format_exc()])) + + # process the check state; compose the result hash, write files as needed + if check.changed: + result["changed"] = True + if check.failures or result.get("failed"): + if "msg" in result: # failure result has msg; combine with any registered failures + check.register_failure(result.get("msg")) + result["failures"] = [(fail.name, str(fail)) for fail in check.failures] + result["failed"] = True + result["msg"] = "\n".join(str(fail) for fail in check.failures) + write_to_output_file(output_dir, name + ".failures.json", result["failures"]) + if check.logs: 
+ write_to_output_file(output_dir, name + ".log.json", check.logs) + if check.files_to_save: + write_files_to_save(output_dir, check) + + return result + + +def prepare_output_dir(dirname): + """Create the directory, including parents. Return bool for success/failure.""" + try: + os.makedirs(dirname) + return True + except OSError as exc: + # trying to create existing dir leads to error; + # that error is fine, but for any other, assume the dir is not there + return exc.errno == errno.EEXIST + + +def copy_remote_file_to_dir(check, file_to_save, output_dir, fname): + """Copy file from remote host to local file in output_dir, if given.""" + if not output_dir or not prepare_output_dir(output_dir): + return + local_file = os.path.join(output_dir, fname) + + # pylint: disable=broad-except; do not need to do anything about failure to write dir/file + # and do not want exceptions to break anything. + try: + # NOTE: it would have been nice to copy the file directly without loading it into + # memory, but there does not seem to be a good way to do this via ansible. + result = check.execute_module("slurp", dict(src=file_to_save), register=False) + if result.get("failed"): + display.warning("Could not retrieve file {}: {}".format(file_to_save, result.get("msg"))) + return + + content = result["content"] + if result.get("encoding") == "base64": + content = base64.b64decode(content) + with open(local_file, "wb") as outfile: + outfile.write(content) + except Exception as exc: + display.warning("Failed writing remote {} to local {}: {}".format(file_to_save, local_file, exc)) + return + + +def _no_fail(obj): + # pylint: disable=broad-except; do not want serialization to fail for any reason + try: + return str(obj) + except Exception: + return "[not serializable]" + + +def write_to_output_file(output_dir, filename, data): + """If output_dir provided, write data to file. Serialize as JSON if data is not a string.""" + + if not output_dir or not prepare_output_dir(output_dir): + return + filename = os.path.join(output_dir, filename) + try: + with open(filename, 'w') as outfile: + if isinstance(data, string_types): + outfile.write(data) + else: + json.dump(data, outfile, sort_keys=True, indent=4, default=_no_fail) + # pylint: disable=broad-except; do not want serialization/write to break for any reason + except Exception as exc: + display.warning("Could not write output file {}: {}".format(filename, exc)) + + +def write_result_to_output_dir(output_dir, result): + """If output_dir provided, write the result as json to result.json. + + Success/failure of the write is recorded as "output_files" in the result hash afterward. + Otherwise this is much like write_to_output_file. 
+ """ + + if not output_dir: + return + if not prepare_output_dir(output_dir): + result["output_files"] = "Error creating output directory " + output_dir + return + + filename = os.path.join(output_dir, "result.json") + try: + with open(filename, 'w') as outfile: + json.dump(result, outfile, sort_keys=True, indent=4, default=_no_fail) + result["output_files"] = "Check results for this host written to " + filename + # pylint: disable=broad-except; do not want serialization/write to break for any reason except Exception as exc: - return dict(failed=True, msg=str(exc), exception=traceback.format_exc()) + result["output_files"] = "Error writing check results to {}:\n{}".format(filename, exc) + + +def write_files_to_save(output_dir, check): + """Write files to check subdir in output dir.""" + if not output_dir: + return + output_dir = os.path.join(output_dir, check.name) + seen_file = defaultdict(lambda: 0) + for file_to_save in check.files_to_save: + fname = file_to_save.filename + while seen_file[fname]: # just to be sure we never re-write a file, append numbers as needed + seen_file[fname] += 1 + fname = "{}.{}".format(fname, seen_file[fname]) + seen_file[fname] += 1 + if file_to_save.remote_filename: + copy_remote_file_to_dir(check, file_to_save.remote_filename, output_dir, fname) + else: + write_to_output_file(output_dir, fname, file_to_save.contents) def full_class_name(cls): diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py index 349655966..dcaf87eca 100644 --- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py +++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py @@ -10,6 +10,7 @@ import traceback from ansible.plugins.callback import CallbackBase from ansible import constants as C from ansible.utils.color import stringc +from ansible.module_utils.six import string_types FAILED_NO_MSG = u'Failed without returning a message.' @@ -140,11 +141,19 @@ def deduplicate_failures(failures): Returns a new list of failures such that identical failures from different hosts are grouped together in a single entry. The relative order of failures is preserved. + + If failures is unhashable, the original list of failures is returned. """ groups = defaultdict(list) for failure in failures: group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host')) - groups[group_key].append(failure) + try: + groups[group_key].append(failure) + except TypeError: + # abort and return original list of failures when failures has an + # unhashable type. + return failures + result = [] for failure in failures: group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host')) @@ -159,7 +168,10 @@ def format_failure(failure): """Return a list of pretty-formatted text entries describing a failure, including relevant information about it. 
Expect that the list of text entries will be joined by a newline separator when output to the user.""" - host = u', '.join(failure['host']) + if isinstance(failure['host'], string_types): + host = failure['host'] + else: + host = u', '.join(failure['host']) play = failure['play'] task = failure['task'] msg = failure['msg'] diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py index c8769b511..db3c0b654 100644 --- a/roles/openshift_health_checker/library/aos_version.py +++ b/roles/openshift_health_checker/library/aos_version.py @@ -26,15 +26,13 @@ from ansible.module_utils.six import string_types YUM_IMPORT_EXCEPTION = None DNF_IMPORT_EXCEPTION = None -PKG_MGR = None try: import yum # pylint: disable=import-error - PKG_MGR = "yum" except ImportError as err: YUM_IMPORT_EXCEPTION = err + try: import dnf # pylint: disable=import-error - PKG_MGR = "dnf" except ImportError as err: DNF_IMPORT_EXCEPTION = err @@ -51,14 +49,19 @@ def main(): module = AnsibleModule( argument_spec=dict( package_list=dict(type="list", required=True), + package_mgr=dict(type="str", required=True), ), supports_check_mode=True ) - if YUM_IMPORT_EXCEPTION and DNF_IMPORT_EXCEPTION: + # determine the package manager to use + package_mgr = module.params['package_mgr'] + if package_mgr not in ('yum', 'dnf'): + module.fail_json(msg="package_mgr must be one of: yum, dnf") + pkg_mgr_exception = dict(yum=YUM_IMPORT_EXCEPTION, dnf=DNF_IMPORT_EXCEPTION)[package_mgr] + if pkg_mgr_exception: module.fail_json( - msg="aos_version module could not import yum or dnf: %s %s" % - (YUM_IMPORT_EXCEPTION, DNF_IMPORT_EXCEPTION) + msg="aos_version module could not import {}: {}".format(package_mgr, pkg_mgr_exception) ) # determine the packages we will look for @@ -78,7 +81,7 @@ def main(): # get the list of packages available and complain if anything is wrong try: - pkgs = _retrieve_available_packages(expected_pkg_names) + pkgs = _retrieve_available_packages(package_mgr, expected_pkg_names) if versioned_pkgs: _check_precise_version_found(pkgs, _to_dict(versioned_pkgs)) _check_higher_version_found(pkgs, _to_dict(versioned_pkgs)) @@ -93,7 +96,7 @@ def _to_dict(pkg_list): return {pkg["name"]: pkg for pkg in pkg_list} -def _retrieve_available_packages(expected_pkgs): +def _retrieve_available_packages(pkg_mgr, expected_pkgs): # The openshift excluder prevents unintended updates to openshift # packages by setting yum excludes on those packages. See: # https://wiki.centos.org/SpecialInterestGroup/PaaS/OpenShift-Origin-Control-Updates @@ -103,14 +106,15 @@ def _retrieve_available_packages(expected_pkgs): # be excluded. So, for our purposes here, disable excludes to see # what will really be available during an install or upgrade. 
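Since package_mgr is now a required parameter of the aos_version module, callers must name the package manager explicitly. A hedged sketch of the module arguments a caller might pass (values are illustrative; package_list entries carry at least a name and a version):

    module_args = {
        "package_list": [
            {"name": "origin", "version": "3.7"},
            {"name": "origin-node", "version": "3.7"},
            {"name": "origin-master", "version": ""},  # empty version: check presence only
        ],
        "package_mgr": "yum",  # must be exactly "yum" or "dnf"; anything else fails fast
    }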
- if PKG_MGR == "yum": + if pkg_mgr == "yum": # search for package versions available for openshift pkgs yb = yum.YumBase() # pylint: disable=invalid-name yb.conf.disable_excludes = ['all'] try: - pkgs = yb.pkgSack.returnPackages(patterns=expected_pkgs) + pkgs = yb.rpmdb.returnPackages(patterns=expected_pkgs) + pkgs += yb.pkgSack.returnPackages(patterns=expected_pkgs) except yum.Errors.PackageSackError as excinfo: # you only hit this if *none* of the packages are available raise AosVersionException('\n'.join([ @@ -118,7 +122,7 @@ def _retrieve_available_packages(expected_pkgs): 'Check your subscription and repo settings.', str(excinfo), ])) - elif PKG_MGR == "dnf": + elif pkg_mgr == "dnf": dbase = dnf.Base() # pyling: disable=invalid-name dbase.conf.disable_excludes = ['all'] @@ -127,8 +131,11 @@ def _retrieve_available_packages(expected_pkgs): dquery = dbase.sack.query() aquery = dquery.available() + iquery = dquery.installed() - pkgs = list(aquery.filter(name=expected_pkgs)) + available_pkgs = list(aquery.filter(name=expected_pkgs)) + installed_pkgs = list(iquery.filter(name=expected_pkgs)) + pkgs = available_pkgs + installed_pkgs if not pkgs: # pkgs list is empty, raise because no expected packages found diff --git a/roles/openshift_health_checker/library/ocutil.py b/roles/openshift_health_checker/library/ocutil.py index 2e60735d6..c72f4c5b3 100644 --- a/roles/openshift_health_checker/library/ocutil.py +++ b/roles/openshift_health_checker/library/ocutil.py @@ -40,18 +40,17 @@ def main(): module = AnsibleModule( argument_spec=dict( - namespace=dict(type="str", required=True), + namespace=dict(type="str", required=False), config_file=dict(type="str", required=True), cmd=dict(type="str", required=True), extra_args=dict(type="list", default=[]), ), ) - cmd = [ - locate_oc_binary(), - '--config', module.params["config_file"], - '-n', module.params["namespace"], - ] + shlex.split(module.params["cmd"]) + cmd = [locate_oc_binary(), '--config', module.params["config_file"]] + if module.params["namespace"]: + cmd += ['-n', module.params["namespace"]] + cmd += shlex.split(module.params["cmd"]) + module.params["extra_args"] failed = True try: diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py index 07ec6f7ef..b7b16e0ea 100644 --- a/roles/openshift_health_checker/openshift_checks/__init__.py +++ b/roles/openshift_health_checker/openshift_checks/__init__.py @@ -2,14 +2,18 @@ Health checks for OpenShift clusters. 
""" +import json import operator import os +import time +import collections from abc import ABCMeta, abstractmethod, abstractproperty from importlib import import_module from ansible.module_utils import six from ansible.module_utils.six.moves import reduce # pylint: disable=import-error,redefined-builtin +from ansible.module_utils.six import string_types from ansible.plugins.filter.core import to_bool as ansible_to_bool @@ -27,7 +31,7 @@ class OpenShiftCheckException(Exception): class OpenShiftCheckExceptionList(OpenShiftCheckException): - """A container for multiple logging errors that may be detected in one check.""" + """A container for multiple errors that may be detected in one check.""" def __init__(self, errors): self.errors = errors super(OpenShiftCheckExceptionList, self).__init__( @@ -40,26 +44,56 @@ class OpenShiftCheckExceptionList(OpenShiftCheckException): return self.errors[index] +FileToSave = collections.namedtuple("FileToSave", "filename contents remote_filename") + + +# pylint: disable=too-many-instance-attributes; all represent significantly different state. +# Arguably they could be separated into two hashes, one for storing parameters, and one for +# storing result state; but that smells more like clutter than clarity. @six.add_metaclass(ABCMeta) class OpenShiftCheck(object): - """ - A base class for defining checks for an OpenShift cluster environment. + """A base class for defining checks for an OpenShift cluster environment. - Expect optional params: method execute_module, dict task_vars, and string tmp. + Optional init params: method execute_module, dict task_vars, and string tmp execute_module is expected to have a signature compatible with _execute_module from ansible plugins/action/__init__.py, e.g.: def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *args): This is stored so that it can be invoked in subclasses via check.execute_module("name", args) which provides the check's stored task_vars and tmp. - """ - def __init__(self, execute_module=None, task_vars=None, tmp=None): + Optional init param: want_full_results + If the check can gather logs, tarballs, etc., do so when True; but no need to spend + the time if they're not wanted (won't be written to output directory). + """ + # pylint: disable=too-many-arguments + def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False, + templar=None): + # store a method for executing ansible modules from the check self._execute_module = execute_module + # the task variables and tmpdir passed into the health checker task self.task_vars = task_vars or {} + # We may need to template some task_vars + self._templar = templar self.tmp = tmp + # a boolean for disabling the gathering of results (files, computations) that won't + # actually be recorded/used + self.want_full_results = want_full_results + + # mainly for testing purposes; see execute_module_with_retries + self._module_retries = 3 + self._module_retry_interval = 5 # seconds + # state to be recorded for inspection after the check runs: + # # set to True when the check changes the host, for accurate total "changed" count self.changed = False + # list of OpenShiftCheckException for check to report (alternative to returning a failed result) + self.failures = [] + # list of FileToSave - files the check specifies to be written locally if so configured + self.files_to_save = [] + # log messages for the check - tuples of (description, msg) where msg is serializable. 
+        # These are intended to be a sequential record of what the check observed and determined.
+        self.logs = []

     @abstractproperty
     def name(self):
@@ -80,9 +114,20 @@ class OpenShiftCheck(object):
         """Returns true if this check applies to the ansible-playbook run."""
         return True

+    def is_first_master(self):
+        """Determine if running on the first master. Returns: bool"""
+        masters = self.get_var("groups", "oo_first_master", default=None) or [None]
+        return masters[0] == self.get_var("ansible_host")
+
     @abstractmethod
     def run(self):
-        """Executes a check, normally implemented as a module."""
+        """Executes a check against a host and returns a result hash similar to Ansible modules.
+
+        Actually the direction ahead is to record state in the attributes and
+        not bother building a result hash. Instead, return an empty hash and let
+        the action plugin fill it in. Or raise an OpenShiftCheckException.
+        Returning a hash may become deprecated if it does not prove necessary.
+        """
         return {}

     @classmethod
@@ -94,7 +139,43 @@ class OpenShiftCheck(object):
             for subclass in subclass.subclasses():
                 yield subclass

-    def execute_module(self, module_name=None, module_args=None):
+    def register_failure(self, error):
+        """Record in the check that a failure occurred.
+
+        Recorded failures are merged into the result hash for now. They are also saved to the
+        output directory (if provided) as <check>.failures.json and registered as a log entry
+        for context in <check>.log.json.
+        """
+        # It should be an exception; make it one if not
+        if not isinstance(error, OpenShiftCheckException):
+            error = OpenShiftCheckException(str(error))
+        self.failures.append(error)
+        # duplicate it in the logs so it can be seen in the context of any
+        # information that led to the failure
+        self.register_log("failure: " + error.name, str(error))
+
+    def register_log(self, context, msg):
+        """Record an entry for the check log.
+
+        Entries are intended to serve as context for the whole sequence of what the check observed.
+        They are saved as an ordered list in a local check log file.
+        They are not included in the result or in the ansible log; they exist just for the record.
+        """
+        self.logs.append([context, msg])
+
+    def register_file(self, filename, contents=None, remote_filename=""):
+        """Record a file that a check makes available to be saved individually to the output directory.
+
+        Either file contents should be passed in, or a file to be copied from the remote host
+        should be specified. Contents that are not a string are serialized as JSON.
+
+        NOTE: When copying a file from the remote host, it is slurped into memory as base64, meaning
+        you should avoid using this on huge files (more than say 10M).
+        """
+        if contents is None and not remote_filename:
+            raise OpenShiftCheckException("File data/source not specified; this is a bug in the check.")
+        self.files_to_save.append(FileToSave(filename, contents, remote_filename))
+
+    def execute_module(self, module_name=None, module_args=None, save_as_name=None, register=True):
         """Invoke an Ansible module from a check.

         Invoke stored _execute_module, normally copied from the action
@@ -106,6 +187,12 @@ class OpenShiftCheck(object):
         Ansible version).

         So e.g. check.execute_module("foo", dict(arg1=...))
+
+        save_as_name specifies a file name for saving the result to an output directory,
+        if needed, and is intended to uniquely identify the result of invoking execute_module.
+        If not provided, the module name will be used.
+        If register is set False, then the result won't be registered in logs or files to save.
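A hypothetical invocation, for illustration only (module and arguments invented):

    result = check.execute_module("yum", {"list": "origin"}, save_as_name="yum-list-origin.json")
    # with register=False, nothing is appended to check.logs or check.files_to_save
    probe = check.execute_module("ping", {}, register=False)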
+ Return: result hash from module execution. """ if self._execute_module is None: @@ -113,7 +200,33 @@ class OpenShiftCheck(object): self.__class__.__name__ + " invoked execute_module without providing the method at initialization." ) - return self._execute_module(module_name, module_args, self.tmp, self.task_vars) + result = self._execute_module(module_name, module_args, self.tmp, self.task_vars) + if result.get("changed"): + self.changed = True + for output in ["result", "stdout"]: + # output is often JSON; attempt to decode + try: + result[output + "_json"] = json.loads(result[output]) + except (KeyError, ValueError): + pass + + if register: + self.register_log("execute_module: " + module_name, result) + self.register_file(save_as_name or module_name + ".json", result) + return result + + def execute_module_with_retries(self, module_name, module_args): + """Run execute_module and retry on failure.""" + result = {} + tries = 0 + while True: + res = self.execute_module(module_name, module_args) + if tries > self._module_retries or not res.get("failed"): + result.update(res) + return result + result["last_failed"] = res + tries += 1 + time.sleep(self._module_retry_interval) def get_var(self, *keys, **kwargs): """Get deeply nested values from task_vars. @@ -171,8 +284,23 @@ class OpenShiftCheck(object): 'There is a bug in this check. While trying to convert variable \n' ' "{var}={value}"\n' 'the given converter cannot be used or failed unexpectedly:\n' - '{error}'.format(var=".".join(keys), value=value, error=error) - ) + '{type}: {error}'.format( + var=".".join(keys), + value=value, + type=error.__class__.__name__, + error=error + )) + + @staticmethod + def normalize(name_list): + """Return a clean list of names. + + The input may be a comma-separated string or a sequence. Leading and + trailing whitespace characters are removed. Empty items are discarded. + """ + if isinstance(name_list, string_types): + name_list = name_list.split(',') + return [name.strip() for name in name_list if name.strip()] @staticmethod def get_major_minor_version(openshift_image_tag): @@ -214,7 +342,9 @@ class OpenShiftCheck(object): mount_point = os.path.dirname(mount_point) try: - return mount_for_path[mount_point] + mount = mount_for_path[mount_point] + self.register_log("mount point for " + path, mount) + return mount except KeyError: known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(mount_for_path)) raise OpenShiftCheckException( diff --git a/roles/openshift_health_checker/openshift_checks/diagnostics.py b/roles/openshift_health_checker/openshift_checks/diagnostics.py new file mode 100644 index 000000000..1cfdc1129 --- /dev/null +++ b/roles/openshift_health_checker/openshift_checks/diagnostics.py @@ -0,0 +1,62 @@ +""" +A check to run relevant diagnostics via `oc adm diagnostics`. 
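The diagnostics to run can be narrowed with the openshift_check_diagnostics inventory variable, which the check feeds through OpenShiftCheck.normalize (shown above); a small sketch of that parsing with an invented value:

    # accepts a comma-separated string or a list; whitespace and empty items are dropped
    raw = "ClusterRegistry, ClusterRouter,"
    names = [n.strip() for n in raw.split(",") if n.strip()]
    print(names)  # -> ['ClusterRegistry', 'ClusterRouter']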
+""" + +import os + +from openshift_checks import OpenShiftCheck, OpenShiftCheckException + + +DIAGNOSTIC_LIST = ( + "AggregatedLogging ClusterRegistry ClusterRoleBindings ClusterRoles " + "ClusterRouter DiagnosticPod NetworkCheck" +).split() + + +class DiagnosticCheck(OpenShiftCheck): + """A check to run relevant diagnostics via `oc adm diagnostics`.""" + + name = "diagnostics" + tags = ["health"] + + def is_active(self): + return super(DiagnosticCheck, self).is_active() and self.is_first_master() + + def run(self): + if self.exec_diagnostic("ConfigContexts"): + # only run the other diagnostics if that one succeeds (otherwise, all will fail) + diagnostics = self.get_var("openshift_check_diagnostics", default=DIAGNOSTIC_LIST) + for diagnostic in self.normalize(diagnostics): + self.exec_diagnostic(diagnostic) + return {} + + def exec_diagnostic(self, diagnostic): + """ + Execute an 'oc adm diagnostics' command on the remote host. + Raises OcNotFound or registers OcDiagFailed. + Returns True on success or False on failure (non-zero rc). + """ + config_base = self.get_var("openshift.common.config_base") + args = { + "config_file": os.path.join(config_base, "master", "admin.kubeconfig"), + "cmd": "adm diagnostics", + "extra_args": [diagnostic], + } + + result = self.execute_module("ocutil", args, save_as_name=diagnostic + ".failure.json") + self.register_file(diagnostic + ".txt", result['result']) + if result.get("failed"): + if result['result'] == '[Errno 2] No such file or directory': + raise OpenShiftCheckException( + "OcNotFound", + "This host is supposed to be a master but does not have the `oc` command where expected.\n" + "Has an installation been run on this host yet?" + ) + + self.register_failure(OpenShiftCheckException( + 'OcDiagFailed', + 'The {diag} diagnostic reported an error:\n' + '{error}'.format(diag=diagnostic, error=result['result']) + )) + return False + return True diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py index 6d1dea9ce..87e6146d4 100644 --- a/roles/openshift_health_checker/openshift_checks/disk_availability.py +++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py @@ -1,6 +1,7 @@ """Check that there is enough disk space in predefined paths.""" import tempfile +import os.path from openshift_checks import OpenShiftCheck, OpenShiftCheckException @@ -15,31 +16,31 @@ class DiskAvailability(OpenShiftCheck): # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements recommended_disk_space_bytes = { '/var': { - 'masters': 40 * 10**9, - 'nodes': 15 * 10**9, - 'etcd': 20 * 10**9, + 'oo_masters_to_config': 40 * 10**9, + 'oo_nodes_to_config': 15 * 10**9, + 'oo_etcd_to_config': 20 * 10**9, }, # Used to copy client binaries into, # see roles/openshift_cli/library/openshift_container_binary_sync.py. '/usr/local/bin': { - 'masters': 1 * 10**9, - 'nodes': 1 * 10**9, - 'etcd': 1 * 10**9, + 'oo_masters_to_config': 1 * 10**9, + 'oo_nodes_to_config': 1 * 10**9, + 'oo_etcd_to_config': 1 * 10**9, }, # Used as temporary storage in several cases. 
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index 6d1dea9ce..87e6146d4 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -1,6 +1,7 @@
 """Check that there is enough disk space in predefined paths."""
 
 import tempfile
+import os.path
 
 from openshift_checks import OpenShiftCheck, OpenShiftCheckException
 
@@ -15,31 +16,31 @@ class DiskAvailability(OpenShiftCheck):
     # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
     recommended_disk_space_bytes = {
         '/var': {
-            'masters': 40 * 10**9,
-            'nodes': 15 * 10**9,
-            'etcd': 20 * 10**9,
+            'oo_masters_to_config': 40 * 10**9,
+            'oo_nodes_to_config': 15 * 10**9,
+            'oo_etcd_to_config': 20 * 10**9,
         },
         # Used to copy client binaries into,
         # see roles/openshift_cli/library/openshift_container_binary_sync.py.
         '/usr/local/bin': {
-            'masters': 1 * 10**9,
-            'nodes': 1 * 10**9,
-            'etcd': 1 * 10**9,
+            'oo_masters_to_config': 1 * 10**9,
+            'oo_nodes_to_config': 1 * 10**9,
+            'oo_etcd_to_config': 1 * 10**9,
         },
         # Used as temporary storage in several cases.
         tempfile.gettempdir(): {
-            'masters': 1 * 10**9,
-            'nodes': 1 * 10**9,
-            'etcd': 1 * 10**9,
+            'oo_masters_to_config': 1 * 10**9,
+            'oo_nodes_to_config': 1 * 10**9,
+            'oo_etcd_to_config': 1 * 10**9,
         },
     }
 
     # recommended disk space for each location under an upgrade context
     recommended_disk_upgrade_bytes = {
         '/var': {
-            'masters': 10 * 10**9,
-            'nodes': 5 * 10 ** 9,
-            'etcd': 5 * 10 ** 9,
+            'oo_masters_to_config': 10 * 10**9,
+            'oo_nodes_to_config': 5 * 10 ** 9,
+            'oo_etcd_to_config': 5 * 10 ** 9,
         },
     }
 
@@ -61,15 +62,19 @@ class DiskAvailability(OpenShiftCheck):
             number = float(user_config)
             user_config = {
                 '/var': {
-                    'masters': number,
-                    'nodes': number,
-                    'etcd': number,
+                    'oo_masters_to_config': number,
+                    'oo_nodes_to_config': number,
+                    'oo_etcd_to_config': number,
                 },
             }
         except TypeError:
             # If it is not a number, then it should be a nested dict.
            pass
 
+        self.register_log("recommended thresholds", self.recommended_disk_space_bytes)
+        if user_config:
+            self.register_log("user-configured thresholds", user_config)
+
         # TODO: as suggested in
         # https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,
         # maybe we could support checking disk availability in paths that are
@@ -113,21 +118,25 @@ class DiskAvailability(OpenShiftCheck):
                     'in your Ansible inventory, and lower the recommended disk space availability\n'
                     'if necessary for this upgrade.').format(config_bytes)
 
-            return {
-                'failed': True,
-                'msg': (
-                    'Available disk space in "{}" ({:.1f} GB) '
-                    'is below minimum recommended ({:.1f} GB)'
-                ).format(path, free_gb, recommended_gb)
-            }
+            self.register_failure(msg)
 
         return {}
 
+    def find_ansible_submounts(self, path):
+        """Return a list of ansible_mounts that are below the given path."""
+        base = os.path.join(path, "")
+        return [
+            mount
+            for mount in self.get_var("ansible_mounts")
+            if mount["mount"].startswith(base)
+        ]
+
     def free_bytes(self, path):
         """Return the size available in path based on ansible_mounts."""
+        submounts = sum(mnt.get('size_available', 0) for mnt in self.find_ansible_submounts(path))
         mount = self.find_ansible_mount(path)
         try:
-            return mount['size_available']
+            return mount['size_available'] + submounts
         except KeyError:
             raise OpenShiftCheckException(
                 'Unable to retrieve disk availability for "{path}".\n'
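The new find_ansible_submounts changes the arithmetic of free_bytes: space on mounts nested under a path now counts toward that path's recommendation. A worked example with assumed ansible_mounts data (the same shape as the test case added later in this commit):

    ansible_mounts = [
        {"mount": "/", "size_available": 2 * 10**9},
        {"mount": "/var", "size_available": 10 * 10**9},
        {"mount": "/var/lib/docker", "size_available": 20 * 10**9},
        {"mount": "/var/lib/origin", "size_available": 20 * 10**9},
    ]

    def free_bytes(path):
        base = path.rstrip("/") + "/"
        submounts = sum(m["size_available"] for m in ansible_mounts if m["mount"].startswith(base))
        mount = next(m for m in ansible_mounts if m["mount"] == path)  # simplified lookup
        return mount["size_available"] + submounts

    # /var alone (10 GB) is below the 40 GB master recommendation, but with its
    # submounts it totals 50 GB, so the check passes:
    print(free_bytes("/var"))  # 50000000000
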
dependencies = ["python-docker-py", "skopeo"] + # command for checking if remote registries have an image, without docker pull + skopeo_command = "timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}" + skopeo_example_command = "skopeo inspect [--tls-verify=false] [--creds=<user>:<pass>] docker://<registry>/<image>" + + def __init__(self, *args, **kwargs): + super(DockerImageAvailability, self).__init__(*args, **kwargs) + + self.registries = dict( + # set of registries that need to be checked insecurely (note: not accounting for CIDR entries) + insecure=set(self.ensure_list("openshift_docker_insecure_registries")), + # set of registries that should never be queried even if given in the image + blocked=set(self.ensure_list("openshift_docker_blocked_registries")), + ) + + # ordered list of registries (according to inventory vars) that docker will try for unscoped images + regs = self.ensure_list("openshift_docker_additional_registries") + # currently one of these registries is added whether the user wants it or not. + deployment_type = self.get_var("openshift_deployment_type") + if deployment_type == "origin" and "docker.io" not in regs: + regs.append("docker.io") + elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs: + regs.append("registry.access.redhat.com") + self.registries["configured"] = regs + + # for the oreg_url registry there may be credentials specified + components = self.get_var("oreg_url", default="").split('/') + self.registries["oreg"] = "" if len(components) < 3 else components[0] + + # Retrieve and template registry credentials, if provided + self.skopeo_command_creds = "" + oreg_auth_user = self.get_var('oreg_auth_user', default='') + oreg_auth_password = self.get_var('oreg_auth_password', default='') + if oreg_auth_user != '' and oreg_auth_password != '': + if self._templar is not None: + oreg_auth_user = self._templar.template(oreg_auth_user) + oreg_auth_password = self._templar.template(oreg_auth_password) + self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password)) + + # record whether we could reach a registry or not (and remember results) + self.reachable_registries = {} def is_active(self): """Skip hosts with unsupported deployment types.""" @@ -55,21 +97,28 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): if not missing_images: return {} - registries = self.known_docker_registries() - if not registries: - return {"failed": True, "msg": "Unable to retrieve any docker registries."} - - available_images = self.available_images(missing_images, registries) + available_images = self.available_images(missing_images) unavailable_images = set(missing_images) - set(available_images) if unavailable_images: - return { - "failed": True, - "msg": ( - "One or more required Docker images are not available:\n {}\n" - "Configured registries: {}" - ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries)), - } + unreachable = [reg for reg, reachable in self.reachable_registries.items() if not reachable] + unreachable_msg = "Failed connecting to: {}\n".format(", ".join(unreachable)) + blocked_msg = "Blocked registries: {}\n".format(", ".join(self.registries["blocked"])) + msg = ( + "One or more required container images are not available:\n {missing}\n" + "Checked with: {cmd}\n" + "Default registries searched: {registries}\n" + "{blocked}" + "{unreachable}" + ).format( + missing=",\n ".join(sorted(unavailable_images)), + 
cmd=self.skopeo_example_command, + registries=", ".join(self.registries["configured"]), + blocked=blocked_msg if self.registries["blocked"] else "", + unreachable=unreachable_msg if unreachable else "", + ) + + return dict(failed=True, msg=msg) return {} @@ -95,13 +144,11 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # containerized etcd may not have openshift_image_tag, see bz 1466622 image_tag = self.get_var("openshift_image_tag", default="latest") image_info = DEPLOYMENT_IMAGE_INFO[deployment_type] - if not image_info: - return required # template for images that run on top of OpenShift image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}") image_url = self.get_var("oreg_url", default="") or image_url - if 'nodes' in host_groups: + if 'oo_nodes_to_config' in host_groups: for suffix in NODE_IMAGE_SUFFIXES: required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag)) # The registry-console is for some reason not prefixed with ose- like the other components. @@ -112,65 +159,87 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # images for containerized components if self.get_var("openshift", "common", "is_containerized"): components = set() - if 'nodes' in host_groups: + if 'oo_nodes_to_config' in host_groups: components.update(["node", "openvswitch"]) - if 'masters' in host_groups: # name is "origin" or "ose" + if 'oo_masters_to_config' in host_groups: # name is "origin" or "ose" components.add(image_info["name"]) for component in components: required.add("{}/{}:{}".format(image_info["namespace"], component, image_tag)) - if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise + if 'oo_etcd_to_config' in host_groups: # special case, note it is the same for origin/enterprise required.add("registry.access.redhat.com/rhel7/etcd") # and no image tag return required def local_images(self, images): """Filter a list of images and return those available locally.""" - return [ - image for image in images - if self.is_image_local(image) - ] + found_images = [] + for image in images: + # docker could have the image name as-is or prefixed with any registry + imglist = [image] + [reg + "/" + image for reg in self.registries["configured"]] + if self.is_image_local(imglist): + found_images.append(image) + return found_images def is_image_local(self, image): """Check if image is already in local docker index.""" result = self.execute_module("docker_image_facts", {"name": image}) - if result.get("failed", False): - return False - - return bool(result.get("images", [])) - - def known_docker_registries(self): - """Build a list of docker registries available according to inventory vars.""" - docker_facts = self.get_var("openshift", "docker") - regs = set(docker_facts["additional_registries"]) - - deployment_type = self.get_var("openshift_deployment_type") - if deployment_type == "origin": - regs.update(["docker.io"]) - elif "enterprise" in deployment_type: - regs.update(["registry.access.redhat.com"]) - - return list(regs) - - def available_images(self, images, default_registries): + return bool(result.get("images")) and not result.get("failed") + + def ensure_list(self, registry_param): + """Return the task var as a list.""" + # https://bugzilla.redhat.com/show_bug.cgi?id=1497274 + # If the result was a string type, place it into a list. We must do this + # as using list() on a string will split the string into its characters. 
+ # Otherwise cast to a list as was done previously. + registry = self.get_var(registry_param, default=[]) + if not isinstance(registry, six.string_types): + return list(registry) + return self.normalize(registry) + + def available_images(self, images): """Search remotely for images. Returns: list of images found.""" return [ image for image in images - if self.is_available_skopeo_image(image, default_registries) + if self.is_available_skopeo_image(image) ] - def is_available_skopeo_image(self, image, default_registries): + def is_available_skopeo_image(self, image): """Use Skopeo to determine if required image exists in known registry(s).""" - registries = default_registries - - # if image already includes a registry, only use that + registries = self.registries["configured"] + # If image already includes a registry, only use that. + # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g. + # registry.access.redhat.com/rhel7 as if the registry were a namespace. + # It's not clear that there's any way to distinguish them, but fortunately + # the current set of images all look like [registry/]namespace/name[:version]. if image.count("/") > 1: registry, image = image.split("/", 1) registries = [registry] for registry in registries: - args = {"_raw_params": "skopeo inspect --tls-verify=false docker://{}/{}".format(registry, image)} - result = self.execute_module("command", args) + if registry in self.registries["blocked"]: + continue # blocked will never be consulted + if registry not in self.reachable_registries: + self.reachable_registries[registry] = self.connect_to_registry(registry) + if not self.reachable_registries[registry]: + continue # do not keep trying unreachable registries + + args = dict(registry=registry, image=image) + args["tls"] = "false" if registry in self.registries["insecure"] else "true" + args["creds"] = self.skopeo_command_creds if registry == self.registries["oreg"] else "" + + result = self.execute_module_with_retries("command", {"_raw_params": self.skopeo_command.format(**args)}) if result.get("rc", 0) == 0 and not result.get("failed"): return True + if result.get("rc") == 124: # RC 124 == timed out; mark unreachable + self.reachable_registries[registry] = False return False + + def connect_to_registry(self, registry): + """Use ansible wait_for module to test connectivity from host to registry. 
Returns bool.""" + # test a simple TCP connection + host, _, port = registry.partition(":") + port = port or 443 + args = dict(host=host, port=port, state="started", timeout=30) + result = self.execute_module("wait_for", args) + return result.get("rc", 0) == 0 and not result.get("failed") diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py index 0558ddf14..6808d8b2f 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_storage.py +++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py @@ -14,7 +14,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck): """ name = "docker_storage" - tags = ["pre-install", "health", "preflight"] + tags = ["health", "preflight"] dependencies = ["python-docker-py"] storage_drivers = ["devicemapper", "overlay", "overlay2"] diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py index b4c8957e9..8b20ccb49 100644 --- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py +++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py @@ -12,7 +12,7 @@ class EtcdTraffic(OpenShiftCheck): def is_active(self): """Skip hosts that do not have etcd in their group names.""" group_names = self.get_var("group_names", default=[]) - valid_group_names = "etcd" in group_names + valid_group_names = "oo_etcd_to_config" in group_names version = self.get_major_minor_version(self.get_var("openshift_image_tag")) valid_version = version in ((3, 4), (3, 5)) diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py index e5d93ff3f..3d75da6f9 100644 --- a/roles/openshift_health_checker/openshift_checks/etcd_volume.py +++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py @@ -15,8 +15,12 @@ class EtcdVolume(OpenShiftCheck): etcd_mount_path = "/var/lib/etcd" def is_active(self): - etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or [] - is_etcd_host = self.get_var("ansible_ssh_host") in etcd_hosts + etcd_hosts = ( + self.get_var("groups", "oo_etcd_to_config", default=[]) or + self.get_var("groups", "oo_masters_to_config", default=[]) or + [] + ) + is_etcd_host = self.get_var("ansible_host") in etcd_hosts return super(EtcdVolume, self).is_active() and is_etcd_host def run(self): diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py index 7fc843fd7..986a01f38 100644 --- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py +++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py @@ -72,7 +72,7 @@ class Elasticsearch(LoggingCheck): for pod_name in pods_by_name.keys(): # Compare what each ES node reports as master and compare for split brain get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master") - master_name_str = self.exec_oc(get_master_cmd, []) + master_name_str = self.exec_oc(get_master_cmd, [], save_as_name="get_master_names.json") master_names = (master_name_str or '').split(' ') if len(master_names) > 1: es_master_names.add(master_names[1]) @@ -113,7 +113,7 @@ class Elasticsearch(LoggingCheck): # get ES cluster nodes node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes') - 
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
index 0558ddf14..6808d8b2f 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_storage.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -14,7 +14,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
     """
 
     name = "docker_storage"
-    tags = ["pre-install", "health", "preflight"]
+    tags = ["health", "preflight"]
 
     dependencies = ["python-docker-py"]
     storage_drivers = ["devicemapper", "overlay", "overlay2"]
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
index b4c8957e9..8b20ccb49 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
@@ -12,7 +12,7 @@ class EtcdTraffic(OpenShiftCheck):
     def is_active(self):
         """Skip hosts that do not have etcd in their group names."""
         group_names = self.get_var("group_names", default=[])
-        valid_group_names = "etcd" in group_names
+        valid_group_names = "oo_etcd_to_config" in group_names
 
         version = self.get_major_minor_version(self.get_var("openshift_image_tag"))
         valid_version = version in ((3, 4), (3, 5))
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
index e5d93ff3f..3d75da6f9 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_volume.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
@@ -15,8 +15,12 @@ class EtcdVolume(OpenShiftCheck):
     etcd_mount_path = "/var/lib/etcd"
 
     def is_active(self):
-        etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or []
-        is_etcd_host = self.get_var("ansible_ssh_host") in etcd_hosts
+        etcd_hosts = (
+            self.get_var("groups", "oo_etcd_to_config", default=[]) or
+            self.get_var("groups", "oo_masters_to_config", default=[]) or
+            []
+        )
+        is_etcd_host = self.get_var("ansible_host") in etcd_hosts
         return super(EtcdVolume, self).is_active() and is_etcd_host
 
     def run(self):
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
index 7fc843fd7..986a01f38 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -72,7 +72,7 @@ class Elasticsearch(LoggingCheck):
         for pod_name in pods_by_name.keys():
             # Compare what each ES node reports as master and compare for split brain
             get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
-            master_name_str = self.exec_oc(get_master_cmd, [])
+            master_name_str = self.exec_oc(get_master_cmd, [], save_as_name="get_master_names.json")
             master_names = (master_name_str or '').split(' ')
             if len(master_names) > 1:
                 es_master_names.add(master_names[1])
@@ -113,7 +113,7 @@ class Elasticsearch(LoggingCheck):
 
         # get ES cluster nodes
         node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
-        cluster_node_data = self.exec_oc(node_cmd, [])
+        cluster_node_data = self.exec_oc(node_cmd, [], save_as_name="get_es_nodes.json")
         try:
             cluster_nodes = json.loads(cluster_node_data)['nodes']
         except (ValueError, KeyError):
@@ -142,7 +142,7 @@ class Elasticsearch(LoggingCheck):
         errors = []
         for pod_name in pods_by_name.keys():
             cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
-            cluster_health_data = self.exec_oc(cluster_health_cmd, [])
+            cluster_health_data = self.exec_oc(cluster_health_cmd, [], save_as_name='get_es_health.json')
             try:
                 health_res = json.loads(cluster_health_data)
                 if not health_res or not health_res.get('status'):
@@ -171,7 +171,7 @@ class Elasticsearch(LoggingCheck):
         errors = []
         for pod_name in pods_by_name.keys():
             df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
-            disk_output = self.exec_oc(df_cmd, [])
+            disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json')
             lines = disk_output.splitlines()
             # expecting one header looking like 'IUse% Use%' and one body line
             body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
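All of these save_as_name arguments funnel into execute_module, where the register_file call added earlier writes each command's output into the per-host artifacts directory. The naming rule reduces to one line; a tiny sketch:

    def save_name(module_name, save_as_name=None):
        """Mirror execute_module's rule: explicit name wins, else '<module>.json'."""
        return save_as_name or module_name + ".json"

    print(save_name("ocutil", "get_es_nodes.json"))  # get_es_nodes.json
    print(save_name("ocutil"))                       # ocutil.json
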
diff --git a/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py b/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
index d783e6760..e93cc9028 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
@@ -46,7 +46,7 @@ class FluentdConfig(LoggingCheck):
             # if check is running on a master, retrieve all running pods
             # and check any pod's container for the env var "USE_JOURNAL"
             group_names = self.get_var("group_names")
-            if "masters" in group_names:
+            if "oo_masters_to_config" in group_names:
                 use_journald = self.check_fluentd_env_var()
 
             docker_info = self.execute_module("docker_info", {})
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
index ecd8adb64..05ba73ca1 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -30,14 +30,6 @@ class LoggingCheck(OpenShiftCheck):
         logging_deployed = self.get_var("openshift_hosted_logging_deploy", convert=bool, default=False)
         return logging_deployed and super(LoggingCheck, self).is_active() and self.is_first_master()
 
-    def is_first_master(self):
-        """Determine if running on first master. Returns: bool"""
-        # Note: It would be nice to use membership in oo_first_master group, however for now it
-        # seems best to avoid requiring that setup and just check this is the first master.
-        hostname = self.get_var("ansible_ssh_host") or [None]
-        masters = self.get_var("groups", "masters", default=None) or [None]
-        return masters[0] == hostname
-
     def run(self):
         return {}
 
@@ -78,7 +70,7 @@ class LoggingCheck(OpenShiftCheck):
         """Returns the namespace in which logging is configured to deploy."""
         return self.get_var("openshift_logging_namespace", default="logging")
 
-    def exec_oc(self, cmd_str="", extra_args=None):
+    def exec_oc(self, cmd_str="", extra_args=None, save_as_name=None):
         """
         Execute an 'oc' command in the remote host.
         Returns: output of command and namespace,
@@ -92,7 +84,7 @@ class LoggingCheck(OpenShiftCheck):
             "extra_args": list(extra_args) if extra_args else [],
         }
 
-        result = self.execute_module("ocutil", args)
+        result = self.execute_module("ocutil", args, save_as_name=save_as_name)
         if result.get("failed"):
             if result['result'] == '[Errno 2] No such file or directory':
                 raise CouldNotUseOc(
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
index d781db649..cacdf4213 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
@@ -104,7 +104,7 @@ class LoggingIndexTime(LoggingCheck):
             "https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}"
         )
         exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace(), uuid=uuid)
-        result = self.exec_oc(exec_cmd, [])
+        result = self.exec_oc(exec_cmd, [], save_as_name="query_for_uuid.json")
 
         try:
             count = json.loads(result)["count"]
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
index 765ba072d..e7a8ec976 100644
--- a/roles/openshift_health_checker/openshift_checks/memory_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -14,9 +14,9 @@ class MemoryAvailability(OpenShiftCheck):
     # Values taken from the official installation documentation:
     # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
     recommended_memory_bytes = {
-        "masters": 16 * GIB,
-        "nodes": 8 * GIB,
-        "etcd": 8 * GIB,
+        "oo_masters_to_config": 16 * GIB,
+        "oo_nodes_to_config": 8 * GIB,
+        "oo_etcd_to_config": 8 * GIB,
     }
     # https://access.redhat.com/solutions/3006511 physical RAM is partly reserved from memtotal
     memtotal_adjustment = 1 * GIB
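Since the kernel reserves part of physical RAM, ansible_memtotal_mb under-reports what is actually installed; the memtotal_adjustment compensates for that. Assuming the check compares against the recommendation minus the adjustment (per the linked solution; the exact comparison lives elsewhere in this file), the arithmetic for a master works out as:

    GIB = 1024 ** 3
    recommended = 16 * GIB          # oo_masters_to_config
    memtotal_adjustment = 1 * GIB

    # A master whose kernel reports 15.2 GiB still passes the 16 GiB
    # recommendation once the ~1 GiB reserved by the system is accounted for:
    memtotal = 15.2 * GIB
    print(memtotal >= recommended - memtotal_adjustment)  # True
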
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index e9bae60a3..cfbdea303 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -21,9 +21,11 @@ class DockerHostMixin(object):
 
     def is_active(self):
         """Only run on hosts that depend on Docker."""
-        is_containerized = self.get_var("openshift", "common", "is_containerized")
-        is_node = "nodes" in self.get_var("group_names", default=[])
-        return super(DockerHostMixin, self).is_active() and (is_containerized or is_node)
+        group_names = set(self.get_var("group_names", default=[]))
+        needs_docker = set(["oo_nodes_to_config"])
+        if self.get_var("openshift.common.is_containerized"):
+            needs_docker.update(["oo_masters_to_config", "oo_etcd_to_config"])
+        return super(DockerHostMixin, self).is_active() and bool(group_names.intersection(needs_docker))
 
     def ensure_dependencies(self):
         """
@@ -36,7 +38,7 @@ class DockerHostMixin(object):
         # NOTE: we would use the "package" module but it's actually an action plugin
         # and it's not clear how to invoke one of those. This is about the same anyway:
-        result = self.execute_module(
+        result = self.execute_module_with_retries(
             self.get_var("ansible_pkg_mgr", default="yum"),
             {"name": self.dependencies, "state": "present"},
         )
@@ -49,5 +51,4 @@ class DockerHostMixin(object):
             "    {deps}\n{msg}"
         ).format(deps=',\n    '.join(self.dependencies), msg=msg)
         failed = result.get("failed", False) or result.get("rc", 0) != 0
-        self.changed = result.get("changed", False)
         return msg, failed
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
index 363c12def..416805c4d 100644
--- a/roles/openshift_health_checker/openshift_checks/ovs_version.py
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -24,7 +24,7 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
     def is_active(self):
         """Skip hosts that do not have package requirements."""
         group_names = self.get_var("group_names", default=[])
-        master_or_node = 'masters' in group_names or 'nodes' in group_names
+        master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names
         return super(OvsVersion, self).is_active() and master_or_node
 
     def run(self):
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index a86180b00..090e438ff 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -20,13 +20,13 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
 
         packages = set()
 
-        if "masters" in group_names:
+        if "oo_masters_to_config" in group_names:
             packages.update(self.master_packages(rpm_prefix))
-        if "nodes" in group_names:
+        if "oo_nodes_to_config" in group_names:
             packages.update(self.node_packages(rpm_prefix))
 
         args = {"packages": sorted(set(packages))}
-        return self.execute_module("check_yum_update", args)
+        return self.execute_module_with_retries("check_yum_update", args)
 
     @staticmethod
     def master_packages(rpm_prefix):
diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py
index 1e9aecbe0..8464e8a5e 100644
--- a/roles/openshift_health_checker/openshift_checks/package_update.py
+++ b/roles/openshift_health_checker/openshift_checks/package_update.py
@@ -11,4 +11,4 @@ class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
 
     def run(self):
         args = {"packages": []}
-        return self.execute_module("check_yum_update", args)
+        return self.execute_module_with_retries("check_yum_update", args)
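The reworked DockerHostMixin.is_active boils down to a set intersection: nodes always need docker, and on containerized installs masters and etcd hosts do too. A standalone sketch of that decision:

    def needs_docker(group_names, is_containerized):
        """True when the host belongs to any group that depends on docker."""
        docker_groups = {"oo_nodes_to_config"}
        if is_containerized:
            docker_groups |= {"oo_masters_to_config", "oo_etcd_to_config"}
        return bool(set(group_names) & docker_groups)

    print(needs_docker(["oo_masters_to_config"], False))  # False: an RPM master needs no docker
    print(needs_docker(["oo_masters_to_config"], True))   # True: a containerized master does
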
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 8b780114f..2f09b22fc 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -36,7 +36,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
     def is_active(self):
         """Skip hosts that do not have package requirements."""
         group_names = self.get_var("group_names", default=[])
-        master_or_node = 'masters' in group_names or 'nodes' in group_names
+        master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names
         return super(PackageVersion, self).is_active() and master_or_node
 
     def run(self):
@@ -46,6 +46,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
         check_multi_minor_release = deployment_type in ['openshift-enterprise']
 
         args = {
+            "package_mgr": self.get_var("ansible_pkg_mgr"),
             "package_list": [
                 {
                     "name": "openvswitch",
@@ -75,7 +76,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
             ],
         }
 
-        return self.execute_module("aos_version", args)
+        return self.execute_module_with_retries("aos_version", args)
 
     def get_required_ovs_version(self):
         """Return the correct Open vSwitch version(s) for the current OpenShift version."""
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index c109ebd24..40ad27d5d 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -3,10 +3,12 @@ import pytest
 from ansible.playbook.play_context import PlayContext
 
 from openshift_health_check import ActionModule, resolve_checks
-from openshift_checks import OpenShiftCheckException
+from openshift_health_check import copy_remote_file_to_dir, write_result_to_output_dir, write_to_output_file
+from openshift_checks import OpenShiftCheckException, FileToSave
 
 
-def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None, changed=False):
+def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None,
+               run_logs=None, run_files=None, changed=False, get_var_return=None):
     """Returns a new class that is compatible with OpenShiftCheck for testing."""
 
     _name, _tags = name, tags
@@ -14,12 +16,16 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
     class FakeCheck(object):
         name = _name
         tags = _tags or []
-        changed = False
 
-        def __init__(self, execute_module=None, task_vars=None, tmp=None):
-            pass
+        def __init__(self, **_):
+            self.changed = False
+            self.failures = []
+            self.logs = run_logs or []
+            self.files_to_save = run_files or []
 
         def is_active(self):
+            if isinstance(is_active, Exception):
+                raise is_active
             return is_active
 
         def run(self):
@@ -28,6 +34,13 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
                 raise run_exception
             return run_return
 
+        def get_var(*args, **_):
+            return get_var_return
+
+        def register_failure(self, exc):
+            self.failures.append(OpenShiftCheckException(str(exc)))
+            return
+
     return FakeCheck
 
 
@@ -81,6 +94,7 @@ def skipped(result):
     {},
 ])
 def test_action_plugin_missing_openshift_facts(plugin, task_vars, monkeypatch):
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
     monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
 
     result = plugin.run(tmp=None, task_vars=task_vars)
@@ -98,23 +112,33 @@ def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars,
     assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck'])
 
 
-def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
-    checks = [fake_check(is_active=False)]
+@pytest.mark.parametrize('is_active, skipped_reason', [
+    (False, "Not active for this host"),
+    (Exception("borked"), "exception"),
+])
+def test_action_plugin_skip_non_active_checks(is_active, skipped_reason, plugin, task_vars, monkeypatch):
+    checks = [fake_check(is_active=is_active)]
     monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
 
     result = plugin.run(tmp=None, task_vars=task_vars)
 
-    assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Not active for this host")
+    assert result['checks']['fake_check'].get('skipped')
+    assert skipped_reason in result['checks']['fake_check'].get('skipped_reason')
     assert not failed(result)
     assert not changed(result)
     assert not skipped(result)
 
 
-def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('to_disable', [
+    'fake_check',
+    ['fake_check', 'spam'],
+    '*,spam,eggs',
+])
+def test_action_plugin_skip_disabled_checks(to_disable, plugin, task_vars, monkeypatch):
     checks = [fake_check('fake_check', is_active=True)]
     monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
 
-    task_vars['openshift_disable_check'] = 'fake_check'
+    task_vars['openshift_disable_check'] = to_disable
     result = plugin.run(tmp=None, task_vars=task_vars)
 
     assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request")
@@ -123,10 +147,21 @@ def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
     assert not skipped(result)
 
 
+def test_action_plugin_run_list_checks(monkeypatch):
+    task = FakeTask('openshift_health_check', {'checks': []})
+    plugin = ActionModule(task, None, PlayContext(), None, None, None)
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
+    result = plugin.run()
+
+    assert failed(result, msg_has="Available checks")
+    assert not changed(result)
+    assert not skipped(result)
+
+
 def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
     check_return_value = {'ok': 'test'}
-    check_class = fake_check(run_return=check_return_value)
-    monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+    check_class = fake_check(run_return=check_return_value, run_files=[None])
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
     monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
 
     result = plugin.run(tmp=None, task_vars=task_vars)
@@ -140,7 +175,7 @@ def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
 def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
     check_return_value = {'ok': 'test'}
     check_class = fake_check(run_return=check_return_value, changed=True)
-    monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
     monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
 
     result = plugin.run(tmp=None, task_vars=task_vars)
@@ -153,9 +188,9 @@ def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
 
 
 def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
-    check_return_value = {'failed': True}
+    check_return_value = {'failed': True, 'msg': 'this is a failure'}
     check_class = fake_check(run_return=check_return_value)
-    monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
     monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
 
     result = plugin.run(tmp=None, task_vars=task_vars)
@@ -166,24 +201,51 @@ def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
     assert not skipped(result)
 
 
-def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('exc_class, expect_traceback', [
+    (OpenShiftCheckException, False),
+    (Exception, True),
+])
+def test_action_plugin_run_check_exception(plugin, task_vars, exc_class, expect_traceback, monkeypatch):
     exception_msg = 'fake check has an exception'
-    run_exception = OpenShiftCheckException(exception_msg)
+    run_exception = exc_class(exception_msg)
     check_class = fake_check(run_exception=run_exception, changed=True)
-    monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
    monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
 
     result = plugin.run(tmp=None, task_vars=task_vars)
 
     assert failed(result['checks']['fake_check'], msg_has=exception_msg)
+    assert expect_traceback == ("Traceback" in result['checks']['fake_check']['msg'])
     assert failed(result, msg_has=['failed'])
     assert changed(result['checks']['fake_check'])
     assert changed(result)
     assert not skipped(result)
 
 
+def test_action_plugin_run_check_output_dir(plugin, task_vars, tmpdir, monkeypatch):
+    check_class = fake_check(
+        run_return={},
+        run_logs=[('thing', 'note')],
+        run_files=[
+            FileToSave('save.file', 'contents', None),
+            FileToSave('save.file', 'duplicate', None),
+            FileToSave('copy.file', None, 'foo'),  # note: copy runs execute_module => exception
+        ],
+    )
+    task_vars['openshift_checks_output_dir'] = str(tmpdir)
+    check_class.get_var = lambda self, name, **_: task_vars.get(name)
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
+    monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+    plugin.run(tmp=None, task_vars=task_vars)
+    assert any(path.basename == task_vars['ansible_host'] for path in tmpdir.listdir())
+    assert any(path.basename == 'fake_check.log.json' for path in tmpdir.visit())
+    assert any(path.basename == 'save.file' for path in tmpdir.visit())
+    assert any(path.basename == 'save.file.2' for path in tmpdir.visit())
+
+
 def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch):
-    monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {})
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
 
     result = plugin.run(tmp=None, task_vars=task_vars)
 
@@ -249,3 +311,38 @@ def test_resolve_checks_failure(names, all_checks, words_in_exception):
         resolve_checks(names, all_checks)
     for word in words_in_exception:
         assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('give_output_dir, result, expect_file', [
+    (False, None, False),
+    (True, dict(content="c3BhbQo=", encoding="base64"), True),
+    (True, dict(content="encoding error", encoding="base64"), False),
+    (True, dict(content="spam", no_encoding=None), True),
+    (True, dict(failed=True, msg="could not slurp"), False),
+])
+def test_copy_remote_file_to_dir(give_output_dir, result, expect_file, tmpdir):
+    check = fake_check()()
+    check.execute_module = lambda *args, **_: result
+    copy_remote_file_to_dir(check, "remote_file", str(tmpdir) if give_output_dir else "", "local_file")
+    assert expect_file == any(path.basename == "local_file" for path in tmpdir.listdir())
+
+
+def test_write_to_output_exceptions(tmpdir, monkeypatch, capsys):
+
+    class Spam(object):
+        def __str__(self):
+            raise Exception("break str")
+
+    test = {1: object(), 2: Spam()}
+    test[3] = test
+    write_result_to_output_dir(str(tmpdir), test)
+    assert "Error writing" in test["output_files"]
+
+    output_dir = tmpdir.join("eggs")
+    output_dir.write("spam")  # so now it's not a dir
+    write_to_output_file(str(output_dir), "somefile", "somedata")
+    assert "Could not write" in capsys.readouterr()[1]
+
+    monkeypatch.setattr("openshift_health_check.prepare_output_dir", lambda *_: False)
+    write_result_to_output_dir(str(tmpdir), test)
+    assert "Error creating" in test["output_files"]
diff --git a/roles/openshift_health_checker/test/diagnostics_test.py b/roles/openshift_health_checker/test/diagnostics_test.py
new file mode 100644
index 000000000..800889fa7
--- /dev/null
+++ b/roles/openshift_health_checker/test/diagnostics_test.py
@@ -0,0 +1,50 @@
+import pytest
+
+from openshift_checks.diagnostics import DiagnosticCheck, OpenShiftCheckException
+
+
+@pytest.fixture()
+def task_vars():
+    return dict(
+        openshift=dict(
+            common=dict(config_base="/etc/origin/")
+        )
+    )
+
+
+def test_module_succeeds(task_vars):
+    check = DiagnosticCheck(lambda *_: {"result": "success"}, task_vars)
+    check.is_first_master = lambda: True
+    assert check.is_active()
+    check.exec_diagnostic("spam")
+    assert not check.failures
+
+
+def test_oc_not_there(task_vars):
+    def exec_module(*_):
+        return {"failed": True, "result": "[Errno 2] No such file or directory"}
+
+    check = DiagnosticCheck(exec_module, task_vars)
+    with pytest.raises(OpenShiftCheckException) as excinfo:
+        check.exec_diagnostic("spam")
+    assert excinfo.value.name == "OcNotFound"
+
+
+def test_module_fails(task_vars):
+    def exec_module(*_):
+        return {"failed": True, "result": "something broke"}
+
+    check = DiagnosticCheck(exec_module, task_vars)
+    check.exec_diagnostic("spam")
+    assert check.failures and check.failures[0].name == "OcDiagFailed"
+
+
+def test_names_executed(task_vars):
+    task_vars["openshift_check_diagnostics"] = diagnostics = "ConfigContexts,spam,,eggs"
+
+    def exec_module(module, args, *_):
+        assert "extra_args" in args
+        assert args["extra_args"][0] in diagnostics
+        return {"result": "success"}
+
+    DiagnosticCheck(exec_module, task_vars).run()
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index f4fd2dfed..7acdb40ec 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -4,11 +4,11 @@ from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckE
 
 
 @pytest.mark.parametrize('group_names,is_active', [
-    (['masters'], True),
-    (['nodes'], True),
-    (['etcd'], True),
-    (['masters', 'nodes'], True),
-    (['masters', 'etcd'], True),
+    (['oo_masters_to_config'], True),
+    (['oo_nodes_to_config'], True),
+    (['oo_etcd_to_config'], True),
+    (['oo_masters_to_config', 'oo_nodes_to_config'], True),
+    (['oo_masters_to_config', 'oo_etcd_to_config'], True),
     ([], False),
     (['lb'], False),
     (['nfs'], False),
@@ -39,7 +39,7 @@ def test_is_active(group_names, is_active):
 ])
 def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
     task_vars = dict(
-        group_names=['masters'],
+        group_names=['oo_masters_to_config'],
         ansible_mounts=ansible_mounts,
     )
 
@@ -52,7 +52,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
 
 @pytest.mark.parametrize('group_names,configured_min,ansible_mounts', [
     (
-        ['masters'],
+        ['oo_masters_to_config'],
         0,
         [{
             'mount': '/',
@@ -60,7 +60,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
         }],
     ),
     (
-        ['nodes'],
+        ['oo_nodes_to_config'],
         0,
         [{
             'mount': '/',
@@ -68,7 +68,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
         }],
     ),
     (
-        ['etcd'],
+        ['oo_etcd_to_config'],
         0,
         [{
             'mount': '/',
@@ -76,7 +76,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
         }],
     ),
     (
-        ['etcd'],
+        ['oo_etcd_to_config'],
         1,  # configure lower threshold
         [{
             'mount': '/',
@@ -84,7 +84,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
         }],
     ),
     (
-        ['etcd'],
+        ['oo_etcd_to_config'],
         0,
         [{
             # not enough space on / ...
@@ -96,6 +96,24 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
             'size_available': 20 * 10**9 + 1,
         }],
     ),
+    (
+        ['oo_masters_to_config'],
+        0,
+        [{
+            'mount': '/',
+            'size_available': 2 * 10**9,
+        }, {  # not enough directly on /var
+            'mount': '/var',
+            'size_available': 10 * 10**9 + 1,
+        }, {
+            # but subdir mounts add up to enough
+            'mount': '/var/lib/docker',
+            'size_available': 20 * 10**9 + 1,
+        }, {
+            'mount': '/var/lib/origin',
+            'size_available': 20 * 10**9 + 1,
+        }],
+    ),
 ])
 def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansible_mounts):
     task_vars = dict(
@@ -104,15 +122,16 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
         ansible_mounts=ansible_mounts,
     )
 
-    result = DiskAvailability(fake_execute_module, task_vars).run()
+    check = DiskAvailability(fake_execute_module, task_vars)
+    check.run()
 
-    assert not result.get('failed', False)
+    assert not check.failures
 
 
 @pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [
     (
         'test with no space available',
-        ['masters'],
+        ['oo_masters_to_config'],
         0,
         [{
             'mount': '/',
@@ -122,7 +141,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
     ),
     (
         'test with a higher configured required value',
-        ['masters'],
+        ['oo_masters_to_config'],
         100,  # set a higher threshold
         [{
             'mount': '/',
@@ -132,7 +151,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
     ),
     (
         'test with 1GB available, but "0" GB space requirement',
-        ['nodes'],
+        ['oo_nodes_to_config'],
         0,
         [{
             'mount': '/',
@@ -142,7 +161,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
     ),
     (
         'test with no space available, but "0" GB space requirement',
-        ['etcd'],
+        ['oo_etcd_to_config'],
         0,
         [{
             'mount': '/',
@@ -152,7 +171,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
     ),
     (
         'test with enough space for a node, but not for a master',
-        ['nodes', 'masters'],
+        ['oo_nodes_to_config', 'oo_masters_to_config'],
         0,
         [{
             'mount': '/',
@@ -162,7 +181,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
     ),
     (
         'test failure with enough space on "/", but not enough on "/var"',
-        ['etcd'],
+        ['oo_etcd_to_config'],
         0,
         [{
             # enough space on / ...
@@ -183,17 +202,18 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
         ansible_mounts=ansible_mounts,
     )
 
-    result = DiskAvailability(fake_execute_module, task_vars).run()
+    check = DiskAvailability(fake_execute_module, task_vars)
+    check.run()
 
-    assert result['failed']
+    assert check.failures
     for chunk in 'below recommended'.split() + expect_chunks:
-        assert chunk in result.get('msg', '')
+        assert chunk in str(check.failures[0])
 
 
 @pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [
     (
         'test without enough space for master under "upgrade" context',
-        ['nodes', 'masters'],
+        ['oo_nodes_to_config', 'oo_masters_to_config'],
         "upgrade",
         [{
             'mount': '/',
@@ -205,7 +225,7 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
     ),
     (
         'test with enough space for master under "upgrade" context',
-        ['nodes', 'masters'],
+        ['oo_nodes_to_config', 'oo_masters_to_config'],
         "upgrade",
         [{
             'mount': '/',
@@ -217,7 +237,7 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
     ),
     (
         'test with not enough space for master, and non-upgrade context',
-        ['nodes', 'masters'],
+        ['oo_nodes_to_config', 'oo_masters_to_config'],
         "health",
         [{
             'mount': '/',
@@ -237,11 +257,11 @@ def test_min_required_space_changes_with_upgrade_context(name, group_names, cont
     )
 
     check = DiskAvailability(fake_execute_module, task_vars)
-    result = check.run()
+    check.run()
 
-    assert result.get("failed", False) == failed
+    assert bool(check.failures) == failed
     for word in extra_words:
-        assert word in result.get('msg', '')
+        assert word in str(check.failures[0])
 
 
 def fake_execute_module(*args):
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 8d0a53df9..dec99e5db 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,24 +3,37 @@ import pytest
 from openshift_checks.docker_image_availability import DockerImageAvailability
 
 
+@pytest.fixture()
+def task_vars():
+    return dict(
+        openshift=dict(
+            common=dict(
+                service_type='origin',
+                is_containerized=False,
+                is_atomic=False,
+            ),
+            docker=dict(),
+        ),
+        openshift_deployment_type='origin',
+        openshift_image_tag='',
+        group_names=['oo_nodes_to_config', 'oo_masters_to_config'],
+    )
+
+
 @pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
-    ("origin", True, [], True),
-    ("openshift-enterprise", True, [], True),
-    ("enterprise", True, [], False),
-    ("online", True, [], False),
     ("invalid", True, [], False),
     ("", True, [], False),
     ("origin", False, [], False),
     ("openshift-enterprise", False, [], False),
-    ("origin", False, ["nodes", "masters"], True),
-    ("openshift-enterprise", False, ["etcd"], False),
+    ("origin", False, ["oo_nodes_to_config", "oo_masters_to_config"], True),
+    ("openshift-enterprise", False, ["oo_etcd_to_config"], False),
+    ("origin", True, ["nfs"], False),
+    ("openshift-enterprise", True, ["lb"], False),
 ])
-def test_is_active(deployment_type, is_containerized, group_names, expect_active):
-    task_vars = dict(
-        openshift=dict(common=dict(is_containerized=is_containerized)),
-        openshift_deployment_type=deployment_type,
-        group_names=group_names,
-    )
+def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
+    task_vars['openshift_deployment_type'] = deployment_type
+    task_vars['openshift']['common']['is_containerized'] = is_containerized
+    task_vars['group_names'] = group_names
 
     assert DockerImageAvailability(None, task_vars).is_active() == expect_active
 
@@ -30,10 +43,10 @@ def test_is_active(deployment_type, is_containerized, group_names, expect_active):
     (True, False),
     (False, True),
 ])
-def test_all_images_available_locally(is_containerized, is_atomic):
+def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
     def execute_module(module_name, module_args, *_):
         if module_name == "yum":
-            return {"changed": True}
+            return {}
 
         assert module_name == "docker_image_facts"
         assert 'name' in module_args
@@ -42,19 +55,9 @@ def test_all_images_available_locally(is_containerized, is_atomic):
             'images': [module_args['name']],
         }
 
-    result = DockerImageAvailability(execute_module, task_vars=dict(
-        openshift=dict(
-            common=dict(
-                service_type='origin',
-                is_containerized=is_containerized,
-                is_atomic=is_atomic,
-            ),
-            docker=dict(additional_registries=["docker.io"]),
-        ),
-        openshift_deployment_type='origin',
-        openshift_image_tag='3.4',
-        group_names=['nodes', 'masters'],
-    )).run()
+    task_vars['openshift']['common']['is_containerized'] = is_containerized
+    task_vars['openshift']['common']['is_atomic'] = is_atomic
+    result = DockerImageAvailability(execute_module, task_vars).run()
 
     assert not result.get('failed', False)
 
@@ -63,56 +66,39 @@ def test_all_images_available_locally(is_containerized, is_atomic):
     False,
     True,
 ])
-def test_all_images_available_remotely(available_locally):
+def test_all_images_available_remotely(task_vars, available_locally):
     def execute_module(module_name, *_):
         if module_name == 'docker_image_facts':
             return {'images': [], 'failed': available_locally}
-        return {'changed': False}
+        return {}
 
-    result = DockerImageAvailability(execute_module, task_vars=dict(
-        openshift=dict(
-            common=dict(
-                service_type='origin',
-                is_containerized=False,
-                is_atomic=False,
-            ),
-            docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
-        ),
-        openshift_deployment_type='origin',
-        openshift_image_tag='v3.4',
-        group_names=['nodes', 'masters'],
-    )).run()
+    task_vars['openshift_docker_additional_registries'] = ["docker.io", "registry.access.redhat.com"]
+    task_vars['openshift_image_tag'] = 'v3.4'
+    check = DockerImageAvailability(execute_module, task_vars)
+    check._module_retry_interval = 0
+    result = check.run()
 
     assert not result.get('failed', False)
 
 
-def test_all_images_unavailable():
-    def execute_module(module_name=None, *_):
-        if module_name == "command":
-            return {
-                'failed': True,
-            }
+def test_all_images_unavailable(task_vars):
+    def execute_module(module_name=None, *args):
+        if module_name == "wait_for":
+            return {}
+        elif module_name == "command":
+            return {'failed': True}
 
-        return {
-            'changed': False,
-        }
+        return {}  # docker_image_facts failure
 
-    actual = DockerImageAvailability(execute_module, task_vars=dict(
-        openshift=dict(
-            common=dict(
-                service_type='origin',
-                is_containerized=False,
-                is_atomic=False,
-            ),
-            docker=dict(additional_registries=["docker.io"]),
-        ),
-        openshift_deployment_type="openshift-enterprise",
-        openshift_image_tag='latest',
-        group_names=['nodes', 'masters'],
-    )).run()
+    task_vars['openshift_docker_additional_registries'] = ["docker.io"]
+    task_vars['openshift_deployment_type'] = "openshift-enterprise"
+    task_vars['openshift_image_tag'] = 'latest'
+    check = DockerImageAvailability(execute_module, task_vars)
+    check._module_retry_interval = 0
+    actual = check.run()
 
     assert actual['failed']
-    assert "required Docker images are not available" in actual['msg']
+    assert "required container images are not available" in actual['msg']
 
 
 @pytest.mark.parametrize("message,extra_words", [
@@ -125,67 +111,70 @@ def test_all_images_unavailable():
         ["dependencies can be installed via `yum`"]
     ),
 ])
-def test_skopeo_update_failure(message, extra_words):
+def test_skopeo_update_failure(task_vars, message, extra_words):
     def execute_module(module_name=None, *_):
         if module_name == "yum":
             return {
                 "failed": True,
                 "msg": message,
-                "changed": False,
             }
 
-        return {'changed': False}
+        return {}
 
-    actual = DockerImageAvailability(execute_module, task_vars=dict(
-        openshift=dict(
-            common=dict(
-                service_type='origin',
-                is_containerized=False,
-                is_atomic=False,
-            ),
-            docker=dict(additional_registries=["unknown.io"]),
-        ),
-        openshift_deployment_type="openshift-enterprise",
-        openshift_image_tag='',
-        group_names=['nodes', 'masters'],
-    )).run()
+    task_vars['openshift_docker_additional_registries'] = ["unknown.io"]
+    task_vars['openshift_deployment_type'] = "openshift-enterprise"
+    check = DockerImageAvailability(execute_module, task_vars)
+    check._module_retry_interval = 0
+    actual = check.run()
 
     assert actual["failed"]
     for word in extra_words:
         assert word in actual["msg"]
 
 
-@pytest.mark.parametrize("deployment_type,registries", [
-    ("origin", ["unknown.io"]),
-    ("openshift-enterprise", ["registry.access.redhat.com"]),
-    ("openshift-enterprise", []),
-])
-def test_registry_availability(deployment_type, registries):
+@pytest.mark.parametrize(
+    "image, registries, connection_test_failed, skopeo_failed, "
+    "expect_success, expect_registries_reached", [
+        (
+            "spam/eggs:v1", ["test.reg"],
+            True, True,
+            False,
+            {"test.reg": False, "docker.io": False},
+        ),
+        (
+            "spam/eggs:v1", ["test.reg"],
+            False, True,
+            False,
+            {"test.reg": True, "docker.io": True},
+        ),
+        (
+            "eggs.reg/spam/eggs:v1", ["test.reg"],
+            False, False,
+            True,
+            {"eggs.reg": True},
+        ),
    ])
+def test_registry_availability(image, registries, connection_test_failed, skopeo_failed,
+                               expect_success, expect_registries_reached):
     def execute_module(module_name=None, *_):
-        return {
-            'changed': False,
-        }
+        if module_name == "wait_for":
+            return dict(msg="msg", failed=connection_test_failed)
+        elif module_name == "command":
+            return dict(msg="msg", failed=skopeo_failed)
 
-    actual = DockerImageAvailability(execute_module, task_vars=dict(
-        openshift=dict(
-            common=dict(
-                service_type='origin',
-                is_containerized=False,
-                is_atomic=False,
-            ),
-            docker=dict(additional_registries=registries),
-        ),
-        openshift_deployment_type=deployment_type,
-        openshift_image_tag='',
-        group_names=['nodes', 'masters'],
-    )).run()
+    tv = task_vars()
+    tv.update({"openshift_docker_additional_registries": registries})
+    check = DockerImageAvailability(execute_module, tv)
+    check._module_retry_interval = 0
 
-    assert not actual.get("failed", False)
+    available = check.is_available_skopeo_image(image)
+    assert available == expect_success
+    assert expect_registries_reached == check.reachable_registries
 
 
 @pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
     (  # standard set of stuff required on nodes
-        "origin", False, ['nodes'], None,
+        "origin", False, ['oo_nodes_to_config'], "",
         set([
             'openshift/origin-pod:vtest',
             'openshift/origin-deployer:vtest',
@@ -195,7 +184,7 @@ def test_registry_availability(deployment_type, registries):
         ])
     ),
     (  # set a different URL for images
-        "origin", False, ['nodes'], 'foo.io/openshift/origin-${component}:${version}',
+        "origin", False, ['oo_nodes_to_config'], 'foo.io/openshift/origin-${component}:${version}',
         set([
             'foo.io/openshift/origin-pod:vtest',
             'foo.io/openshift/origin-deployer:vtest',
@@ -205,7 +194,7 @@ def test_registry_availability(deployment_type, registries):
         ])
     ),
     (
-        "origin", True, ['nodes', 'masters', 'etcd'], None,
+        "origin", True, ['oo_nodes_to_config', 'oo_masters_to_config', 'oo_etcd_to_config'], "",
         set([
             # images running on top of openshift
             'openshift/origin-pod:vtest',
@@ -221,7 +210,7 @@ def test_registry_availability(deployment_type, registries):
         ])
     ),
     (  # enterprise images
-        "openshift-enterprise", True, ['nodes'], 'foo.io/openshift3/ose-${component}:f13ac45',
+        "openshift-enterprise", True, ['oo_nodes_to_config'], 'foo.io/openshift3/ose-${component}:f13ac45',
         set([
             'foo.io/openshift3/ose-pod:f13ac45',
             'foo.io/openshift3/ose-deployer:f13ac45',
@@ -235,7 +224,7 @@ def test_registry_availability(deployment_type, registries):
         ])
     ),
     (
-        "openshift-enterprise", True, ['etcd', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
+        "openshift-enterprise", True, ['oo_etcd_to_config', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
         set([
             'registry.access.redhat.com/rhel7/etcd',
             # lb does not yet come in a containerized version
@@ -257,7 +246,7 @@ def test_required_images(deployment_type, is_containerized, groups, oreg_url, ex
         openshift_image_tag='vtest',
     )
 
-    assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+    assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
 
 
 def test_containerized_etcd():
@@ -268,7 +257,7 @@ def test_containerized_etcd():
             ),
         ),
         openshift_deployment_type="origin",
-        group_names=['etcd'],
+        group_names=['oo_etcd_to_config'],
     )
     expected = set(['registry.access.redhat.com/rhel7/etcd'])
-    assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+    assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
index e0dccc062..8fa68c378 100644
--- a/roles/openshift_health_checker/test/docker_storage_test.py
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -5,9 +5,9 @@ from openshift_checks.docker_storage import DockerStorage
 
 
 @pytest.mark.parametrize('is_containerized, group_names, is_active', [
-    (False, ["masters", "etcd"], False),
-    (False, ["masters", "nodes"], True),
-    (True, ["etcd"], True),
+    (False, ["oo_masters_to_config", "oo_etcd_to_config"], False),
+    (False, ["oo_masters_to_config", "oo_nodes_to_config"], True),
+    (True, ["oo_etcd_to_config"], True),
 ])
 def test_is_active(is_containerized, group_names, is_active):
     task_vars = dict(
diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py
index 09bacd9ac..3fa5e8929 100644
--- a/roles/openshift_health_checker/test/elasticsearch_test.py
+++ b/roles/openshift_health_checker/test/elasticsearch_test.py
@@ -72,7 +72,7 @@ def test_check_elasticsearch():
         assert_error_in_list('NoRunningPods', excinfo.value)
 
     # canned oc responses to match so all the checks pass
-    def exec_oc(cmd, args):
+    def exec_oc(cmd, args, **_):
         if '_cat/master' in cmd:
             return 'name logging-es'
         elif '/_nodes' in cmd:
@@ -97,7 +97,7 @@ def test_check_running_es_pods():
 
 def test_check_elasticsearch_masters():
     pods = [plain_es_pod]
-    check = canned_elasticsearch(task_vars_config_base, lambda *_: plain_es_pod['_test_master_name_str'])
+    check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: plain_es_pod['_test_master_name_str'])
     assert not check.check_elasticsearch_masters(pods_by_name(pods))
 
 
@@ -117,7 +117,7 @@ def test_check_elasticsearch_masters():
 ])
 def test_check_elasticsearch_masters_error(pods, expect_error):
     test_pods = list(pods)
-    check = canned_elasticsearch(task_vars_config_base, lambda *_: test_pods.pop(0)['_test_master_name_str'])
+    check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: test_pods.pop(0)['_test_master_name_str'])
     assert_error_in_list(expect_error, check.check_elasticsearch_masters(pods_by_name(pods)))
 
 
@@ -129,7 +129,7 @@ es_node_list = {
 
 
 def test_check_elasticsearch_node_list():
-    check = canned_elasticsearch(task_vars_config_base, lambda *_: json.dumps(es_node_list))
+    check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: json.dumps(es_node_list))
     assert not check.check_elasticsearch_node_list(pods_by_name([plain_es_pod]))
 
 
@@ -151,13 +151,13 @@ def test_check_elasticsearch_node_list():
     ),
 ])
 def test_check_elasticsearch_node_list_errors(pods, node_list, expect_error):
-    check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(node_list))
+    check = canned_elasticsearch(task_vars_config_base, lambda cmd, args, **_: json.dumps(node_list))
     assert_error_in_list(expect_error, check.check_elasticsearch_node_list(pods_by_name(pods)))
 
 
 def test_check_elasticsearch_cluster_health():
     test_health_data = [{"status": "green"}]
-    check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+    check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
     assert not check.check_es_cluster_health(pods_by_name([plain_es_pod]))
 
 
@@ -175,12 +175,12 @@ def test_check_elasticsearch_cluster_health():
 ])
 def test_check_elasticsearch_cluster_health_errors(pods, health_data, expect_error):
     test_health_data = list(health_data)
-    check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+    check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
     assert_error_in_list(expect_error, check.check_es_cluster_health(pods_by_name(pods)))
 
 
 def test_check_elasticsearch_diskspace():
-    check = canned_elasticsearch(exec_oc=lambda *_: 'IUse% Use%\n 3% 4%\n')
+    check = canned_elasticsearch(exec_oc=lambda *args, **_: 'IUse% Use%\n 3% 4%\n')
     assert not check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))
 
 
@@ -199,5 +199,5 @@ def test_check_elasticsearch_diskspace():
     ),
 ])
 def test_check_elasticsearch_diskspace_errors(disk_data, expect_error):
-    check = canned_elasticsearch(exec_oc=lambda *_: disk_data)
+    check = canned_elasticsearch(exec_oc=lambda *args, **_: disk_data)
     assert_error_in_list(expect_error, check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod])))
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index fae3e578d..dd6f4ad81 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -4,14 +4,14 @@ from openshift_checks.etcd_traffic import EtcdTraffic
 
 
 @pytest.mark.parametrize('group_names,version,is_active', [
-    (['masters'], "3.5", False),
-    (['masters'], "3.6", False),
-    (['nodes'], "3.4", False),
-    (['etcd'], "3.4", True),
-    (['etcd'], "1.5", True),
-    (['etcd'], "3.1", False),
-    (['masters', 'nodes'], "3.5", False),
-    (['masters', 'etcd'], "3.5", True),
+    (['oo_masters_to_config'], "3.5", False),
+    (['oo_masters_to_config'], "3.6", False),
+    (['oo_nodes_to_config'], "3.4", False),
+    (['oo_etcd_to_config'], "3.4", True),
+    (['oo_etcd_to_config'], "1.5", True),
+    (['oo_etcd_to_config'], "3.1", False),
+    (['oo_masters_to_config', 'oo_nodes_to_config'], "3.5", False),
+    (['oo_masters_to_config', 'oo_etcd_to_config'], "3.5", True),
     ([], "3.4", False),
 ])
 def test_is_active(group_names, version, is_active):
@@ -23,9 +23,9 @@ def test_is_active(group_names, version, is_active):
 
 
 @pytest.mark.parametrize('group_names,matched,failed,extra_words', [
-    (["masters"], True, True, ["Higher than normal", "traffic"]),
-    (["masters", "etcd"], False, False, []),
-    (["etcd"], False, False, []),
+    (["oo_masters_to_config"], True, True, ["Higher than normal", "traffic"]),
+    (["oo_masters_to_config", "oo_etcd_to_config"], False, False, []),
+    (["oo_etcd_to_config"], False, False, []),
 ])
 def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words):
     def execute_module(module_name, *_):
diff --git a/roles/openshift_health_checker/test/fluentd_config_test.py b/roles/openshift_health_checker/test/fluentd_config_test.py
index 10db253bc..b5b4858d6 100644
--- a/roles/openshift_health_checker/test/fluentd_config_test.py
+++ b/roles/openshift_health_checker/test/fluentd_config_test.py
@@ -82,7 +82,7 @@ def test_check_logging_config_non_master(name, use_journald, logging_driver, ext
         return {}
 
     task_vars = dict(
-        group_names=["nodes", "etcd"],
+        group_names=["oo_nodes_to_config", "oo_etcd_to_config"],
         openshift_logging_fluentd_use_journal=use_journald,
         openshift=dict(
             common=dict(config_base=""),
@@ -128,7 +128,7 @@ def test_check_logging_config_non_master_failed(name, use_journald, logging_driv
         return {}
 
     task_vars = dict(
-        group_names=["nodes", "etcd"],
+        group_names=["oo_nodes_to_config", "oo_etcd_to_config"],
         openshift_logging_fluentd_use_journal=use_journald,
         openshift=dict(
            common=dict(config_base=""),
@@ -192,7 +192,7 @@ def test_check_logging_config_master(name, pods, logging_driver, extra_words):
         return {}
 
     task_vars = dict(
-        group_names=["masters"],
+        group_names=["oo_masters_to_config"],
         openshift=dict(
             common=dict(config_base=""),
         ),
@@ -274,7 +274,7 @@ def test_check_logging_config_master_failed(name, pods, logging_driver, words):
         return {}
 
     task_vars = dict(
-        group_names=["masters"],
+        group_names=["oo_masters_to_config"],
         openshift=dict(
             common=dict(config_base=""),
         ),
@@ -331,7 +331,7 @@ def test_check_logging_config_master_fails_on_unscheduled_deployment(name, pods,
         return {}
 
     task_vars = dict(
-        group_names=["masters"],
+        group_names=["oo_masters_to_config"],
         openshift=dict(
             common=dict(config_base=""),
         ),
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
index 1a1c190f6..59c703214 100644
--- a/roles/openshift_health_checker/test/logging_check_test.py
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -98,21 +98,19 @@ def test_oc_failure(problem, expect):
     assert expect in str(excinfo)
 
 
-groups_with_first_master = dict(masters=['this-host', 'other-host'])
-groups_with_second_master = dict(masters=['other-host', 'this-host'])
-groups_not_a_master = dict(masters=['other-host'])
+groups_with_first_master = dict(oo_first_master=['this-host'])
+groups_not_a_master = dict(oo_first_master=['other-host'], oo_masters=['other-host'])
 
 
 @pytest.mark.parametrize('groups, 
logging_deployed, is_active', [ (groups_with_first_master, True, True), (groups_with_first_master, False, False), (groups_not_a_master, True, False), - (groups_with_second_master, True, False), (groups_not_a_master, True, False), ]) def test_is_active(groups, logging_deployed, is_active): task_vars = dict( - ansible_ssh_host='this-host', + ansible_host='this-host', groups=groups, openshift_hosted_logging_deploy=logging_deployed, ) diff --git a/roles/openshift_health_checker/test/logging_index_time_test.py b/roles/openshift_health_checker/test/logging_index_time_test.py index 22566b295..c48ade9b8 100644 --- a/roles/openshift_health_checker/test/logging_index_time_test.py +++ b/roles/openshift_health_checker/test/logging_index_time_test.py @@ -102,7 +102,7 @@ def test_with_running_pods(): ), ], ids=lambda argval: argval[0]) def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout): - check = canned_loggingindextime(lambda *_: json.dumps(json_response)) + check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response)) check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout) @@ -131,7 +131,7 @@ def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout): ) ], ids=lambda argval: argval[0]) def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error): - check = canned_loggingindextime(lambda *_: json.dumps(json_response)) + check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response)) with pytest.raises(OpenShiftCheckException) as error: check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, SAMPLE_UUID, timeout) @@ -139,7 +139,7 @@ def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error): def test_curl_kibana_with_uuid(): - check = canned_loggingindextime(lambda *_: json.dumps({"statusCode": 404})) + check = canned_loggingindextime(lambda *args, **_: json.dumps({"statusCode": 404})) check.generate_uuid = lambda: SAMPLE_UUID assert SAMPLE_UUID == check.curl_kibana_with_uuid(plain_running_kibana_pod) @@ -161,7 +161,7 @@ def test_curl_kibana_with_uuid(): ), ], ids=lambda argval: argval[0]) def test_failed_curl_kibana_with_uuid(name, json_response, expect_error): - check = canned_loggingindextime(lambda *_: json.dumps(json_response)) + check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response)) check.generate_uuid = lambda: SAMPLE_UUID with pytest.raises(OpenShiftCheckException) as error: diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py index aee2f0416..5ec83dd79 100644 --- a/roles/openshift_health_checker/test/memory_availability_test.py +++ b/roles/openshift_health_checker/test/memory_availability_test.py @@ -4,11 +4,11 @@ from openshift_checks.memory_availability import MemoryAvailability @pytest.mark.parametrize('group_names,is_active', [ - (['masters'], True), - (['nodes'], True), - (['etcd'], True), - (['masters', 'nodes'], True), - (['masters', 'etcd'], True), + (['oo_masters_to_config'], True), + (['oo_nodes_to_config'], True), + (['oo_etcd_to_config'], True), + (['oo_masters_to_config', 'oo_nodes_to_config'], True), + (['oo_masters_to_config', 'oo_etcd_to_config'], True), ([], False), (['lb'], False), (['nfs'], False), @@ -22,32 +22,32 @@ def test_is_active(group_names, is_active): @pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb', [ ( - ['masters'], + ['oo_masters_to_config'], 0, 17200, ), ( - ['nodes'], + ['oo_nodes_to_config'], 0, 
8200, ), ( - ['nodes'], + ['oo_nodes_to_config'], 1, # configure lower threshold 2000, # too low for recommended but not for configured ), ( - ['nodes'], + ['oo_nodes_to_config'], 2, # configure threshold where adjustment pushes it over 1900, ), ( - ['etcd'], + ['oo_etcd_to_config'], 0, 8200, ), ( - ['masters', 'nodes'], + ['oo_masters_to_config', 'oo_nodes_to_config'], 0, 17000, ), @@ -66,43 +66,43 @@ def test_succeeds_with_recommended_memory(group_names, configured_min, ansible_m @pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb,extra_words', [ ( - ['masters'], + ['oo_masters_to_config'], 0, 0, ['0.0 GiB'], ), ( - ['nodes'], + ['oo_nodes_to_config'], 0, 100, ['0.1 GiB'], ), ( - ['nodes'], + ['oo_nodes_to_config'], 24, # configure higher threshold 20 * 1024, # enough to meet recommended but not configured ['20.0 GiB'], ), ( - ['nodes'], + ['oo_nodes_to_config'], 24, # configure higher threshold 22 * 1024, # not enough for adjustment to push over threshold ['22.0 GiB'], ), ( - ['etcd'], + ['oo_etcd_to_config'], 0, 6 * 1024, ['6.0 GiB'], ), ( - ['etcd', 'masters'], + ['oo_etcd_to_config', 'oo_masters_to_config'], 0, 9 * 1024, # enough memory for etcd, not enough for a master ['9.0 GiB'], ), ( - ['nodes', 'masters'], + ['oo_nodes_to_config', 'oo_masters_to_config'], 0, # enough memory for a node, not enough for a master 11 * 1024, diff --git a/roles/openshift_health_checker/test/openshift_check_test.py b/roles/openshift_health_checker/test/openshift_check_test.py index 789784c77..bc0c3b26c 100644 --- a/roles/openshift_health_checker/test/openshift_check_test.py +++ b/roles/openshift_health_checker/test/openshift_check_test.py @@ -106,13 +106,40 @@ def test_get_var_convert(task_vars, keys, convert, expected): assert dummy_check(task_vars).get_var(*keys, convert=convert) == expected -@pytest.mark.parametrize("keys, convert", [ - (("bar", "baz"), int), - (("bar.baz"), float), - (("foo"), "bogus"), - (("foo"), lambda a, b: 1), - (("foo"), lambda a: 1 / 0), +def convert_oscexc(_): + raise OpenShiftCheckException("known failure") + + +def convert_exc(_): + raise Exception("failure unknown") + + +@pytest.mark.parametrize("keys, convert, expect_text", [ + (("bar", "baz"), int, "Cannot convert"), + (("bar.baz",), float, "Cannot convert"), + (("foo",), "bogus", "TypeError"), + (("foo",), lambda a, b: 1, "TypeError"), + (("foo",), lambda a: 1 / 0, "ZeroDivisionError"), + (("foo",), convert_oscexc, "known failure"), + (("foo",), convert_exc, "failure unknown"), ]) -def test_get_var_convert_error(task_vars, keys, convert): - with pytest.raises(OpenShiftCheckException): +def test_get_var_convert_error(task_vars, keys, convert, expect_text): + with pytest.raises(OpenShiftCheckException) as excinfo: dummy_check(task_vars).get_var(*keys, convert=convert) + assert expect_text in str(excinfo.value) + + +def test_register(task_vars): + check = dummy_check(task_vars) + + check.register_failure(OpenShiftCheckException("spam")) + assert "spam" in str(check.failures[0]) + + with pytest.raises(OpenShiftCheckException) as excinfo: + check.register_file("spam") # no file contents specified + assert "not specified" in str(excinfo.value) + + # normally execute_module registers the result file; test disabling that + check._execute_module = lambda *args, **_: dict() + check.execute_module("eggs", module_args={}, register=False) + assert not check.files_to_save diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py index 
e1bf29d2a..5a82a43bf 100644 --- a/roles/openshift_health_checker/test/ovs_version_test.py +++ b/roles/openshift_health_checker/test/ovs_version_test.py @@ -50,7 +50,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version): openshift_release=openshift_release, openshift_image_tag='v' + openshift_release, ) - return_value = object() + return_value = {} # note: check.execute_module modifies return hash contents def execute_module(module_name=None, module_args=None, *_): assert module_name == 'rpm_version' @@ -67,14 +67,14 @@ def test_ovs_package_version(openshift_release, expected_ovs_version): @pytest.mark.parametrize('group_names,is_containerized,is_active', [ - (['masters'], False, True), + (['oo_masters_to_config'], False, True), # ensure check is skipped on containerized installs - (['masters'], True, False), - (['nodes'], False, True), - (['masters', 'nodes'], False, True), - (['masters', 'etcd'], False, True), + (['oo_masters_to_config'], True, False), + (['oo_nodes_to_config'], False, True), + (['oo_masters_to_config', 'oo_nodes_to_config'], False, True), + (['oo_masters_to_config', 'oo_etcd_to_config'], False, True), ([], False, False), - (['etcd'], False, False), + (['oo_etcd_to_config'], False, False), (['lb'], False, False), (['nfs'], False, False), ]) diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py index 1fe648b75..9815acb38 100644 --- a/roles/openshift_health_checker/test/package_availability_test.py +++ b/roles/openshift_health_checker/test/package_availability_test.py @@ -26,7 +26,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active): ( dict( openshift=dict(common=dict(service_type='origin')), - group_names=['masters'], + group_names=['oo_masters_to_config'], ), set(['origin-master']), set(['origin-node']), @@ -34,7 +34,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active): ( dict( openshift=dict(common=dict(service_type='atomic-openshift')), - group_names=['nodes'], + group_names=['oo_nodes_to_config'], ), set(['atomic-openshift-node']), set(['atomic-openshift-master']), @@ -42,21 +42,21 @@ def test_is_active(pkg_mgr, is_containerized, is_active): ( dict( openshift=dict(common=dict(service_type='atomic-openshift')), - group_names=['masters', 'nodes'], + group_names=['oo_masters_to_config', 'oo_nodes_to_config'], ), set(['atomic-openshift-master', 'atomic-openshift-node']), set(), ), ]) def test_package_availability(task_vars, must_have_packages, must_not_have_packages): - return_value = object() + return_value = {} def execute_module(module_name=None, module_args=None, *_): assert module_name == 'check_yum_update' assert 'packages' in module_args assert set(module_args['packages']).issuperset(must_have_packages) assert not set(module_args['packages']).intersection(must_not_have_packages) - return return_value + return {'foo': return_value} result = PackageAvailability(execute_module, task_vars).run() - assert result is return_value + assert result['foo'] is return_value diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py index 06489b0d7..85d3c9cab 100644 --- a/roles/openshift_health_checker/test/package_update_test.py +++ b/roles/openshift_health_checker/test/package_update_test.py @@ -2,14 +2,14 @@ from openshift_checks.package_update import PackageUpdate def test_package_update(): - return_value = object() + return_value = {} def execute_module(module_name=None, 
module_args=None, *_): assert module_name == 'check_yum_update' assert 'packages' in module_args # empty list of packages means "generic check if 'yum update' will work" assert module_args['packages'] == [] - return return_value + return {'foo': return_value} result = PackageUpdate(execute_module).run() - assert result is return_value + assert result['foo'] is return_value diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py index 6054d3f3e..3cf4ce033 100644 --- a/roles/openshift_health_checker/test/package_version_test.py +++ b/roles/openshift_health_checker/test/package_version_test.py @@ -5,6 +5,7 @@ from openshift_checks.package_version import PackageVersion, OpenShiftCheckExcep def task_vars_for(openshift_release, deployment_type): return dict( + ansible_pkg_mgr='yum', openshift=dict(common=dict(service_type=deployment_type)), openshift_release=openshift_release, openshift_image_tag='v' + openshift_release, @@ -27,6 +28,7 @@ def test_openshift_version_not_supported(): def test_invalid_openshift_release_format(): task_vars = dict( + ansible_pkg_mgr='yum', openshift=dict(common=dict(service_type='origin')), openshift_image_tag='v0', openshift_deployment_type='origin', @@ -50,7 +52,7 @@ def test_invalid_openshift_release_format(): ]) def test_package_version(openshift_release): - return_value = object() + return_value = {"foo": object()} def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *_): assert module_name == 'aos_version' @@ -64,7 +66,7 @@ def test_package_version(openshift_release): check = PackageVersion(execute_module, task_vars_for(openshift_release, 'origin')) result = check.run() - assert result is return_value + assert result == return_value @pytest.mark.parametrize('deployment_type,openshift_release,expected_docker_version', [ @@ -77,7 +79,7 @@ def test_package_version(openshift_release): ]) def test_docker_package_version(deployment_type, openshift_release, expected_docker_version): - return_value = object() + return_value = {"foo": object()} def execute_module(module_name=None, module_args=None, *_): assert module_name == 'aos_version' @@ -91,18 +93,18 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc check = PackageVersion(execute_module, task_vars_for(openshift_release, deployment_type)) result = check.run() - assert result is return_value + assert result == return_value @pytest.mark.parametrize('group_names,is_containerized,is_active', [ - (['masters'], False, True), + (['oo_masters_to_config'], False, True), # ensure check is skipped on containerized installs - (['masters'], True, False), - (['nodes'], False, True), - (['masters', 'nodes'], False, True), - (['masters', 'etcd'], False, True), + (['oo_masters_to_config'], True, False), + (['oo_nodes_to_config'], False, True), + (['oo_masters_to_config', 'oo_nodes_to_config'], False, True), + (['oo_masters_to_config', 'oo_etcd_to_config'], False, True), ([], False, False), - (['etcd'], False, False), + (['oo_etcd_to_config'], False, False), (['lb'], False, False), (['nfs'], False, False), ]) diff --git a/roles/openshift_health_checker/test/zz_failure_summary_test.py b/roles/openshift_health_checker/test/zz_failure_summary_test.py index 0fc258133..69f27653c 100644 --- a/roles/openshift_health_checker/test/zz_failure_summary_test.py +++ b/roles/openshift_health_checker/test/zz_failure_summary_test.py @@ -65,6 +65,21 @@ import pytest }, ], ), + # if a failure contains an unhashable
value, it will not be deduplicated + ( + [ + { + 'host': 'master1', + 'msg': {'unhashable': 'value'}, + }, + ], + [ + { + 'host': 'master1', + 'msg': {'unhashable': 'value'}, + }, + ], + ), ]) def test_deduplicate_failures(failures, deduplicated): assert deduplicate_failures(failures) == deduplicated diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md index 3e5d7f860..29ae58556 100644 --- a/roles/openshift_hosted/README.md +++ b/roles/openshift_hosted/README.md @@ -39,7 +39,6 @@ variables also control configuration behavior: Dependencies ------------ -* openshift_common * openshift_hosted_facts Example Playbook diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index f3747eead..589ad3f51 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -1,14 +1,33 @@ --- -r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" -r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" +########## +# Common # +########## +openshift_hosted_infra_selector: "region=infra" +r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}" +r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}" -r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" -r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" +openshift_default_projects: + default: + default_node_selector: '' + logging: + default_node_selector: '' + openshift-infra: + default_node_selector: '' -openshift_hosted_router_wait: True -openshift_hosted_registry_wait: True +# openshift_additional_projects shares the same format as openshift_default_projects +openshift_additional_projects: {} -registry_volume_claim: 'registry-claim' +openshift_config_base: "/etc/origin" +openshift_master_config_dir: "{{ openshift.common.config_base | default(openshift_config_base) }}/master" +openshift_cluster_domain: 'cluster.local' + +########## +# Router # +########## +r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" + +openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}" openshift_hosted_router_edits: - key: spec.strategy.rollingParams.intervalSeconds @@ -36,14 +55,48 @@ openshift_hosted_routers: certificate: "{{ openshift_hosted_router_certificate | default({}) }}" openshift_hosted_router_certificate: {} -openshift_hosted_registry_cert_expire_days: 730 openshift_hosted_router_create_certificate: True r_openshift_hosted_router_os_firewall_deny: [] r_openshift_hosted_router_os_firewall_allow: [] +############ +# Registry # +############ + +r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" + +openshift_hosted_registry_name: docker-registry +openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}" +openshift_hosted_registry_cert_expire_days: 730 + r_openshift_hosted_registry_os_firewall_deny: [] r_openshift_hosted_registry_os_firewall_allow: - service: Docker Registry Port port: 5000/tcp cond: "{{ r_openshift_hosted_use_calico }}" + +openshift_hosted_registry_serviceaccount: registry +openshift_hosted_registry_volumes: [] 
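Stepping back to the zz_failure_summary change above: the new test case pins down deduplication behavior for failures carrying unhashable values. A failure whose 'msg' is a nested dict cannot go into a set, so it must pass through deduplication untouched. A minimal Python sketch of that fallback, illustrative only and not necessarily the module's actual implementation:

def deduplicate_failures(failures):
    """Drop exact duplicates while preserving order; failures holding
    unhashable values (e.g. a dict-valued 'msg') are kept as-is."""
    seen = set()
    result = []
    for failure in failures:
        try:
            key = frozenset(failure.items())
        except TypeError:
            # an unhashable value somewhere in the dict: cannot dedup, keep it
            result.append(failure)
            continue
        if key not in seen:
            seen.add(key)
            result.append(failure)
    return result

# matches the new test case: the unhashable failure survives unchanged
assert deduplicate_failures([{'host': 'master1', 'msg': {'unhashable': 'value'}}]) == \
    [{'host': 'master1', 'msg': {'unhashable': 'value'}}]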
+openshift_hosted_registry_env_vars: {} + +# These edits are being specified only to prevent 'changed' on rerun +openshift_hosted_registry_edits: +- key: spec.strategy.rollingParams + value: + intervalSeconds: 1 + maxSurge: "25%" + maxUnavailable: "25%" + timeoutSeconds: 600 + updatePeriodSeconds: 1 + action: put + +openshift_hosted_registry_force: +- False + +openshift_push_via_dns: False + +# NOTE: setting openshift_docker_hosted_registry_insecure may affect other roles +openshift_hosted_docker_registry_insecure_default: "{{ openshift_docker_hosted_registry_insecure | default(False) }}" +openshift_hosted_docker_registry_insecure: "{{ openshift_hosted_docker_registry_insecure_default }}" diff --git a/roles/openshift_hosted/filter_plugins/filters.py b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py index 7f41529ac..7f41529ac 100644 --- a/roles/openshift_hosted/filter_plugins/filters.py +++ b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml index 28fd396d6..1d70ef7eb 100644 --- a/roles/openshift_hosted/meta/main.yml +++ b/roles/openshift_hosted/meta/main.yml @@ -12,7 +12,6 @@ galaxy_info: categories: - cloud dependencies: -- role: openshift_cli - role: openshift_hosted_facts - role: lib_openshift - role: lib_os_firewall diff --git a/roles/openshift_hosted/tasks/create_projects.yml b/roles/openshift_hosted/tasks/create_projects.yml new file mode 100644 index 000000000..1b25d0c64 --- /dev/null +++ b/roles/openshift_hosted/tasks/create_projects.yml @@ -0,0 +1,14 @@ +--- +- name: Create default projects + oc_project: + name: "{{ item.key }}" + node_selector: + - "{{ item.value.default_node_selector }}" + with_dict: "{{ openshift_default_projects }}" + +- name: Create additional projects + oc_project: + name: "{{ item.key }}" + node_selector: + - "{{ item.value.default_node_selector }}" + with_dict: "{{ openshift_additional_projects }}" diff --git a/roles/openshift_hosted/tasks/router/firewall.yml b/roles/openshift_hosted/tasks/firewall.yml index ff90f3372..1eb2c92c8 100644 --- a/roles/openshift_hosted/tasks/router/firewall.yml +++ b/roles/openshift_hosted/tasks/firewall.yml @@ -8,7 +8,7 @@ protocol: "{{ item.port.split('/')[1] }}" port: "{{ item.port.split('/')[0] }}" when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_router_os_firewall_allow }}" + with_items: "{{ l_openshift_hosted_fw_allow }}" - name: Remove iptables rules os_firewall_manage_iptables: @@ -17,9 +17,9 @@ protocol: "{{ item.port.split('/')[1] }}" port: "{{ item.port.split('/')[0] }}" when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_router_os_firewall_deny }}" + with_items: "{{ l_openshift_hosted_fw_deny }}" -- when: r_openshift_hosted_router_firewall_enabled | bool and r_openshift_hosted_router_use_firewalld | bool +- when: l_openshift_hosted_firewall_enabled | bool and l_openshift_hosted_use_firewalld | bool block: - name: Add firewalld allow rules firewalld: @@ -28,7 +28,7 @@ immediate: true state: enabled when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_router_os_firewall_allow }}" + with_items: "{{ l_openshift_hosted_fw_allow }}" - name: Remove firewalld allow rules firewalld: @@ -37,4 +37,4 @@ immediate: true state: disabled when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_router_os_firewall_deny }}" + with_items: "{{ l_openshift_hosted_fw_deny }}" diff --git a/roles/openshift_hosted/tasks/main.yml
b/roles/openshift_hosted/tasks/main.yml index 6efe2f63c..d306adf42 100644 --- a/roles/openshift_hosted/tasks/main.yml +++ b/roles/openshift_hosted/tasks/main.yml @@ -1,13 +1,9 @@ --- -- name: Create projects - oc_project: - name: "{{ item.key }}" - node_selector: - - "{{ item.value.default_node_selector }}" - with_dict: "{{ openshift_projects }}" - -- include: router/router.yml - when: openshift_hosted_manage_router | default(true) | bool - -- include: registry/registry.yml - when: openshift_hosted_manage_registry | default(true) | bool +# This role is intended to be used with include_role. +# include_role: +# name: openshift_hosted +# tasks_from: "{{ item }}" +# with_items: +# - create_projects.yml +# - router.yml +# - registry.yml diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry.yml index 3e424da12..f1aa9c5a8 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry.yml @@ -1,7 +1,11 @@ --- - name: setup firewall include: firewall.yml - static: yes + vars: + l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_registry_firewall_enabled }}" + l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_registry_use_firewalld }}" + l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_registry_os_firewall_allow }}" + l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_registry_os_firewall_deny }}" - when: openshift.hosted.registry.replicas | default(none) is none block: @@ -36,30 +40,22 @@ - name: set openshift_hosted facts set_fact: openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}" - openshift_hosted_registry_name: docker-registry - openshift_hosted_registry_serviceaccount: registry openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}" openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}')}}" - openshift_hosted_registry_volumes: [] - openshift_hosted_registry_env_vars: {} - openshift_hosted_registry_edits: - # These edits are being specified only to prevent 'changed' on rerun - - key: spec.strategy.rollingParams - value: - intervalSeconds: 1 - maxSurge: "25%" - maxUnavailable: "25%" - timeoutSeconds: 600 - updatePeriodSeconds: 1 - action: put - openshift_hosted_registry_force: - - False - name: Update registry environment variables when pushing via dns set_fact: openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}" - when: openshift_push_via_dns | default(false) | bool + when: openshift_push_via_dns | bool + +- name: Update registry proxy settings for dc/docker-registry + set_fact: + openshift_hosted_registry_env_vars: "{{ {'HTTPS_PROXY': (openshift.common.https_proxy | default('')), + 'HTTP_PROXY': (openshift.common.http_proxy | default('')), + 'NO_PROXY': (openshift.common.no_proxy | default(''))} + | combine(openshift_hosted_registry_env_vars) }}" + when: (openshift.common.https_proxy | default(False)) or (openshift.common.http_proxy | default('')) != '' - name: Create the registry service account oc_serviceaccount: @@ -129,36 +125,17 @@ edits: "{{ openshift_hosted_registry_edits }}" force: "{{ True|bool in openshift_hosted_registry_force }}" -- when: openshift_hosted_registry_wait - block: - - name: Ensure OpenShift registry 
correctly rolls out (best-effort today) - command: | - oc rollout status deploymentconfig {{ openshift_hosted_registry_name }} \ - --namespace {{ openshift_hosted_registry_namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig - async: 600 - poll: 15 - failed_when: false - - - name: Determine the latest version of the OpenShift registry deployment - command: | - {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \ - --namespace {{ openshift_hosted_registry_namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ - -o jsonpath='{ .status.latestVersion }' - register: openshift_hosted_registry_latest_version - - - name: Sanity-check that the OpenShift registry rolled out correctly - command: | - {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \ - --namespace {{ openshift_hosted_registry_namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ - -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' - register: openshift_hosted_registry_rc_phase - until: "'Running' not in openshift_hosted_registry_rc_phase.stdout" - delay: 15 - retries: 40 - failed_when: "'Failed' in openshift_hosted_registry_rc_phase.stdout" +- name: setup registry list + set_fact: + r_openshift_hosted_registry_list: + - name: "{{ openshift_hosted_registry_name }}" + namespace: "{{ openshift_hosted_registry_namespace }}" + +- name: Wait for pod (Registry) + include: wait_for_pod.yml + vars: + l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}" + l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}" - include: storage/glusterfs.yml when: diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router.yml index e57ed733e..2aceef9e4 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -1,7 +1,11 @@ --- - name: setup firewall include: firewall.yml - static: yes + vars: + l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_router_firewall_enabled }}" + l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_router_use_firewalld }}" + l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_router_os_firewall_allow }}" + l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_router_os_firewall_deny }}" - name: Retrieve list of openshift nodes matching router selector oc_obj: @@ -18,6 +22,15 @@ openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}" openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}" +- name: Get the certificate contents for router + copy: + backup: True + dest: "/etc/origin/master/{{ item | basename }}" + src: "{{ item }}" + with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') | + oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" + when: ( not openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {} + # This is for when we desire a cluster signed cert # The certificate is generated and placed in master_config_dir/ - block: @@ -39,18 +52,9 @@ certfile: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}" keyfile: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}" cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}" - - # End Block - when: ( 
openshift_hosted_router_create_certificate | bool ) and openshift_hosted_router_certificate == {} - -- name: Get the certificate contents for router - copy: - backup: True - dest: "/etc/origin/master/{{ item | basename }}" - src: "{{ item }}" - with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') | - oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" - when: not openshift_hosted_router_create_certificate | bool + when: + - openshift_hosted_router_create_certificate | bool + - openshift_hosted_router_certificate == {} - name: Create the router service account(s) oc_serviceaccount: @@ -82,7 +86,7 @@ replicas: "{{ item.replicas }}" namespace: "{{ item.namespace | default('default') }}" # This option is not yet implemented - # force_subdomain: "{{ openshift.hosted.router.force_subdomain | default(none) }}" + # force_subdomain: "{{ openshift_hosted_router_force_subdomain | default(none) }}" service_account: "{{ item.serviceaccount | default('router') }}" selector: "{{ item.selector | default(none) }}" images: "{{ item.images | default(omit) }}" @@ -94,38 +98,8 @@ stats_port: "{{ item.stats_port }}" with_items: "{{ openshift_hosted_routers }}" -- when: openshift_hosted_router_wait - block: - - name: Ensure OpenShift router correctly rolls out (best-effort today) - command: | - {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \ - --namespace {{ item.namespace | default('default') }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig - async: 600 - poll: 15 - with_items: "{{ openshift_hosted_routers }}" - failed_when: false - - - name: Determine the latest version of the OpenShift router deployment - command: | - {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \ - --namespace {{ item.namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ - -o jsonpath='{ .status.latestVersion }' - register: openshift_hosted_routers_latest_version - with_items: "{{ openshift_hosted_routers }}" - - - name: Poll for OpenShift router deployment success - command: | - {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ - --namespace {{ item.0.namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ - -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' - register: openshift_hosted_router_rc_phase - until: "'Running' not in openshift_hosted_router_rc_phase.stdout" - delay: 15 - retries: 40 - failed_when: "'Failed' in openshift_hosted_router_rc_phase.stdout" - with_together: - - "{{ openshift_hosted_routers }}" - - "{{ openshift_hosted_routers_latest_version.results }}" +- name: Wait for pod (Routers) + include: wait_for_pod.yml + vars: + l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}" + l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}" diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/secure.yml index a8a6f6fc8..0da8ac8a7 100644 --- a/roles/openshift_hosted/tasks/registry/secure.yml +++ b/roles/openshift_hosted/tasks/secure.yml @@ -1,7 +1,7 @@ --- - name: Configure facts for docker-registry set_fact: - openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routecertificates, {}) }}" + openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | 
ternary(openshift_hosted_registry_routecertificates, {}) }}" openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}" openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}" @@ -38,11 +38,11 @@ - "{{ docker_registry_service.results.clusterip }}" - "{{ docker_registry_route.results[0].spec.host }}" - "{{ openshift_hosted_registry_name }}.default.svc" - - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift.common.dns_domain }}" + - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift_cluster_domain }}" - "{{ openshift_hosted_registry_routehost }}" cert: "{{ docker_registry_cert_path }}" key: "{{ docker_registry_key_path }}" - expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool else omit }}" + expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift_deployment_type) | bool else omit }}" register: registry_self_cert when: docker_registry_self_signed diff --git a/roles/openshift_hosted/tasks/registry/secure/passthrough.yml b/roles/openshift_hosted/tasks/secure/passthrough.yml index 5b44fda10..5b44fda10 100644 --- a/roles/openshift_hosted/tasks/registry/secure/passthrough.yml +++ b/roles/openshift_hosted/tasks/secure/passthrough.yml diff --git a/roles/openshift_hosted/tasks/registry/secure/reencrypt.yml b/roles/openshift_hosted/tasks/secure/reencrypt.yml index 48e5b0fba..48e5b0fba 100644 --- a/roles/openshift_hosted/tasks/registry/secure/reencrypt.yml +++ b/roles/openshift_hosted/tasks/secure/reencrypt.yml diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml index c2954fde1..c2954fde1 100644 --- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/storage/object_storage.yml index 8553a8098..8553a8098 100644 --- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml +++ b/roles/openshift_hosted/tasks/storage/object_storage.yml diff --git a/roles/openshift_hosted/tasks/registry/storage/registry_config.j2 b/roles/openshift_hosted/tasks/storage/registry_config.j2 index f3e82ad4f..f3e82ad4f 120000 --- a/roles/openshift_hosted/tasks/registry/storage/registry_config.j2 +++ b/roles/openshift_hosted/tasks/storage/registry_config.j2 diff --git a/roles/openshift_hosted/tasks/registry/storage/s3.yml b/roles/openshift_hosted/tasks/storage/s3.yml index 318969885..8e905d905 100644 --- a/roles/openshift_hosted/tasks/registry/storage/s3.yml +++ b/roles/openshift_hosted/tasks/storage/s3.yml @@ -3,7 +3,7 @@ assert: that: - openshift.hosted.registry.storage.s3.bucket | default(none) is not none - - openshift.hosted.registry.storage.s3.region | default(none) is not none + - openshift.hosted.registry.storage.s3.bucket | default(none) is not none msg: | When using S3 storage, the following variables are required: openshift_hosted_registry_storage_s3_bucket diff --git a/roles/openshift_hosted/tasks/wait_for_pod.yml b/roles/openshift_hosted/tasks/wait_for_pod.yml new file mode 100644 index 000000000..056c79334 --- /dev/null +++ b/roles/openshift_hosted/tasks/wait_for_pod.yml @@ 
-0,0 +1,36 @@ +--- +- when: l_openshift_hosted_wait_for_pod | default(False) | bool + block: + - name: Ensure OpenShift pod correctly rolls out (best-effort today) + command: | + {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \ + --namespace {{ item.namespace | default('default') }} \ + --config {{ openshift_master_config_dir }}/admin.kubeconfig + async: 600 + poll: 15 + with_items: "{{ l_openshift_hosted_wfp_items }}" + failed_when: false + + - name: Determine the latest version of the OpenShift pod deployment + command: | + {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \ + --namespace {{ item.namespace }} \ + --config {{ openshift_master_config_dir }}/admin.kubeconfig \ + -o jsonpath='{ .status.latestVersion }' + register: l_openshift_hosted_wfp_latest_version + with_items: "{{ l_openshift_hosted_wfp_items }}" + + - name: Poll for OpenShift pod deployment success + command: | + {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ + --namespace {{ item.0.namespace }} \ + --config {{ openshift_master_config_dir }}/admin.kubeconfig \ + -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' + register: openshift_hosted_wfp_rc_phase + until: "'Running' not in openshift_hosted_wfp_rc_phase.stdout" + delay: 15 + retries: 40 + failed_when: "'Failed' in openshift_hosted_wfp_rc_phase.stdout" + with_together: + - "{{ l_openshift_hosted_wfp_items }}" + - "{{ l_openshift_hosted_wfp_latest_version.results }}" diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2 index 61da452de..222b63b8a 100644 --- a/roles/openshift_hosted/templates/registry_config.j2 +++ b/roles/openshift_hosted/templates/registry_config.j2 @@ -53,7 +53,7 @@ storage: {% if openshift_hosted_registry_storage_swift_domain is defined %} domain: {{ openshift_hosted_registry_storage_swift_domain }} {% endif -%} -{% if openshift_hosted_registry_storage_swift_domainid %} +{% if openshift_hosted_registry_storage_swift_domainid is defined %} domainid: {{ openshift_hosted_registry_storage_swift_domainid }} {% endif -%} {% elif openshift_hosted_registry_storage_provider | default('') == 'gcs' %} @@ -63,17 +63,15 @@ storage: keyfile: /etc/registry/gcs.json {% endif -%} {% if openshift_hosted_registry_storage_gcs_rootdirectory is defined %} - rootdirectory: {{ openshift_hosted_registry_storage_gcs_rootdirectory }} + rootdirectory: {{ openshift_hosted_registry_storage_gcs_rootdirectory | default('/registry') }} {% endif -%} {% endif -%} auth: openshift: realm: openshift middleware: -{% if openshift.common.version_gte_3_3_or_1_3 | bool %} registry: - name: openshift -{% endif %} repository: - name: openshift options: @@ -87,7 +85,7 @@ middleware: baseurl: {{ openshift_hosted_registry_storage_s3_cloudfront_baseurl }} privatekey: /etc/origin/cloudfront.pem keypairid: {{ openshift_hosted_registry_storage_s3_cloudfront_keypairid }} -{% elif openshift.common.version_gte_3_3_or_1_3 | bool %} +{% else %} storage: - name: openshift {% endif -%} diff --git a/roles/openshift_hosted/vars/main.yml b/roles/openshift_hosted/vars/main.yml index 0821d0e7e..0e756d9e1 100644 --- a/roles/openshift_hosted/vars/main.yml +++ b/roles/openshift_hosted/vars/main.yml @@ -1,13 +1,2 @@ --- -openshift_master_config_dir: "{{ openshift.common.config_base }}/master" registry_config_secret_name: registry-config - -openshift_default_projects: - default: - default_node_selector: '' - 
logging: - default_node_selector: '' - openshift-infra: - default_node_selector: '' - -openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts(openshift_default_projects) }}" diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml index 631bf3e2a..8fc70cecb 100644 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ b/roles/openshift_hosted_facts/tasks/main.yml @@ -1,16 +1,19 @@ --- +# openshift_*_selector variables have been deprecated in favor of +# openshift_hosted_*_selector variables. - set_fact: - openshift_hosted_router_selector: "{{ openshift_hosted_infra_selector }}" + openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}" when: openshift_hosted_router_selector is not defined and openshift_hosted_infra_selector is defined - set_fact: - openshift_hosted_registry_selector: "{{ openshift_hosted_infra_selector }}" + openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}" when: openshift_hosted_registry_selector is not defined and openshift_hosted_infra_selector is defined - name: Set hosted facts openshift_facts: - role: hosted + role: "{{ item }}" openshift_env: "{{ hostvars | oo_merge_hostvars(vars, inventory_hostname) | oo_openshift_env }}" openshift_env_structures: - 'openshift.hosted.router.*' + with_items: [hosted, logging, loggingops, metrics, prometheus] diff --git a/roles/openshift_hosted_logging/README.md b/roles/openshift_hosted_logging/README.md deleted file mode 100644 index 680303853..000000000 --- a/roles/openshift_hosted_logging/README.md +++ /dev/null @@ -1,40 +0,0 @@ -###Required vars: - -- openshift_hosted_logging_hostname: kibana.example.com -- openshift_hosted_logging_elasticsearch_cluster_size: 1 -- openshift_hosted_logging_master_public_url: https://localhost:8443 - -###Optional vars: -- openshift_hosted_logging_image_prefix: logging image prefix. No default. Use this to specify an alternate image repository e.g. my.private.repo:5000/private_openshift/ -- target_registry: DEPRECATED - use openshift_hosted_logging_image_prefix instead -- openshift_hosted_logging_image_version: logging image version suffix. Defaults to the current version of the deployed software. -- openshift_hosted_logging_secret_vars: (defaults to nothing=/dev/null) kibana.crt=/etc/origin/master/ca.crt kibana.key=/etc/origin/master/ca.key ca.crt=/etc/origin/master/ca.crt ca.key=/etc/origin/master/ca.key -- openshift_hosted_logging_fluentd_replicas: (defaults to 1) 3 -- openshift_hosted_logging_cleanup: (defaults to no) Set this to 'yes' in order to cleanup logging components instead of deploying. -- openshift_hosted_logging_elasticsearch_instance_ram: Amount of RAM to reserve per ElasticSearch instance (e.g. 1024M, 2G). Defaults to 8GiB; must be at least 512M (Ref.: [ElasticSearch documentation](https://www.elastic.co/guide/en/elasticsearch/guide/current/hardware.html\#\_memory). -- openshift_hosted_logging_elasticsearch_pvc_size: Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead. -- openshift_hosted_logging_elasticsearch_pvc_prefix: Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size `openshift_hosted_logging_elasticsearch_pvc_size`. 
-- openshift_hosted_logging_elasticsearch_pvc_dynamic: Set to `true` to have created PersistentVolumeClaims annotated such that their backing storage can be dynamically provisioned (if that is available for your cluster). -- openshift_hosted_logging_elasticsearch_storage_group: Number of a supplemental group ID for access to Elasticsearch storage volumes; backing volumes should allow access by this group ID (defaults to 65534). -- openshift_hosted_logging_elasticsearch_nodeselector: Specify the nodeSelector that Elasticsearch should be use (label=value) -- openshift_hosted_logging_fluentd_nodeselector: The nodeSelector used to determine which nodes to apply the `openshift_hosted_logging_fluentd_nodeselector_label` label to. -- openshift_hosted_logging_fluentd_nodeselector_label: The label applied to nodes included in the Fluentd DaemonSet. Defaults to "logging-infra-fluentd=true". -- openshift_hosted_logging_kibana_nodeselector: Specify the nodeSelector that Kibana should be use (label=value) -- openshift_hosted_logging_curator_nodeselector: Specify the nodeSelector that Curator should be use (label=value) -- openshift_hosted_logging_enable_ops_cluster: If "true", configure a second ES cluster and Kibana for ops logs. -- openshift_hosted_logging_use_journal: *DEPRECATED - DO NOT USE* -- openshift_hosted_logging_journal_source: By default, if this param is unset or empty, logging will use `/var/log/journal` if it exists, or `/run/log/journal` if not. You can use this param to force logging to use a different location. -- openshift_hosted_logging_journal_read_from_head: Set to `true` to have fluentd read from the beginning of the journal, to get historical log data. Default is `false`. *WARNING* Using `true` may take several minutes or even hours, depending on the size of the journal, until any new records show up in Elasticsearch, and will cause fluentd to consume a lot of CPU and RAM resources. - -When `openshift_hosted_logging_enable_ops_cluster` is `True`, there are some -additional vars. These work the same as above for their non-ops counterparts, -but apply to the OPS cluster instance: -- openshift_hosted_logging_ops_hostname: kibana-ops.example.com -- openshift_hosted_logging_elasticsearch_ops_cluster_size -- openshift_hosted_logging_elasticsearch_ops_instance_ram -- openshift_hosted_logging_elasticsearch_ops_pvc_size -- openshift_hosted_logging_elasticsearch_ops_pvc_prefix -- openshift_hosted_logging_elasticsearch_ops_pvc_dynamic -- openshift_hosted_logging_elasticsearch_ops_nodeselector -- openshift_hosted_logging_kibana_ops_nodeselector -- openshift_hosted_logging_curator_ops_nodeselector diff --git a/roles/openshift_hosted_logging/defaults/main.yml b/roles/openshift_hosted_logging/defaults/main.yml deleted file mode 100644 index a01f24df8..000000000 --- a/roles/openshift_hosted_logging/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" diff --git a/roles/openshift_hosted_logging/handlers/main.yml b/roles/openshift_hosted_logging/handlers/main.yml deleted file mode 100644 index d7e83fe9a..000000000 --- a/roles/openshift_hosted_logging/handlers/main.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Verify API Server - # Using curl here since the uri module requires python-httplib2 and - # wait_for port doesn't provide health information. 
- command: > - curl --silent --tlsv1.2 - {% if openshift.common.version_gte_3_2_or_1_2 | bool %} - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {% else %} - --cacert {{ openshift.common.config_base }}/master/ca.crt - {% endif %} - {{ openshift.master.api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml deleted file mode 100644 index 044c8043c..000000000 --- a/roles/openshift_hosted_logging/meta/main.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -dependencies: - - { role: openshift_common } - - { role: openshift_master_facts } diff --git a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml deleted file mode 100644 index 70b0d67a4..000000000 --- a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml +++ /dev/null @@ -1,59 +0,0 @@ ---- -- name: Create temp directory for kubeconfig - command: mktemp -d /tmp/openshift-ansible-XXXXXX - register: mktemp - changed_when: False - -- name: Copy the admin client config(s) - command: > - cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig - changed_when: False - -- name: "Checking for logging project" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging" - register: logging_project - failed_when: "'FAILED' in logging_project.stderr" - -- name: "Changing projects" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging" - - -- name: "Cleanup any previous logging infrastructure" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}" - with_items: - - kibana - - fluentd - - elasticsearch - ignore_errors: yes - -- name: "Cleanup existing support infrastructure" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support" - ignore_errors: yes - -- name: "Cleanup existing secrets" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy" - ignore_errors: yes - register: clean_result - failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr - -- name: "Cleanup existing logging deployers" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all" - - -- name: "Cleanup logging project" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging" - - -- name: "Remove deployer template" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift" - register: delete_output - failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr - - -- name: Delete temp directory - file: - name: "{{ mktemp.stdout }}" - state: absent - changed_when: False - -- debug: msg="Success!" 
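Both the deleted logging tasks in this section and the new wait_for_pod.yml earlier in the diff lean on the same Ansible polling idiom: re-run a command with until/retries/delay until its output satisfies a condition. A rough Python equivalent of that idiom, with illustrative names only (the oc invocation in the usage comment is an assumption, not a literal translation of any one task):

import subprocess
import time

def run_until(cmd, condition, retries=40, delay=15):
    """Re-run `cmd` until `condition(stdout)` holds, sleeping `delay`
    seconds between attempts -- roughly Ansible's until/retries/delay."""
    for _ in range(retries):
        out = subprocess.run(cmd, capture_output=True, text=True).stdout
        if condition(out):
            return out
        time.sleep(delay)
    raise RuntimeError("retries exhausted for: " + " ".join(cmd))

# e.g. the deleted "Wait for component pods to be running" task is, in effect:
# run_until(["oc", "get", "pods", "-l", "component=es"],
#           lambda out: "Running" in out, retries=20, delay=15)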
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml deleted file mode 100644 index 78b624109..000000000 --- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml +++ /dev/null @@ -1,177 +0,0 @@ ---- -- debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead" - when: target_registry is defined and target_registry - -- fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size" - when: "openshift_hosted_logging_hostname is not defined or - openshift_hosted_logging_elasticsearch_cluster_size is not defined or - openshift_hosted_logging_master_public_url is not defined" - -- name: Create temp directory for kubeconfig - command: mktemp -d /tmp/openshift-ansible-XXXXXX - register: mktemp - changed_when: False - -- name: Copy the admin client config(s) - command: > - cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig - changed_when: False - -- name: "Check for logging project already exists" - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}' - register: logging_project_result - ignore_errors: True - -- name: "Create logging project" - command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging - when: logging_project_result.stdout == "" - -- name: "Changing projects" - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging - -- name: "Creating logging deployer secret" - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }} - register: secret_output - failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr - -- name: "Create templates for logging accounts and the deployer" - command: > - {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig - -f {{ hosted_base }}/logging-deployer.yaml - --config={{ mktemp.stdout }}/admin.kubeconfig - -n logging - register: logging_import_template - failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0" - changed_when: "'created' in logging_import_template.stdout" - -- name: "Process the logging accounts template" - shell: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig - process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f - - register: process_deployer_accounts - failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr - -- name: "Set permissions for logging-deployer service account" - command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig - policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer - register: permiss_output - failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr - -- name: "Set permissions for fluentd" - command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig - policy 
add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd - register: fluentd_output - failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr - -- name: "Set additional permissions for fluentd" - command: > - {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig - add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd - register: fluentd2_output - failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr - -- name: "Add rolebinding-reader to aggregated-logging-elasticsearch" - command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig - policy add-cluster-role-to-user rolebinding-reader \ - system:serviceaccount:logging:aggregated-logging-elasticsearch - register: rolebinding_reader_output - failed_when: rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr - -- name: "Create ConfigMap for deployer parameters" - command: > - {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }} - register: deployer_configmap_output - failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr - -- name: "Process the deployer template" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}" - register: process_deployer - failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr - -- name: "Wait for image pull and deployer pod" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed" - register: result - until: result.rc == 0 - retries: 20 - delay: 15 - -- name: "Process imagestream template" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}" - when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry - register: process_is - failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr - -- name: "Set insecured registry" - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite" - when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry - -- name: "Wait for imagestreams to become available" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd" - when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry - register: result - until: result.rc == 0 - failed_when: result.rc == 1 and 'not found' not in result.stderr - retries: 20 - delay: 5 - -- name: "Wait for component pods to be running" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running" - with_items: - - es - - kibana - - curator - register: result - until: result.rc == 0 - failed_when: result.rc == 1 or 'Error' in result.stderr - retries: 20 - delay: 15 - -- name: "Wait for ops component pods to be running" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running" 
- with_items: - - es-ops - - kibana-ops - - curator-ops - when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster - register: result - until: result.rc == 0 - failed_when: result.rc == 1 or 'Error' in result.stderr - retries: 20 - delay: 15 - -- name: "Wait for fluentd DaemonSet to exist" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd" - register: result - until: result.rc == 0 - failed_when: result.rc == 1 or 'Error' in result.stderr - retries: 20 - delay: 5 - -- name: "Deploy fluentd by labeling the node" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}" - -- name: "Wait for fluentd to be running" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running" - register: result - until: result.rc == 0 - failed_when: result.rc == 1 or 'Error' in result.stderr - retries: 20 - delay: 15 - -- include: update_master_config.yaml - -- debug: - msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually" - -- name: Delete temp directory - file: - name: "{{ mktemp.stdout }}" - state: absent - changed_when: False diff --git a/roles/openshift_hosted_logging/tasks/main.yaml b/roles/openshift_hosted_logging/tasks/main.yaml deleted file mode 100644 index 42568597a..000000000 --- a/roles/openshift_hosted_logging/tasks/main.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Cleanup logging deployment - include: "{{ role_path }}/tasks/cleanup_logging.yaml" - when: openshift_hosted_logging_cleanup | default(false) | bool - -- name: Deploy logging - include: "{{ role_path }}/tasks/deploy_logging.yaml" - when: not openshift_hosted_logging_cleanup | default(false) | bool diff --git a/roles/openshift_hosted_logging/tasks/update_master_config.yaml b/roles/openshift_hosted_logging/tasks/update_master_config.yaml deleted file mode 100644 index 1122e059c..000000000 --- a/roles/openshift_hosted_logging/tasks/update_master_config.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- name: Adding Kibana route information to loggingPublicURL - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: assetConfig.loggingPublicURL - yaml_value: "https://{{ logging_hostname }}" - notify: restart master diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml deleted file mode 100644 index 4b350b244..000000000 --- a/roles/openshift_hosted_logging/vars/main.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -tr_or_ohlip: "{{ openshift_hosted_logging_deployer_prefix | default(target_registry) | default(None) }}" -ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip != '' else '' }}" -iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_deployer_version | quote if openshift_hosted_logging_deployer_version | default(none) is not none else '' }}" -oc_new_app_values: "{{ ip_kv }} {{ iv_kv }}" -openshift_master_config_dir: "{{ openshift.common.config_base }}/master" -kh_cmap_param: "{{ '--from-literal kibana-hostname=' ~ 
openshift_hosted_logging_hostname | quote if openshift_hosted_logging_hostname | default(none) is not none else '' }}" -kh_ops_cmap_param: "{{ '--from-literal kibana-ops-hostname=' ~ openshift_hosted_logging_ops_hostname | quote if openshift_hosted_logging_ops_hostname | default(none) is not none else '' }}" -pmu_cmap_param: "{{ '--from-literal public-master-url=' ~ openshift_hosted_logging_master_public_url | quote if openshift_hosted_logging_master_public_url | default(none) is not none else '' }}" -es_cs_cmap_param: "{{ '--from-literal es-cluster-size=' ~ openshift_hosted_logging_elasticsearch_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_cluster_size | default(none) is not none else '' }}" -es_ops_cs_cmap_param: "{{ '--from-literal es-ops-cluster-size=' ~ openshift_hosted_logging_elasticsearch_ops_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_ops_cluster_size | default(none) is not none else '' }}" -es_ir_cmap_param: "{{ '--from-literal es-instance-ram=' ~ openshift_hosted_logging_elasticsearch_instance_ram | quote if openshift_hosted_logging_elasticsearch_instance_ram | default(none) is not none else '' }}" -es_ops_ir_cmap_param: "{{ '--from-literal es-ops-instance-ram=' ~ openshift_hosted_logging_elasticsearch_ops_instance_ram | quote if openshift_hosted_logging_elasticsearch_ops_instance_ram | default(none) is not none else '' }}" -es_pvcs_cmap_param: "{{ '--from-literal es-pvc-size=' ~ openshift_hosted_logging_elasticsearch_pvc_size | quote if openshift_hosted_logging_elasticsearch_pvc_size | default(none) is not none else '' }}" -es_ops_pvcs_cmap_param: "{{ '--from-literal es-ops-pvc-size=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_size | quote if openshift_hosted_logging_elasticsearch_ops_pvc_size | default(none) is not none else '' }}" -es_pvcp_cmap_param: "{{ '--from-literal es-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_pvc_prefix | default(none) is not none else '' }}" -es_ops_pvcp_cmap_param: "{{ '--from-literal es-ops-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default(none) is not none else '' }}" -es_pvcd_cmap_param: "{{ '--from-literal es-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_pvc_dynamic | default(none) is not none else '' }}" -es_ops_pvcd_cmap_param: "{{ '--from-literal es-ops-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(none) is not none else '' }}" -es_sg_cmap_param: "{{ '--from-literal storage-group=' ~ openshift_hosted_logging_elasticsearch_storage_group | string | quote if openshift_hosted_logging_elasticsearch_storage_group | default(none) is not none else '' }}" -es_ns_cmap_param: "{{ '--from-literal es-nodeselector=' ~ openshift_hosted_logging_elasticsearch_nodeselector | quote if openshift_hosted_logging_elasticsearch_nodeselector | default(none) is not none else '' }}" -es_ops_ns_cmap_param: "{{ '--from-literal es-ops-nodeselector=' ~ openshift_hosted_logging_elasticsearch_ops_nodeselector | quote if openshift_hosted_logging_elasticsearch_ops_nodeselector | default(none) is not none else '' }}" -fd_ns_cmap_param: "{{ '--from-literal fluentd-nodeselector=' ~ openshift_hosted_logging_fluentd_nodeselector_label | quote if 
openshift_hosted_logging_fluentd_nodeselector_label | default(none) is not none else 'logging-infra-fluentd=true' }}" -kb_ns_cmap_param: "{{ '--from-literal kibana-nodeselector=' ~ openshift_hosted_logging_kibana_nodeselector | quote if openshift_hosted_logging_kibana_nodeselector | default(none) is not none else '' }}" -kb_ops_ns_cmap_param: "{{ '--from-literal kibana-ops-nodeselector=' ~ openshift_hosted_logging_kibana_ops_nodeselector | quote if openshift_hosted_logging_kibana_ops_nodeselector | default(none) is not none else '' }}" -cr_ns_cmap_param: "{{ '--from-literal curator-nodeselector=' ~ openshift_hosted_logging_curator_nodeselector | quote if openshift_hosted_logging_curator_nodeselector | default(none) is not none else '' }}" -cr_ops_ns_cmap_param: "{{ '--from-literal curator-ops-nodeselector=' ~ openshift_hosted_logging_curator_ops_nodeselector | quote if openshift_hosted_logging_curator_ops_nodeselector | default(none) is not none else '' }}" -ops_cmap_param: "{{ '--from-literal enable-ops-cluster=' ~ openshift_hosted_logging_enable_ops_cluster | string | lower | quote if openshift_hosted_logging_enable_ops_cluster | default(none) is not none else '' }}" -journal_source_cmap_param: "{{ '--from-literal journal-source=' ~ openshift_hosted_logging_journal_source | quote if openshift_hosted_logging_journal_source | default(none) is not none else '' }}" -journal_read_from_head_cmap_param: "{{ '--from-literal journal-read-from-head=' ~ openshift_hosted_logging_journal_read_from_head | string | lower | quote if openshift_hosted_logging_journal_read_from_head | default(none) is not none else '' }}" -ips_cmap_param: "{{ '--from-literal image-pull-secret=' ~ openshift_hosted_logging_image_pull_secret | quote if openshift_hosted_logging_image_pull_secret | default(none) is not none else '' }}" -deployer_cmap_params: "{{ kh_cmap_param }} {{ kh_ops_cmap_param }} {{ pmu_cmap_param }} {{ es_cs_cmap_param }} {{ es_ir_cmap_param }} {{ es_pvcs_cmap_param }} {{ es_pvcp_cmap_param }} {{ es_pvcd_cmap_param }} {{ es_ops_cs_cmap_param }} {{ es_ops_ir_cmap_param }} {{ es_ops_pvcs_cmap_param }} {{ es_ops_pvcp_cmap_param }} {{ es_ops_pvcd_cmap_param }} {{ es_sg_cmap_param }} {{ es_ns_cmap_param }} {{ es_ops_ns_cmap_param }} {{ fd_ns_cmap_param }} {{ kb_ns_cmap_param }} {{ kb_ops_ns_cmap_param }} {{ cr_ns_cmap_param }} {{ cr_ops_ns_cmap_param }} {{ ops_cmap_param }} {{ journal_source_cmap_param }} {{ journal_read_from_head_cmap_param }} {{ ips_cmap_param }}" diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml index 11478263c..72754df2e 100644 --- a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: 
${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml index 0e3d006a7..298f8039e 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: ${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml index 28feac4e6..dace26793 100644 --- a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: ${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml index 8bf98ba41..f821efd6b 100644 --- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: ${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git 
a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml index bbaf76c17..019d836fe 100644 --- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: ${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml index 9c12865bf..4027f524b 100644 --- a/roles/openshift_hosted_templates/meta/main.yml +++ b/roles/openshift_hosted_templates/meta/main.yml @@ -11,5 +11,4 @@ galaxy_info: - 7 categories: - cloud -dependencies: -- role: openshift_common +dependencies: [] diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml index 41a2b12a2..239b16427 100644 --- a/roles/openshift_loadbalancer/defaults/main.yml +++ b/roles/openshift_loadbalancer/defaults/main.yml @@ -24,4 +24,10 @@ r_openshift_loadbalancer_os_firewall_allow: port: "{{ openshift_master_api_port | default(8443) }}/tcp" - service: nuage mon port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp" - cond: "{{ openshift_use_nuage | default(false) | bool }}" + cond: "{{ r_openshift_lb_use_nuage | bool }}" + +# NOTE +# r_openshift_lb_use_nuage_default may be defined external to this role. +# openshift_use_nuage, if defined, may affect other roles or play behavior. +r_openshift_lb_use_nuage_default: "{{ openshift_use_nuage | default(False) }}" +r_openshift_lb_use_nuage: "{{ r_openshift_lb_use_nuage_default }}" diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 70aef02cd..6c5bb8693 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -12,15 +12,14 @@ generation for Elasticsearch (it uses JKS) as well as openssl to sign certificat As part of the installation, it is recommended that you add the Fluentd node selector label to the list of persisted [node labels](https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-node-host-labels). -###Required vars: +### Required vars: - `openshift_logging_install_logging`: When `True` the `openshift_logging` role will install Aggregated Logging. -- `openshift_logging_upgrade_logging`: When `True` the `openshift_logging` role will upgrade Aggregated Logging. -When both `openshift_logging_install_logging` and `openshift_logging_upgrade_logging` are `False` the `openshift_logging` role will uninstall Aggregated Logging. - -###Optional vars: +When `openshift_logging_install_logging` is set to `False` the `openshift_logging` role will uninstall Aggregated Logging. 
+### Optional vars: +- `openshift_logging_purge_logging`: When `openshift_logging_install_logging` is set to 'False' to trigger uninstallation and `openshift_logging_purge_logging` is set to 'True', it will completely and irreversibly remove all logging persistent data, including PVCs. Defaults to 'False'. - `openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'. - `openshift_logging_curator_image_prefix`: Setting the image prefix for Curator image. Defaults to `openshift_logging_image_prefix`. - `openshift_logging_elasticsearch_image_prefix`: Setting the image prefix for Elasticsearch image. Defaults to `openshift_logging_image_prefix`. @@ -45,25 +44,24 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log - `openshift_logging_curator_run_timezone`: The timezone that Curator uses for figuring out its run time. Defaults to 'UTC'. - `openshift_logging_curator_script_log_level`: The script log level for Curator. Defaults to 'INFO'. - `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'. -- `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'. +- `openshift_logging_curator_cpu_request`: The minimum amount of CPU to allocate to Curator. Default is '100m'. - `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified. - `openshift_logging_curator_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the curator pod will land. - `openshift_logging_image_pull_secret`: The name of an existing pull secret to link to the logging service accounts. - `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'. -- `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified. +- `openshift_logging_kibana_cpu_request`: The minimum amount of CPU to allocate to Kibana or unset if not specified. - `openshift_logging_kibana_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified. - `openshift_logging_kibana_proxy_debug`: When "True", set the Kibana Proxy log level to DEBUG. Defaults to 'false'. -- `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified. +- `openshift_logging_kibana_proxy_cpu_request`: The minimum amount of CPU to allocate to Kibana proxy or unset if not specified. - `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified. - `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1. - `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land. - `openshift_logging_kibana_edge_term_policy`: Insecure Edge Termination Policy. Defaults to Redirect. - `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'. -- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'. +- `openshift_logging_fluentd_cpu_request`: The minimum amount of CPU to allocate for Fluentd collector pods. Defaults to '100m'. - `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'.
-- `openshift_logging_fluentd_es_copy`: Whether or not to use the ES_COPY feature for Fluentd (DEPRECATED). Defaults to 'False'. - `openshift_logging_fluentd_use_journal`: *DEPRECATED - DO NOT USE* Fluentd will automatically detect whether or not Docker is using the journald log driver. - `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false. - `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all']. @@ -71,6 +69,9 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log - `openshift_logging_fluentd_buffer_size_limit`: Buffer chunk limit for Fluentd. Defaults to 1m. - `openshift_logging_fluentd_file_buffer_limit`: Fluentd will set the value to the file buffer limit. Defaults to '1Gi' per destination. +- `openshift_logging_fluentd_audit_container_engine`: When `openshift_logging_fluentd_audit_container_engine` is set to `True`, the audit log of the container engine will be collected and stored in ES. +- `openshift_logging_fluentd_audit_file`: Location of audit log file. The default is `/var/log/audit/audit.log` +- `openshift_logging_fluentd_audit_pos_file`: Location of fluentd in_tail position file for the audit log file. The default is `/var/log/audit/audit.log.pos` - `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'. - `openshift_logging_es_port`: The port for the ES service Fluentd should send its logs to. Defaults to '9200'. @@ -79,7 +80,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log - `openshift_logging_es_client_key`: The location of the client key Fluentd uses for openshift_logging_es_host. Defaults to '/etc/fluent/keys/key'. - `openshift_logging_es_cluster_size`: The number of ES cluster members. Defaults to '1'. -- `openshift_logging_es_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set +- `openshift_logging_es_cpu_request`: The minimum amount of CPU to allocate for an ES pod cluster member. Defaults to 1 CPU. - `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '8Gi'. - `openshift_logging_es_log_appenders`: The list of rootLogger appenders for ES logs which can be: 'file', 'console'. Defaults to 'file'. - `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'. @@ -92,6 +93,12 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log - `openshift_logging_es_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'. - `openshift_logging_es_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'. +- `openshift_logging_install_eventrouter`: Coupled with `openshift_logging_install_logging`. When both are 'True', eventrouter will be installed. When both are 'False', eventrouter will be uninstalled. +Other combinations will keep the eventrouter untouched. + +Detailed eventrouter configuration can be found in +- `roles/openshift_logging_eventrouter/README.md` + When `openshift_logging_use_ops` is `True`, there are some additional vars.
These work the same as above for their non-ops counterparts, but apply to the OPS cluster instance: - `openshift_logging_es_ops_host`: logging-es-ops @@ -100,7 +107,7 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta - `openshift_logging_es_ops_client_cert`: /etc/fluent/keys/cert - `openshift_logging_es_ops_client_key`: /etc/fluent/keys/key - `openshift_logging_es_ops_cluster_size`: 1 -- `openshift_logging_es_ops_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set +- `openshift_logging_es_ops_cpu_request`: The minimum amount of CPU to allocate for an ES ops pod cluster member. Defaults to 1 CPU. - `openshift_logging_es_ops_memory_limit`: 8Gi - `openshift_logging_es_ops_pvc_dynamic`: False - `openshift_logging_es_ops_pvc_size`: "" @@ -108,9 +115,9 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta - `openshift_logging_es_ops_recover_after_time`: 5m - `openshift_logging_es_ops_storage_group`: 65534 - `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'. -- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified. +- `openshift_logging_kibana_ops_cpu_request`: The minimum amount of CPU to allocate to Kibana or unset if not specified. - `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified. -- `openshift_logging_kibana_ops_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified. +- `openshift_logging_kibana_ops_proxy_cpu_request`: The minimum amount of CPU to allocate to Kibana proxy or unset if not specified. - `openshift_logging_kibana_ops_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified. - `openshift_logging_kibana_ops_replica_count`: The number of replicas Kibana ops should be scaled up to. Defaults to 1. @@ -165,11 +172,11 @@ Elasticsearch OPS too, if using an OPS cluster: send the raw logs to mux for processing. We do not currently recommend using this mode, and ansible will warn you about this. - `openshift_logging_mux_hostname`: Default is "mux." + - `openshift_master_default_subdomain`. This is the hostname *external*_ + `openshift_master_default_subdomain`. This is the hostname *external* clients will use to connect to mux, and will be used in the TLS server cert subject. - `openshift_logging_mux_port`: 24284 -- `openshift_logging_mux_cpu_limit`: 100m +- `openshift_logging_mux_cpu_request`: 100m - `openshift_logging_mux_memory_limit`: 512Mi - `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the first value in the list is the namespace to use for undefined projects, @@ -195,3 +202,103 @@ Elasticsearch OPS too, if using an OPS cluster: Defaults to 'logging-mux'. - `openshift_logging_mux_file_buffer_storage_group`: The storage group used for Mux. Defaults to '65534'. 
+ +### remote syslog forwarding +- `openshift_logging_fluentd_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false` +- `openshift_logging_fluentd_remote_syslog_host`: Required, hostname or IP of remote syslog server +- `openshift_logging_fluentd_remote_syslog_port`: Port of remote syslog server, defaults to `514` +- `openshift_logging_fluentd_remote_syslog_severity`: Syslog severity level, defaults to `debug` +- `openshift_logging_fluentd_remote_syslog_facility`: Syslog facility, defaults to `local0` +- `openshift_logging_fluentd_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty) +- `openshift_logging_fluentd_remote_syslog_tag_key`: If a string is specified, use this field from the record to set the key field on the syslog message +- `openshift_logging_fluentd_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false` +- `openshift_logging_fluentd_remote_syslog_payload_key`: If a string is specified, use this field from the record as the payload on the syslog message + +The corresponding openshift\_logging\_mux\_* parameters are below. + +- `openshift_logging_mux_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false` +- `openshift_logging_mux_remote_syslog_host`: Required, hostname or IP of remote syslog server +- `openshift_logging_mux_remote_syslog_port`: Port of remote syslog server, defaults to `514` +- `openshift_logging_mux_remote_syslog_severity`: Syslog severity level, defaults to `debug` +- `openshift_logging_mux_remote_syslog_facility`: Syslog facility, defaults to `local0` +- `openshift_logging_mux_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty) +- `openshift_logging_mux_remote_syslog_tag_key`: If a string is specified, use this field from the record to set the key field on the syslog message +- `openshift_logging_mux_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false` +- `openshift_logging_mux_remote_syslog_payload_key`: If a string is specified, use this field from the record as the payload on the syslog message + +Image update procedure +---------------------- +An upgrade of the logging stack from an older version to a newer one is an automated process; perform it by running the appropriate ansible playbook with the required ansible variables set in your inventory, as documented in https://docs.openshift.org/. + +The following text describes a manual update of the logging images without a version upgrade. To determine the current version of the images being used, run: +``` +oc describe pod | grep 'Image ID:' +``` +This will get the repo digest that can later be compared to the inspected image details. + +To determine when your image was last updated: +``` +$ docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +<registry>/openshift3/logging-fluentd v3.7 ff2e249fc45a About an hour ago 235.2 MB + +$ docker inspect ff2e249fc45a +[ + { + . . . + "RepoDigests": [ + "<registry>/openshift3/logging-fluentd@sha256:4346f0aa9694f32735115705ad324803b1a6ff08343c3288f7a62c3a5cb70495" + ], + . . . + "Config": { + . . . + "Labels": { + . . . + "build-date": "2017-10-12T14:38:22.414827", + . . . + "release": "0.143.3.0", + . . . + "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/openshift3/logging-fluentd/images/v3.7.0-0.143.3.0", + . . . + "version": "v3.7.0" + } + }, + . . .
+``` + +Pull a new image to see if the registry has any newer images with the same tag: +``` +$ docker pull <registry>/openshift3/logging-fluentd:v3.7 +``` + +If there was an update, you need to run the `docker pull` on each node. + +It is recommended that you now rerun the `openshift_logging` playbook to ensure that any necessary config changes are also picked up. + +To manually redeploy your pod you can do the following: +- for a DC you can do: +``` +oc rollout latest <dc_name> +``` + +- for an RC you can scale down and scale back up +``` +oc scale --replicas=0 <rc_name> + +... wait for scale down + +oc scale --replicas=<original_replica_count> <rc_name> +``` + +- for a DS you can delete the pod or unlabel and relabel your node +``` +oc delete pod --selector=<ds_selector> +``` + +Changelog +--------- +Tue Oct 26, 2017 +- Make CPU request equal limit if limit is greater than request + +Tue Oct 10, 2017 +- Default imagePullPolicy changed from Always to IfNotPresent diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index f07d7e6da..626732d16 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -1,14 +1,16 @@ --- -openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | default('false') | bool }}" +openshift_logging_use_ops: False openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" -openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}" +openshift_logging_master_public_url: "{{ 'https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true)) }}" openshift_logging_namespace: logging openshift_logging_nodeselector: null openshift_logging_labels: {} openshift_logging_label_key: "" openshift_logging_label_value: "" -openshift_logging_install_logging: True -openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" +openshift_logging_install_logging: False + +openshift_logging_purge_logging: False +openshift_logging_image_pull_secret: "" openshift_logging_curator_default_days: 30 openshift_logging_curator_run_hour: 0 @@ -16,25 +18,29 @@ openshift_logging_curator_run_minute: 0 openshift_logging_curator_run_timezone: UTC openshift_logging_curator_script_log_level: INFO openshift_logging_curator_log_level: ERROR -openshift_logging_curator_cpu_limit: 100m -openshift_logging_curator_memory_limit: null -openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}" +openshift_logging_curator_cpu_limit: null +openshift_logging_curator_memory_limit: 256Mi +openshift_logging_curator_cpu_request: 100m +openshift_logging_curator_nodeselector: {} -openshift_logging_curator_ops_cpu_limit: 100m -openshift_logging_curator_ops_memory_limit: null -openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}" +openshift_logging_curator_ops_cpu_limit: null +openshift_logging_curator_ops_memory_limit: 256Mi +openshift_logging_curator_ops_cpu_request: 100m +openshift_logging_curator_ops_nodeselector: {} -openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.'
~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" +openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" openshift_logging_kibana_cpu_limit: null openshift_logging_kibana_memory_limit: 736Mi +openshift_logging_kibana_cpu_request: 100m openshift_logging_kibana_proxy_debug: false openshift_logging_kibana_proxy_cpu_limit: null -openshift_logging_kibana_proxy_memory_limit: 96Mi +openshift_logging_kibana_proxy_memory_limit: 256Mi +openshift_logging_kibana_proxy_cpu_request: 100m openshift_logging_kibana_replica_count: 1 openshift_logging_kibana_edge_term_policy: Redirect -openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}" -openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}" +openshift_logging_kibana_nodeselector: {} +openshift_logging_kibana_ops_nodeselector: {} #The absolute path on the control node to the cert file to use #for the public facing kibana certs @@ -48,12 +54,14 @@ openshift_logging_kibana_key: "" #for the public facing kibana certs openshift_logging_kibana_ca: "" -openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" +openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" openshift_logging_kibana_ops_cpu_limit: null openshift_logging_kibana_ops_memory_limit: 736Mi +openshift_logging_kibana_ops_cpu_request: 100m openshift_logging_kibana_ops_proxy_debug: false openshift_logging_kibana_ops_proxy_cpu_limit: null -openshift_logging_kibana_ops_proxy_memory_limit: 96Mi +openshift_logging_kibana_ops_proxy_memory_limit: 256Mi +openshift_logging_kibana_ops_proxy_cpu_request: 100m openshift_logging_kibana_ops_replica_count: 1 #The absolute path on the control node to the cert file to use @@ -68,37 +76,36 @@ openshift_logging_kibana_ops_key: "" #for the public facing ops kibana certs openshift_logging_kibana_ops_ca: "" -openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}" -openshift_logging_fluentd_cpu_limit: 100m +openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'} +openshift_logging_fluentd_cpu_limit: null openshift_logging_fluentd_memory_limit: 512Mi -openshift_logging_fluentd_es_copy: false -openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}" -openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}" +openshift_logging_fluentd_cpu_request: 100m +openshift_logging_fluentd_journal_source: "" +openshift_logging_fluentd_journal_read_from_head: "" openshift_logging_fluentd_hosts: ['--all'] -openshift_logging_fluentd_buffer_queue_limit: 1024 -openshift_logging_fluentd_buffer_size_limit: 1m +openshift_logging_fluentd_buffer_queue_limit: 32 +openshift_logging_fluentd_buffer_size_limit: 8m openshift_logging_es_host: logging-es openshift_logging_es_port: 9200 openshift_logging_es_ca: /etc/fluent/keys/ca openshift_logging_es_client_cert: /etc/fluent/keys/cert openshift_logging_es_client_key: /etc/fluent/keys/key 
-openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}" -openshift_logging_es_cpu_limit: 1000m +openshift_logging_es_cluster_size: 1 +openshift_logging_es_cpu_limit: null +openshift_logging_es_cpu_request: "1" # the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console' openshift_logging_es_log_appenders: ['file'] -openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}" -openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default('') }}" -openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}" -openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}" -openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}" +openshift_logging_es_memory_limit: "8Gi" +openshift_logging_es_pv_selector: "{{ openshift_logging_storage_labels | default('') }}" +openshift_logging_es_pvc_dynamic: "{{ openshift_logging_elasticsearch_pvc_dynamic | default(False) }}" +openshift_logging_es_pvc_size: "{{ openshift_logging_elasticsearch_pvc_size | default('') }}" +openshift_logging_es_pvc_prefix: "{{ openshift_logging_elasticsearch_pvc_prefix | default('logging-es') }}" openshift_logging_es_recover_after_time: 5m -openshift_logging_es_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}" -openshift_logging_es_nodeselector: "{{ openshift_hosted_logging_elasticsearch_nodeselector | default('') | map_from_pairs }}" +openshift_logging_es_storage_group: "65534" +openshift_logging_es_nodeselector: {} # openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml openshift_logging_es_config: {} -openshift_logging_es_number_of_shards: 1 -openshift_logging_es_number_of_replicas: 0 # for exposing es to external (outside of the cluster) clients openshift_logging_es_allow_external: False @@ -124,16 +131,17 @@ openshift_logging_es_ops_port: 9200 openshift_logging_es_ops_ca: /etc/fluent/keys/ca openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert openshift_logging_es_ops_client_key: /etc/fluent/keys/key -openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" -openshift_logging_es_ops_cpu_limit: 1000m -openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}" -openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default('') }}" -openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}" -openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}" -openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}" +openshift_logging_es_ops_cluster_size: "{{ openshift_logging_elasticsearch_ops_cluster_size | default(1) }}" +openshift_logging_es_ops_cpu_limit: null +openshift_logging_es_ops_memory_limit: 8Gi +openshift_logging_es_ops_cpu_request: "1" +openshift_logging_es_ops_pv_selector: "{{ openshift_loggingops_storage_labels | default('') }}" +openshift_logging_es_ops_pvc_dynamic: "{{ openshift_logging_elasticsearch_ops_pvc_dynamic | default(False) }}" +openshift_logging_es_ops_pvc_size: "{{ 
openshift_logging_elasticsearch_ops_pvc_size | default('') }}" +openshift_logging_es_ops_pvc_prefix: "{{ openshift_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}" openshift_logging_es_ops_recover_after_time: 5m -openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}" -openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}" +openshift_logging_es_ops_storage_group: "65534" +openshift_logging_es_ops_nodeselector: {} # for exposing es-ops to external (outside of the cluster) clients openshift_logging_es_ops_allow_external: False @@ -152,15 +160,16 @@ openshift_logging_es_ops_key: "" openshift_logging_es_ops_ca_ext: "" # storage related defaults -openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}" +openshift_logging_storage_access_modes: ['ReadWriteOnce'] # mux - secure_forward listener service openshift_logging_mux_allow_external: False openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}" openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" openshift_logging_mux_port: 24284 -openshift_logging_mux_cpu_limit: 500m -openshift_logging_mux_memory_limit: 1Gi +openshift_logging_mux_cpu_limit: null +openshift_logging_mux_memory_limit: 512Mi +openshift_logging_mux_cpu_request: 100m # the namespace to use for undefined projects should come first, followed by any # additional namespaces to create by default - users will typically not need to set this openshift_logging_mux_default_namespaces: ["mux-undefined"] diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py index eac086e81..e1a5ea726 100644 --- a/roles/openshift_logging/filter_plugins/openshift_logging.py +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -3,6 +3,7 @@ ''' import random +import re def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'): @@ -17,6 +18,47 @@ def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'): return dict(kind='emptydir') + +def min_cpu(left, right): + '''Return the minimum cpu value of the two values given''' + message = "Unable to evaluate the {} cpu value '{}'. Expected a whole number, a decimal, or an integer followed by M/m" + pattern = re.compile(r"^(\d*\.?\d*)([Mm])?$") + millis_per_core = 1000 + if not right: + return left + m_left = pattern.match(left) + if not m_left: + raise RuntimeError(message.format("left", left)) + m_right = pattern.match(right) + if not m_right: + raise RuntimeError(message.format("right", right)) + left_value = float(m_left.group(1)) + right_value = float(m_right.group(1)) + if m_left.group(2) not in ["M", "m"]: + left_value = left_value * millis_per_core + if m_right.group(2) not in ["M", "m"]: + right_value = right_value * millis_per_core + response = left + if left_value != min(left_value, right_value): + response = right + return response + + +def walk(source, path, default, delimiter='.'): + '''Walk the source hash given the path and return the value or default if not found''' + if not isinstance(source, dict): + raise RuntimeError('The source is not a walkable dict: {} path: {}'.format(source, path)) + keys = path.split(delimiter) + max_depth = len(keys) + cur_depth = 0 + while cur_depth < max_depth: + if keys[cur_depth] in source: + source = source[keys[cur_depth]] + cur_depth = cur_depth + 1 + else: + return default + return source + + def random_word(source_alpha, length): ''' Returns a random word given the source of characters to pick from and resulting length ''' return ''.join(random.choice(source_alpha) for i in range(length)) @@ -45,6 +87,21 @@ def map_from_pairs(source, delim="="): return dict(item.split(delim) for item in source.split(",")) + +def serviceaccount_name(qualified_sa): + ''' Returns the simple name from a fully qualified name ''' + return qualified_sa.split(":")[-1] + + +def serviceaccount_namespace(qualified_sa, default=None): + ''' Returns the namespace from a fully qualified name ''' + seg = qualified_sa.split(":") + if len(seg) > 1: + return seg[-2] + if default: + return default + return seg[-1] + + # pylint: disable=too-few-public-methods class FilterModule(object): ''' OpenShift Logging Filters ''' @@ -56,5 +113,9 @@ class FilterModule(object): 'random_word': random_word, 'entry_from_named_pair': entry_from_named_pair, 'map_from_pairs': map_from_pairs, - 'es_storage': es_storage + 'min_cpu': min_cpu, + 'es_storage': es_storage, + 'serviceaccount_name': serviceaccount_name, + 'serviceaccount_namespace': serviceaccount_namespace, + 'walk': walk } diff --git a/roles/openshift_logging/filter_plugins/test b/roles/openshift_logging/filter_plugins/test new file mode 100644 index 000000000..bac25c012 --- /dev/null +++ b/roles/openshift_logging/filter_plugins/test @@ -0,0 +1,49 @@ +import unittest +from openshift_logging import walk +from openshift_logging import min_cpu + +class TestFilterMethods(unittest.TestCase): + + + def test_min_cpu_for_none(self): + source = "1000M" + self.assertEquals(min_cpu(source, None), "1000M") + + def test_min_cpu_for_millis(self): + source = "1" + self.assertEquals(min_cpu(source, "0.1"), "0.1") + + + def test_min_cpu_for_whole(self): + source = "120M" + self.assertEquals(min_cpu(source, "2"), "120M") + + + def test_walk_find_key(self): + source = {'foo': {'bar.xyz': 'myvalue'}} + self.assertEquals(walk(source,'foo#bar.xyz', 123, delimiter='#'), 'myvalue') + + + def test_walk_return_default(self): + source = {'foo': {'bar.xyz': 'myvalue'}} + self.assertEquals(walk(source,'foo#bar.abc', 123, delimiter='#'), 123) + + + def test_walk_limit_max_depth(self): + source = {'foo': {'bar.xyz': 'myvalue'}} + self.assertEquals(walk(source,'foo#bar.abc#dontfindme', 123, delimiter='#'), 123) +
def test_complex_hash(self): + source = { + 'elasticsearch': { + 'configmaps': { + 'logging-elasticsearch': { + 'elasticsearch.yml': "a string value" + } + } + } + } + self.assertEquals(walk(source,'elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', 123, delimiter='#'), "a string value") + +if __name__ == '__main__': + unittest.main() diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py index 35accfb78..98d0d1c4f 100644 --- a/roles/openshift_logging/library/openshift_logging_facts.py +++ b/roles/openshift_logging/library/openshift_logging_facts.py @@ -171,22 +171,25 @@ class OpenshiftLoggingFacts(OCBaseCommand): if comp is not None: spec = dc_item["spec"]["template"]["spec"] facts = dict( + name=name, selector=dc_item["spec"]["selector"], replicas=dc_item["spec"]["replicas"], serviceAccount=spec["serviceAccount"], containers=dict(), volumes=dict() ) + if "nodeSelector" in spec: + facts["nodeSelector"] = spec["nodeSelector"] + if "supplementalGroups" in spec["securityContext"]: + facts["storageGroups"] = spec["securityContext"]["supplementalGroups"] + facts["spec"] = spec if "volumes" in spec: for vol in spec["volumes"]: clone = copy.deepcopy(vol) clone.pop("name", None) facts["volumes"][vol["name"]] = clone for container in spec["containers"]: - facts["containers"][container["name"]] = dict( - image=container["image"], - resources=container["resources"], - ) + facts["containers"][container["name"]] = container self.add_facts_for(comp, "deploymentconfigs", name, facts) def facts_for_services(self, namespace): @@ -204,7 +207,7 @@ class OpenshiftLoggingFacts(OCBaseCommand): def facts_for_configmaps(self, namespace): ''' Gathers facts for configmaps in logging namespace ''' self.default_keys_for("configmaps") - a_list = self.oc_command("get", "configmaps", namespace=namespace, add_options=["-l", LOGGING_SELECTOR]) + a_list = self.oc_command("get", "configmaps", namespace=namespace) if len(a_list["items"]) == 0: return for item in a_list["items"]: diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml index d4b33616a..fcb4c94d3 100644 --- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml +++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml @@ -14,4 +14,4 @@ content: metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}" with_items: "{{ __logging_ops_projects.results }}" - when: "{{ item.results.stderr is not defined }}" + when: item.results.stderr is not defined diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index 6d023a02d..ffed956a4 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -14,6 +14,16 @@ - templates - ds +# return all persistent volume claims as well if purge is set +- name: delete logging pvc objects + oc_obj: + state: absent + kind: pvc + namespace: "{{ openshift_logging_namespace }}" + selector: "logging-infra" + when: + - openshift_logging_purge_logging | default(false) | bool + # delete the oauthclient - name: delete oauthclient kibana-proxy oc_obj: @@ -82,6 +92,7 @@ with_items: - rolebinding-reader - daemonset-admin + - prometheus-metrics-viewer # delete our configmaps - name: delete configmaps @@ -95,3 +106,9 @@ - logging-elasticsearch - logging-fluentd - logging-mux + +## EventRouter +- include_role: + 
name: openshift_logging_eventrouter + when: + not openshift_logging_install_eventrouter | default(false) | bool diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index 9c8f0986a..f526fd734 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -139,10 +139,10 @@ # TODO: make idempotent - name: Generate proxy session - set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}} + set_fact: session_secret={{ 200 | oo_random_word}} check_mode: no # TODO: make idempotent - name: Generate oauth client secret - set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}} + set_fact: oauth_secret={{ 64 | oo_random_word}} check_mode: no diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index a77df9986..b98e281a3 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -36,6 +36,14 @@ - openshift_logging_label_key != "" - openshift_logging_label_value is defined +- name: Annotate Logging Project to allow overcommit + oc_edit: + kind: ns + name: "{{ openshift_logging_namespace }}" + separator: '#' + content: + metadata#annotations#quota.openshift.io/cluster-resource-override-enabled: "false" + - name: Create logging cert directory file: path: "{{ openshift.common.config_base }}/logging" @@ -69,19 +77,23 @@ vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" - openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}" - openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}" + openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}" + openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}" openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" - openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" - openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" + openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}" + openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}" + _es_containers: "{{ outer_item.0.containers}}" + _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}" with_together: - - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs }}" + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}" - "{{ openshift_logging_facts.elasticsearch.pvcs }}" - "{{ es_indices }}" + loop_control: + loop_var: outer_item when: - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0 @@ -91,15 +103,15 @@ vars: generated_certs_dir: 
"{{openshift.common.config_base}}/logging" openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" - openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}" + openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ outer_item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}" openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" - openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" - openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }} + loop_control: + loop_var: outer_item - set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }} with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }} @@ -123,8 +135,8 @@ vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" - openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}" - openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}" + openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}" + openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}" openshift_logging_elasticsearch_ops_deployment: true openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" @@ -134,17 +146,27 @@ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" + openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}" + openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}" + openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_ops_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}" openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}" openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}" openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}" openshift_logging_es_hostname: "{{ openshift_logging_es_ops_hostname }}" openshift_logging_es_edge_term_policy: "{{ openshift_logging_es_ops_edge_term_policy | default('') }}" openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}" + openshift_logging_es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards | default(None) }}" + openshift_logging_es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas | default(None) }}" + _es_containers: "{{ outer_item.0.containers}}" + _es_configmap: "{{ openshift_logging_facts | 
walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}" with_together: - - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs }}" + - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}" - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}" - "{{ es_ops_indices }}" + loop_control: + loop_var: outer_item + when: - openshift_logging_use_ops | bool - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0 @@ -155,7 +177,7 @@ vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" - openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}" + openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix }}-{{ outer_item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}" openshift_logging_elasticsearch_ops_deployment: true openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" @@ -165,6 +187,8 @@ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" + openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}" + openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}" openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}" openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}" openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}" @@ -173,6 +197,8 @@ openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}" with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }} + loop_control: + loop_var: outer_item when: - openshift_logging_use_ops | bool @@ -204,11 +230,13 @@ openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}" openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}" openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}" + openshift_logging_kibana_cpu_request: "{{ openshift_logging_kibana_ops_cpu_request }}" openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}" openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}" openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}" openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}" openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}" + openshift_logging_kibana_proxy_cpu_request: "{{ openshift_logging_kibana_ops_proxy_cpu_request }}" openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}" openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}" openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}" @@ -240,6 +268,7 @@ openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}" openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" 
openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}" + openshift_logging_curator_cpu_request: "{{ openshift_logging_curator_ops_cpu_request }}" openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}" openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}" when: @@ -268,4 +297,12 @@ openshift_logging_fluentd_master_url: "{{ openshift_logging_master_url }}" openshift_logging_fluentd_namespace: "{{ openshift_logging_namespace }}" + +## EventRouter +- include_role: + name: openshift_logging_eventrouter + when: + openshift_logging_install_eventrouter | default(false) | bool + + - include: update_master_config.yaml diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index f475024dd..15f6a23e6 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -30,10 +30,11 @@ check_mode: no become: no -- include: "{{ role_path }}/tasks/install_logging.yaml" - when: openshift_logging_install_logging | default(false) | bool +- include: install_logging.yaml + when: + - openshift_logging_install_logging | default(false) | bool -- include: "{{ role_path }}/tasks/delete_logging.yaml" +- include: delete_logging.yaml when: - not openshift_logging_install_logging | default(false) | bool diff --git a/roles/openshift_logging/templates/jks_pod.j2 b/roles/openshift_logging/templates/jks_pod.j2 index 8b1c74211..e4110b7b3 100644 --- a/roles/openshift_logging/templates/jks_pod.j2 +++ b/roles/openshift_logging/templates/jks_pod.j2 @@ -8,7 +8,7 @@ spec: containers: - name: jks-cert-gen image: {{openshift_logging_image_prefix}}logging-deployer:{{openshift_logging_image_version}} - imagePullPolicy: Always + imagePullPolicy: IfNotPresent command: ["sh", "{{generated_certs_dir}}/generate-jks.sh"] securityContext: privileged: true diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml index 49e8a18af..f60fa8d7d 100644 --- a/roles/openshift_logging/vars/openshift-enterprise.yml +++ b/roles/openshift_logging/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- __openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('v3.6') }}" +__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('v3.7') }}" diff --git a/roles/openshift_logging_curator/defaults/main.yml b/roles/openshift_logging_curator/defaults/main.yml index 17807b644..9cae9f936 100644 --- a/roles/openshift_logging_curator/defaults/main.yml +++ b/roles/openshift_logging_curator/defaults/main.yml @@ -9,8 +9,9 @@ openshift_logging_curator_namespace: logging ### Common settings openshift_logging_curator_nodeselector: "" -openshift_logging_curator_cpu_limit: 100m -openshift_logging_curator_memory_limit: null +openshift_logging_curator_cpu_limit: null +openshift_logging_curator_cpu_request: 100m +openshift_logging_curator_memory_limit: 256Mi openshift_logging_curator_es_host: "logging-es" openshift_logging_curator_es_port: 9200 diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index 6e8fab2b5..fcaf18ed4 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -90,6 +90,7 @@ es_host: "{{ 
openshift_logging_curator_es_host }}" es_port: "{{ openshift_logging_curator_es_port }}" curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}" + curator_cpu_request: "{{ openshift_logging_curator_cpu_request | min_cpu(openshift_logging_curator_cpu_limit | default(none)) }}" curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}" curator_replicas: "{{ openshift_logging_curator_replicas | default (1) }}" curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}" diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2 index 6431f86d9..462128366 100644 --- a/roles/openshift_logging_curator/templates/curator.j2 +++ b/roles/openshift_logging_curator/templates/curator.j2 @@ -38,12 +38,27 @@ spec: - name: "curator" image: {{image}} - imagePullPolicy: Always + imagePullPolicy: IfNotPresent +{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %} resources: +{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") %} limits: +{% if curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "" %} cpu: "{{curator_cpu_limit}}" -{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %} +{% endif %} +{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %} memory: "{{curator_memory_limit}}" +{% endif %} +{% endif %} +{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %} + requests: +{% if curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "" %} + cpu: "{{curator_cpu_request}}" +{% endif %} +{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %} + memory: "{{curator_memory_limit}}" +{% endif %} +{% endif %} {% endif %} env: - diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml index 97525479e..95bf462d1 100644 --- a/roles/openshift_logging_curator/vars/main.yml +++ b/roles/openshift_logging_curator/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_curator_version: "3_5" -__allowed_curator_versions: ["3_5", "3_6"] +__latest_curator_version: "3_6" +__allowed_curator_versions: ["3_5", "3_6", "3_7"] diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml index 75bd479be..9fc6fd1d8 100644 --- a/roles/openshift_logging_elasticsearch/defaults/main.yml +++ b/roles/openshift_logging_elasticsearch/defaults/main.yml @@ -6,7 +6,8 @@ openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_ openshift_logging_elasticsearch_namespace: logging openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector | default('') }}" -openshift_logging_elasticsearch_cpu_limit: 1000m +openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_cpu_limit | default('') }}" 
+openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_cpu_request | default('1000m') }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_memory_limit | default('1Gi') }}" openshift_logging_elasticsearch_recover_after_time: "{{ openshift_logging_es_recover_after_time | default('5m') }}" @@ -33,13 +34,17 @@ openshift_logging_elasticsearch_pvc_size: "" openshift_logging_elasticsearch_pvc_dynamic: false openshift_logging_elasticsearch_pvc_pv_selector: {} openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce'] -openshift_logging_elasticsearch_storage_group: '65534' +openshift_logging_elasticsearch_storage_group: ['65534'] openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}" # config the es plugin to write kibana index based on the index mode openshift_logging_elasticsearch_kibana_index_mode: 'unique' +openshift_logging_elasticsearch_proxy_cpu_request: "100m" +openshift_logging_elasticsearch_proxy_memory_limit: "64Mi" +openshift_logging_elasticsearch_prometheus_sa: "system:serviceaccount:{{openshift_prometheus_namespace | default('prometheus')}}:prometheus" + # this is used to determine if this is an operations deployment or a non-ops deployment # simply used for naming purposes openshift_logging_elasticsearch_ops_deployment: false diff --git a/roles/openshift_logging_elasticsearch/files/es_migration.sh b/roles/openshift_logging_elasticsearch/files/es_migration.sh deleted file mode 100644 index 339b5a1b2..000000000 --- a/roles/openshift_logging_elasticsearch/files/es_migration.sh +++ /dev/null @@ -1,79 +0,0 @@ -CA=${1:-/etc/openshift/logging/ca.crt} -KEY=${2:-/etc/openshift/logging/system.admin.key} -CERT=${3:-/etc/openshift/logging/system.admin.crt} -openshift_logging_es_host=${4:-logging-es} -openshift_logging_es_port=${5:-9200} -namespace=${6:-logging} - -# for each index in _cat/indices -# skip indices that begin with . - .kibana, .operations, etc. -# skip indices that contain a uuid -# get a list of unique project -# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices -# we are interested in - the awk will strip that part off -function get_list_of_indices() { - curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \ - awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \ - '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \ - sort -u -} - -# for each index in _cat/indices -# skip indices that begin with . - .kibana, .operations, etc. -# get a list of unique project.uuid -# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices -# we are interested in - the awk will strip that part off -function get_list_of_proj_uuid_indices() { - curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \ - awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \ - '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \ - sort -u -} - -if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then - echo "No Elasticsearch pods found running. Cannot update common data model." 
- exit 1 -fi - -count=$(get_list_of_indices | wc -l) -if [ $count -eq 0 ]; then - echo No matching indices found - skipping update_for_uuid -else - echo Creating aliases for $count index patterns . . . - { - echo '{"actions":[' - get_list_of_indices | \ - while IFS=. read proj ; do - # e.g. make test.uuid.* an alias of test.* so we can search for - # /test.uuid.*/_search and get both the test.uuid.* and - # the test.* indices - uid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null) - [ -n "$uid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uuid.*\"}}" - done - echo ']}' - } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases" -fi - -count=$(get_list_of_proj_uuid_indices | wc -l) -if [ $count -eq 0 ] ; then - echo No matching indexes found - skipping update_for_common_data_model - exit 0 -fi - -echo Creating aliases for $count index patterns . . . -# for each index in _cat/indices -# skip indices that begin with . - .kibana, .operations, etc. -# get a list of unique project.uuid -# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices -# we are interested in - the awk will strip that part off -{ - echo '{"actions":[' - get_list_of_proj_uuid_indices | \ - while IFS=. read proj uuid ; do - # e.g. make project.test.uuid.* and alias of test.uuid.* so we can search for - # /project.test.uuid.*/_search and get both the test.uuid.* and - # the project.test.uuid.* indices - echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}" - done - echo ']}' -} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases" diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 1e800b1d6..e7ef443bd 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -17,6 +17,17 @@ - include: determine_version.yaml +- name: Set default image variables based on deployment_type + include_vars: "{{ item }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + +- name: Set elasticsearch_prefix image facts + set_fact: + openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_elasticsearch_proxy_image_prefix | default(__openshift_logging_elasticsearch_proxy_image_prefix) }}" + openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_elasticsearch_proxy_image_version | default(__openshift_logging_elasticsearch_proxy_image_version) }}" + # allow passing in a tempdir - name: Create temp directory for doing work in command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX @@ -37,6 +48,7 @@ # we want to make sure we have all the necessary components here # service account + - name: Create ES service account oc_serviceaccount: state: present @@ -51,7 +63,7 @@ name: "aggregated-logging-elasticsearch" namespace: "{{ openshift_logging_elasticsearch_namespace }}" when: - - openshift_logging_image_pull_secret == '' + - openshift_logging_image_pull_secret == '' # rolebinding reader - copy: @@ -65,7 +77,7 @@ kind: clusterrole namespace: "{{ openshift_logging_elasticsearch_namespace }}" files: - - "{{ tempdir }}/rolebinding-reader.yml" + - "{{ tempdir }}/rolebinding-reader.yml" delete_after: true # SA roles @@ -77,6 +89,38 @@ resource_name: rolebinding-reader user: 
"system:serviceaccount:{{ openshift_logging_elasticsearch_namespace }}:aggregated-logging-elasticsearch" +- oc_adm_policy_user: + state: present + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + resource_kind: cluster-role + resource_name: system:auth-delegator + user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace}}:aggregated-logging-elasticsearch" + +# logging-metrics-reader role +- template: + src: logging-metrics-role.j2 + dest: "{{mktemp.stdout}}/templates/logging-metrics-role.yml" + vars: + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + role_namespace: "{{ openshift_logging_elasticsearch_prometheus_sa | serviceaccount_namespace(openshift_logging_elasticsearch_namespace) }}" + role_user: "{{ openshift_logging_elasticsearch_prometheus_sa | serviceaccount_name }}" + +- name: Create logging-metrics-reader-role + command: > + {{ openshift.common.client_binary }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + -n "{{ openshift_logging_elasticsearch_namespace }}" + create -f "{{mktemp.stdout}}/templates/logging-metrics-role.yml" + register: prometheus_out + check_mode: no + ignore_errors: yes + +- fail: + msg: "There was an error creating the logging-metrics-role and binding: {{prometheus_out}}" + when: + - "prometheus_out.stderr | length > 0" + - "'already exists' not in prometheus_out.stderr" + # View role and binding - name: Generate logging-elasticsearch-view-role template: @@ -87,8 +131,8 @@ roleRef: name: view subjects: - - kind: ServiceAccount - name: aggregated-logging-elasticsearch + - kind: ServiceAccount + name: aggregated-logging-elasticsearch changed_when: no - name: Set logging-elasticsearch-view-role role @@ -98,18 +142,18 @@ kind: rolebinding namespace: "{{ openshift_logging_elasticsearch_namespace }}" files: - - "{{ tempdir }}/logging-elasticsearch-view-role.yaml" + - "{{ tempdir }}/logging-elasticsearch-view-role.yaml" delete_after: true # configmap - assert: that: - - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes + - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes msg: "The openshift_logging_elasticsearch_kibana_index_mode '{{ openshift_logging_elasticsearch_kibana_index_mode }}' only supports one of: {{ __kibana_index_modes | join(', ') }}" - assert: that: - - "{{ openshift_logging_es_log_appenders | length > 0 }}" + - "{{ openshift_logging_es_log_appenders | length > 0 }}" msg: "The openshift_logging_es_log_appenders '{{ openshift_logging_es_log_appenders }}' has an unrecognized option and only supports the following as a list: {{ __es_log_appenders | join(', ') }}" - template: @@ -120,13 +164,17 @@ when: es_logging_contents is undefined changed_when: no +- set_fact: + __es_num_of_shards: "{{ _es_configmap | default({}) | walk('index.number_of_shards', '1') }}" + __es_num_of_replicas: "{{ _es_configmap | default({}) | walk('index.number_of_replicas', '0') }}" + - template: src: elasticsearch.yml.j2 dest: "{{ tempdir }}/elasticsearch.yml" vars: allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}" - es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}" - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}" + es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(None) or __es_num_of_shards }}" + es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(None) or __es_num_of_replicas }}" 
es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}" when: es_config_contents is undefined @@ -161,22 +209,22 @@ name: "logging-elasticsearch" namespace: "{{ openshift_logging_elasticsearch_namespace }}" files: - - name: key - path: "{{ generated_certs_dir }}/logging-es.jks" - - name: truststore - path: "{{ generated_certs_dir }}/truststore.jks" - - name: searchguard.key - path: "{{ generated_certs_dir }}/elasticsearch.jks" - - name: searchguard.truststore - path: "{{ generated_certs_dir }}/truststore.jks" - - name: admin-key - path: "{{ generated_certs_dir }}/system.admin.key" - - name: admin-cert - path: "{{ generated_certs_dir }}/system.admin.crt" - - name: admin-ca - path: "{{ generated_certs_dir }}/ca.crt" - - name: admin.jks - path: "{{ generated_certs_dir }}/system.admin.jks" + - name: key + path: "{{ generated_certs_dir }}/logging-es.jks" + - name: truststore + path: "{{ generated_certs_dir }}/truststore.jks" + - name: searchguard.key + path: "{{ generated_certs_dir }}/elasticsearch.jks" + - name: searchguard.truststore + path: "{{ generated_certs_dir }}/truststore.jks" + - name: admin-key + path: "{{ generated_certs_dir }}/system.admin.key" + - name: admin-cert + path: "{{ generated_certs_dir }}/system.admin.crt" + - name: admin-ca + path: "{{ generated_certs_dir }}/ca.crt" + - name: admin.jks + path: "{{ generated_certs_dir }}/system.admin.jks" # services - name: Set logging-{{ es_component }}-cluster service @@ -190,7 +238,7 @@ labels: logging-infra: 'support' ports: - - port: 9300 + - port: 9300 - name: Set logging-{{ es_component }} service oc_service: @@ -203,8 +251,34 @@ labels: logging-infra: 'support' ports: - - port: 9200 - targetPort: "restapi" + - port: 9200 + targetPort: "restapi" + +- name: Set logging-{{ es_component}}-prometheus service + oc_service: + state: present + name: "logging-{{es_component}}-prometheus" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + labels: + logging-infra: 'support' + ports: + - name: proxy + port: 443 + targetPort: 4443 + selector: + component: "{{ es_component }}-prometheus" + provider: openshift + +- oc_edit: + kind: service + name: "logging-{{es_component}}-prometheus" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + separator: '#' + content: + metadata#annotations#service.alpha.openshift.io/serving-cert-secret-name: "prometheus-tls" + metadata#annotations#prometheus.io/scrape: "true" + metadata#annotations#prometheus.io/scheme: "https" + metadata#annotations#prometheus.io/path: "_prometheus/metrics" - name: Check to see if PVC already exists oc_obj: @@ -218,49 +292,49 @@ # so we check for the presence of 'stderr' to determine if the obj exists or not # the RC for existing and not existing is both 0 - when: - - logging_elasticsearch_pvc.results.stderr is defined - - openshift_logging_elasticsearch_storage_type == "pvc" + - logging_elasticsearch_pvc.results.stderr is defined + - openshift_logging_elasticsearch_storage_type == "pvc" block: - # storageclasses are used by default but if static then disable - # storageclasses with the storageClassName set to "" in pvc.j2 - - name: Creating ES storage template - static - template: - src: pvc.j2 - dest: "{{ tempdir }}/templates/logging-es-pvc.yml" - vars: - obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" - size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}" - access_modes: "{{ 
openshift_logging_elasticsearch_pvc_access_modes | list }}" - pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" - storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}" - when: - - not openshift_logging_elasticsearch_pvc_dynamic | bool - - # Storageclasses are used by default if configured - - name: Creating ES storage template - dynamic - template: - src: pvc.j2 - dest: "{{ tempdir }}/templates/logging-es-pvc.yml" - vars: - obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" - size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}" - access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" - pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" - when: - - openshift_logging_elasticsearch_pvc_dynamic | bool - - - name: Set ES storage - oc_obj: - state: present - kind: pvc - name: "{{ openshift_logging_elasticsearch_pvc_name }}" - namespace: "{{ openshift_logging_elasticsearch_namespace }}" - files: - - "{{ tempdir }}/templates/logging-es-pvc.yml" - delete_after: true + # storageclasses are used by default but if static then disable + # storageclasses with the storageClassName set to "" in pvc.j2 + - name: Creating ES storage template - static + template: + src: pvc.j2 + dest: "{{ tempdir }}/templates/logging-es-pvc.yml" + vars: + obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" + size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}" + access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" + pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" + storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}" + when: + - not openshift_logging_elasticsearch_pvc_dynamic | bool + + # Storageclasses are used by default if configured + - name: Creating ES storage template - dynamic + template: + src: pvc.j2 + dest: "{{ tempdir }}/templates/logging-es-pvc.yml" + vars: + obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" + size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}" + access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" + pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" + when: + - openshift_logging_elasticsearch_pvc_dynamic | bool + + - name: Set ES storage + oc_obj: + state: present + kind: pvc + name: "{{ openshift_logging_elasticsearch_pvc_name }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + files: + - "{{ tempdir }}/templates/logging-es-pvc.yml" + delete_after: true - set_fact: - es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}" + es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}" when: openshift_logging_elasticsearch_deployment_name == "" - set_fact: @@ -278,9 +352,13 @@ logging_component: elasticsearch deploy_name: "{{ es_deploy_name }}" image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}" - es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}" + proxy_image: 
"{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}" + es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit | default('') }}" + es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request | min_cpu(openshift_logging_elasticsearch_cpu_limit | default(none)) }}" es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}" es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}" + es_storage_groups: "{{ openshift_logging_elasticsearch_storage_group | default([]) }}" + es_container_security_context: "{{ _es_containers.elasticsearch.securityContext if _es_containers is defined and 'elasticsearch' in _es_containers and 'securityContext' in _es_containers.elasticsearch else None }}" deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}" es_replicas: 1 @@ -291,7 +369,7 @@ namespace: "{{ openshift_logging_elasticsearch_namespace }}" kind: dc files: - - "{{ tempdir }}/templates/logging-es-dc.yml" + - "{{ tempdir }}/templates/logging-es-dc.yml" delete_after: true - name: Retrieving the cert to use when generating secrets for the {{ es_component }} component @@ -299,37 +377,37 @@ src: "{{ generated_certs_dir }}/{{ item.file }}" register: key_pairs with_items: - - { name: "ca_file", file: "ca.crt" } - - { name: "es_key", file: "system.logging.es.key" } - - { name: "es_cert", file: "system.logging.es.crt" } + - { name: "ca_file", file: "ca.crt" } + - { name: "es_key", file: "system.logging.es.key" } + - { name: "es_cert", file: "system.logging.es.crt" } when: openshift_logging_es_allow_external | bool - set_fact: es_key: "{{ lookup('file', openshift_logging_es_key) | b64encode }}" when: - - openshift_logging_es_key | trim | length > 0 - - openshift_logging_es_allow_external | bool + - openshift_logging_es_key | trim | length > 0 + - openshift_logging_es_allow_external | bool changed_when: false - set_fact: es_cert: "{{ lookup('file', openshift_logging_es_cert) | b64encode }}" when: - - openshift_logging_es_cert | trim | length > 0 - - openshift_logging_es_allow_external | bool + - openshift_logging_es_cert | trim | length > 0 + - openshift_logging_es_allow_external | bool changed_when: false - set_fact: es_ca: "{{ lookup('file', openshift_logging_es_ca_ext) | b64encode }}" when: - - openshift_logging_es_ca_ext | trim | length > 0 - - openshift_logging_es_allow_external | bool + - openshift_logging_es_ca_ext | trim | length > 0 + - openshift_logging_es_allow_external | bool changed_when: false - set_fact: es_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}" when: - - es_ca is not defined - - openshift_logging_es_allow_external | bool + - es_ca is not defined + - openshift_logging_es_allow_external | bool changed_when: false - name: Generating Elasticsearch {{ es_component }} route template @@ -360,7 +438,7 @@ namespace: "{{ openshift_logging_elasticsearch_namespace }}" kind: route files: - - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml" + - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml" when: openshift_logging_es_allow_external | bool ## Placeholder for migration when necessary ## diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 index 0c06a7677..65b08d970 100644 --- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 +++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 @@ -24,7 +24,8 @@ 
network: cloud: kubernetes: - service: ${SERVICE_DNS} + pod_label: ${POD_LABEL} + pod_port: 9300 namespace: ${NAMESPACE} discovery: diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index cbe6b89f2..0c7d8b46e 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -29,7 +29,9 @@ spec: serviceAccountName: aggregated-logging-elasticsearch securityContext: supplementalGroups: - - {{openshift_logging_elasticsearch_storage_group}} +{% for group in es_storage_groups %} + - {{group}} +{% endfor %} {% if es_node_selector is iterable and es_node_selector | length > 0 %} nodeSelector: {% for key, value in es_node_selector.iteritems() %} @@ -37,18 +39,56 @@ spec: {% endfor %} {% endif %} containers: + - name: proxy + image: {{ proxy_image }} + imagePullPolicy: IfNotPresent + args: + - --upstream-ca=/etc/elasticsearch/secret/admin-ca + - --https-address=:4443 + - -provider=openshift + - -client-id={{openshift_logging_elasticsearch_prometheus_sa}} + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret={{ 16 | oo_random_word | b64encode }} + - -upstream=https://localhost:9200 + - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}' + - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}' + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - -pass-access-token + - -pass-user-headers + ports: + - containerPort: 4443 + name: proxy + protocol: TCP + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + readOnly: true + - mountPath: /etc/elasticsearch/secret + name: elasticsearch + readOnly: true + resources: + limits: + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" + requests: + cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}" + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" - name: "elasticsearch" image: {{image}} - imagePullPolicy: Always + imagePullPolicy: IfNotPresent resources: limits: - memory: "{{es_memory_limit}}" -{% if es_cpu_limit is defined and es_cpu_limit is not none %} +{% if es_cpu_limit is defined and es_cpu_limit is not none and es_cpu_limit != '' %} cpu: "{{es_cpu_limit}}" {% endif %} + memory: "{{es_memory_limit}}" requests: - memory: "512Mi" + cpu: "{{es_cpu_request}}" + memory: "{{es_memory_limit}}" +{% if es_container_security_context %} + securityContext: {{ es_container_security_context | to_yaml }} +{% endif %} ports: - containerPort: 9200 @@ -90,12 +130,21 @@ spec: name: "RECOVER_AFTER_TIME" value: "{{openshift_logging_elasticsearch_recover_after_time}}" - + name: "READINESS_PROBE_TIMEOUT" + value: "30" + - + name: "POD_LABEL" + value: "component={{component}}" + - name: "IS_MASTER" value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}" - name: "HAS_DATA" value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}" + - + name: "PROMETHEUS_USER" + value: "{{openshift_logging_elasticsearch_prometheus_sa}}" volumeMounts: - name: elasticsearch @@ -106,7 +155,17 @@ spec: readOnly: true - name: elasticsearch-storage mountPath: /elasticsearch/persistent + readinessProbe: + exec: + command: + - 
"/usr/share/java/elasticsearch/probe/readiness.sh" + initialDelaySeconds: 10 + timeoutSeconds: 30 + periodSeconds: 5 volumes: + - name: proxy-tls + secret: + secretName: prometheus-tls - name: elasticsearch secret: secretName: logging-elasticsearch diff --git a/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 b/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 new file mode 100644 index 000000000..d9800e5a5 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 @@ -0,0 +1,31 @@ +--- +apiVersion: v1 +kind: List +items: +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: Role + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: prometheus-metrics-viewer + namespace: {{ namespace }} + rules: + - apiGroups: + - metrics.openshift.io + resources: + - prometheus + verbs: + - view +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + name: prometheus-metrics-viewer + namespace: {{ namespace }} + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-metrics-viewer + subjects: + - kind: ServiceAccount + namespace: {{ role_namespace }} + name: {{ role_user }} diff --git a/roles/openshift_logging_elasticsearch/vars/default_images.yml b/roles/openshift_logging_elasticsearch/vars/default_images.yml new file mode 100644 index 000000000..b7d105caf --- /dev/null +++ b/roles/openshift_logging_elasticsearch/vars/default_images.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_elasticsearch_proxy_image_prefix: "docker.io/openshift/" +__openshift_logging_elasticsearch_proxy_image_version: "v1.0.0" diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml index 20fa63543..09e2ee4d0 100644 --- a/roles/openshift_logging_elasticsearch/vars/main.yml +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -1,6 +1,6 @@ --- -__latest_es_version: "3_5" -__allowed_es_versions: ["3_5", "3_6"] +__latest_es_version: "3_6" +__allowed_es_versions: ["3_5", "3_6", "3_7"] __allowed_es_types: ["data-master", "data-client", "master", "client"] __es_log_appenders: ['file', 'console'] __kibana_index_modes: ["unique", "shared_ops"] diff --git a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml new file mode 100644 index 000000000..2fd960bb5 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}" +__openshift_logging_elasticsearch_proxy_image_version: "v3.7" diff --git a/roles/openshift_logging_eventrouter/README.md b/roles/openshift_logging_eventrouter/README.md new file mode 100644 index 000000000..611bdaee0 --- /dev/null +++ b/roles/openshift_logging_eventrouter/README.md @@ -0,0 +1,20 @@ +Event router +------------ + +A pod forwarding kubernetes events to EFK aggregated logging stack. 
+ +- **eventrouter** is deployed to the 'default' project, has a service account and its own role to read events +- **eventrouter** watches kubernetes events, marshals them to JSON and writes them to its sink (currently only various formats written to STDOUT) +- **fluentd** ingests the **eventrouter** container's output as logs (as it would for any other container), and writes them to the appropriate index for the **eventrouter**'s namespace (in the 'default' namespace, the *.operations* index is used) + +- `openshift_logging_install_eventrouter`: When 'True', eventrouter will be installed. When 'False', eventrouter will be uninstalled. + +Configuration variables: + +- `openshift_logging_eventrouter_image_prefix`: The prefix for the eventrouter logging image. Defaults to `openshift_logging_image_prefix`. +- `openshift_logging_eventrouter_image_version`: The image version for the logging eventrouter. Defaults to 'latest'. +- `openshift_logging_eventrouter_sink`: Select a sink for eventrouter; supported sinks are 'stdout' and 'glog'. Defaults to 'stdout'. +- `openshift_logging_eventrouter_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land. +- `openshift_logging_eventrouter_cpu_request`: The minimum amount of CPU to allocate to eventrouter. Defaults to '100m'. +- `openshift_logging_eventrouter_memory_limit`: The memory limit for eventrouter pods. Defaults to '128Mi'. +- `openshift_logging_eventrouter_namespace`: The namespace where eventrouter is deployed. Defaults to 'default'. diff --git a/roles/openshift_logging_eventrouter/defaults/main.yaml b/roles/openshift_logging_eventrouter/defaults/main.yaml new file mode 100644 index 000000000..4c0350c98 --- /dev/null +++ b/roles/openshift_logging_eventrouter/defaults/main.yaml @@ -0,0 +1,10 @@ +--- +openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version | default('latest') }}" +openshift_logging_eventrouter_replicas: 1 +openshift_logging_eventrouter_sink: stdout +openshift_logging_eventrouter_nodeselector: "" +openshift_logging_eventrouter_cpu_limit: null +openshift_logging_eventrouter_cpu_request: 100m +openshift_logging_eventrouter_memory_limit: 128Mi +openshift_logging_eventrouter_namespace: default diff --git a/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml new file mode 100644 index 000000000..cc01c010d --- /dev/null +++ b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml @@ -0,0 +1,103 @@ +# this openshift template should match (except nodeSelector) jinja2 template in +# ../templates/eventrouter-template.j2 +kind: Template +apiVersion: v1 +metadata: + name: eventrouter-template + annotations: + description: "A pod forwarding kubernetes events to EFK aggregated logging stack."
+ tags: "events,EFK,logging" +objects: + - kind: ServiceAccount + apiVersion: v1 + metadata: + name: aggregated-logging-eventrouter + - kind: ClusterRole + apiVersion: v1 + metadata: + name: event-reader + rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] + - kind: ConfigMap + apiVersion: v1 + metadata: + name: logging-eventrouter + data: + config.json: |- + { + "sink": "${SINK}" + } + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: logging-eventrouter + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + spec: + selector: + component: eventrouter + logging-infra: eventrouter + provider: openshift + replicas: ${REPLICAS} + template: + metadata: + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + name: logging-eventrouter + spec: + serviceAccount: aggregated-logging-eventrouter + serviceAccountName: aggregated-logging-eventrouter + containers: + - name: kube-eventrouter + image: ${IMAGE} + imagePullPolicy: IfNotPresent + resources: + limits: + memory: ${MEMORY} + cpu: ${CPU} + requires: + memory: ${MEMORY} + volumeMounts: + - name: config-volume + mountPath: /etc/eventrouter + volumes: + - name: config-volume + configMap: + name: logging-eventrouter + - kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: event-reader-binding + subjects: + - kind: ServiceAccount + name: aggregated-logging-eventrouter + namespace: ${NAMESPACE} + roleRef: + kind: ClusterRole + name: event-reader + +parameters: + - name: SINK + displayName: Sink + value: stdout + - name: REPLICAS + displayName: Replicas + value: "1" + - name: IMAGE + displayName: Image + value: "docker.io/openshift/origin-logging-eventrouter:latest" + - name: MEMORY + displayName: Memory + value: "128Mi" + - name: CPU + displayName: CPU + value: "100m" + - name: NAMESPACE + displayName: Namespace + value: default diff --git a/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml new file mode 100644 index 000000000..cf0abbde9 --- /dev/null +++ b/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml @@ -0,0 +1,40 @@ +--- +# delete eventrouter +- name: Delete EventRouter service account + oc_serviceaccount: + state: absent + name: "aggregated-logging-eventrouter" + namespace: "{{ openshift_logging_eventrouter_namespace }}" + +- name: Delete event-reader cluster role + oc_clusterrole: + state: absent + name: event-reader + +- name: Unset privileged permissions for EventRouter + oc_adm_policy_user: + namespace: "{{ openshift_logging_eventrouter_namespace }}" + resource_kind: cluster-role + resource_name: event-reader + state: absent + user: "system:serviceaccount:{{ openshift_logging_eventrouter_namespace }}:aggregated-logging-eventrouter" + +- name: Delete EventRouter configmap + oc_configmap: + state: absent + name: logging-eventrouter + namespace: "{{ openshift_logging_eventrouter_namespace }}" + +- name: Delete EventRouter DC + oc_obj: + state: absent + name: logging-eventrouter + namespace: "{{ openshift_logging_eventrouter_namespace }}" + kind: dc + +- name: Delete EventRouter Template + oc_obj: + state: absent + name: eventrouter-template + namespace: "{{ openshift_logging_eventrouter_namespace }}" + kind: template diff --git a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml new file mode 100644 index 000000000..cbbc6a8ec --- /dev/null 
+++ b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml @@ -0,0 +1,59 @@ +--- +# initial checks +- assert: + msg: Invalid sink type "{{openshift_logging_eventrouter_sink}}", only one of "{{__eventrouter_sinks}}" allowed + that: openshift_logging_eventrouter_sink in __eventrouter_sinks + +# allow passing in a tempdir +- name: Create temp directory for doing work in + command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX + register: mktemp + changed_when: False + +- set_fact: + tempdir: "{{ mktemp.stdout }}" + +- name: Create templates subdirectory + file: + state: directory + path: "{{ tempdir }}/templates" + mode: 0755 + changed_when: False + +# create EventRouter deployment config +- name: Generate EventRouter template + template: + src: eventrouter-template.j2 + dest: "{{ tempdir }}/templates/eventrouter-template.yaml" + vars: + node_selector: "{{ openshift_logging_eventrouter_nodeselector | default({}) }}" + +- name: Create EventRouter template + oc_obj: + namespace: "{{ openshift_logging_eventrouter_namespace }}" + kind: template + name: eventrouter-template + state: present + files: + - "{{ tempdir }}/templates/eventrouter-template.yaml" + +- name: Process EventRouter template + oc_process: + state: present + template_name: eventrouter-template + namespace: "{{ openshift_logging_eventrouter_namespace }}" + params: + IMAGE: "{{openshift_logging_eventrouter_image_prefix}}logging-eventrouter:{{openshift_logging_eventrouter_image_version}}" + REPLICAS: "{{ openshift_logging_eventrouter_replicas }}" + CPU: "{{ openshift_logging_eventrouter_cpu_request }}" + MEMORY: "{{ openshift_logging_eventrouter_memory_limit }}" + NAMESPACE: "{{ openshift_logging_eventrouter_namespace }}" + SINK: "{{ openshift_logging_eventrouter_sink }}" + +## Placeholder for migration when necessary ## + +- name: Delete temp directory + file: + name: "{{ tempdir }}" + state: absent + changed_when: False diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml new file mode 100644 index 000000000..58e5a559f --- /dev/null +++ b/roles/openshift_logging_eventrouter/tasks/main.yaml @@ -0,0 +1,6 @@ +--- +- include: "{{ role_path }}/tasks/install_eventrouter.yaml" + when: openshift_logging_install_eventrouter | default(false) | bool + +- include: "{{ role_path }}/tasks/delete_eventrouter.yaml" + when: not openshift_logging_install_eventrouter | default(false) | bool diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 new file mode 100644 index 000000000..5a4f7f762 --- /dev/null +++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 @@ -0,0 +1,109 @@ +# this jinja2 template should always match (except nodeSelector) openshift template in +# ../files/eventrouter-template.yaml +kind: Template +apiVersion: v1 +metadata: + name: eventrouter-template + annotations: + description: "A pod forwarding kubernetes events to EFK aggregated logging stack." 
+ tags: "events,EFK,logging" +objects: + - kind: ServiceAccount + apiVersion: v1 + metadata: + name: aggregated-logging-eventrouter + - kind: ClusterRole + apiVersion: v1 + metadata: + name: event-reader + rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] + - kind: ConfigMap + apiVersion: v1 + metadata: + name: logging-eventrouter + data: + config.json: |- + { + "sink": "${SINK}" + } + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: logging-eventrouter + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + spec: + selector: + component: eventrouter + logging-infra: eventrouter + provider: openshift + replicas: ${REPLICAS} + template: + metadata: + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + name: logging-eventrouter + spec: + serviceAccount: aggregated-logging-eventrouter + serviceAccountName: aggregated-logging-eventrouter +{% if node_selector is iterable and node_selector | length > 0 %} + nodeSelector: +{% for key, value in node_selector.iteritems() %} + {{ key }}: "{{ value }}" +{% endfor %} +{% endif %} + containers: + - name: kube-eventrouter + image: ${IMAGE} + imagePullPolicy: IfNotPresent + resources: + limits: + memory: ${MEMORY} + requires: + cpu: ${CPU} + memory: ${MEMORY} + volumeMounts: + - name: config-volume + mountPath: /etc/eventrouter + volumes: + - name: config-volume + configMap: + name: logging-eventrouter + - kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: event-reader-binding + subjects: + - kind: ServiceAccount + name: aggregated-logging-eventrouter + namespace: ${NAMESPACE} + roleRef: + kind: ClusterRole + name: event-reader + +parameters: + - name: SINK + displayName: Sink + value: stdout + - name: REPLICAS + displayName: Replicas + value: "1" + - name: IMAGE + displayName: Image + value: "docker.io/openshift/origin-logging-eventrouter:latest" + - name: MEMORY + displayName: Memory + value: "128Mi" + - name: CPU + displayName: CPU + value: "100m" + - name: NAMESPACE + displayName: Namespace + value: default diff --git a/roles/openshift_logging_eventrouter/vars/main.yaml b/roles/openshift_logging_eventrouter/vars/main.yaml new file mode 100644 index 000000000..bdf561fe3 --- /dev/null +++ b/roles/openshift_logging_eventrouter/vars/main.yaml @@ -0,0 +1,2 @@ +--- +__eventrouter_sinks: ["glog", "stdout"] diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml index 30d3d854a..861935c99 100644 --- a/roles/openshift_logging_fluentd/defaults/main.yml +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -8,7 +8,8 @@ openshift_logging_fluentd_namespace: logging ### Common settings openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}" -openshift_logging_fluentd_cpu_limit: 100m +openshift_logging_fluentd_cpu_limit: null +openshift_logging_fluentd_cpu_request: 100m openshift_logging_fluentd_memory_limit: 512Mi openshift_logging_fluentd_hosts: ['--all'] @@ -50,11 +51,13 @@ openshift_logging_fluentd_aggregating_key_path: none openshift_logging_fluentd_aggregating_passphrase: none ### Deprecating in 3.6 -openshift_logging_fluentd_es_copy: false - # following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly #fluentd_config_contents: #fluentd_throttle_contents: 
#fluentd_secureforward_contents: -openshift_logging_fluentd_file_buffer_limit: 1Gi +openshift_logging_fluentd_file_buffer_limit: 256Mi + +# Configure fluentd to tail audit log file and filter out container engine's logs from there +# These logs are then stored in ES operation index +openshift_logging_fluentd_audit_container_engine: False diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 74b4d7db4..2f89c3f9f 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -1,5 +1,8 @@ --- - fail: + msg: The ES_COPY feature is no longer supported. Please remove the variable from your inventory + when: openshift_logging_fluentd_es_copy is defined +- fail: msg: Only one Fluentd nodeselector key pair should be provided when: openshift_logging_fluentd_nodeselector.keys() | count > 1 @@ -105,7 +108,6 @@ src: secure-forward.conf dest: "{{ tempdir }}/secure-forward.conf" when: fluentd_secureforward_contents is undefined - changed_when: no - copy: @@ -152,7 +154,6 @@ path: "{{ generated_certs_dir }}/system.logging.fluentd.crt" # create Fluentd daemonset - # this should change based on the type of fluentd deployment to be done... # TODO: pass in aggregation configurations - name: Generate logging-fluentd daemonset definition @@ -170,6 +171,12 @@ ops_port: "{{ openshift_logging_fluentd_ops_port }}" fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}" fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}" + fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}" + fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}" + fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}" + audit_container_engine: "{{ openshift_logging_fluentd_audit_container_engine | default(False) | bool }}" + audit_log_file: "{{ openshift_logging_fluentd_audit_file | default() }}" + audit_pos_log_file: "{{ openshift_logging_fluentd_audit_pos_file | default() }}" check_mode: no changed_when: no diff --git a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/fluent.conf.j2 index 46de94d60..6e07b403a 100644 --- a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 +++ b/roles/openshift_logging_fluentd/templates/fluent.conf.j2 @@ -49,7 +49,9 @@ @include configs.d/openshift/filter-viaq-data-model.conf @include configs.d/openshift/filter-post-*.conf ## +</label> +<label @OUTPUT> ## matches @include configs.d/openshift/output-pre-*.conf @include configs.d/openshift/output-operations.conf diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 index 88e039e3f..10283316c 100644 --- a/roles/openshift_logging_fluentd/templates/fluentd.j2 +++ b/roles/openshift_logging_fluentd/templates/fluentd.j2 @@ -29,13 +29,30 @@ spec: containers: - name: "{{ daemonset_container_name }}" image: "{{ openshift_logging_fluentd_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_fluentd_image_version }}" - imagePullPolicy: Always + imagePullPolicy: IfNotPresent securityContext: privileged: true +{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %} resources: +{% if (fluentd_memory_limit is defined and 
fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) %} limits: - cpu: {{ openshift_logging_fluentd_cpu_limit }} - memory: {{ openshift_logging_fluentd_memory_limit }} +{% if fluentd_cpu_limit is not none %} + cpu: "{{fluentd_cpu_limit}}" +{% endif %} +{% if fluentd_memory_limit is not none %} + memory: "{{fluentd_memory_limit}}" +{% endif %} +{% endif %} +{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %} + requests: +{% if fluentd_cpu_request is not none %} + cpu: "{{fluentd_cpu_request}}" +{% endif %} +{% if fluentd_memory_limit is not none %} + memory: "{{fluentd_memory_limit}}" +{% endif %} +{% endif %} +{% endif %} volumeMounts: - name: runlogjournal mountPath: /run/log/journal @@ -64,7 +81,9 @@ spec: readOnly: true - name: filebufferstorage mountPath: /var/lib/fluentd -{% if openshift_logging_mux_client_mode is defined %} +{% if openshift_logging_mux_client_mode is defined and + ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or + (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} - name: muxcerts mountPath: /etc/fluent/muxkeys readOnly: true @@ -92,8 +111,6 @@ spec: value: "{{ openshift_logging_fluentd_ops_client_key }}" - name: "OPS_CA" value: "{{ openshift_logging_fluentd_ops_ca }}" - - name: "ES_COPY" - value: "false" - name: "JOURNAL_SOURCE" value: "{{ openshift_logging_fluentd_journal_source | default('') }}" - name: "JOURNAL_READ_FROM_HEAD" @@ -113,11 +130,85 @@ spec: containerName: "{{ daemonset_container_name }}" resource: limits.memory - name: "FILE_BUFFER_LIMIT" - value: "{{ openshift_logging_fluentd_file_buffer_limit | default('1Gi') }}" -{% if openshift_logging_mux_client_mode is defined %} + value: "{{ openshift_logging_fluentd_file_buffer_limit | default('256Mi') }}" +{% if openshift_logging_mux_client_mode is defined and + ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or + (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} - name: "MUX_CLIENT_MODE" value: "{{ openshift_logging_mux_client_mode }}" {% endif %} +{% if openshift_logging_install_eventrouter is defined and openshift_logging_install_eventrouter %} + - name: "TRANSFORM_EVENTS" + value: "true" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog is defined and openshift_logging_fluentd_remote_syslog %} + - name: USE_REMOTE_SYSLOG + value: "true" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_host is defined %} + - name: REMOTE_SYSLOG_HOST + value: "{{ openshift_logging_fluentd_remote_syslog_host }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_port is defined %} + - name: REMOTE_SYSLOG_PORT + value: "{{ openshift_logging_fluentd_remote_syslog_port }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_severity is defined %} + - name: REMOTE_SYSLOG_SEVERITY + value: "{{ openshift_logging_fluentd_remote_syslog_severity }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_facility is defined %} + - name: REMOTE_SYSLOG_FACILITY + value: "{{ openshift_logging_fluentd_remote_syslog_facility }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_remove_tag_prefix is defined %} + - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX + value: "{{ openshift_logging_fluentd_remote_syslog_remove_tag_prefix }}" +{% endif %} + +{% if 
openshift_logging_fluentd_remote_syslog_tag_key is defined %} + - name: REMOTE_SYSLOG_TAG_KEY + value: "{{ openshift_logging_fluentd_remote_syslog_tag_key }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_use_record is defined %} + - name: REMOTE_SYSLOG_USE_RECORD + value: "{{ openshift_logging_fluentd_remote_syslog_use_record }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_payload_key is defined %} + - name: REMOTE_SYSLOG_PAYLOAD_KEY + value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}" +{% endif %} + +{% if audit_container_engine %} + - name: "AUDIT_CONTAINER_ENGINE" + value: "{{ audit_container_engine | lower }}" +{% endif %} + +{% if audit_container_engine %} + - name: "NODE_NAME" + valueFrom: + fieldRef: + fieldPath: spec.nodeName +{% endif %} + +{% if audit_log_file != '' %} + - name: AUDIT_FILE + value: "{{ audit_log_file }}" +{% endif %} + +{% if audit_pos_log_file != '' %} + - name: AUDIT_POS_FILE + value: "{{ audit_pos_log_file }}" +{% endif %} + volumes: - name: runlogjournal hostPath: @@ -146,7 +237,9 @@ spec: - name: dockerdaemoncfg hostPath: path: /etc/docker -{% if openshift_logging_mux_client_mode is defined %} +{% if openshift_logging_mux_client_mode is defined and + ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or + (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} - name: muxcerts secret: secretName: logging-mux diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml index ec8e565c3..92a426952 100644 --- a/roles/openshift_logging_fluentd/vars/main.yml +++ b/roles/openshift_logging_fluentd/vars/main.yml @@ -1,5 +1,5 @@ --- -__latest_fluentd_version: "3_5" -__allowed_fluentd_versions: ["3_5", "3_6"] +__latest_fluentd_version: "3_6" +__allowed_fluentd_versions: ["3_5", "3_6", "3_7"] __allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"] __allowed_mux_client_modes: ["minimal", "maximal"] diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml index ee265bb14..1366e96cd 100644 --- a/roles/openshift_logging_kibana/defaults/main.yml +++ b/roles/openshift_logging_kibana/defaults/main.yml @@ -9,6 +9,7 @@ openshift_logging_kibana_namespace: logging openshift_logging_kibana_nodeselector: "" openshift_logging_kibana_cpu_limit: null +openshift_logging_kibana_cpu_request: 100m openshift_logging_kibana_memory_limit: 736Mi openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' 
~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" @@ -28,6 +29,7 @@ openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_image_prefix openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_image_version | default('latest') }}" openshift_logging_kibana_proxy_debug: false openshift_logging_kibana_proxy_cpu_limit: null +openshift_logging_kibana_proxy_cpu_request: 100m openshift_logging_kibana_proxy_memory_limit: 256Mi #The absolute path on the control node to the cert file to use diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index e17e8c1f2..8ef8ede9a 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -230,8 +230,10 @@ es_host: "{{ openshift_logging_kibana_es_host }}" es_port: "{{ openshift_logging_kibana_es_port }}" kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}" + kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request | min_cpu(openshift_logging_kibana_cpu_limit | default(none)) }}" kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}" kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}" + kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request | min_cpu(openshift_logging_kibana_proxy_cpu_limit | default(none)) }}" kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}" kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}" kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}" diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2 index 512d99d06..4ff86729a 100644 --- a/roles/openshift_logging_kibana/templates/kibana.j2 +++ b/roles/openshift_logging_kibana/templates/kibana.j2 @@ -37,16 +37,27 @@ spec: - name: "kibana" image: {{ image }} - imagePullPolicy: Always -{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %} + imagePullPolicy: IfNotPresent +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %} resources: +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %} limits: -{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %} +{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %} cpu: "{{ kibana_cpu_limit }}" -{% endif %} -{% if kibana_memory_limit is not none and kibana_memory_limit != "" %} +{% endif %} +{% if kibana_memory_limit is not none and kibana_memory_limit != "" %} memory: "{{ kibana_memory_limit }}" -{% endif %} +{% endif %} +{% endif %} +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %} + requests: +{% if kibana_cpu_request is not none and kibana_cpu_request != "" %} + cpu: "{{ kibana_cpu_request }}" +{% endif %} +{% if kibana_memory_limit is not none and kibana_memory_limit != 
"" %} + memory: "{{ kibana_memory_limit }}" +{% endif %} +{% endif %} {% endif %} env: - name: "ES_HOST" @@ -73,16 +84,27 @@ spec: - name: "kibana-proxy" image: {{ proxy_image }} - imagePullPolicy: Always -{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %} + imagePullPolicy: IfNotPresent +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %} resources: +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %} limits: -{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %} +{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %} cpu: "{{ kibana_proxy_cpu_limit }}" -{% endif %} -{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %} +{% endif %} +{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %} memory: "{{ kibana_proxy_memory_limit }}" -{% endif %} +{% endif %} +{% endif %} +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %} + requests: +{% if kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "" %} + cpu: "{{ kibana_proxy_cpu_request }}" +{% endif %} +{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %} + memory: "{{ kibana_proxy_memory_limit }}" +{% endif %} +{% endif %} {% endif %} ports: - diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml index 87b281c4b..241877a02 100644 --- a/roles/openshift_logging_kibana/vars/main.yml +++ b/roles/openshift_logging_kibana/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_kibana_version: "3_5" -__allowed_kibana_versions: ["3_5", "3_6"] +__latest_kibana_version: "3_6" +__allowed_kibana_versions: ["3_5", "3_6", "3_7"] diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index 68412aec8..9de686576 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -9,10 +9,11 @@ openshift_logging_mux_namespace: logging ### Common settings openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}" -openshift_logging_mux_cpu_limit: 500m -openshift_logging_mux_memory_limit: 2Gi -openshift_logging_mux_buffer_queue_limit: 1024 -openshift_logging_mux_buffer_size_limit: 1m +openshift_logging_mux_cpu_limit: null +openshift_logging_mux_cpu_request: 100m +openshift_logging_mux_memory_limit: 512Mi +openshift_logging_mux_buffer_queue_limit: 32 +openshift_logging_mux_buffer_size_limit: 8m openshift_logging_mux_replicas: 1 @@ -57,11 +58,11 @@ openshift_logging_mux_file_buffer_storage_type: "emptydir" openshift_logging_mux_file_buffer_pvc_name: "logging-mux-pvc" # 
required if the PVC does not already exist -openshift_logging_mux_file_buffer_pvc_size: 4Gi +openshift_logging_mux_file_buffer_pvc_size: 1Gi openshift_logging_mux_file_buffer_pvc_dynamic: false openshift_logging_mux_file_buffer_pvc_pv_selector: {} openshift_logging_mux_file_buffer_pvc_access_modes: ['ReadWriteOnce'] openshift_logging_mux_file_buffer_storage_group: '65534' openshift_logging_mux_file_buffer_pvc_prefix: "logging-mux" -openshift_logging_mux_file_buffer_limit: 2Gi +openshift_logging_mux_file_buffer_limit: 256Mi diff --git a/roles/openshift_logging_mux/files/fluent.conf b/roles/openshift_logging_mux/files/fluent.conf index aeaa705ee..bf61c9811 100644 --- a/roles/openshift_logging_mux/files/fluent.conf +++ b/roles/openshift_logging_mux/files/fluent.conf @@ -25,7 +25,9 @@ @include configs.d/openshift/filter-viaq-data-model.conf @include configs.d/openshift/filter-post-*.conf ## +</label> +<label @OUTPUT> ## matches @include configs.d/openshift/output-pre-*.conf @include configs.d/openshift/output-operations.conf diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 2ec863afa..5b257139e 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -171,6 +171,7 @@ ops_host: "{{ openshift_logging_mux_ops_host }}" ops_port: "{{ openshift_logging_mux_ops_port }}" mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}" + mux_cpu_request: "{{ openshift_logging_mux_cpu_request | min_cpu(openshift_logging_mux_cpu_limit | default(none)) }}" mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}" mux_replicas: "{{ openshift_logging_mux_replicas | default(1) }}" mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}" diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2 index 70afe5cee..cfb13d59b 100644 --- a/roles/openshift_logging_mux/templates/mux.j2 +++ b/roles/openshift_logging_mux/templates/mux.j2 @@ -36,16 +36,27 @@ spec: containers: - name: "mux" image: {{image}} - imagePullPolicy: Always -{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %} + imagePullPolicy: IfNotPresent +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %} resources: +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %} limits: -{% if mux_cpu_limit is not none %} +{% if mux_cpu_limit is not none %} cpu: "{{mux_cpu_limit}}" -{% endif %} -{% if mux_memory_limit is not none %} +{% endif %} +{% if mux_memory_limit is not none %} memory: "{{mux_memory_limit}}" -{% endif %} +{% endif %} +{% endif %} +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %} + requests: +{% if mux_cpu_request is not none %} + cpu: "{{mux_cpu_request}}" +{% endif %} +{% if mux_memory_limit is not none %} + memory: "{{mux_memory_limit}}" +{% endif %} +{% endif %} {% endif %} ports: - containerPort: "{{ openshift_logging_mux_port }}" @@ -117,6 +128,52 @@ spec: resource: limits.memory - name: "FILE_BUFFER_LIMIT" value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}" + +{% if openshift_logging_mux_remote_syslog is defined and openshift_logging_mux_remote_syslog %} + - name: 
USE_REMOTE_SYSLOG + value: "true" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_host is defined %} + - name: REMOTE_SYSLOG_HOST + value: "{{ openshift_logging_mux_remote_syslog_host }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_port is defined %} + - name: REMOTE_SYSLOG_PORT + value: "{{ openshift_logging_mux_remote_syslog_port }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_severity is defined %} + - name: REMOTE_SYSLOG_SEVERITY + value: "{{ openshift_logging_mux_remote_syslog_severity }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_facility is defined %} + - name: REMOTE_SYSLOG_FACILITY + value: "{{ openshift_logging_mux_remote_syslog_facility }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_remove_tag_prefix is defined %} + - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX + value: "{{ openshift_logging_mux_remote_syslog_remove_tag_prefix }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_tag_key is defined %} + - name: REMOTE_SYSLOG_TAG_KEY + value: "{{ openshift_logging_mux_remote_syslog_tag_key }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_use_record is defined %} + - name: REMOTE_SYSLOG_USE_RECORD + value: "{{ openshift_logging_mux_remote_syslog_use_record }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_payload_key is defined %} + - name: REMOTE_SYSLOG_PAYLOAD_KEY + value: "{{ openshift_logging_mux_remote_syslog_payload_key }}" +{% endif %} + volumes: - name: config configMap: diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml index 4234b74e2..e7b57f4b5 100644 --- a/roles/openshift_logging_mux/vars/main.yml +++ b/roles/openshift_logging_mux/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_mux_version: "3_5" -__allowed_mux_versions: ["3_5", "3_6"] +__latest_mux_version: "3_6" +__allowed_mux_versions: ["3_5", "3_6", "3_7"] diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml index 7789d2232..088d0b171 100644 --- a/roles/openshift_manageiq/tasks/main.yaml +++ b/roles/openshift_manageiq/tasks/main.yaml @@ -1,8 +1,4 @@ --- -- fail: - msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1." - when: not openshift.common.version_gte_3_1_or_1_1 | bool - - name: Add Management Infrastructure project oc_project: name: management-infra @@ -61,4 +57,3 @@ resource_kind: "{{ item.resource_kind }}" user: "{{ item.user }}" with_items: "{{manage_iq_openshift_3_2_tasks}}" - when: openshift.common.version_gte_3_2_or_1_2 | bool diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml index 7ccc2fc3b..f142f89f0 100644 --- a/roles/openshift_manageiq/vars/main.yml +++ b/roles/openshift_manageiq/vars/main.yml @@ -3,6 +3,9 @@ manage_iq_tasks: - resource_kind: role resource_name: admin user: management-admin +- resource_kind: role + resource_name: admin + user: system:serviceaccount:management-infra:management-admin - resource_kind: cluster-role resource_name: management-infra-admin user: system:serviceaccount:management-infra:management-admin diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md new file mode 100644 index 000000000..05ca27913 --- /dev/null +++ b/roles/openshift_management/README.md @@ -0,0 +1,582 @@ +# CloudForms Availability + +As noted in [Limitations - Product Choice](#product-choice), +[CloudForms](https://www.redhat.com/en/technologies/management/cloudforms) +(CFME) 4.6 is not yet released. 
Until such time, this role is limited +to installing [ManageIQ](http://manageiq.org) (MIQ), the open source +project that CFME is based on. + +After CFME 4.6 is available to customers this role will enable +(optional) logic which will install CFME or MIQ based on your +deployment type (`openshift_deployment_type`): + +* `openshift-enterprise` → CloudForms +* `origin` → ManageIQ + + +# Table of Contents + + * [Introduction](#introduction) + * [Important Notes](#important-notes) + * [Requirements](#requirements) + * [Role Variables](#role-variables) + * [Getting Started](#getting-started) + * [All Defaults](#all-defaults) + * [External NFS Storage](#external-nfs-storage) + * [Override PV sizes](#override-pv-sizes) + * [Override Memory Requirements](#override-memory-requirements) + * [External PostgreSQL Database](#external-postgresql-database) + * [Limitations](#limitations) + * [Product Choice](#product-choice) + * [Configuration](#configuration) + * [Database](#database) + * [Podified](#podified) + * [External](#external) + * [Storage Classes](#storage-classes) + * [NFS (Default)](#nfs-default) + * [NFS External](#nfs-external) + * [Cloud Provider](#cloud-provider) + * [Preconfigured (Expert Configuration Only)](#preconfigured-expert-configuration-only) + * [Customization](#customization) + * [Container Provider](#container-provider) + * [Manually](#manually) + * [Automatically](#automatically) + * [Multiple Providers](#multiple-providers) + * [Uninstall](#uninstall) + * [Additional Information](#additional-information) + +# Introduction + +This role allows a user to install CFME 4.6 or MIQ on an OCP +3.7 cluster. It provides customization options for overriding +default deployment parameters and supports two installation flavors: + +* **Fully Podified** - All application services are run as + pods in the container platform. +* **External Database** - The application utilizes an + externally hosted database server. All other services are run in the + container platform. + +This role includes the following storage class options: + +* NFS - **Default** - local, on cluster +* NFS External - NFS somewhere else, like a storage appliance +* Cloud Provider - Use automatic storage provisioning from your cloud + provider (*gce* or *aws*) +* Preconfigured - **expert only**, assumes you created everything ahead + of time + +You may skip ahead to the [Getting Started](#getting-started) section +now for examples of how to set up your Ansible inventory for various +deployment configurations. However, you are **strongly urged** to +first read through the [Configuration](#configuration) and +[Customization](#customization) sections as well as the following +[Important Notes](#important-notes). + +## Important Notes + +Not all parameters are present in **both** template versions (podified +db and external db). For example, while the podified database template +has a `POSTGRESQL_MEM_REQ` parameter, no such parameter is present in +the external db template, as there is no need for this information +when no database pods are deployed. + +*Be extra careful* if you are overriding template +parameters. Including parameters not defined in a template **will +cause errors**. If you do receive an error during the `Ensure the CFME +App is created` task, we recommend running the +[uninstall scripts](#uninstall) before running the installer +again. + + +# Requirements + +The **default** requirements are listed in the table below. 
These can +be overridden through customization parameters (See +[Customization](#customization), below). + +**Note** that application performance will suffer, or the deployment +may even fail, if these requirements are not satisfied. + + +| Item | Requirement | Description | Customization Parameter | +|---------------------|---------------|----------------------------------------------|-------------------------------| +| Application Memory | `≥ 4.0 Gi` | Minimum required memory for the application | `APPLICATION_MEM_REQ` | +| Application Storage | `≥ 5.0 Gi` | Minimum PV size required for the application | `APPLICATION_VOLUME_CAPACITY` | +| PostgreSQL Memory | `≥ 6.0 Gi` | Minimum required memory for the database | `POSTGRESQL_MEM_REQ` | +| PostgreSQL Storage | `≥ 15.0 Gi` | Minimum PV size required for the database | `DATABASE_VOLUME_CAPACITY` | +| Cluster Hosts | `≥ 3` | Number of hosts in your cluster | | + +The implications of this table are summarized below: + +* You need several cluster nodes +* Your cluster nodes must have lots of memory available +* You will need several GiB of storage available, either locally or + on your cloud provider +* PV sizes can be changed by providing override values to template + parameters (see also: [Customization](#customization)) + +# Role Variables + +The following is a table of the publicly exposed variables that may be +used in your Ansible inventory to control the behavior of this +installer. + + +| Variable | Required | Default | Description | +|------------------------------------------------------|:--------:|:------------------------------:|-------------------------------------| +| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. | +| `openshift_management_project_description` | **No** | *CloudForms Management Engine* | Namespace/project description. | +| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application | +| `openshift_management_username` | **No** | `admin` | Default management username. Changing this value **does not change the username**. Only change this value if you have changed the name already and are running integration scripts (such as the [add container provider](#container-provider) script) | +| `openshift_management_password` | **No** | `smartvm` | Default management password. Changing this value **does not change the password**. Only change this value if you have changed the password already and are running integration scripts (such as the [add-container-provider](#container-provider) script) | +| **PRODUCT CHOICE** | | | | +| `openshift_management_app_template` | **No** | `miq-template` | The project flavor to install. Choices: <ul><li>`miq-template`: ManageIQ using a podified database</li> <li> `miq-template-ext-db`: ManageIQ using an external database</li> <li>`cfme-template`: CloudForms using a podified database<sup>[1]</sup></li> <li> `cfme-template-ext-db`: CloudForms using an external database.<sup>[1]</sup></li></ul> | +| **STORAGE CLASSES** | | | | +| `openshift_management_storage_class` | **No** | `nfs` | Storage type to use, choices: <ul><li>`nfs` - Best used for proof-of-concept installs. Will set up NFS on a cluster host (defaults to your first master in the inventory file) to back the required PVCs. The application requires a PVC and the database (which may be hosted externally) may require a second. 
PVC minimum required sizes are 5GiB for the MIQ application, and 15GiB for the PostgreSQL database (20GiB minimum available space on a volume/partition if used specifically for NFS purposes)</li> <li>`nfs_external` - You are using an external NFS server, such as a netapp appliance. See the [Configuration - Storage Classes](#storage-classes) section below for required information.</li> <li>`preconfigured` - This CFME role will do NOTHING to modify storage settings. This option assumes expert knowledge and that you have done everything required ahead of time.</li> <li>`cloudprovider` - You are using an OCP cloudprovider integration for your storage class. For this to work you must have already configured the required inventory parameters for your cloud provider. Ensure `openshift_cloudprovider_kind` is defined (aws or gce) and that the applicable cloudprovider parameters are provided. | +| `openshift_management_storage_nfs_external_hostname` | **No** | `false` | If you are using an *external NFS server*, such as a netapp appliance, then you must set the hostname here. Leave the value as `false` if you are not using external NFS. <br /> *Additionally*: **External NFS REQUIRES** that you create the NFS exports that will back the application PV and optionally the database PV. | +| `openshift_management_storage_nfs_base_dir` | **No** | `/exports/` | If you are using **External NFS** then you may set the base path to the exports location here. <br />**Local NFS Note**: You *may* also change this value if you want to change the default path used for local NFS exports. | +| `openshift_management_storage_nfs_local_hostname` | **No** | `false` | If you do not have an `[nfs]` group in your inventory, or simply want to manually define the local NFS host in your cluster, set this parameter to the hostname of the preferred NFS server. The server must be a part of your OCP/Origin cluster. | +| **CUSTOMIZATION OPTIONS** | | | | +| `openshift_management_template_parameters` | **No** | `{}` | A dictionary of any parameters you want to override in the application/pv templates. | + +* <sup>[1]</sup> The `cfme-template`s will be available and + automatically detected once CFME 4.6 is released + + +# Getting Started + +Below are some inventory snippets that can help you get started right +away. + +If you want to install CFME/MIQ at the same time you install your +OCP/Origin cluster, ensure that `openshift_management_install_management` is set +to `true` in your inventory. Call the standard +`playbooks/byo/config.yml` playbook to begin the cluster and CFME/MIQ +installation. + +If you are installing CFME/MIQ on an *already provisioned cluster* +then you can call the CFME/MIQ playbook directly: + +``` +$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/config.yml +``` + +*Note: Use `miq-template` in the following examples for ManageIQ installs* + +## All Defaults + +This example is the simplest. All of the default values and choices +are used. This will result in a fully podified CFME installation. All +application components, as well as the PostgreSQL database, will be +created as pods in the container platform. + +```ini +[OSEv3:vars] +openshift_management_app_template=cfme-template +``` + +## External NFS Storage + +This is the same as the previous example, except that instead of using local +NFS services in the cluster it will use an external NFS server (such +as a storage appliance). 
Note the two new parameters: + +* `openshift_management_storage_class` - set to `nfs_external` +* `openshift_management_storage_nfs_external_hostname` - set to the hostname + of the NFS server + +```ini +[OSEv3:vars] +openshift_management_app_template=cfme-template +openshift_management_storage_class=nfs_external +openshift_management_storage_nfs_external_hostname=nfs.example.com +``` + +If the external NFS host exports directories under a different parent +directory, such as `/exports/hosted/prod`, then we would add an +additional parameter, `openshift_management_storage_nfs_base_dir`: + +```ini +# ... +openshift_management_storage_nfs_base_dir=/exports/hosted/prod +``` + +## Override PV sizes + +This example will override the PV sizes. Note that we set the PV sizes +in the template parameters, `openshift_management_template_parameters`. This +ensures that the application/db will be able to make claims on created +PVs without clobbering each other. + +```ini +[OSEv3:vars] +openshift_management_app_template=cfme-template +openshift_management_template_parameters={'APPLICATION_VOLUME_CAPACITY': '10Gi', 'DATABASE_VOLUME_CAPACITY': '25Gi'} +``` + +## Override Memory Requirements + +In a test or proof-of-concept installation you may need to reduce the +application/database memory requirements to fit within your +capacity. Note that reducing memory limits can result in reduced +performance or a complete failure to initialize the application. + +```ini +[OSEv3:vars] +openshift_management_app_template=cfme-template +openshift_management_template_parameters={'APPLICATION_MEM_REQ': '3000Mi', 'POSTGRESQL_MEM_REQ': '1Gi', 'ANSIBLE_MEM_REQ': '512Mi'} +``` + +Here we have instructed the installer to process the application +template with the parameter `APPLICATION_MEM_REQ` set to `3000Mi`, +`POSTGRESQL_MEM_REQ` set to `1Gi`, and `ANSIBLE_MEM_REQ` set to +`512Mi`. + +These parameters can be combined with the PV size override parameters +displayed in the previous example. + +## External PostgreSQL Database + +To use an external database you must change the +`openshift_management_app_template` parameter value to `miq-template-ext-db` +or `cfme-template-ext-db`. + +Additionally, database connection information **must** be supplied in +the `openshift_management_template_parameters` customization parameter. See +[Customization - Database - External](#external) for more +information. + +```ini +[OSEv3:vars] +openshift_management_app_template=cfme-template-ext-db +openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'} +``` + +**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be +able to deploy the app successfully. + +# Limitations + +This release is the first OpenShift CFME release in the OCP 3.7 +series. It is not complete yet. + +## Product Choice + +Due to staggered release dates, **CFME support is not +integrated**. Presently this role will only deploy a ManageIQ +installation. This role will be updated once CFME 4.6 is released and +this limitation note will be removed. + +# Configuration + +Before you can deploy CFME you must decide *how* you want to deploy +it. There are two major decisions to make: + +1. Do you want an external, or a podified database? +1. Which storage class will back your PVs? (see the sketch below) 
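+ +As a minimal sketch, the two decisions can be written in the same form +as this role's defaults file. The values shown here are hypothetical +choices, not the defaults: + +```yaml +# decision 1: database flavor; miq-template (podified) or miq-template-ext-db (external) +openshift_management_app_template: miq-template-ext-db +# decision 2: storage class backing the PVs; nfs is the default +openshift_management_storage_class: nfs_external +``` 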
+ +## Database + +### Podified + +Any `POSTGRES_*` or `DATABASE_*` template parameters in +[miq-template.yaml](files/templates/manageiq/miq-template.yaml) or +[cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml) +may be customized through the `openshift_management_template_parameters` +hash. + +### External + +Any `POSTGRES_*` or `DATABASE_*` template parameters in +[miq-template-ext-db.yaml](files/templates/manageiq/miq-template-ext-db.yaml) +or +[cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml) +may be customized through the `openshift_management_template_parameters` +hash. + +External PostgreSQL databases require you to provide database +connection parameters. You must set the required connection keys in +the `openshift_management_template_parameters` parameter in your +inventory. The following keys are required: + +* `DATABASE_USER` +* `DATABASE_PASSWORD` +* `DATABASE_IP` +* `DATABASE_PORT` - *note: Most PostgreSQL servers run on port `5432`* +* `DATABASE_NAME` + +**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be +able to deploy the app successfully. + +Your inventory would contain a line similar to this: + +```ini +[OSEv3:vars] +openshift_management_app_template=cfme-template-ext-db +openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'} +``` + +**Note** the new value for the `openshift_management_app_template` +parameter, `cfme-template-ext-db` (ManageIQ installations would use +`miq-template-ext-db` instead). + +At run time you may run into errors similar to this: + +``` +TASK [openshift_management : Ensure the CFME App is created] *********************************** +task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74 +Tuesday 03 October 2017 15:30:44 -0400 (0:00:00.056) 0:00:12.278 ******* +{"cmd": "/usr/bin/oc create -f /tmp/postgresql-ZPEWQS -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "Error from server (BadRequest): error when creating \"/tmp/postgresql-ZPEWQS\": Endpoints in version \"v1\" cannot be handled as a Endpoints: [pos 218]: json: decNum: got first char 'f'\n", "stdout": ""} +``` + +Or like this: + +``` +TASK [openshift_management : Ensure the CFME App is created] *********************************** +task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74 +Tuesday 03 October 2017 16:05:36 -0400 (0:00:00.052) 0:00:18.948 ******* +fatal: [m01.example.com]: FAILED! => {"changed": true, "failed": true, "msg": +{"cmd": "/usr/bin/oc create -f /tmp/postgresql-igS5sx -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "The Endpoints \"postgresql\" is invalid: subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP address, (e.g. 10.9.8.7)\n", "stdout": ""}, +``` + +While intimidating at first, there are useful bits of information in +here. Examine the error output closely and you can tell exactly what is +wrong. + +In the first example we see `Endpoints in version \"v1\" cannot be +handled as a Endpoints: [pos 218]: json: decNum: got first char +...`. This is because in my example I used the value `foo` for the +parameter `DATABASE_PORT`. + +In the second example we see `The Endpoints \"postgresql\" is invalid: +subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP +address ...`. 
This is because in my example I used the value `doo` in +the `DATABASE_IP` field. + +Luckily for us, when the templates are processed behind the scenes +they also run type-checking validation. So, don't worry, just look +closely at the errors and ensure you are providing the correct values +for each parameter. + +## Storage Classes + +OpenShift CFME supports several storage class options. + +### NFS (Default) + +The NFS storage class is best suited for proof-of-concept and +test/demo deployments. It is also the **default** storage class for +deployments. No additional configuration is required for this +choice. + +Customization is provided through the following role variables: + +* `openshift_management_storage_nfs_base_dir` +* `openshift_management_storage_nfs_local_hostname` + +### NFS External + +External NFS leans on pre-configured NFS servers to provide exports +for the required PVs. For external NFS you must have: + +* For CFME: `cfme-app` and optionally `cfme-db` (for a podified database) exports +* For ManageIQ: `miq-app` and optionally `miq-db` (for a podified database) exports + +Configuration is provided through the following role variables: + +* `openshift_management_storage_nfs_external_hostname` +* `openshift_management_storage_nfs_base_dir` + +The `openshift_management_storage_nfs_external_hostname` parameter must be +set to the hostname or IP of your external NFS server. + +If `/exports` is not the parent directory to your exports then you +must set the base directory via the +`openshift_management_storage_nfs_base_dir` parameter. + +For example, if your server export is `/exports/hosted/prod/cfme-app` +then you must set +`openshift_management_storage_nfs_base_dir=/exports/hosted/prod`. + +### Cloud Provider + +CFME can also use cloud provider storage to back required PVs. For +this functionality to work you must have also configured the +`openshift_cloudprovider_kind` variable and all associated parameters +specific to your chosen cloud provider. + +Using this storage class, when the application is created the required +PVs will automatically be provisioned using the configured cloud +provider storage integration. + +There are no additional variables to configure the behavior of this +storage class. + +### Preconfigured (Expert Configuration Only) + +The *preconfigured* storage class implies that you know exactly what +you're doing and that all storage requirements have been taken care of +ahead of time. Typically this means that you've already created the +correctly sized PVs. + +There are no additional variables to configure the behavior of this +storage class. + +# Customization + +Application and database parameters may be customized by means of the +`openshift_management_template_parameters` inventory parameter. + +**For example**, if you wanted to reduce the memory requirement of the +PostgreSQL pod then you could configure the parameter like this: + +`openshift_management_template_parameters={'POSTGRESQL_MEM_REQ': '1Gi'}` + +When the CFME template is processed `1Gi` will be used for the value +of the `POSTGRESQL_MEM_REQ` template parameter. + +Any parameter in the `parameters` section of the +[miq-template.yaml](files/templates/manageiq/miq-template.yaml) or +[miq-template-ext-db.yaml](files/templates/manageiq/miq-template-ext-db.yaml) +may be overridden through the `openshift_management_template_parameters` +hash. 
This applies to **CloudForms** installations as well: +[cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml), +[cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml). + +# Container Provider + +There are two methods for enabling container provider integration. You +can manually add OCP/Origin as a container provider, or you can use +the playbooks included with this role. + +## Manually + +See the online documentation for steps to manually add your cluster as +a container provider: + +* [Container Providers](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/#containers-providers) + +## Automatically + +Automated container provider integration can be accomplished using the +playbooks included with this role. + +This playbook will: + +1. Gather the necessary authentication secrets +1. Find the public routes to the Management app and the cluster API +1. Make a REST call to add this cluster as a container provider + + +``` +$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml +``` + +## Multiple Providers + +As well as providing playbooks to integrate your *current* container +platform into the management service, this role includes a **tech +preview** script which allows you to add multiple container platforms +as container providers in any arbitrary MIQ/CFME server. + +Using the multiple-provider script requires manual configuration and +setting an `EXTRA_VARS` parameter on the command-line. + + +1. Copy the + [container_providers.yml](files/examples/container_providers.yml) + example somewhere, such as `/tmp/cp.yml` +1. If you changed your CFME/MIQ name or password, update the + `hostname`, `user`, and `password` parameters in the + `management_server` key in the `container_providers.yml` file copy +1. Fill in an entry under the `container_providers` key for *each* OCP + or Origin cluster you want to add as container providers + +**Parameters Which MUST Be Configured:** + +* `auth_key` - This is the token of a service account which has admin capabilities on the cluster. +* `hostname` - This is the hostname that points to the cluster API. Each container provider must have a unique hostname. +* `name` - This is the name of the cluster as displayed in the management server container providers overview. This must be unique. + +*Note*: You can obtain the `auth_key` bearer token from your clusters + with this command: `oc serviceaccounts get-token -n management-infra + management-admin` + +**Parameters Which MAY Be Configured:** + +* `port` - Update this key if your OCP/Origin cluster runs the API on a port other than `8443` +* `endpoint` - You may enable SSL verification (`verify_ssl`) or change the validation setting to `ssl-with-validation`. Support for custom trusted CA certificates is not available at this time. 
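+ +For illustration, a hypothetical `endpoint` entry with validation +enabled might look like the sketch below (the `ssl-with-validation` +setting comes from the note above; the `verify_ssl: 1` value is an +assumption for the enabled form): + +```yaml +endpoint: {role: default, security_protocol: ssl-with-validation, verify_ssl: 1} +``` 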
+ + +Let's see an example describing the following scenario: + +* You copied `files/examples/container_providers.yml` to `/tmp/cp.yml` +* You're adding two OCP clusters +* Your management server runs on `mgmt.example.com` + +You would customize `/tmp/cp.yml` as such: + +```yaml +--- +container_providers: + - connection_configurations: + - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken} + endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + hostname: "ocp-prod.example.com" + name: OCP Production + port: 8443 + type: "ManageIQ::Providers::Openshift::ContainerManager" + - connection_configurations: + - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken} + endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + hostname: "ocp-test.example.com" + name: OCP Testing + port: 8443 + type: "ManageIQ::Providers::Openshift::ContainerManager" +management_server: + hostname: "mgmt.example.com" + user: admin + password: b3tt3r_p4SSw0rd +``` + +Then you will run the many-container-providers integration script. You +**must** provide the path to the container providers configuration +file as an `EXTRA_VARS` parameter to `ansible-playbook`. Use the `-e` +(or `--extra-vars`) parameter to set `container_providers_config` to +the config file path. + +``` +$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \ + playbooks/byo/openshift-management/add_many_container_providers.yml +``` + +Afterwards you will find two new container providers in your +management service. Navigate to `Compute` → `Containers` → `Providers` +to see an overview. + +# Uninstall + +This role includes a playbook to uninstall and erase the CFME/MIQ +installation: + +* `playbooks/byo/openshift-management/uninstall.yml` + +NFS export definitions and data stored on NFS exports are not +automatically removed. You are urged to manually erase any data from +old application or database deployments before attempting to +initialize a new deployment. + +# Additional Information + +The upstream project, +[@manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods), +contains a wealth of additional information useful for managing and +operating your CFME installation. Topics include: + +* [Verifying Successful Installation](https://github.com/ManageIQ/manageiq-pods#verifying-the-setup-was-successful) +* [Disabling Image Change Triggers](https://github.com/ManageIQ/manageiq-pods#disable-image-change-triggers) +* [Scaling CFME](https://github.com/ManageIQ/manageiq-pods#scale-miq) +* [Backing up and Restoring the DB](https://github.com/ManageIQ/manageiq-pods#backup-and-restore-of-the-miq-database) +* [Troubleshooting](https://github.com/ManageIQ/manageiq-pods#troubleshooting) diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml new file mode 100644 index 000000000..8ba65b386 --- /dev/null +++ b/roles/openshift_management/defaults/main.yml @@ -0,0 +1,104 @@ +--- +# Namespace for the CFME project +openshift_management_project: openshift-management +# Namespace/project description +openshift_management_project_description: CloudForms Management Engine + +###################################################################### +# BASE TEMPLATE AND DATABASE OPTIONS +###################################################################### +# Which flavor of CFME would you like? 
You may install CFME using a +# podified PostgreSQL server, or you may choose to use an existing +# PostgreSQL server. +# +# Choose 'miq-template' for a podified database install +# Choose 'miq-template-ext-db' for an external database install +openshift_management_app_template: miq-template +# If you are using the miq-template-ext-db template then you must add +# the required database parameters to the +# openshift_management_template_parameters variable. + +###################################################################### +# STORAGE OPTIONS +###################################################################### +# DEFAULT - 'nfs' +# Allowed options: nfs, nfs_external, preconfigured, cloudprovider. +openshift_management_storage_class: nfs +# * nfs - Best used for proof-of-concept installs. Will set up NFS on a +# cluster host (defaults to your first master in the inventory file) +# to back the required PVCs. The application requires a PVC and the +# database (which may be hosted externally) may require a +# second. PVC minimum required sizes are: 5GiB for the MIQ +# application, and 15GiB for the PostgreSQL database (20GiB minimum +# available space on a volume/partition if used specifically for +# NFS purposes) +# +# * nfs_external - You are using an external NFS server, such as a +# netapp appliance. See the STORAGE - NFS OPTIONS section below for +# required information. +# +# * preconfigured - This CFME role will do NOTHING to modify storage +# settings. This option assumes expert knowledge and that you have +# done everything required ahead of time. +# +# * cloudprovider - You are using an OCP cloudprovider integration for +# your storage class. For this to work you must have already +# configured the required inventory parameters for your cloud +# provider +# +# Ensure 'openshift_cloudprovider_kind' is defined (aws or gce) and +# that the applicable cloudprovider parameters are provided. + +#--------------------------------------------------------------------- +# STORAGE - NFS OPTIONS +#--------------------------------------------------------------------- +# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a +# netapp appliance, then you must set the hostname here. Leave the +# value as 'false' if you are not using external NFS. +openshift_management_storage_nfs_external_hostname: false +# [OPTIONAL] - If you are using external NFS then you must set the base +# path to the exports location here. +# +# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports +# that will back the application PV and optionally the database +# pv. Export path definitions, relative to +# {{ openshift_management_storage_nfs_base_dir }} +# +# LOCAL NFS NOTE: +# +# You may also change this value if you want to change the default +# path used for local NFS exports. +openshift_management_storage_nfs_base_dir: /exports +# +# LOCAL NFS NOTE: +# +# You may override the automatically selected LOCAL NFS server by +# setting this variable. Useful for testing specific task files. +openshift_management_storage_nfs_local_hostname: false + +###################################################################### +# DEFAULT ACCOUNT INFORMATION +###################################################################### +# These are the default values for the username and password of the +# management app. Changing these values in your inventory will not +# change your username or password. 
You should only need to change +# these values in your inventory if you already changed the actual +# name and password AND are trying to use integration scripts. +# +# For example, adding this cluster as a container provider, +# playbooks/byo/openshift-management/add_container_provider.yml +openshift_management_username: admin +openshift_management_password: smartvm + +###################################################################### +# SCAFFOLDING - These are parameters we pre-seed that a user may or +# may not set later +###################################################################### +# A hash of parameters you want to override or set in the +# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in +# your inventory file as a simple hash. Acceptable values are defined +# under the .parameters list in files/miq-template{-ext-db}.yaml +# Example: +# +# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'} +openshift_management_template_parameters: {} diff --git a/roles/openshift_management/files/examples/container_providers.yml b/roles/openshift_management/files/examples/container_providers.yml new file mode 100644 index 000000000..661f62e4d --- /dev/null +++ b/roles/openshift_management/files/examples/container_providers.yml @@ -0,0 +1,22 @@ +--- +container_providers: + - connection_configurations: + - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken} + endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + hostname: "OCP/Origin cluster hostname (providing API access)" + name: openshift-management + port: 8443 + type: "ManageIQ::Providers::Openshift::ContainerManager" +# Copy and update for as many OCP or Origin providers as you want to +# add to your management service + # - connection_configurations: + # - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken} + # endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + # hostname: "OCP/Origin cluster hostname (providing API access)" + # name: openshift-management + # port: 8443 + # type: "ManageIQ::Providers::Openshift::ContainerManager" +management_server: + hostname: "Management server hostname (providing API access)" + user: admin + password: smartvm diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml new file mode 100644 index 000000000..c3bc1d20c --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cloudforms-backup +spec: + template: + metadata: + name: cloudforms-backup + spec: + containers: + - name: postgresql + image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest + command: + - "/opt/rh/cfme-container-scripts/backup_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: cloudforms-secrets + key: database-url + volumeMounts: + - name: cfme-backup-vol + mountPath: "/backups" + volumes: + - name: cfme-backup-vol + persistentVolumeClaim: + claimName: cloudforms-backup + restartPolicy: Never diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml new file mode 100644 index 000000000..92598ce82 --- /dev/null +++ 
b/roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: cloudforms-backup +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 15Gi diff --git a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2 b/roles/openshift_management/files/templates/cloudforms/cfme-pv-backup-example.yaml index b8c3bb277..4fe349897 100644 --- a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2 +++ b/roles/openshift_management/files/templates/cloudforms/cfme-pv-backup-example.yaml @@ -1,13 +1,13 @@ apiVersion: v1 kind: PersistentVolume metadata: - name: miq-pv01 + name: cfme-pv03 spec: capacity: storage: 15Gi accessModes: - - ReadWriteOnce - nfs: - path: /exports/miq-pv01 - server: {{ openshift_cfme_nfs_server }} + - ReadWriteOnce + nfs: + path: "/exports/cfme-pv03" + server: "<your-nfs-host-here>" persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml new file mode 100644 index 000000000..0cdd821b5 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Template +labels: + template: cloudforms-db-pv +metadata: + name: cloudforms-db-pv + annotations: + description: PV Template for CFME PostgreSQL DB + tags: PVS, CFME +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: cfme-db + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: + - ReadWriteOnce + nfs: + path: "${BASE_PATH}/cfme-db" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for DB + required: true + description: The size of the CFME DB PV given in Gi + value: 15Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the NFS server diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml new file mode 100644 index 000000000..527090ae8 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Template +labels: + template: cloudforms-app-pv +metadata: + name: cloudforms-app-pv + annotations: + description: PV Template for CFME Server + tags: PVS, CFME +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: cfme-app + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: + - ReadWriteOnce + nfs: + path: "${BASE_PATH}/cfme-app" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for App + required: true + description: The size of the CFME APP PV given in Gi + value: 5Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the NFS server diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml new file 
mode 100644 index 000000000..8b23f8a33 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml @@ -0,0 +1,35 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cloudforms-restore +spec: + template: + metadata: + name: cloudforms-restore + spec: + containers: + - name: postgresql + image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest + command: + - "/opt/rh/cfme-container-scripts/restore_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: cloudforms-secrets + key: database-url + - name: BACKUP_VERSION + value: latest + volumeMounts: + - name: cfme-backup-vol + mountPath: "/backups" + - name: cfme-prod-vol + mountPath: "/restore" + volumes: + - name: cfme-backup-vol + persistentVolumeClaim: + claimName: cloudforms-backup + - name: cfme-prod-vol + persistentVolumeClaim: + claimName: cloudforms-postgresql + restartPolicy: Never diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml new file mode 100644 index 000000000..d2ece9298 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml @@ -0,0 +1,38 @@ +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +allowedCapabilities: +apiVersion: v1 +defaultAddCapabilities: +- SYS_ADMIN +fsGroup: + type: RunAsAny +groups: +- system:cluster-admins +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: cfme-sysadmin provides all features of the anyuid SCC but allows users to have SYS_ADMIN capabilities. This is the required SCC for pods that need to run with systemd and the message bus.
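An SCC definition like this one has no effect until it is granted to the service accounts that need it. A minimal sketch of granting it with the standard `oc` CLI; the project name `openshift-management` is an example only, while the SCC name and the `cfme-privileged` service account come from the templates in this directory:

  # Grant the cfme-sysadmin SCC to a service account (project name illustrative)
  oc adm policy add-scc-to-user cfme-sysadmin \
    system:serviceaccount:openshift-management:cfme-privileged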
+ creationTimestamp: + name: cfme-sysadmin +priority: 10 +readOnlyRootFilesystem: false +requiredDropCapabilities: +- MKNOD +- SYS_CHROOT +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- secret diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml new file mode 100644 index 000000000..4a04f3372 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml @@ -0,0 +1,763 @@ +apiVersion: v1 +kind: Template +labels: + template: cloudforms-ext-db +metadata: + name: cloudforms-ext-db + annotations: + description: CloudForms appliance with persistent storage using an external DB host + tags: instant-app,cloudforms,cfme + iconClass: icon-rails +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances CloudForms pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${NAME}" + spec: + clusterIP: None + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: "${NAME}" +- apiVersion: v1 + kind: Route + metadata: + name: "${HTTPD_SERVICE_NAME}" + spec: + host: "${APPLICATION_DOMAIN}" + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + name: "${HTTPD_SERVICE_NAME}" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}" + replicas: "${APPLICATION_REPLICA_COUNT}" + template: + metadata: + labels: + name: "${NAME}" + name: "${NAME}" + spec: + containers: + - name: cloudforms + image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 80 + scheme: HTTP + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: 
V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for CloudForms backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: cloudforms + image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Exposes the memcached server + spec: + ports: + - name: memcached + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Defines how to deploy memcached + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + 
livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Remote database service + spec: + ports: + - name: postgresql + port: 5432 + targetPort: "${{DATABASE_PORT}}" + selector: {} +- apiVersion: v1 + kind: Endpoints + metadata: + name: "${DATABASE_SERVICE_NAME}" + subsets: + - addresses: + - ip: "${DATABASE_IP}" + ports: + - port: "${{DATABASE_PORT}}" + name: postgresql +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: cfme-privileged + serviceAccountName: cfme-privileged +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. 
+ Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + ProxyPreserveHost on + ProxyPass /ws/ ws://${NAME}/ws/ + ProxyPassReverse /ws/ ws://${NAME}/ws/ + ProxyPass / http://${NAME}/ + ProxyPassReverse / http://${NAME}/ + </VirtualHost> +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + lifecycle: + postStart: + exec: + command: + - "/usr/bin/save-container-environment" + serviceAccount: cfme-httpd + serviceAccountName: cfme-httpd +parameters: +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. + value: cloudforms +- name: V2_KEY + displayName: CloudForms Encryption Key + required: true + description: Encryption Key for CloudForms Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. + value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_IP + displayName: PostgreSQL Server IP + required: true + description: PostgreSQL external server IP used to configure service. + value: '' +- name: DATABASE_PORT + displayName: PostgreSQL Server Port + required: true + description: PostgreSQL external server port used to configure service. 
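The external-database wiring above pairs a selector-less Service with a hand-built Endpoints object, and it relies on OpenShift's `${{PARAM}}` substitution form, which injects the parameter as an unquoted, non-string value; Endpoints ports must be integers, so a plain `${DATABASE_PORT}` string would be rejected. A standalone sketch of the same pattern with illustrative values filled in:

  apiVersion: v1
  kind: Service
  metadata:
    name: postgresql          # no selector below, so no pods are matched
  spec:
    ports:
    - name: postgresql
      port: 5432
      targetPort: 5432
  ---
  apiVersion: v1
  kind: Endpoints
  metadata:
    name: postgresql          # must match the Service name
  subsets:
  - addresses:
    - ip: 192.0.2.10          # example external DB host
    ports:
    - port: 5432
      name: postgresql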
+ value: '5432' +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. + value: vmdb_production +- name: DATABASE_REGION + required: true + displayName: Application Database Region + description: Database region that will be used for application. + value: '0' +- name: ANSIBLE_DATABASE_NAME + displayName: Ansible PostgreSQL Database Name + required: true + description: The database to be used by the Ansible container. + value: awx +- name: MEMCACHED_SERVICE_NAME + required: true + displayName: Memcached Service Name + description: The name of the OpenShift Service exposed for the Memcached container. + value: memcached +- name: MEMCACHED_MAX_MEMORY + displayName: Memcached Max Memory + description: Memcached maximum memory for memcached object storage in MB. + value: '64' +- name: MEMCACHED_MAX_CONNECTIONS + displayName: Memcached Max Connections + description: Memcached maximum number of connections allowed. + value: '1024' +- name: MEMCACHED_SLAB_PAGE_SIZE + displayName: Memcached Slab Page Size + description: Memcached size of each slab page. + value: 1m +- name: ANSIBLE_SERVICE_NAME + displayName: Ansible Service Name + description: The name of the OpenShift Service exposed for the Ansible container. + value: ansible +- name: ANSIBLE_ADMIN_PASSWORD + displayName: Ansible Admin User Password + required: true + description: The password for the Ansible container admin user. + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: ANSIBLE_SECRET_KEY + displayName: Ansible Secret Key + required: true + description: Encryption key for the Ansible container. + from: "[a-f0-9]{32}" + generate: expression +- name: ANSIBLE_RABBITMQ_USER_NAME + displayName: RabbitMQ Username + required: true + description: Username for the Ansible RabbitMQ Server. + value: ansible +- name: ANSIBLE_RABBITMQ_PASSWORD + displayName: RabbitMQ Server Password + required: true + description: Password for the Ansible RabbitMQ Server. + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: APPLICATION_CPU_REQ + displayName: Application Min CPU Requested + required: true + description: Minimum amount of CPU time the Application container will need (expressed in millicores). + value: 1000m +- name: MEMCACHED_CPU_REQ + displayName: Memcached Min CPU Requested + required: true + description: Minimum amount of CPU time the Memcached container will need (expressed in millicores). + value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). + value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume.
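Parameters such as these are supplied when the template is processed. A hypothetical invocation of the external-DB template (all parameter values are examples; generated parameters such as passwords can also be left to their `generate: expression` defaults):

  oc process -f cfme-template-ext-db.yaml \
    -p DATABASE_IP=192.0.2.10 \
    -p DATABASE_PORT=5432 \
    -p APPLICATION_DOMAIN=cfme.apps.example.com \
    | oc create -f -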
+ value: 16384Mi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume. + value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. + value: latest +- name: FRONTEND_APPLICATION_IMG_NAME + displayName: Frontend Application Image Name + description: This is the Frontend Application image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui +- name: BACKEND_APPLICATION_IMG_NAME + displayName: Backend Application Image Name + description: This is the Backend Application image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the CloudForms Frontend Application image tag/version requested to deploy. + value: latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the CloudForms Backend Application image tag/version requested to deploy. + value: latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. + value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service; if left blank, a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. + value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. + value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache httpd Configuration Directory + description: Directory used to store the Apache configuration files.
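The httpd DeploymentConfig above mounts the `${HTTPD_SERVICE_NAME}-configs` ConfigMap at the directory named by this parameter, so its `application.conf` key supplies the proxy configuration the image serves. A sketch of changing it after deployment, using the template's default service name (`httpd`); the DC's ConfigChange trigger only watches its own spec, not ConfigMap contents, so the rollout is kicked off by hand:

  oc edit configmap httpd-configs
  oc rollout latest dc/httpd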
+ value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. + value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. + value: 8192Mi diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml new file mode 100644 index 000000000..d7c9f5af7 --- /dev/null +++ b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml @@ -0,0 +1,940 @@ +apiVersion: v1 +kind: Template +labels: + template: cloudforms +metadata: + name: cloudforms + annotations: + description: CloudForms appliance with persistent storage + tags: instant-app,cloudforms,cfme + iconClass: icon-rails +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${DATABASE_SERVICE_NAME}-configs" + data: + 01_miq_overrides.conf: | + #------------------------------------------------------------------------------ + # CONNECTIONS AND AUTHENTICATION + #------------------------------------------------------------------------------ + + tcp_keepalives_count = 9 + tcp_keepalives_idle = 3 + tcp_keepalives_interval = 75 + + #------------------------------------------------------------------------------ + # RESOURCE USAGE (except WAL) + #------------------------------------------------------------------------------ + + shared_preload_libraries = 'pglogical,repmgr_funcs' + max_worker_processes = 10 + + #------------------------------------------------------------------------------ + # WRITE AHEAD LOG + #------------------------------------------------------------------------------ + + wal_level = 'logical' + wal_log_hints = on + wal_buffers = 16MB + checkpoint_completion_target = 0.9 + + #------------------------------------------------------------------------------ + # REPLICATION + #------------------------------------------------------------------------------ + + max_wal_senders = 10 + wal_sender_timeout = 0 + max_replication_slots = 10 + hot_standby = on + + #------------------------------------------------------------------------------ + # ERROR REPORTING AND LOGGING 
+ #------------------------------------------------------------------------------ + + log_filename = 'postgresql.log' + log_rotation_age = 0 + log_min_duration_statement = 5000 + log_connections = on + log_disconnections = on + log_line_prefix = '%t:%r:%c:%u@%d:[%p]:' + log_lock_waits = on + + #------------------------------------------------------------------------------ + # AUTOVACUUM PARAMETERS + #------------------------------------------------------------------------------ + + log_autovacuum_min_duration = 0 + autovacuum_naptime = 5min + autovacuum_vacuum_threshold = 500 + autovacuum_analyze_threshold = 500 + autovacuum_vacuum_scale_factor = 0.05 + + #------------------------------------------------------------------------------ + # LOCK MANAGEMENT + #------------------------------------------------------------------------------ + + deadlock_timeout = 5s + + #------------------------------------------------------------------------------ + # VERSION/PLATFORM COMPATIBILITY + #------------------------------------------------------------------------------ + + escape_string_warning = off + standard_conforming_strings = off +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. + Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + ProxyPreserveHost on + ProxyPass /ws/ ws://${NAME}/ws/ + ProxyPassReverse /ws/ ws://${NAME}/ws/ + ProxyPass / http://${NAME}/ + ProxyPassReverse / http://${NAME}/ + </VirtualHost> +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances CloudForms pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${NAME}" + spec: + clusterIP: None + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: "${NAME}" +- apiVersion: v1 + kind: Route + metadata: + name: "${HTTPD_SERVICE_NAME}" + spec: + host: "${APPLICATION_DOMAIN}" + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + name: "${HTTPD_SERVICE_NAME}" +- apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: "${NAME}-${DATABASE_SERVICE_NAME}" + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${DATABASE_VOLUME_CAPACITY}" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}" + replicas: "${APPLICATION_REPLICA_COUNT}" + template: + metadata: + labels: + name: "${NAME}" + name: "${NAME}" + spec: + containers: + - name: cloudforms + image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 80 + scheme: HTTP + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + 
volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for CloudForms backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: cloudforms + image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Exposes the memcached server + spec: + ports: + - name: memcached + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Defines how to deploy memcached + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + 
selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Exposes the database server + spec: + ports: + - name: postgresql + port: 5432 + targetPort: 5432 + selector: + name: "${DATABASE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the database + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${DATABASE_SERVICE_NAME}" + template: + metadata: + name: "${DATABASE_SERVICE_NAME}" + labels: + name: "${DATABASE_SERVICE_NAME}" + spec: + volumes: + - name: cfme-pgdb-volume + persistentVolumeClaim: + claimName: "${NAME}-${DATABASE_SERVICE_NAME}" + - name: cfme-pg-configs + configMap: + name: "${DATABASE_SERVICE_NAME}-configs" + containers: + - name: postgresql + image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" + ports: + - containerPort: 5432 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 15 + exec: + command: + - "/bin/sh" + - "-i" + - "-c" + - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1' + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 60 + tcpSocket: + port: 5432 + volumeMounts: + - name: cfme-pgdb-volume + mountPath: "/var/lib/pgsql/data" + - name: cfme-pg-configs + mountPath: "${POSTGRESQL_CONFIG_DIR}" + env: + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${DATABASE_NAME}" + - name: POSTGRESQL_MAX_CONNECTIONS + value: "${POSTGRESQL_MAX_CONNECTIONS}" + - name: POSTGRESQL_SHARED_BUFFERS + value: "${POSTGRESQL_SHARED_BUFFERS}" + - name: POSTGRESQL_CONFIG_DIR + value: "${POSTGRESQL_CONFIG_DIR}" + resources: + requests: + memory: "${POSTGRESQL_MEM_REQ}" + cpu: "${POSTGRESQL_CPU_REQ}" + limits: + memory: "${POSTGRESQL_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: 
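Note that this embedded-Ansible DeploymentConfig is created with `replicas: 0`, so it sits idle until explicitly scaled up. A sketch, assuming the template's default service name (`ansible`):

  oc scale dc/ansible --replicas=1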
+ metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: cfme-privileged + serviceAccountName: cfme-privileged +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + lifecycle: + postStart: + exec: + command: + - "/usr/bin/save-container-environment" + serviceAccount: cfme-httpd + serviceAccountName: cfme-httpd +parameters: +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. 
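A hypothetical end-to-end instantiation of this podified template (the project name is an example; `cloudforms` is the template's own `metadata.name`):

  oc new-project cfme
  oc create -f cfme-template.yaml
  oc new-app --template=cloudforms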
+ value: cloudforms +- name: V2_KEY + displayName: CloudForms Encryption Key + required: true + description: Encryption Key for CloudForms Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. + value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. + value: vmdb_production +- name: DATABASE_REGION + required: true + displayName: Application Database Region + description: Database region that will be used for application. + value: '0' +- name: ANSIBLE_DATABASE_NAME + displayName: Ansible PostgreSQL Database Name + required: true + description: The database to be used by the Ansible container. + value: awx +- name: MEMCACHED_SERVICE_NAME + required: true + displayName: Memcached Service Name + description: The name of the OpenShift Service exposed for the Memcached container. + value: memcached +- name: MEMCACHED_MAX_MEMORY + displayName: Memcached Max Memory + description: Memcached maximum memory for memcached object storage in MB. + value: '64' +- name: MEMCACHED_MAX_CONNECTIONS + displayName: Memcached Max Connections + description: Memcached maximum number of connections allowed. + value: '1024' +- name: MEMCACHED_SLAB_PAGE_SIZE + displayName: Memcached Slab Page Size + description: Memcached size of each slab page. + value: 1m +- name: POSTGRESQL_CONFIG_DIR + displayName: PostgreSQL Configuration Overrides + description: Directory used to store PostgreSQL configuration overrides. + value: "/var/lib/pgsql/conf.d" +- name: POSTGRESQL_MAX_CONNECTIONS + displayName: PostgreSQL Max Connections + description: PostgreSQL maximum number of database connections allowed. + value: '1000' +- name: POSTGRESQL_SHARED_BUFFERS + displayName: PostgreSQL Shared Buffer Amount + description: Amount of memory dedicated for PostgreSQL shared memory buffers. + value: 1GB +- name: ANSIBLE_SERVICE_NAME + displayName: Ansible Service Name + description: The name of the OpenShift Service exposed for the Ansible container. + value: ansible +- name: ANSIBLE_ADMIN_PASSWORD + displayName: Ansible Admin User Password + required: true + description: The password for the Ansible container admin user. + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: ANSIBLE_SECRET_KEY + displayName: Ansible Secret Key + required: true + description: Encryption key for the Ansible container. + from: "[a-f0-9]{32}" + generate: expression +- name: ANSIBLE_RABBITMQ_USER_NAME + displayName: RabbitMQ Username + required: true + description: Username for the Ansible RabbitMQ Server. + value: ansible +- name: ANSIBLE_RABBITMQ_PASSWORD + displayName: RabbitMQ Server Password + required: true + description: Password for the Ansible RabbitMQ Server. + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: APPLICATION_CPU_REQ + displayName: Application Min CPU Requested + required: true + description: Minimum amount of CPU time the Application container will need (expressed in millicores).
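These sizing parameters map directly onto the containers' resource requests and limits; note that the templates set CPU requests but only memory limits, leaving CPU burstable. A hedged example of shrinking the footprint for a throwaway test deployment (values are illustrative only):

  oc process -f cfme-template.yaml \
    -p APPLICATION_MEM_REQ=4096Mi \
    -p POSTGRESQL_MEM_REQ=2Gi \
    | oc create -f -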
+ value: 1000m +- name: POSTGRESQL_CPU_REQ + displayName: PostgreSQL Min CPU Requested + required: true + description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores). + value: 500m +- name: MEMCACHED_CPU_REQ + displayName: Memcached Min CPU Requested + required: true + description: Minimum amount of CPU time the Memcached container will need (expressed in millicores). + value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). + value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: POSTGRESQL_MEM_REQ + displayName: PostgreSQL Min RAM Requested + required: true + description: Minimum amount of memory the PostgreSQL container will need. + value: 4Gi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume. + value: 16384Mi +- name: POSTGRESQL_MEM_LIMIT + displayName: PostgreSQL Max RAM Limit + required: true + description: Maximum amount of memory the PostgreSQL container can consume. + value: 8Gi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume. + value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: POSTGRESQL_IMG_NAME + displayName: PostgreSQL Image Name + description: This is the PostgreSQL image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql +- name: POSTGRESQL_IMG_TAG + displayName: PostgreSQL Image Tag + description: This is the PostgreSQL image tag/version requested to deploy. + value: latest +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. + value: latest +- name: FRONTEND_APPLICATION_IMG_NAME + displayName: Frontend Application Image Name + description: This is the Frontend Application image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui +- name: BACKEND_APPLICATION_IMG_NAME + displayName: Backend Application Image Name + description: This is the Backend Application image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the CloudForms Frontend Application image tag/version requested to deploy. 
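Every image tag here defaults to `latest`; for reproducible deployments the tags can be pinned when the template is processed (the tag shown is illustrative, not a known published version):

  oc process -f cfme-template.yaml \
    -p FRONTEND_APPLICATION_IMG_TAG=5.9.0.1 \
    -p BACKEND_APPLICATION_IMG_TAG=5.9.0.1 \
    | oc create -f -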
+ value: latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the CloudForms Backend Application image tag/version requested to deploy. + value: latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. + value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service; if left blank, a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. + value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: DATABASE_VOLUME_CAPACITY + displayName: Database Volume Capacity + required: true + description: Volume space available for database. + value: 15Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. + value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache Configuration Directory + description: Directory used to store the Apache configuration files. + value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. + value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume.
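The ManageIQ backup and restore Jobs defined in the files that follow pair a dedicated backup PVC with the application's PostgreSQL PVC. A sketch of a full cycle using those files (claim names match the templates; the backup PVC must exist before either Job runs):

  oc create -f miq-backup-pvc.yaml
  oc create -f miq-backup-job.yaml
  # ...later, restore the most recent backup (the Job sets BACKUP_VERSION=latest):
  oc create -f miq-restore-job.yaml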
+ value: 8192Mi diff --git a/roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml b/roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml new file mode 100644 index 000000000..044cb73a5 --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: manageiq-backup +spec: + template: + metadata: + name: manageiq-backup + spec: + containers: + - name: postgresql + image: docker.io/manageiq/postgresql:latest + command: + - "/opt/manageiq/container-scripts/backup_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: manageiq-secrets + key: database-url + volumeMounts: + - name: miq-backup-vol + mountPath: "/backups" + volumes: + - name: miq-backup-vol + persistentVolumeClaim: + claimName: manageiq-backup + restartPolicy: Never diff --git a/roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml b/roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml new file mode 100644 index 000000000..25696ef23 --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: manageiq-backup +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 15Gi diff --git a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2 b/roles/openshift_management/files/templates/manageiq/miq-pv-backup-example.yaml index 7b40b6c69..a5cf54d4e 100644 --- a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2 +++ b/roles/openshift_management/files/templates/manageiq/miq-pv-backup-example.yaml @@ -4,10 +4,10 @@ metadata: name: miq-pv03 spec: capacity: - storage: 5Gi + storage: 15Gi accessModes: - - ReadWriteOnce - nfs: - path: /exports/miq-pv03 - server: {{ openshift_cfme_nfs_server }} + - ReadWriteOnce + nfs: + path: "/exports/miq-pv03" + server: "<your-nfs-host-here>" persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml b/roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml new file mode 100644 index 000000000..a803bebe2 --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Template +labels: + template: manageiq-db-pv +metadata: + name: manageiq-db-pv + annotations: + description: PV Template for MIQ PostgreSQL DB + tags: PVS, MIQ +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: miq-db + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: + - ReadWriteOnce + nfs: + path: "${BASE_PATH}/miq-db" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for DB + required: true + description: The size of the MIQ DB PV given in Gi + value: 15Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the NFS server diff --git a/roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml b/roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml new file mode 100644 index 000000000..1288544d1 --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 
+kind: Template +labels: + template: manageiq-app-pv +metadata: + name: manageiq-app-pv + annotations: + description: PV Template for MIQ Server + tags: PVS, MIQ +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: miq-app + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: + - ReadWriteOnce + nfs: + path: "${BASE_PATH}/miq-app" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for App + required: true + description: The size of the MIQ APP PV given in Gi + value: 5Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the NFS server diff --git a/roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml b/roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml new file mode 100644 index 000000000..eea284dd4 --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml @@ -0,0 +1,35 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: manageiq-restore +spec: + template: + metadata: + name: manageiq-restore + spec: + containers: + - name: postgresql + image: docker.io/manageiq/postgresql:latest + command: + - "/opt/manageiq/container-scripts/restore_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: manageiq-secrets + key: database-url + - name: BACKUP_VERSION + value: latest + volumeMounts: + - name: miq-backup-vol + mountPath: "/backups" + - name: miq-prod-vol + mountPath: "/restore" + volumes: + - name: miq-backup-vol + persistentVolumeClaim: + claimName: manageiq-backup + - name: miq-prod-vol + persistentVolumeClaim: + claimName: manageiq-postgresql + restartPolicy: Never diff --git a/roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml b/roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml new file mode 100644 index 000000000..82cd5d49e --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml @@ -0,0 +1,771 @@ +apiVersion: v1 +kind: Template +labels: + template: manageiq-ext-db +metadata: + name: manageiq-ext-db + annotations: + description: ManageIQ appliance with persistent storage using an external DB host + tags: instant-app,manageiq,miq + iconClass: icon-rails +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances ManageIQ pods + service.alpha.openshift.io/dependencies: 
'[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${NAME}" + spec: + clusterIP: None + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: "${NAME}" +- apiVersion: v1 + kind: Route + metadata: + name: "${HTTPD_SERVICE_NAME}" + spec: + host: "${APPLICATION_DOMAIN}" + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + name: "${HTTPD_SERVICE_NAME}" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}" + annotations: + description: Defines how to deploy the ManageIQ appliance + spec: + serviceName: "${NAME}" + replicas: "${APPLICATION_REPLICA_COUNT}" + template: + metadata: + labels: + name: "${NAME}" + name: "${NAME}" + spec: + containers: + - name: manageiq + image: "${APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 80 + scheme: HTTP + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MEMCACHED_SERVER + value: "${MEMCACHED_SERVICE_NAME}:11211" + - name: MEMCACHED_SERVICE_NAME + value: "${MEMCACHED_SERVICE_NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_SERVICE_NAME + value: "${ANSIBLE_SERVICE_NAME}" + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/manageiq/container-scripts/sync-pv-data" + serviceAccount: miq-orchestrator + serviceAccountName: miq-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for ManageIQ backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the ManageIQ appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: manageiq + image: "${APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: 
"${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: MEMCACHED_SERVER + value: "${MEMCACHED_SERVICE_NAME}:11211" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_SERVICE_NAME + value: "${ANSIBLE_SERVICE_NAME}" + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/manageiq/container-scripts/sync-pv-data" + serviceAccount: miq-orchestrator + serviceAccountName: miq-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Exposes the memcached server + spec: + ports: + - name: memcached + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Defines how to deploy memcached + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Remote database service + spec: + ports: + - name: postgresql + port: 5432 + targetPort: "${{DATABASE_PORT}}" + selector: {} +- apiVersion: v1 + kind: Endpoints + metadata: + name: "${DATABASE_SERVICE_NAME}" + subsets: + - addresses: + - ip: "${DATABASE_IP}" + ports: + - port: "${{DATABASE_PORT}}" + name: postgresql +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: 
"${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: miq-privileged + serviceAccountName: miq-privileged +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. + Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + ProxyPreserveHost on + ProxyPass /ws/ ws://${NAME}/ws/ + ProxyPassReverse /ws/ ws://${NAME}/ws/ + ProxyPass / http://${NAME}/ + ProxyPassReverse / http://${NAME}/ + </VirtualHost> +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + 
mountPath: "${HTTPD_CONFIG_DIR}"
+        - name: httpd-auth-config
+          mountPath: "${HTTPD_AUTH_CONFIG_DIR}"
+        resources:
+          requests:
+            memory: "${HTTPD_MEM_REQ}"
+            cpu: "${HTTPD_CPU_REQ}"
+          limits:
+            memory: "${HTTPD_MEM_LIMIT}"
+        env:
+        - name: HTTPD_AUTH_TYPE
+          valueFrom:
+            configMapKeyRef:
+              name: "${HTTPD_SERVICE_NAME}-auth-configs"
+              key: auth-type
+        lifecycle:
+          postStart:
+            exec:
+              command:
+              - "/usr/bin/save-container-environment"
+        serviceAccount: miq-anyuid
+        serviceAccountName: miq-anyuid
+parameters:
+- name: NAME
+  displayName: Name
+  required: true
+  description: The name assigned to all of the frontend objects defined in this template.
+  value: manageiq
+- name: V2_KEY
+  displayName: ManageIQ Encryption Key
+  required: true
+  description: Encryption Key for ManageIQ Passwords
+  from: "[a-zA-Z0-9]{43}"
+  generate: expression
+- name: DATABASE_SERVICE_NAME
+  displayName: PostgreSQL Service Name
+  required: true
+  description: The name of the OpenShift Service exposed for the PostgreSQL container.
+  value: postgresql
+- name: DATABASE_USER
+  displayName: PostgreSQL User
+  required: true
+  description: PostgreSQL user that will access the database.
+  value: root
+- name: DATABASE_PASSWORD
+  displayName: PostgreSQL Password
+  required: true
+  description: Password for the PostgreSQL user.
+  from: "[a-zA-Z0-9]{8}"
+  generate: expression
+- name: DATABASE_IP
+  displayName: PostgreSQL Server IP
+  required: true
+  description: PostgreSQL external server IP used to configure service.
+  value: ''
+- name: DATABASE_PORT
+  displayName: PostgreSQL Server Port
+  required: true
+  description: PostgreSQL external server port used to configure service.
+  value: '5432'
+- name: DATABASE_NAME
+  required: true
+  displayName: PostgreSQL Database Name
+  description: Name of the PostgreSQL database accessed.
+  value: vmdb_production
+- name: DATABASE_REGION
+  required: true
+  displayName: Application Database Region
+  description: Database region that will be used for application.
+  value: '0'
+- name: ANSIBLE_DATABASE_NAME
+  displayName: Ansible PostgreSQL database name
+  required: true
+  description: The database to be used by the Ansible container
+  value: awx
+- name: MEMCACHED_SERVICE_NAME
+  required: true
+  displayName: Memcached Service Name
+  description: The name of the OpenShift Service exposed for the Memcached container.
+  value: memcached
+- name: MEMCACHED_MAX_MEMORY
+  displayName: Memcached Max Memory
+  description: Memcached maximum memory for memcached object storage in MB.
+  value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+  displayName: Memcached Max Connections
+  description: Memcached maximum number of connections allowed.
+  value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+  displayName: Memcached Slab Page Size
+  description: Memcached size of each slab page.
+  value: 1m
+- name: ANSIBLE_SERVICE_NAME
+  displayName: Ansible Service Name
+  description: The name of the OpenShift Service exposed for the Ansible container.
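# Recap of the parameter-generator idiom used throughout these templates:
# 'from' is a regex-shaped pattern and 'generate: expression' makes template
# processing emit a random string matching it, so passwords and keys get
# unique values on every instantiation. Minimal form (parameter name is
# hypothetical):
#
#   - name: EXAMPLE_TOKEN
#     from: "[a-zA-Z0-9]{16}"
#     generate: expression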
+ value: ansible +- name: ANSIBLE_ADMIN_PASSWORD + displayName: Ansible admin User password + required: true + description: The password for the Ansible container admin user + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: ANSIBLE_SECRET_KEY + displayName: Ansible Secret Key + required: true + description: Encryption key for the Ansible container + from: "[a-f0-9]{32}" + generate: expression +- name: ANSIBLE_RABBITMQ_USER_NAME + displayName: RabbitMQ Username + required: true + description: Username for the Ansible RabbitMQ Server + value: ansible +- name: ANSIBLE_RABBITMQ_PASSWORD + displayName: RabbitMQ Server Password + required: true + description: Password for the Ansible RabbitMQ Server + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: APPLICATION_CPU_REQ + displayName: Application Min CPU Requested + required: true + description: Minimum amount of CPU time the Application container will need (expressed in millicores). + value: 1000m +- name: MEMCACHED_CPU_REQ + displayName: Memcached Min CPU Requested + required: true + description: Minimum amount of CPU time the Memcached container will need (expressed in millicores). + value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). + value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume. + value: 16384Mi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume. + value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: docker.io/manageiq/memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. + value: latest +- name: APPLICATION_IMG_NAME + displayName: Application Image Name + description: This is the Application image name requested to deploy. + value: docker.io/manageiq/manageiq-pods +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the ManageIQ Frontend Application image tag/version requested to deploy. + value: frontend-latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the ManageIQ Backend Application image tag/version requested to deploy. + value: backend-latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. 
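# Any of the sizing parameters above can be overridden at processing time; a
# sketch using the same oc_process module this role uses later in the diff
# (values are illustrative, not recommendations):
#
#   - name: Create a smaller-footprint Management app
#     oc_process:
#       namespace: openshift-management
#       template_name: manageiq-ext-db
#       create: True
#       params:
#         APPLICATION_MEM_REQ: 4096Mi
#         MEMCACHED_MEM_LIMIT: 128Mi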
+ value: docker.io/manageiq/embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. + value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service, if left blank a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. + value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: docker.io/manageiq/httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. + value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache httpd Configuration Directory + description: Directory used to store the Apache configuration files. + value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. + value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. 
+ value: 8192Mi diff --git a/roles/openshift_management/files/templates/manageiq/miq-template.yaml b/roles/openshift_management/files/templates/manageiq/miq-template.yaml new file mode 100644 index 000000000..3f5a12205 --- /dev/null +++ b/roles/openshift_management/files/templates/manageiq/miq-template.yaml @@ -0,0 +1,948 @@ +apiVersion: v1 +kind: Template +labels: + template: manageiq +metadata: + name: manageiq + annotations: + description: ManageIQ appliance with persistent storage + tags: instant-app,manageiq,miq + iconClass: icon-rails +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: miq-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${DATABASE_SERVICE_NAME}-configs" + data: + 01_miq_overrides.conf: | + #------------------------------------------------------------------------------ + # CONNECTIONS AND AUTHENTICATION + #------------------------------------------------------------------------------ + + tcp_keepalives_count = 9 + tcp_keepalives_idle = 3 + tcp_keepalives_interval = 75 + + #------------------------------------------------------------------------------ + # RESOURCE USAGE (except WAL) + #------------------------------------------------------------------------------ + + shared_preload_libraries = 'pglogical,repmgr_funcs' + max_worker_processes = 10 + + #------------------------------------------------------------------------------ + # WRITE AHEAD LOG + #------------------------------------------------------------------------------ + + wal_level = 'logical' + wal_log_hints = on + wal_buffers = 16MB + checkpoint_completion_target = 0.9 + + #------------------------------------------------------------------------------ + # REPLICATION + #------------------------------------------------------------------------------ + + max_wal_senders = 10 + wal_sender_timeout = 0 + max_replication_slots = 10 + hot_standby = on + + #------------------------------------------------------------------------------ + # ERROR REPORTING AND LOGGING + #------------------------------------------------------------------------------ + + log_filename = 'postgresql.log' + log_rotation_age = 0 + log_min_duration_statement = 5000 + log_connections = on + log_disconnections = on + log_line_prefix = '%t:%r:%c:%u@%d:[%p]:' + log_lock_waits = on + + #------------------------------------------------------------------------------ + # AUTOVACUUM PARAMETERS + #------------------------------------------------------------------------------ + + log_autovacuum_min_duration = 0 + autovacuum_naptime = 5min + autovacuum_vacuum_threshold = 500 + autovacuum_analyze_threshold = 500 + autovacuum_vacuum_scale_factor = 0.05 + + #------------------------------------------------------------------------------ + # LOCK MANAGEMENT + 
#------------------------------------------------------------------------------ + + deadlock_timeout = 5s + + #------------------------------------------------------------------------------ + # VERSION/PLATFORM COMPATIBILITY + #------------------------------------------------------------------------------ + + escape_string_warning = off + standard_conforming_strings = off +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. + Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + ProxyPreserveHost on + ProxyPass /ws/ ws://${NAME}/ws/ + ProxyPassReverse /ws/ ws://${NAME}/ws/ + ProxyPass / http://${NAME}/ + ProxyPassReverse / http://${NAME}/ + </VirtualHost> +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances ManageIQ pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${NAME}" + spec: + clusterIP: None + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: "${NAME}" +- apiVersion: v1 + kind: Route + metadata: + name: "${HTTPD_SERVICE_NAME}" + spec: + host: "${APPLICATION_DOMAIN}" + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + name: "${HTTPD_SERVICE_NAME}" +- apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: "${NAME}-${DATABASE_SERVICE_NAME}" + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${DATABASE_VOLUME_CAPACITY}" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}" + annotations: + description: Defines how to deploy the ManageIQ appliance + spec: + serviceName: "${NAME}" + replicas: "${APPLICATION_REPLICA_COUNT}" + template: + metadata: + labels: + name: "${NAME}" + name: "${NAME}" + spec: + containers: + - name: manageiq + image: "${APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 80 + scheme: HTTP + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MEMCACHED_SERVER + value: "${MEMCACHED_SERVICE_NAME}:11211" + - name: MEMCACHED_SERVICE_NAME + value: "${MEMCACHED_SERVICE_NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_SERVICE_NAME + value: "${ANSIBLE_SERVICE_NAME}" + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: 
+ secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/manageiq/container-scripts/sync-pv-data" + serviceAccount: miq-orchestrator + serviceAccountName: miq-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for ManageIQ backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the ManageIQ appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: manageiq + image: "${APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: MEMCACHED_SERVER + value: "${MEMCACHED_SERVICE_NAME}:11211" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_SERVICE_NAME + value: "${ANSIBLE_SERVICE_NAME}" + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/manageiq/container-scripts/sync-pv-data" + serviceAccount: miq-orchestrator + serviceAccountName: miq-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Exposes the memcached server + spec: + ports: + - name: memcached + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Defines how to deploy memcached + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: 
+ timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Exposes the database server + spec: + ports: + - name: postgresql + port: 5432 + targetPort: 5432 + selector: + name: "${DATABASE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the database + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${DATABASE_SERVICE_NAME}" + template: + metadata: + name: "${DATABASE_SERVICE_NAME}" + labels: + name: "${DATABASE_SERVICE_NAME}" + spec: + volumes: + - name: miq-pgdb-volume + persistentVolumeClaim: + claimName: "${NAME}-${DATABASE_SERVICE_NAME}" + - name: miq-pg-configs + configMap: + name: "${DATABASE_SERVICE_NAME}-configs" + containers: + - name: postgresql + image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" + ports: + - containerPort: 5432 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 15 + exec: + command: + - "/bin/sh" + - "-i" + - "-c" + - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1' + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 60 + tcpSocket: + port: 5432 + volumeMounts: + - name: miq-pgdb-volume + mountPath: "/var/lib/pgsql/data" + - name: miq-pg-configs + mountPath: "${POSTGRESQL_CONFIG_DIR}" + env: + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${DATABASE_NAME}" + - name: POSTGRESQL_MAX_CONNECTIONS + value: "${POSTGRESQL_MAX_CONNECTIONS}" + - name: POSTGRESQL_SHARED_BUFFERS + value: "${POSTGRESQL_SHARED_BUFFERS}" + - name: POSTGRESQL_CONFIG_DIR + value: "${POSTGRESQL_CONFIG_DIR}" + resources: + requests: + memory: "${POSTGRESQL_MEM_REQ}" + cpu: "${POSTGRESQL_CPU_REQ}" + limits: + memory: "${POSTGRESQL_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - 
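# The database readiness probe above gates on `psql ... -c 'SELECT 1'`; the
# same check works as a manual smoke test against the podified DB (user and
# database names shown are the template defaults):
#
#   - name: Smoke-test the podified database
#     command: oc rsh dc/postgresql psql -U root -d vmdb_production -c 'SELECT 1'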
containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: miq-privileged + serviceAccountName: miq-privileged +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + lifecycle: + postStart: + exec: + command: + - "/usr/bin/save-container-environment" + serviceAccount: miq-anyuid + serviceAccountName: miq-anyuid +parameters: +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. + value: manageiq +- name: V2_KEY + displayName: ManageIQ Encryption Key + required: true + description: Encryption Key for ManageIQ Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. 
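# The embedded-ansible pod above runs privileged under the miq-privileged
# service account, which only works once that SA has the privileged SCC. A
# sketch of the grant, mirroring what accounts.yml does further down in this
# diff (project name assumes the role default):
#
#   - name: Grant the privileged SCC to miq-privileged
#     oc_adm_policy_user:
#       namespace: openshift-management
#       user: system:serviceaccount:openshift-management:miq-privileged
#       resource_kind: scc
#       resource_name: privileged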
+  value: root
+- name: DATABASE_PASSWORD
+  displayName: PostgreSQL Password
+  required: true
+  description: Password for the PostgreSQL user.
+  from: "[a-zA-Z0-9]{8}"
+  generate: expression
+- name: DATABASE_NAME
+  required: true
+  displayName: PostgreSQL Database Name
+  description: Name of the PostgreSQL database accessed.
+  value: vmdb_production
+- name: DATABASE_REGION
+  required: true
+  displayName: Application Database Region
+  description: Database region that will be used for application.
+  value: '0'
+- name: ANSIBLE_DATABASE_NAME
+  displayName: Ansible PostgreSQL database name
+  required: true
+  description: The database to be used by the Ansible container
+  value: awx
+- name: MEMCACHED_SERVICE_NAME
+  required: true
+  displayName: Memcached Service Name
+  description: The name of the OpenShift Service exposed for the Memcached container.
+  value: memcached
+- name: MEMCACHED_MAX_MEMORY
+  displayName: Memcached Max Memory
+  description: Memcached maximum memory for memcached object storage in MB.
+  value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+  displayName: Memcached Max Connections
+  description: Memcached maximum number of connections allowed.
+  value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+  displayName: Memcached Slab Page Size
+  description: Memcached size of each slab page.
+  value: 1m
+- name: POSTGRESQL_CONFIG_DIR
+  displayName: PostgreSQL Configuration Overrides
+  description: Directory used to store PostgreSQL configuration overrides.
+  value: "/var/lib/pgsql/conf.d"
+- name: POSTGRESQL_MAX_CONNECTIONS
+  displayName: PostgreSQL Max Connections
+  description: PostgreSQL maximum number of database connections allowed.
+  value: '1000'
+- name: POSTGRESQL_SHARED_BUFFERS
+  displayName: PostgreSQL Shared Buffer Amount
+  description: Amount of memory dedicated for PostgreSQL shared memory buffers.
+  value: 1GB
+- name: ANSIBLE_SERVICE_NAME
+  displayName: Ansible Service Name
+  description: The name of the OpenShift Service exposed for the Ansible container.
+  value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+  displayName: Ansible admin User password
+  required: true
+  description: The password for the Ansible container admin user
+  from: "[a-zA-Z0-9]{32}"
+  generate: expression
+- name: ANSIBLE_SECRET_KEY
+  displayName: Ansible Secret Key
+  required: true
+  description: Encryption key for the Ansible container
+  from: "[a-f0-9]{32}"
+  generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+  displayName: RabbitMQ Username
+  required: true
+  description: Username for the Ansible RabbitMQ Server
+  value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+  displayName: RabbitMQ Server Password
+  required: true
+  description: Password for the Ansible RabbitMQ Server
+  from: "[a-zA-Z0-9]{32}"
+  generate: expression
+- name: APPLICATION_CPU_REQ
+  displayName: Application Min CPU Requested
+  required: true
+  description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+  value: 1000m
+- name: POSTGRESQL_CPU_REQ
+  displayName: PostgreSQL Min CPU Requested
+  required: true
+  description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores).
+  value: 500m
+- name: MEMCACHED_CPU_REQ
+  displayName: Memcached Min CPU Requested
+  required: true
+  description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). + value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: POSTGRESQL_MEM_REQ + displayName: PostgreSQL Min RAM Requested + required: true + description: Minimum amount of memory the PostgreSQL container will need. + value: 4Gi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume. + value: 16384Mi +- name: POSTGRESQL_MEM_LIMIT + displayName: PostgreSQL Max RAM Limit + required: true + description: Maximum amount of memory the PostgreSQL container can consume. + value: 8Gi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume. + value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: POSTGRESQL_IMG_NAME + displayName: PostgreSQL Image Name + description: This is the PostgreSQL image name requested to deploy. + value: docker.io/manageiq/postgresql +- name: POSTGRESQL_IMG_TAG + displayName: PostgreSQL Image Tag + description: This is the PostgreSQL image tag/version requested to deploy. + value: latest +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: docker.io/manageiq/memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. + value: latest +- name: APPLICATION_IMG_NAME + displayName: Application Image Name + description: This is the Application image name requested to deploy. + value: docker.io/manageiq/manageiq-pods +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the ManageIQ Frontend Application image tag/version requested to deploy. + value: frontend-latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the ManageIQ Backend Application image tag/version requested to deploy. + value: backend-latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. + value: docker.io/manageiq/embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. + value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service, if left blank a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. 
+ value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: DATABASE_VOLUME_CAPACITY + displayName: Database Volume Capacity + required: true + description: Volume space available for database. + value: 15Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: docker.io/manageiq/httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. + value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache Configuration Directory + description: Directory used to store the Apache configuration files. + value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. + value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. + value: 8192Mi diff --git a/roles/openshift_management/filter_plugins/oo_management_filters.py b/roles/openshift_management/filter_plugins/oo_management_filters.py new file mode 100644 index 000000000..3b7013d9a --- /dev/null +++ b/roles/openshift_management/filter_plugins/oo_management_filters.py @@ -0,0 +1,32 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +""" +Filter methods for the management role +""" + + +def oo_filter_container_providers(results): + """results - the result from posting the API calls for adding new +providers""" + all_results = [] + for result in results: + if 'results' in result['json']: + # We got an OK response + res = result['json']['results'][0] + all_results.append("Provider '{}' - Added successfully".format(res['name'])) + elif 'error' in result['json']: + # This was a problem + all_results.append("Provider '{}' - Failed to add. 
Message: {}".format( + result['item']['name'], result['json']['error']['message'])) + return all_results + + +class FilterModule(object): + """ Custom ansible filter mapping """ + + # pylint: disable=no-self-use, too-few-public-methods + def filters(self): + """ returns a mapping of filters to methods """ + return { + "oo_filter_container_providers": oo_filter_container_providers, + } diff --git a/roles/openshift_management/handlers/main.yml b/roles/openshift_management/handlers/main.yml new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/roles/openshift_management/handlers/main.yml diff --git a/roles/openshift_cfme/meta/main.yml b/roles/openshift_management/meta/main.yml index 9200f2c3c..07ad51126 100644 --- a/roles/openshift_cfme/meta/main.yml +++ b/roles/openshift_management/meta/main.yml @@ -16,5 +16,3 @@ galaxy_info: dependencies: - role: lib_openshift - role: lib_utils -- role: openshift_common -- role: openshift_master_facts diff --git a/roles/openshift_management/tasks/accounts.yml b/roles/openshift_management/tasks/accounts.yml new file mode 100644 index 000000000..e45ea8d43 --- /dev/null +++ b/roles/openshift_management/tasks/accounts.yml @@ -0,0 +1,28 @@ +--- +# This role task file is responsible for user/system account creation, +# and ensuring correct access is provided as required. +- name: Ensure the CFME system accounts exist + oc_serviceaccount: + namespace: "{{ openshift_management_project }}" + state: present + name: "{{ openshift_management_flavor_short }}{{ item.name }}" + with_items: + - "{{ __openshift_system_account_sccs }}" + +- name: Ensure the CFME system accounts have all the required SCCs + oc_adm_policy_user: + namespace: "{{ openshift_management_project }}" + user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" + resource_kind: scc + resource_name: "{{ item.resource_name }}" + with_items: + - "{{ __openshift_system_account_sccs }}" + +- name: Ensure the CFME system accounts have the required roles + oc_adm_policy_user: + namespace: "{{ openshift_management_project }}" + user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" + resource_kind: role + resource_name: "{{ item.resource_name }}" + with_items: + - "{{ __openshift_management_system_account_roles }}" diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml new file mode 100644 index 000000000..383e6edb5 --- /dev/null +++ b/roles/openshift_management/tasks/add_container_provider.yml @@ -0,0 +1,65 @@ +--- +- name: Ensure lib_openshift modules are available + include_role: + role: lib_openshift + +- name: Ensure OpenShift facts module is available + include_role: + role: openshift_facts + +- name: Ensure OpenShift facts are loaded + openshift_facts: + +- name: Ensure the management SA Secrets are read + oc_serviceaccount_secret: + state: list + service_account: management-admin + namespace: management-infra + register: sa + +- name: Ensure the management SA bearer token is identified + set_fact: + management_token: "{{ sa.results | oo_filter_sa_secrets }}" + +- name: Ensure the SA bearer token value is read + oc_secret: + state: list + name: "{{ management_token }}" + namespace: management-infra + decode: true + no_log: True + register: sa_secret + +- name: Ensure the SA bearer token value is saved + set_fact: + management_bearer_token: "{{ sa_secret.results.decoded.token }}" + +- 
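# A sketch of how the oo_filter_container_providers filter defined above is
# meant to be consumed: feed it the registered results of the provider POST
# calls and it returns one human-readable status line per provider (the
# variable name is hypothetical):
#
#   - debug:
#       msg: "{{ provider_results.results | oo_filter_container_providers }}"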
name: Ensure we have the public route to the management service
+  oc_route:
+    state: list
+    name: httpd
+    namespace: openshift-management
+  register: route
+
+- name: Ensure the management service route is saved
+  set_fact:
+    management_route: "{{ route.results.0.spec.host }}"
+
+- name: Ensure this cluster is a container provider
+  uri:
+    url: "https://{{ management_route }}/api/providers"
+    body_format: json
+    method: POST
+    user: "{{ openshift_management_username }}"
+    password: "{{ openshift_management_password }}"
+    validate_certs: no
+    # Docs on formatting the BODY of the POST request:
+    # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
+    body:
+      connection_configurations:
+      - authentication: {auth_key: "{{ management_bearer_token }}", authtype: bearer, type: AuthToken}
+        endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+      hostname: "{{ openshift.master.cluster_public_hostname }}"
+      name: "{{ openshift_management_project }}"
+      port: "{{ openshift.master.api_port }}"
+      type: "ManageIQ::Providers::Openshift::ContainerManager"
diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml
new file mode 100644
index 000000000..9be923a57
--- /dev/null
+++ b/roles/openshift_management/tasks/main.yml
@@ -0,0 +1,96 @@
+---
+######################################################################
+# Users, projects, and privileges
+
+- name: Run pre-install Management validation checks
+  include: validate.yml
+
+# This creates a service account allowing Container Provider
+# integration (managing OCP/Origin via MIQ/Management)
+- name: Enable Container Provider Integration
+  include_role:
+    role: openshift_manageiq
+
+- name: "Ensure the Management '{{ openshift_management_project }}' namespace exists"
+  oc_project:
+    state: present
+    name: "{{ openshift_management_project }}"
+    display_name: "{{ openshift_management_project_description }}"
+
+- name: Create and Authorize Management Accounts
+  include: accounts.yml
+
+######################################################################
+# STORAGE - Initialize basic storage class
+- name: Determine the correct NFS host if required
+  include: storage/nfs_server.yml
+  when: openshift_management_storage_class in ['nfs', 'nfs_external']
+
+#---------------------------------------------------------------------
+# * nfs - set up NFS shares on the first master for a proof of concept
+- name: Create required NFS exports for Management app storage
+  include: storage/nfs.yml
+  when: openshift_management_storage_class == 'nfs'
+
+#---------------------------------------------------------------------
+# * external - NFS again, but pointing to a pre-configured NFS server
+- name: Note Storage Type - External NFS
+  debug:
+    msg: "Setting up external NFS storage, openshift_management_storage_class is {{ openshift_management_storage_class }}"
+  when: openshift_management_storage_class == 'nfs_external'
+
+#---------------------------------------------------------------------
+# * cloudprovider - use an existing cloudprovider based storage
+- name: Note Storage Type - Cloud Provider
+  debug:
+    msg: Validating cloud provider storage type, openshift_management_storage_class is 'cloudprovider'
+  when: openshift_management_storage_class == 'cloudprovider'
+
+#---------------------------------------------------------------------
+# * preconfigured - don't do anything, assume it's all there ready to go
+- name: Note Storage Type - 
Preconfigured + debug: + msg: Skipping storage configuration, openshift_management_storage_class is 'preconfigured' + when: openshift_management_storage_class == 'preconfigured' + +###################################################################### +# APPLICATION TEMPLATE +- name: Install the Management app and PV templates + include: template.yml + +###################################################################### +# APP & DB Storage + +# For local/external NFS backed installations +- name: "Create the required App and DB PVs using {{ openshift_management_storage_class }}" + include: storage/create_nfs_pvs.yml + when: + - openshift_management_storage_class in ['nfs', 'nfs_external'] + +###################################################################### +# CREATE APP +- name: Note the correct ext-db template name + set_fact: + openshift_management_template_name: "{{ openshift_management_flavor }}-ext-db" + when: + - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] + +- name: Note the correct podified db template name + set_fact: + openshift_management_template_name: "{{ openshift_management_flavor }}" + when: + - openshift_management_app_template in ['miq-template', 'cfme-template'] + +- name: Ensure the Management App is created + oc_process: + namespace: "{{ openshift_management_project }}" + template_name: "{{ openshift_management_template_name }}" + create: True + params: "{{ openshift_management_template_parameters }}" + +- name: Wait for the app to come up. May take several minutes, 30s check intervals, 10m max + command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}" + register: app_seeding_logs + until: app_seeding_logs.stdout.find('Server starting complete') != -1 + delay: 30 + retries: 20 diff --git a/roles/openshift_aws_iam_kms/defaults/main.yml b/roles/openshift_management/tasks/noop.yml index ed97d539c..ed97d539c 100644 --- a/roles/openshift_aws_iam_kms/defaults/main.yml +++ b/roles/openshift_management/tasks/noop.yml diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml new file mode 100644 index 000000000..d1b9a8d5c --- /dev/null +++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml @@ -0,0 +1,69 @@ +--- +# Create the required PVs for the App and the DB +- name: Note the App PV Size from Template Parameters + set_fact: + openshift_management_app_pv_size: "{{ openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY }}" + when: + - openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is defined + +- name: Note the App PV Size from defaults + set_fact: + openshift_management_app_pv_size: "{{ __openshift_management_app_pv_size }}" + when: + - openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is not defined + +- when: openshift_management_app_template in ['miq-template', 'cfme-template'] + block: + - name: Note the DB PV Size from Template Parameters + set_fact: + openshift_management_db_pv_size: "{{ openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY }}" + when: + - openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is defined + + - name: Note the DB PV Size from defaults + set_fact: + openshift_management_db_pv_size: "{{ __openshift_management_db_pv_size }}" + when: + - openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is not defined + +- name: Check if the Management App PV has been created + oc_obj: + namespace: 
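# The add_container_provider.yml tasks shown earlier are self-contained enough
# to run post-install; a sketch of a minimal play (the credentials shown are
# assumed to match the role defaults, adjust to your deployment):
#
#   - hosts: masters[0]
#     vars:
#       openshift_management_username: admin
#       openshift_management_password: smartvm
#     tasks:
#     - include_role:
#         name: openshift_management
#         tasks_from: add_container_provider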
"{{ openshift_management_project }}" + state: list + kind: pv + name: "{{ openshift_management_flavor_short }}-app" + register: miq_app_pv_check + +- name: Check if the Management DB PV has been created + oc_obj: + namespace: "{{ openshift_management_project }}" + state: list + kind: pv + name: "{{ openshift_management_flavor_short }}-db" + register: miq_db_pv_check + when: + - openshift_management_app_template in ['miq-template', 'cfme-template'] + +- name: Ensure the Management App PV is created + oc_process: + namespace: "{{ openshift_management_project }}" + template_name: "{{ openshift_management_flavor }}-app-pv" + create: True + params: + PV_SIZE: "{{ openshift_management_app_pv_size }}" + BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}" + NFS_HOST: "{{ openshift_management_nfs_server }}" + when: miq_app_pv_check.results.results == [{}] + +- name: Ensure the Management DB PV is created + oc_process: + namespace: "{{ openshift_management_project }}" + template_name: "{{ openshift_management_flavor }}-db-pv" + create: True + params: + PV_SIZE: "{{ openshift_management_db_pv_size }}" + BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}" + NFS_HOST: "{{ openshift_management_nfs_server }}" + when: + - openshift_management_app_template in ['miq-template', 'cfme-template'] + - miq_db_pv_check.results.results == [{}] diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml new file mode 100644 index 000000000..94e11137c --- /dev/null +++ b/roles/openshift_management/tasks/storage/nfs.yml @@ -0,0 +1,36 @@ +--- +# Tasks to statically provision NFS volumes +# Include if not using dynamic volume provisioning + +- name: Setting up NFS storage + block: + - name: Include the NFS Setup role tasks + include_role: + role: openshift_nfs + tasks_from: setup + vars: + l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" + + - name: Create the App export + include_role: + role: openshift_nfs + tasks_from: create_export + vars: + l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" + l_nfs_export_config: "{{ openshift_management_flavor_short }}" + l_nfs_export_name: "{{ openshift_management_flavor_short }}-app" + l_nfs_options: "*(rw,no_root_squash,no_wdelay)" + + - name: Create the DB export + include_role: + role: openshift_nfs + tasks_from: create_export + vars: + l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" + l_nfs_export_config: "{{ openshift_management_flavor_short }}" + l_nfs_export_name: "{{ openshift_management_flavor_short }}-db" + l_nfs_options: "*(rw,no_root_squash,no_wdelay)" + when: + - openshift_management_app_template in ['miq-template', 'cfme-template'] + + delegate_to: "{{ openshift_management_nfs_server }}" diff --git a/roles/openshift_management/tasks/storage/nfs_server.yml b/roles/openshift_management/tasks/storage/nfs_server.yml new file mode 100644 index 000000000..96a742c83 --- /dev/null +++ b/roles/openshift_management/tasks/storage/nfs_server.yml @@ -0,0 +1,31 @@ +--- +- name: Ensure we save the local NFS server if one is provided + set_fact: + openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}" + when: + - openshift_management_storage_nfs_local_hostname is defined + - openshift_management_storage_nfs_local_hostname != False + - openshift_management_storage_class == "nfs" + +- name: Ensure we save the local NFS server + set_fact: + openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}" + when: + - 
openshift_management_nfs_server is not defined + - openshift_management_storage_class == "nfs" + +- name: Ensure we save the external NFS server + set_fact: + openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}" + when: + - openshift_management_storage_class == "nfs_external" + +- name: Failed NFS server detection + assert: + that: + - openshift_management_nfs_server is defined + msg: | + "Unable to detect an NFS server. The 'nfs_external' + openshift_management_storage_class option requires that you set + openshift_management_storage_nfs_external_hostname. NFS hosts detected + for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}" diff --git a/roles/openshift_management/tasks/storage/storage.yml b/roles/openshift_management/tasks/storage/storage.yml new file mode 100644 index 000000000..d8bf7aa3e --- /dev/null +++ b/roles/openshift_management/tasks/storage/storage.yml @@ -0,0 +1,3 @@ +--- +- include: nfs.yml + when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')) diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml new file mode 100644 index 000000000..9f97cdcb9 --- /dev/null +++ b/roles/openshift_management/tasks/template.yml @@ -0,0 +1,128 @@ +--- +# Tasks for ensuring the correct CFME templates are landed on the remote system + +###################################################################### +# CFME App Template +# +# Note, this is different from the create_nfs_pvs.yml tasks in that +# the application template does not require any jinja2 evaluation. +# +# TODO: Handle the case where the server or PV templates are updated +# in openshift-ansible and the change needs to be landed on the +# managed cluster. 
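+#
+# For contrast, an illustrative sketch (not part of this change) of how
+# the PV templates landed below are later instantiated with
+# jinja2-evaluated parameters in storage/create_nfs_pvs.yml:
+#
+#   - oc_process:
+#       namespace: "{{ openshift_management_project }}"
+#       template_name: "{{ openshift_management_flavor }}-app-pv"
+#       create: True
+#       params:
+#         PV_SIZE: "{{ openshift_management_app_pv_size }}"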
+ +###################################################################### +# STANDARD PODIFIED DATABASE TEMPLATE +- when: openshift_management_app_template in ['miq-template', 'cfme-template'] + block: + - name: Check if the Management Server template has been created already + oc_obj: + namespace: "{{ openshift_management_project }}" + state: list + kind: template + name: "{{ openshift_management_flavor }}" + register: miq_server_check + + - when: miq_server_check.results.results == [{}] + block: + - name: Copy over Management Server template + copy: + src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml" + dest: "{{ template_dir }}/" + + - name: Ensure Management Server Template is created + oc_obj: + namespace: "{{ openshift_management_project }}" + name: "{{ openshift_management_flavor }}" + state: present + kind: template + files: + - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template.yaml" + +###################################################################### +# EXTERNAL DATABASE TEMPLATE +- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] + block: + - name: Check if the Management Ext-DB Server template has been created already + oc_obj: + namespace: "{{ openshift_management_project }}" + state: list + kind: template + name: "{{ openshift_management_flavor }}-ext-db" + register: miq_ext_db_server_check + + - when: miq_ext_db_server_check.results.results == [{}] + block: + - name: Copy over Management Ext-DB Server template + copy: + src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml" + dest: "{{ template_dir }}/" + + - name: Ensure Management Ext-DB Server Template is created + oc_obj: + namespace: "{{ openshift_management_project }}" + name: "{{ openshift_management_flavor }}-ext-db" + state: present + kind: template + files: + - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template-ext-db.yaml" + +# End app template creation. 
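+
+# Note (assumed lib_openshift behavior, inferred from its use here): the
+# oc_obj queries in this file register results.results == [{}] when the
+# named template does not exist, so each create block is gated on that
+# comparison and templates are only copied and created once.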
+###################################################################### + +###################################################################### +# Begin conditional PV template creations + +# Required for the application server +- name: Check if the Management App PV template has been created already + oc_obj: + namespace: "{{ openshift_management_project }}" + state: list + kind: template + name: "{{ openshift_management_flavor }}-app-pv" + register: miq_app_pv_check + +- when: miq_app_pv_check.results.results == [{}] + block: + - name: Copy over Management App PV template + copy: + src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" + dest: "{{ template_dir }}/" + + - name: Ensure Management App PV Template is created + oc_obj: + namespace: "{{ openshift_management_project }}" + name: "{{ openshift_management_flavor }}-app-pv" + state: present + kind: template + files: + - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" + +#--------------------------------------------------------------------- + +# Required for database if the installation is fully podified +- when: openshift_management_app_template in ['miq-template', 'cfme-template'] + block: + - name: Check if the Management DB PV template has been created already + oc_obj: + namespace: "{{ openshift_management_project }}" + state: list + kind: template + name: "{{ openshift_management_flavor }}-db-pv" + register: miq_db_pv_check + + - when: miq_db_pv_check.results.results == [{}] + block: + - name: Copy over Management DB PV template + copy: + src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" + dest: "{{ template_dir }}/" + + - name: Ensure Management DB PV Template is created + oc_obj: + namespace: "{{ openshift_management_project }}" + name: "{{ openshift_management_flavor }}-db-pv" + state: present + kind: template + files: + - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" diff --git a/roles/openshift_management/tasks/uninstall.yml b/roles/openshift_management/tasks/uninstall.yml new file mode 100644 index 000000000..09fbc609f --- /dev/null +++ b/roles/openshift_management/tasks/uninstall.yml @@ -0,0 +1,23 @@ +--- +- name: Start removing all the objects + command: "oc delete -n {{ openshift_management_project }} {{ item }} --all" + with_items: + - rc + - dc + - po + - svc + - pv + - pvc + - statefulsets + - routes + +- name: Remove the project + command: "oc delete -n {{ openshift_management_project }} project {{ openshift_management_project }}" + +- name: Verify project has been destroyed + command: "oc get project {{ openshift_management_project }}" + ignore_errors: True + register: project_terminated + until: project_terminated.stderr.find("NotFound") != -1 + delay: 5 + retries: 30 diff --git a/roles/openshift_management/tasks/validate.yml b/roles/openshift_management/tasks/validate.yml new file mode 100644 index 000000000..8b20bdc5e --- /dev/null +++ b/roles/openshift_management/tasks/validate.yml @@ -0,0 +1,90 @@ +--- +# Validate configuration parameters passed to the openshift_management role + +###################################################################### +# CORE PARAMETERS +- name: Ensure openshift_management_app_template is valid + assert: + that: + - openshift_management_app_template in __openshift_management_app_templates + + msg: | + "openshift_management_app_template must be one of {{ + 
__openshift_management_app_templates | join(', ') }}"
+
+- name: Ensure openshift_management_storage_class is a valid type
+  assert:
+    that:
+      - openshift_management_storage_class in __openshift_management_storage_classes
+    msg: |
+      "openshift_management_storage_class must be one of {{
+      __openshift_management_storage_classes | join(', ') }}"
+
+######################################################################
+# STORAGE PARAMS - NFS
+- name: Ensure external NFS storage has a valid NFS server hostname defined
+  assert:
+    that:
+      - openshift_management_storage_nfs_external_hostname | default(False)
+    msg: |
+      The selected storage class 'nfs_external' requires a valid
+      hostname for the openshift_management_storage_nfs_external_hostname parameter
+  when:
+    - openshift_management_storage_class == 'nfs_external'
+
+- name: Ensure local NFS storage has a valid NFS server to use
+  fail:
+    msg: |
+      No NFS hosts detected or defined but storage class is set to
+      'nfs'. Add hosts to your [nfs] group or define one manually with
+      the 'openshift_management_storage_nfs_local_hostname' parameter
+  when:
+    - openshift_management_storage_class == 'nfs'
+    # You haven't created any NFS groups
+    - (groups.nfs is defined and groups.nfs | length == 0) or (groups.nfs is not defined)
+    # You did not manually specify a host to use
+    - (openshift_management_storage_nfs_local_hostname is not defined) or (openshift_management_storage_nfs_local_hostname == false)
+
+######################################################################
+# STORAGE PARAMS - CLOUD PROVIDER
+- name: Validate Cloud Provider storage class
+  assert:
+    that:
+      - openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'
+    msg: |
+      openshift_management_storage_class is 'cloudprovider' but you have an
+      invalid kind defined, '{{ openshift_cloudprovider_kind }}'. See
+      'openshift_cloudprovider_kind' in the example inventories for
+      the required parameters for your selected cloud
+      provider. Working providers: 'aws' and 'gce'.
+  when:
+    - openshift_management_storage_class == 'cloudprovider'
+    - openshift_cloudprovider_kind is defined
+
+- name: Validate 'cloudprovider' Storage Class has required parameters defined
+  assert:
+    that:
+      - openshift_cloudprovider_kind is defined
+    msg: |
+      openshift_management_storage_class is 'cloudprovider' but you do not
+      have 'openshift_cloudprovider_kind' defined; this is
+      required. Search the example inventories for
+      'openshift_cloudprovider_kind'. The required parameters for your
+      selected cloud provider must be defined in your inventory as
+      well. Working providers: 'aws' and 'gce'.
+  when:
+    - openshift_management_storage_class == 'cloudprovider'
+
+######################################################################
+# DATABASE CONNECTION VALIDATION
+- name: Validate all required database parameters were provided for ext-db template
+  assert:
+    that:
+      - item in openshift_management_template_parameters
+    msg: |
+      "You are using external database services but a required
+      database parameter {{ item }} was not found in
+      'openshift_management_template_parameters'"
+  with_items: "{{ __openshift_management_required_db_conn_params }}"
+  when:
+    - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
diff --git a/roles/openshift_management/vars/main.yml b/roles/openshift_management/vars/main.yml
new file mode 100644
index 000000000..da3ad0af7
--- /dev/null
+++ b/roles/openshift_management/vars/main.yml
@@ -0,0 +1,76 @@
+---
+# Misc enumerated values
+#---------------------------------------------------------------------
+# Allowed choices for the storage class parameter
+__openshift_management_storage_classes:
+  - nfs
+  - nfs_external
+  - preconfigured
+  - cloudprovider
+
+#---------------------------------------------------------------------
+# DEFAULT PV SIZES
+# How large to make the MIQ application PV
+__openshift_management_app_pv_size: 5Gi
+# How large to make the MIQ PostgreSQL PV
+__openshift_management_db_pv_size: 15Gi
+
+# Name of the application templates with object/parameter definitions
+__openshift_management_app_templates:
+  - miq-template-ext-db
+  - miq-template
+  - cfme-template-ext-db
+  - cfme-template
+
+# PostgreSQL database connection parameters
+__openshift_management_db_parameters:
+  - DATABASE_USER
+  - DATABASE_PASSWORD
+  - DATABASE_IP
+  - DATABASE_PORT
+  - DATABASE_NAME
+
+# # Commented out until we can support both CFME and MIQ
+# # openshift_management_flavor: "{{ 'cloudforms' if openshift_deployment_type == 'openshift-enterprise' else 'manageiq' }}"
+#openshift_management_flavor: cloudforms
+openshift_management_flavor: manageiq
+# TODO: Make this conditional as well based on the prior variable
+# # openshift_management_flavor_short: "{{ 'cfme' if openshift_deployment_type == 'openshift-enterprise' else 'miq' }}"
+# openshift_management_flavor_short: cfme
+openshift_management_flavor_short: miq
+
+######################################################################
+# ACCOUNTING
+######################################################################
+# Service Account SCCs
+__openshift_system_account_sccs:
+  - name: -anyuid
+    resource_name: anyuid
+  - name: -orchestrator
+    resource_name: anyuid
+  - name: -privileged
+    resource_name: privileged
+  - name: -httpd
+    resource_name: anyuid
+
+# Service Account Roles
+__openshift_management_system_account_roles:
+  - name: -orchestrator
+    resource_name: view
+  - name: -orchestrator
+    resource_name: edit
+
+######################################################################
+# DEFAULTS
+######################################################################
+# The user only has to provide the parameters they need to override; we
+# apply a hash update with the provided user parameters to create the
+# final connection structure.
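+#
+# For example (hypothetical inventory values, shown only to illustrate
+# the shape of the hash), a user pointing the ext-db template at an
+# existing PostgreSQL server might set:
+#
+#   openshift_management_template_parameters:
+#     DATABASE_USER: root
+#     DATABASE_PASSWORD: changeme
+#     DATABASE_IP: 10.1.1.10
+#     DATABASE_PORT: "5432"
+#     DATABASE_NAME: vmdb_production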
+# +# TODO: Update user provided configs with this if they are missing fields +__openshift_management_required_db_conn_params: + - DATABASE_USER + - DATABASE_PASSWORD + - DATABASE_IP + - DATABASE_PORT + - DATABASE_NAME diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index fbf69c270..2dcc56e3f 100644 --- a/roles/openshift_master/README.md +++ b/roles/openshift_master/README.md @@ -1,4 +1,4 @@ -OpenShift/Atomic Enterprise Master +OpenShift Master ================================== Master service installation @@ -17,7 +17,6 @@ From this role: | Name | Default value | | |---------------------------------------------------|-----------------------|-------------------------------------------------------------------------------| -| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master | | openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up | | oreg_url | UNDEF | Default docker registry to use | | oreg_url_master | UNDEF | Default docker registry to use, specifically on the master | @@ -29,18 +28,10 @@ From this role: | openshift_master_public_console_url | UNDEF | | | openshift_master_saconfig_limit_secret_references | false | | -From openshift_common: - -| Name | Default Value | | -|-------------------------------|----------------|----------------------------------------| -| openshift_debug_level | 2 | Global openshift debug log verbosity | -| openshift_public_ip | UNDEF | Public IP address to use for this host | -| openshift_hostname | UNDEF | hostname to use for this instance | Dependencies ------------ -openshift_common Example Playbook ---------------- diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index d70106276..dafafda08 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -1,4 +1,9 @@ --- +# openshift_master_defaults_in_use is a workaround to detect if we are consuming +# the plays from the role or outside of the role. +openshift_master_defaults_in_use: True +openshift_master_debug_level: "{{ debug_level | default(2) }}" + r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" @@ -7,7 +12,7 @@ r_openshift_master_clean_install: false r_openshift_master_etcd3_storage: false r_openshift_master_os_firewall_enable: true r_openshift_master_os_firewall_deny: [] -r_openshift_master_os_firewall_allow: +default_r_openshift_master_os_firewall_allow: - service: api server https port: "{{ openshift.master.api_port }}/tcp" - service: api controllers https @@ -19,8 +24,123 @@ r_openshift_master_os_firewall_allow: - service: etcd embedded port: 4001/tcp cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" +r_openshift_master_os_firewall_allow: "{{ default_r_openshift_master_os_firewall_allow | union(openshift_master_open_ports | default([])) }}" + -oreg_url: '' -oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" -oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker" +# oreg_url is defined by user input +oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' 
in oreg_url.split('/')[0]) else '' }}" +oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker" oreg_auth_credentials_replace: False +l_bind_docker_reg_auth: False + +containerized_svc_dir: "/usr/lib/systemd/system" +ha_svc_template_path: "native-cluster" + +# NOTE +# r_openshift_master_*_default may be defined external to this role. +# openshift_use_*, if defined, may affect other roles or play behavior. +r_openshift_master_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}" +r_openshift_master_use_openshift_sdn: "{{ r_openshift_master_use_openshift_sdn_default }}" + +r_openshift_master_use_nuage_default: "{{ openshift_use_nuage | default(False) }}" +r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}" + +r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}" +r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}" + +r_openshift_master_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}" +r_openshift_master_use_kuryr: "{{ r_openshift_master_use_kuryr_default }}" + +r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}" +r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}" + +r_openshift_master_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}" +r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_plugin_name_default }}" + +openshift_master_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}" +openshift_master_image_config_latest: "{{ openshift_master_image_config_latest_default }}" + +openshift_master_config_dir_default: "{{ (openshift.common.config_base | default('/etc/origin/master')) ~ '/master' }}" +openshift_master_config_dir: "{{ openshift_master_config_dir_default }}" +openshift_master_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}" + +openshift_master_node_config_networkconfig_mtu: 1450 + +openshift_master_node_config_kubeletargs_cpu: 500m +openshift_master_node_config_kubeletargs_mem: 512M + +openshift_master_bootstrap_enabled: False + +openshift_master_client_binary: "{{ openshift.common.client_binary if openshift is defined else 'oc' }}" + +openshift_master_config_imageconfig_format: "{{ openshift.node.registry_url }}" + +# these are for the default settings in a generated node-config.yaml +openshift_master_node_config_default_edits: +- key: nodeName + state: absent +- key: dnsBindAddress + value: 127.0.0.1:53 +- key: dnsDomain + value: cluster.local +- key: dnsRecursiveResolvConf + value: /etc/origin/node/resolv.conf +- key: imageConfig.format + value: "{{ openshift_master_config_imageconfig_format }}" +- key: kubeletArguments.cloud-config + value: + - "/etc/origin/cloudprovider/{{ openshift_master_cloud_provider }}.conf" +- key: kubeletArguments.cloud-provider + value: + - "{{ openshift_master_cloud_provider }}" +- key: kubeletArguments.kube-reserved + value: + - "cpu={{ openshift_master_node_config_kubeletargs_cpu }},memory={{ openshift_master_node_config_kubeletargs_mem }}" +- key: kubeletArguments.system-reserved + value: + - "cpu={{ openshift_master_node_config_kubeletargs_cpu }},memory={{ openshift_master_node_config_kubeletargs_mem }}" +- key: enable-controller-attach-detach + value: + - 'true' +- key: networkConfig.mtu + value: 8951 +- key: networkConfig.networkPluginName + value: "{{ r_openshift_master_sdn_network_plugin_name }}" +- key: 
networkPluginName
+  value: "{{ r_openshift_master_sdn_network_plugin_name }}"
+
+
+# We support labels for all nodes here
+openshift_master_node_config_kubeletargs_default_labels: []
+# We also support overrides for node-group labels
+openshift_master_node_config_kubeletargs_master_labels: []
+openshift_master_node_config_kubeletargs_infra_labels: []
+openshift_master_node_config_kubeletargs_compute_labels: []
+
+openshift_master_node_config_master:
+  type: master
+  edits:
+  - key: kubeletArguments.node-labels
+    value: "{{ openshift_master_node_config_kubeletargs_default_labels |
+             union(openshift_master_node_config_kubeletargs_master_labels) |
+             union(['type=master']) }}"
+openshift_master_node_config_infra:
+  type: infra
+  edits:
+  - key: kubeletArguments.node-labels
+    value: "{{ openshift_master_node_config_kubeletargs_default_labels |
+             union(openshift_master_node_config_kubeletargs_infra_labels) |
+             union(['type=infra']) }}"
+openshift_master_node_config_compute:
+  type: compute
+  edits:
+  - key: kubeletArguments.node-labels
+    value: "{{ openshift_master_node_config_kubeletargs_default_labels |
+             union(openshift_master_node_config_kubeletargs_compute_labels) |
+             union(['type=compute']) }}"
+
+openshift_master_node_configs:
+- "{{ openshift_master_node_config_infra }}"
+- "{{ openshift_master_node_config_compute }}"
+
+openshift_master_bootstrap_namespace: openshift-node
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index bd2383f61..a1cda2ad4 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -13,20 +13,5 @@ galaxy_info:
   - cloud
 dependencies:
 - role: lib_openshift
+- role: lib_utils
 - role: lib_os_firewall
-- role: openshift_master_facts
-- role: openshift_hosted_facts
-- role: openshift_master_certificates
-- role: openshift_etcd_client_certificates
-  etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
-  etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
-  etcd_cert_prefix: "master.etcd-"
-  when: groups.oo_etcd_to_config | default([]) | length != 0
-- role: openshift_clock
-- role: openshift_cloud_provider
-- role: openshift_builddefaults
-- role: openshift_buildoverrides
-- role: nickhammond.logrotate
-- role: contiv
-  contiv_role: netmaster
-  when: openshift.common.use_contiv | bool
diff --git a/roles/openshift_master/tasks/bootstrap.yml b/roles/openshift_master/tasks/bootstrap.yml
index 0013f5289..1c30c1dea 100644
--- a/roles/openshift_master/tasks/bootstrap.yml
+++ b/roles/openshift_master/tasks/bootstrap.yml
@@ -1,21 +1,4 @@
 ---
-
-- name: ensure the node-bootstrap service account exists
-  oc_serviceaccount:
-    name: node-bootstrapper
-    namespace: openshift-infra
-    state: present
-  run_once: true
-
-- name: grant node-bootstrapper the correct permissions to bootstrap
-  oc_adm_policy_user:
-    namespace: openshift-infra
-    user: system:serviceaccount:openshift-infra:node-bootstrapper
-    resource_kind: cluster-role
-    resource_name: system:node-bootstrapper
-    state: present
-  run_once: true
-
 # TODO: create a module for this command.
 # oc_serviceaccounts_kubeconfig
 - name: create service account kubeconfig with csr rights
@@ -26,3 +9,67 @@
   copy:
     content: "{{ kubeconfig_out.stdout }}"
     dest: "{{ openshift_master_config_dir }}/bootstrap.kubeconfig"
+
+- name: create a temp dir for this work
+  command: mktemp -d /tmp/openshift_node_config-XXXXXX
+  register: mktempout
+  run_once: true
+
+# The node-config below is generated dynamically so that we do not have
+# to maintain our own copy of the template.
This is generated by +# the product and the following settings will be +# generated by the master +- name: generate a node-config dynamically + command: > + {{ openshift_master_client_binary }} adm create-node-config + --node-dir={{ mktempout.stdout }}/ + --node=CONFIGMAP + --hostnames=test + --dns-ip=0.0.0.0 + --certificate-authority={{ openshift_master_config_dir }}/ca.crt + --signer-cert={{ openshift_master_config_dir }}/ca.crt + --signer-key={{ openshift_master_config_dir }}/ca.key + --signer-serial={{ openshift_master_config_dir }}/ca.serial.txt + --node-client-certificate-authority={{ openshift_master_config_dir }}/ca.crt + register: configgen + run_once: true + +- name: remove the default settings + yedit: + state: "{{ item.state | default('present') }}" + src: "{{ mktempout.stdout }}/node-config.yaml" + key: "{{ item.key }}" + value: "{{ item.value | default(omit) }}" + with_items: "{{ openshift_master_node_config_default_edits }}" + run_once: true + +- name: copy the generated config into each group + copy: + src: "{{ mktempout.stdout }}/node-config.yaml" + remote_src: true + dest: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml" + with_items: "{{ openshift_master_node_configs }}" + run_once: true + +- name: "specialize the generated configs for node-config-{{ item.type }}" + yedit: + src: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml" + edits: "{{ item.edits }}" + with_items: "{{ openshift_master_node_configs }}" + run_once: true + +- name: create node-config.yaml configmap + oc_configmap: + name: "node-config-{{ item.type }}" + namespace: "{{ openshift_master_bootstrap_namespace }}" + from_file: + node-config.yaml: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml" + with_items: "{{ openshift_master_node_configs }}" + run_once: true + +- name: remove templated files + file: + dest: "{{ mktempout.stdout }}/" + state: absent + with_items: "{{ openshift_master_node_configs }}" + run_once: true diff --git a/roles/openshift_master/tasks/check_master_api_is_ready.yml b/roles/openshift_master/tasks/check_master_api_is_ready.yml new file mode 100644 index 000000000..7e8a7a596 --- /dev/null +++ b/roles/openshift_master/tasks/check_master_api_is_ready.yml @@ -0,0 +1,14 @@ +--- +- name: Wait for API to become available + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. 
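+  # (The /healthz/ready endpoint returns the bare string 'ok' once the
+  # API is ready; the until test below checks for exactly that.)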
+  command: >
+    curl --silent --tlsv1.2
+    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+    {{ openshift.master.api_url }}/healthz/ready
+  register: l_api_available_output
+  until: l_api_available_output.stdout == 'ok'
+  retries: 120
+  delay: 1
+  run_once: true
+  changed_when: false
diff --git a/roles/openshift_master/tasks/clean_systemd_units.yml b/roles/openshift_master/tasks/clean_systemd_units.yml
deleted file mode 100644
index e641f84d4..000000000
--- a/roles/openshift_master/tasks/clean_systemd_units.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-
-- name: Disable master service
-  systemd:
-    name: "{{ openshift.common.service_type }}-master"
-    state: stopped
-    enabled: no
-    masked: yes
-  ignore_errors: true
diff --git a/roles/openshift_master/tasks/configure_external_etcd.yml b/roles/openshift_master/tasks/configure_external_etcd.yml
new file mode 100644
index 000000000..b0590ac84
--- /dev/null
+++ b/roles/openshift_master/tasks/configure_external_etcd.yml
@@ -0,0 +1,17 @@
+---
+- name: Remove etcdConfig section
+  yedit:
+    src: /etc/origin/master/master-config.yaml
+    key: "etcdConfig"
+    state: absent
+- name: Set etcdClientInfo.ca to master.etcd-ca.crt
+  yedit:
+    src: /etc/origin/master/master-config.yaml
+    key: etcdClientInfo.ca
+    value: master.etcd-ca.crt
+- name: Set etcdClientInfo.urls to the external etcd
+  yedit:
+    src: /etc/origin/master/master-config.yaml
+    key: etcdClientInfo.urls
+    value:
+      - "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
diff --git a/roles/openshift_master/tasks/journald.yml b/roles/openshift_master/tasks/journald.yml
new file mode 100644
index 000000000..a16cbe78e
--- /dev/null
+++ b/roles/openshift_master/tasks/journald.yml
@@ -0,0 +1,29 @@
+---
+- name: Checking for journald.conf
+  stat: path=/etc/systemd/journald.conf
+  register: journald_conf_file
+
+- name: Create journald persistence directories
+  file:
+    path: /var/log/journal
+    state: directory
+
+- name: Update journald setup
+  replace:
+    dest: /etc/systemd/journald.conf
+    regexp: '^(\#| )?{{ item.var }}=\s*.*?$'
+    replace: ' {{ item.var }}={{ item.val }}'
+    backup: yes
+  with_items: "{{ journald_vars_to_replace | default([]) }}"
+  when: journald_conf_file.stat.exists
+  register: journald_update
+
+# Journald needs to be restarted immediately, otherwise it gets in the
+# way of further steps in ansible
+- name: Restart journald
+  command: "systemctl restart systemd-journald"
+  retries: 3
+  delay: 5
+  register: result
+  until: result.rc == 0
+  when: journald_update | changed
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index ba56ac94e..48b34c578 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -47,9 +47,9 @@
   when:
   - not openshift.common.is_containerized | bool
 
-- name: Create openshift.common.data_dir
+- name: Create r_openshift_master_data_dir
   file:
-    path: "{{ openshift.common.data_dir }}"
+    path: "{{ r_openshift_master_data_dir }}"
     state: directory
     mode: 0755
     owner: root
@@ -169,7 +169,7 @@
     register: l_already_set
 
 - set_fact:
-    openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
+    openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
 
 - name: Set 
fact of all etcd host IPs openshift_facts: @@ -177,8 +177,8 @@ local_facts: no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}" -- name: Remove the legacy master service if it exists - include: clean_systemd_units.yml +- name: Update journald config + include: journald.yml - name: Install the systemd units include: systemd_units.yml @@ -203,7 +203,7 @@ - restart master api - set_fact: - translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1', openshift.common.version, openshift.common.deployment_type) }}" + translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}" # TODO: add the validate parameter when there is a validation command to run - name: Create master config @@ -232,22 +232,6 @@ - restart master controllers when: openshift_master_bootstrap_enabled | default(False) -- name: Check for credentials file for registry auth - stat: - path: "{{oreg_auth_credentials_path }}" - when: - - oreg_auth_user is defined - register: master_oreg_auth_credentials_stat - -- name: Create credentials for registry auth - command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" - when: - - oreg_auth_user is defined - - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool - notify: - - restart master api - - restart master controllers - - include: set_loopback_context.yml when: - openshift.common.version_gte_3_2_or_1_2 @@ -308,35 +292,18 @@ # A separate wait is required here for native HA since notifies will # be resolved after all tasks in the role. -- name: Wait for API to become available - # Using curl here since the uri module requires python-httplib2 and - # wait_for port doesn't provide health information. 
- command: > - curl --silent --tlsv1.2 - {% if openshift.common.version_gte_3_2_or_1_2 | bool %} - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {% else %} - --cacert {{ openshift.common.config_base }}/master/ca.crt - {% endif %} - {{ openshift.master.api_url }}/healthz/ready - register: l_api_available_output - until: l_api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - run_once: true - changed_when: false +- include: check_master_api_is_ready.yml when: - openshift.master.cluster_method == 'native' - master_api_service_status_changed | bool -- name: Start and enable master controller on first master +- name: Start and enable master controller service systemd: name: "{{ openshift.common.service_type }}-master-controllers" enabled: yes state: started when: - openshift.master.cluster_method == 'native' - - inventory_hostname == openshift_master_hosts[0] register: l_start_result until: not l_start_result | failed retries: 1 @@ -347,31 +314,8 @@ when: - l_start_result | failed -- name: Wait for master controller service to start on first master - pause: - seconds: 15 - when: - - openshift.master.cluster_method == 'native' - -- name: Start and enable master controller on all masters - systemd: - name: "{{ openshift.common.service_type }}-master-controllers" - enabled: yes - state: started - when: - - openshift.master.cluster_method == 'native' - - inventory_hostname != openshift_master_hosts[0] - register: l_start_result - until: not l_start_result | failed - retries: 1 - delay: 60 - -- name: Dump logs from master-controllers if it failed - command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers - when: - - l_start_result | failed - -- set_fact: +- name: Set fact master_controllers_service_status_changed + set_fact: master_controllers_service_status_changed: "{{ l_start_result | changed }}" when: - openshift.master.cluster_method == 'native' diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml new file mode 100644 index 000000000..cde01c49e --- /dev/null +++ b/roles/openshift_master/tasks/registry_auth.yml @@ -0,0 +1,28 @@ +--- +- name: Check for credentials file for registry auth + stat: + path: "{{ oreg_auth_credentials_path }}" + when: oreg_auth_user is defined + register: master_oreg_auth_credentials_stat + +- name: Create credentials for registry auth + command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" + when: + - oreg_auth_user is defined + - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + register: master_oreg_auth_credentials_create + retries: 3 + delay: 5 + until: master_oreg_auth_credentials_create.rc == 0 + notify: + - restart master api + - restart master controllers + +# Container images may need the registry credentials +- name: Setup ro mount of /root/.docker for containerized hosts + set_fact: + l_bind_docker_reg_auth: True + when: + - openshift.common.is_containerized | bool + - oreg_auth_user is defined + - (master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or master_oreg_auth_credentials_create.changed) | bool diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml index 91332acfb..843352532 100644 --- a/roles/openshift_master/tasks/system_container.yml +++ b/roles/openshift_master/tasks/system_container.yml @@ -1,4 +1,9 @@ --- +- 
name: Ensure proxies are in the atomic.conf + include_role: + name: openshift_atomic + tasks_from: proxy + - name: Pre-pull master system container image command: > atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }} diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 72c231e52..5751723ab 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -1,12 +1,9 @@ --- -# This file is included both in the openshift_master role and in the upgrade -# playbooks. For that reason the ha_svc variables are use set_fact instead of -# the vars directory on the role. +# systemd_units.yml is included both in the openshift_master role and in the upgrade +# playbooks. -- name: Init HA Service Info - set_fact: - containerized_svc_dir: "/usr/lib/systemd/system" - ha_svc_template_path: "native-cluster" +- include: upgrade_facts.yml + when: openshift_master_defaults_in_use is not defined - name: Set HA Service Info for containerized installs set_fact: @@ -15,6 +12,25 @@ when: - openshift.common.is_containerized | bool +- include: registry_auth.yml + +- name: Disable the legacy master service if it exists + systemd: + name: "{{ openshift.common.service_type }}-master" + state: stopped + enabled: no + masked: yes + ignore_errors: true + +- name: Remove the legacy master service if it exists + file: + path: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service" + state: absent + ignore_errors: true + when: + - openshift.master.cluster_method == "native" + - not openshift.common.is_master_system_container | bool + # This is the image used for both HA and non-HA clusters: - name: Pre-pull master image command: > diff --git a/roles/openshift_master/tasks/update_etcd_client_urls.yml b/roles/openshift_master/tasks/update_etcd_client_urls.yml new file mode 100644 index 000000000..1ab105808 --- /dev/null +++ b/roles/openshift_master/tasks/update_etcd_client_urls.yml @@ -0,0 +1,8 @@ +--- +- yedit: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + key: 'etcdClientInfo.urls' + value: "{{ openshift.master.etcd_urls }}" + notify: + - restart master api + - restart master controllers diff --git a/roles/openshift_master/tasks/upgrade_facts.yml b/roles/openshift_master/tasks/upgrade_facts.yml new file mode 100644 index 000000000..2252c003a --- /dev/null +++ b/roles/openshift_master/tasks/upgrade_facts.yml @@ -0,0 +1,37 @@ +--- +# This file exists because we call systemd_units.yml from outside of the role +# during upgrades. When we remove this pattern, we can probably +# eliminate most of these set_fact items. + +- name: Set openshift_master_config_dir if unset + set_fact: + openshift_master_config_dir: '/etc/origin/master' + when: openshift_master_config_dir is not defined + +- name: Set r_openshift_master_data_dir if unset + set_fact: + r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}" + when: r_openshift_master_data_dir is not defined + +- set_fact: + oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker" + when: oreg_auth_credentials_path is not defined + +- set_fact: + oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' 
in oreg_url.split('/')[0]) else '' }}" + when: oreg_host is not defined + +- set_fact: + oreg_auth_credentials_replace: False + when: oreg_auth_credentials_replace is not defined + +- name: Set openshift_master_debug_level + set_fact: + openshift_master_debug_level: "{{ debug_level | default(2) }}" + when: + - openshift_master_debug_level is not defined + +- name: Init HA Service Info + set_fact: + containerized_svc_dir: "{{ containerized_svc_dir | default('/usr/lib/systemd/system') }}" + ha_svc_template_path: "{{ ha_svc_template_path | default('native-cluster') }}" diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2 index b931f1414..3f7a528a9 100644 --- a/roles/openshift_master/templates/atomic-openshift-master.j2 +++ b/roles/openshift_master/templates/atomic-openshift-master.j2 @@ -1,4 +1,4 @@ -OPTIONS=--loglevel={{ openshift.master.debug_level | default(2) }} +OPTIONS=--loglevel={{ openshift_master_debug_level }} CONFIG_FILE={{ openshift_master_config_file }} {# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #} {% if openshift_master_is_scaleup_host %} @@ -21,7 +21,7 @@ AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }} {% endif %} {% if 'api_env_vars' in openshift.master or 'controllers_env_vars' in openshift.master -%} -{% for key, value in openshift.master.api_env_vars.items() | default([]) | union(openshift.master.controllers_env_vars.items() | default([])) -%} +{% for key, value in (openshift.master.api_env_vars | default({})).items() | union((openshift.master.controllers_env_vars | default({})).items()) -%} {{ key }}={{ value }} {% endfor -%} {% endif -%} diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index e8f7c47b0..5d4a99c97 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -12,12 +12,22 @@ Requires={{ openshift.docker.service_name }}.service EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api Environment=GOTRACEBACK=crash ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged --net=host \ + --name {{ openshift.common.service_type }}-master-api \ + --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \ + -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \ + -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \ + -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \ + {% if openshift_cloudprovider_kind | default('') != '' 
-%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ + -v /etc/pki:/etc/pki:ro \ + {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ + {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \ + --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api LimitNOFILE=131072 LimitCORE=infinity -WorkingDirectory={{ openshift.common.data_dir }} +WorkingDirectory={{ r_openshift_master_data_dir }} SyslogIdentifier={{ openshift.common.service_type }}-master-api Restart=always RestartSec=5s diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index 69db62f16..f93f3b565 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -11,12 +11,22 @@ PartOf={{ openshift.docker.service_name }}.service EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers Environment=GOTRACEBACK=crash ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged --net=host \ + --name {{ openshift.common.service_type }}-master-controllers \ + --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \ + -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \ + {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ + -v /etc/pki:/etc/pki:ro \ + {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ + {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \ + --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers LimitNOFILE=131072 LimitCORE=infinity -WorkingDirectory={{ openshift.common.data_dir }} +WorkingDirectory={{ r_openshift_master_data_dir }} SyslogIdentifier={{ openshift.common.service_type }}-master-controllers Restart=always RestartSec=5s diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 2b7f9afc9..c83fc9fbb 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ 
b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -107,7 +107,7 @@ etcdConfig: clientCA: ca.crt {% endif %} keyFile: etcd.server.key - storageDirectory: {{ openshift.common.data_dir }}/openshift.local.etcd + storageDirectory: {{ r_openshift_master_data_dir }}/openshift.local.etcd {% endif %} etcdStorageConfig: kubernetesStoragePrefix: kubernetes.io @@ -116,7 +116,7 @@ etcdStorageConfig: openShiftStorageVersion: v1 imageConfig: format: {{ openshift.master.registry_url }} - latest: false + latest: {{ openshift_master_image_config_latest }} {% if 'image_policy_config' in openshift.master %} imagePolicyConfig:{{ openshift.master.image_policy_config | to_padded_yaml(level=1) }} {% endif %} @@ -180,8 +180,13 @@ masterPublicURL: {{ openshift.master.public_api_url }} networkConfig: clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }} hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }} -{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage or openshift.common.use_contiv or openshift.common.sdn_network_plugin_name == 'cni' %} - networkPluginName: {{ openshift.common.sdn_network_plugin_name }} +{% if openshift.common.version_gte_3_7 | bool %} + clusterNetworks: + - cidr: {{ openshift.master.sdn_cluster_network_cidr }} + hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }} +{% endif %} +{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_use_kuryr or r_openshift_master_sdn_network_plugin_name == 'cni' %} + networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }} {% endif %} # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet serviceNetworkCIDR: {{ openshift.common.portal_net }} @@ -252,11 +257,7 @@ servingInfo: bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} bindNetwork: tcp4 certFile: master.server.crt -{% if openshift.common.version_gte_3_2_or_1_2 | bool %} - clientCA: ca-bundle.crt -{% else %} clientCA: ca.crt -{% endif %} keyFile: master.server.key maxRequestsInFlight: {{ openshift.master.max_requests_inflight }} requestTimeoutSeconds: 3600 @@ -280,12 +281,5 @@ servingInfo: - {{ cipher_suite }} {% endfor %} {% endif %} -{% if openshift_template_service_broker_namespaces is defined %} -templateServiceBrokerConfig: - templateNamespaces: -{% for namespace in openshift_template_service_broker_namespaces %} - - {{ namespace }} -{% endfor %} -{% endif %} volumeConfig: dynamicProvisioningEnabled: {{ openshift.master.dynamic_provisioning_enabled }} diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 index 63eb3ea1b..cc21b37af 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 @@ -1,4 +1,4 @@ -OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }} +OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }} CONFIG_FILE={{ openshift_master_config_file }} {# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings 
in scale up runs #}
 {% if openshift_master_is_scaleup_host %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
index 0e78d2d23..02bfd6f62 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
@@ -13,7 +13,7 @@ Environment=GOTRACEBACK=crash
 ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
 LimitNOFILE=131072
 LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
 SyslogIdentifier=atomic-openshift-master-api
 Restart=always
 RestartSec=5s
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 0adfd05b6..493fc510e 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
 CONFIG_FILE={{ openshift_master_config_file }}
 {# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
 {% if openshift_master_is_scaleup_host %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
index 94928f88c..e284413f7 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
@@ -17,7 +17,7 @@ Environment=GOTRACEBACK=crash
 ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
 LimitNOFILE=131072
 LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
 SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
 Restart=always
 RestartSec=5s
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index cf39b73f6..0c681c764 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -20,3 +20,22 @@ openshift_master_valid_grant_methods:
 - deny
 
 openshift_master_is_scaleup_host: False
+
+# These defaults assume forcing journald persistence, fsync to disk once
+# a second, rate-limiting to 10,000 logs a second, no forwarding to
+# syslog or wall, using 8GB of disk space maximum, using 10MB journal
+# files, keeping only a day's worth of logs per journal file, and
+# retaining journal files no longer than a month.
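+#
+# Each var/val pair below is applied to /etc/systemd/journald.conf by the
+# replace task in tasks/journald.yml, yielding lines such as
+# "Storage=persistent" (an illustrative rendering of the first entry).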
+journald_vars_to_replace: +- { var: Storage, val: persistent } +- { var: Compress, val: yes } +- { var: SyncIntervalSec, val: 1s } +- { var: RateLimitInterval, val: 1s } +- { var: RateLimitBurst, val: 10000 } +- { var: SystemMaxUse, val: 8G } +- { var: SystemKeepFree, val: 20% } +- { var: SystemMaxFileSize, val: 10M } +- { var: MaxRetentionSec, val: 1month } +- { var: MaxFileSec, val: 1day } +- { var: ForwardToSyslog, val: no } +- { var: ForwardToWall, val: no } diff --git a/roles/openshift_master_certificates/meta/main.yml b/roles/openshift_master_certificates/meta/main.yml index 018186e86..300b2cbff 100644 --- a/roles/openshift_master_certificates/meta/main.yml +++ b/roles/openshift_master_certificates/meta/main.yml @@ -12,6 +12,4 @@ galaxy_info: categories: - cloud - system -dependencies: -- role: openshift_master_facts -- role: openshift_ca +dependencies: [] diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml index a80313505..d0dcdae4b 100644 --- a/roles/openshift_master_facts/defaults/main.yml +++ b/roles/openshift_master_facts/defaults/main.yml @@ -1,5 +1,5 @@ --- -openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}" +openshift_master_default_subdomain: "router.default.svc.cluster.local" openshift_master_admission_plugin_config: openshift.io/ImagePolicy: configuration: diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py index e767772ce..a4f410296 100644 --- a/roles/openshift_master_facts/filter_plugins/openshift_master.py +++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py @@ -6,10 +6,6 @@ Custom filters for use in openshift-master import copy import sys -# pylint import-error disabled because pylint cannot find the package -# when installed in a virtualenv -from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error - from ansible import errors from ansible.parsing.yaml.dumper import AnsibleDumper from ansible.plugins.filter.core import to_bool as ansible_bool @@ -82,23 +78,8 @@ class IdentityProviderBase(object): self._allow_additional = True @staticmethod - def validate_idp_list(idp_list, openshift_version, deployment_type): + def validate_idp_list(idp_list): ''' validates a list of idps ''' - login_providers = [x.name for x in idp_list if x.login] - - multiple_logins_unsupported = False - if len(login_providers) > 1: - if deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']: - if LooseVersion(openshift_version) < LooseVersion('3.2'): - multiple_logins_unsupported = True - if deployment_type in ['origin']: - if LooseVersion(openshift_version) < LooseVersion('1.2'): - multiple_logins_unsupported = True - if multiple_logins_unsupported: - raise errors.AnsibleFilterError("|failed multiple providers are " - "not allowed for login. 
login " - "providers: {0}".format(', '.join(login_providers))) - names = [x.name for x in idp_list] if len(set(names)) != len(names): raise errors.AnsibleFilterError("|failed more than one provider configured with the same name") @@ -380,14 +361,8 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase): if 'extra_authorize_parameters' in self._idp: self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters') - if 'extraAuthorizeParameters' in self._idp: - if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']: - val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes')) - self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val - def validate(self): ''' validate this idp instance ''' - IdentityProviderOauthBase.validate(self) if not isinstance(self.provider['claims'], dict): raise errors.AnsibleFilterError("|failed claims for provider {0} " "must be a dictionary".format(self.__class__.__name__)) @@ -476,7 +451,7 @@ class FilterModule(object): ''' Custom ansible filters for use by the openshift_master role''' @staticmethod - def translate_idps(idps, api_version, openshift_version, deployment_type): + def translate_idps(idps, api_version): ''' Translates a list of dictionaries into a valid identityProviders config ''' idp_list = [] @@ -492,7 +467,7 @@ class FilterModule(object): idp_inst.set_provider_items() idp_list.append(idp_inst) - IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type) + IdentityProviderBase.validate_idp_list(idp_list) return u(yaml.dump([idp.to_dict() for idp in idp_list], allow_unicode=True, default_flow_style=False, diff --git a/roles/openshift_master_facts/lookup_plugins/oo_option.py b/roles/openshift_master_facts/lookup_plugins/oo_option.py deleted file mode 120000 index 5ae43f8dd..000000000 --- a/roles/openshift_master_facts/lookup_plugins/oo_option.py +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins/oo_option.py
\ No newline at end of file diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py index c45f255af..f27eb629d 100644 --- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py +++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py @@ -101,7 +101,7 @@ class LookupModule(LookupBase): {'name': 'MatchInterPodAffinity'} ]) - if short_version in ['3.5', '3.6', '3.7']: + if short_version in ['3.5', '3.6']: predicates.extend([ {'name': 'NoVolumeZoneConflict'}, {'name': 'MaxEBSVolumeCount'}, @@ -114,6 +114,21 @@ class LookupModule(LookupBase): {'name': 'CheckNodeDiskPressure'}, ]) + if short_version in ['3.7']: + predicates.extend([ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MaxAzureDiskVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'NoVolumeNodeConflict'}, + ]) + if regions_enabled: region_predicate = { 'name': 'Region', diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index fa228af2a..cf0be3bef 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -1,5 +1,4 @@ --- - # Ensure the default sub-domain is set: - name: Migrate legacy osm_default_subdomain fact set_fact: @@ -35,7 +34,6 @@ cluster_method: "{{ openshift_master_cluster_method | default('native') }}" cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}" cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" - debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}" api_port: "{{ openshift_master_api_port | default(None) }}" api_url: "{{ openshift_master_api_url | default(None) }}" api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" @@ -90,7 +88,6 @@ controller_args: "{{ osm_controller_args | default(None) }}" disabled_features: "{{ osm_disabled_features | default(None) }}" master_count: "{{ openshift_master_count | default(None) }}" - controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}" master_image: "{{ osm_image | default(None) }}" admission_plugin_config: "{{openshift_master_admission_plugin_config }}" kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py index 4a28fb8f8..38a918803 100644 --- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py +++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py @@ -57,6 +57,20 @@ DEFAULT_PREDICATES_1_5 = [ DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5 +DEFAULT_PREDICATES_3_7 = [ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MaxAzureDiskVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + 
{'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'NoVolumeNodeConflict'}, +] + REGION_PREDICATE = { 'name': 'Region', 'argument': { @@ -79,6 +93,8 @@ TEST_VARS = [ ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5), ('3.6', 'origin', DEFAULT_PREDICATES_3_6), ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6), + ('3.7', 'origin', DEFAULT_PREDICATES_3_7), + ('3.7', 'openshift-enterprise', DEFAULT_PREDICATES_3_7), ] diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md index 1f10de4a2..b74f22c00 100644 --- a/roles/openshift_metrics/README.md +++ b/roles/openshift_metrics/README.md @@ -39,6 +39,8 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml). - `openshift_metrics_hawkular_replicas:` The number of replicas for Hawkular metrics. +- `openshift_metrics_hawkular_route_annotations`: Dictionary with annotations for the Hawkular route. + - `openshift_metrics_cassandra_replicas`: The number of Cassandra nodes to deploy for the initial cluster. @@ -107,3 +109,78 @@ Author Information ------------------ Jose David Martín (j.david.nieto@gmail.com) + +Image update procedure +---------------------- +An upgrade of the metrics stack from an older version to a newer one is an automated process and should be performed by calling the appropriate ansible playbook and setting the required ansible variables in your inventory, as documented at https://docs.openshift.org/. + +The following text describes a manual update of the metrics images without a version upgrade. To determine the current version of the images being used, you can run: +``` +oc describe pod | grep 'Image ID:' +``` +This will get the repo digest, which can later be compared to the inspected image details. + +A way to determine when your image was last updated: +``` +$ docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +<registry>/openshift3/origin-metrics-cassandra v3.7 f8ad8d569e27 14 hours ago 783.7 MB + +$ docker inspect f8ad8d569e27 +[ + { + . . . + "RepoDigests": [ + "<registry>/openshift3/metrics-cassandra@sha256:d37fc0cab268625b53a92bb98d09fcc501cfca1c68e16bac6dd98446d32ba135" + ], + . . . + "Config": { + . . . + "Labels": { + . . . + "build-date": "2017-10-17T16:47:44.350655", + . . . + "release": "0.143.4.0", + . . . + "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/openshift3/metrics-cassandra/images/v3.7.0-0.143.4.0", + . . . + "version": "v3.7.0" + } + }, + . . . +``` + +Pull the image again to see if the registry has any newer images with the same tag: +``` +$ docker pull <registry>/openshift3/origin-metrics-cassandra:v3.7 +``` + +If there was an update, you need to run `docker pull` on each node. + +It is recommended that you now rerun the `openshift_metrics` playbook to ensure that any necessary config changes are also picked up. + +To manually redeploy your pod, you can do the following: +- for a DC you can do: +``` +oc rollout latest <dc_name> +``` + +- for a RC you can scale down and scale back up +``` +oc scale --replicas=0 <rc_name> + +...
wait for scale down + +oc scale --replicas=<original_replica_count> <rc_name> +``` + +- for a DS you can delete the pod or unlabel and relabel your node +``` +oc delete pod --selector=<ds_selector> +``` + +Changelog +--------- + +Tue Oct 10, 2017 +- Default imagePullPolicy changed from Always to IfNotPresent diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index d9a17ae7f..8da74430f 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -1,6 +1,6 @@ --- openshift_metrics_start_cluster: True -openshift_metrics_install_metrics: True +openshift_metrics_install_metrics: False openshift_metrics_startup_timeout: 500 openshift_metrics_hawkular_replicas: 1 @@ -12,11 +12,12 @@ openshift_metrics_hawkular_cert: "" openshift_metrics_hawkular_key: "" openshift_metrics_hawkular_ca: "" openshift_metrics_hawkular_nodeselector: "" +openshift_metrics_hawkular_route_annotations: {} openshift_metrics_cassandra_replicas: 1 -openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}" -openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}" -openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default('') }}" +openshift_metrics_cassandra_storage_type: "{{ openshift_metrics_storage_kind | default('emptydir') }}" +openshift_metrics_cassandra_pvc_size: "{{ openshift_metrics_storage_volume_size | default('10Gi') }}" +openshift_metrics_cassandra_pv_selector: "{{ openshift_metrics_storage_labels | default('') }}" openshift_metrics_cassandra_limits_memory: 2G openshift_metrics_cassandra_limits_cpu: null openshift_metrics_cassandra_requests_memory: 1G @@ -53,9 +54,12 @@ openshift_metrics_master_url: https://kubernetes.default.svc openshift_metrics_node_id: nodename openshift_metrics_project: openshift-infra -openshift_metrics_cassandra_pvc_prefix: "{{ openshift_hosted_metrics_storage_volume_name | default('metrics-cassandra') }}" -openshift_metrics_cassandra_pvc_access: "{{ openshift_hosted_metrics_storage_access_modes | default(['ReadWriteOnce']) }}" +openshift_metrics_cassandra_pvc_prefix: "{{ openshift_metrics_storage_volume_name | default('metrics-cassandra') }}" +openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}" openshift_metrics_hawkular_user_write_access: False openshift_metrics_heapster_allowed_users: system:master-proxy + +openshift_metrics_cassandra_enable_prometheus_endpoint: True +openshift_metrics_hawkular_enable_prometheus_endpoint: True diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml index 7928a0346..48584bd64 100644 --- a/roles/openshift_metrics/tasks/install_cassandra.yaml +++ b/roles/openshift_metrics/tasks/install_cassandra.yaml @@ -54,6 +54,7 @@ access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" size: "{{ openshift_metrics_cassandra_pvc_size }}" pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" + storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" with_sequence: count={{ openshift_metrics_cassandra_replicas }} when: openshift_metrics_cassandra_storage_type == 'dynamic' changed_when: false diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml index 6b37f85ab..b63f5ca8c 100644 --- 
a/roles/openshift_metrics/tasks/install_hawkular.yaml +++ b/roles/openshift_metrics/tasks/install_hawkular.yaml @@ -40,6 +40,7 @@ dest: "{{ mktemp.stdout }}/templates/hawkular-metrics-route.yaml" vars: name: hawkular-metrics + annotations: "{{ openshift_metrics_hawkular_route_annotations }}" labels: metrics-infra: hawkular-metrics host: "{{ openshift_metrics_hawkular_hostname }}" diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index eaabdd20f..10509fc1e 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -43,7 +43,13 @@ check_mode: no tags: metrics_init -- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}" +- include: install_metrics.yaml + when: + - openshift_metrics_install_metrics | bool + +- include: uninstall_metrics.yaml + when: + - not openshift_metrics_install_metrics | bool - include: uninstall_hosa.yaml when: not openshift_metrics_install_hawkular_agent | bool diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml index 2e2013d40..d6756f9b9 100644 --- a/roles/openshift_metrics/tasks/pre_install.yaml +++ b/roles/openshift_metrics/tasks/pre_install.yaml @@ -10,7 +10,7 @@ is invalid, must be one of: emptydir, pv, dynamic when: - openshift_metrics_cassandra_storage_type not in openshift_metrics_cassandra_storage_types - - "not {{ openshift_metrics_heapster_standalone | bool }}" + - not (openshift_metrics_heapster_standalone | bool) - name: list existing secrets command: > diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 index fc82f49b1..6a3811598 100644 --- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 +++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 @@ -30,7 +30,7 @@ spec: {% endif %} containers: - image: "{{ openshift_metrics_image_prefix }}metrics-cassandra:{{ openshift_metrics_image_version }}" - imagePullPolicy: Always + imagePullPolicy: IfNotPresent name: hawkular-cassandra-{{ node }} ports: - name: cql-port @@ -56,6 +56,8 @@ spec: value: "/cassandra_data" - name: JVM_OPTS value: "-Dcassandra.commitlog.ignorereplayerrors=true" + - name: ENABLE_PROMETHEUS_ENDPOINT + value: "{{ openshift_metrics_cassandra_enable_prometheus_endpoint }}" - name: TRUSTSTORE_NODES_AUTHORITIES value: "/hawkular-cassandra-certs/tls.peer.truststore.crt" - name: TRUSTSTORE_CLIENT_AUTHORITIES diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 index 9a9363075..0662bea53 100644 --- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 +++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 @@ -25,7 +25,7 @@ spec: {% endif %} containers: - image: {{openshift_metrics_image_prefix}}metrics-hawkular-metrics:{{openshift_metrics_image_version}} - imagePullPolicy: Always + imagePullPolicy: IfNotPresent name: hawkular-metrics ports: - name: http-endpoint @@ -55,6 +55,7 @@ spec: - "-Dcom.datastax.driver.FORCE_NIO=true" - "-DKUBERNETES_MASTER_URL={{openshift_metrics_master_url}}" - "-DUSER_WRITE_ACCESS={{openshift_metrics_hawkular_user_write_access}}" + - "-Dhawkular.metrics.jmx-reporting-enabled" env: - name: POD_NAMESPACE valueFrom: @@ -66,6 +67,8 @@ spec: value: "{{ 17 | oo_random_word }}" - name: TRUSTSTORE_AUTHORITIES value: "/hawkular-metrics-certs/tls.truststore.crt" + - name: 
ENABLE_PROMETHEUS_ENDPOINT + value: "{{ openshift_metrics_hawkular_enable_prometheus_endpoint }}" - name: OPENSHIFT_KUBE_PING_NAMESPACE valueFrom: fieldRef: diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 index d65eaf9ae..40d09e9fa 100644 --- a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 +++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 @@ -25,7 +25,7 @@ spec: {% endif %} containers: - image: {{openshift_metrics_image_prefix}}metrics-hawkular-openshift-agent:{{openshift_metrics_image_version}} - imagePullPolicy: Always + imagePullPolicy: IfNotPresent name: hawkular-openshift-agent {% if ((openshift_metrics_hawkular_agent_limits_cpu is defined and openshift_metrics_hawkular_agent_limits_cpu is not none) or (openshift_metrics_hawkular_agent_limits_memory is defined and openshift_metrics_hawkular_agent_limits_memory is not none) diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2 index d8c7763ea..e732c1eee 100644 --- a/roles/openshift_metrics/templates/heapster.j2 +++ b/roles/openshift_metrics/templates/heapster.j2 @@ -27,7 +27,7 @@ spec: containers: - name: heapster image: {{openshift_metrics_image_prefix}}metrics-heapster:{{openshift_metrics_image_version}} - imagePullPolicy: Always + imagePullPolicy: IfNotPresent ports: - containerPort: 8082 name: "http-endpoint" diff --git a/roles/openshift_metrics/templates/route.j2 b/roles/openshift_metrics/templates/route.j2 index 423ab54a3..253d6ecf5 100644 --- a/roles/openshift_metrics/templates/route.j2 +++ b/roles/openshift_metrics/templates/route.j2 @@ -2,6 +2,9 @@ apiVersion: v1 kind: Route metadata: name: {{ name }} +{% if annotations is defined %} + annotations: {{ annotations | to_yaml }} +{% endif %} {% if labels is defined and labels %} labels: {% for k, v in labels.iteritems() %} diff --git a/roles/openshift_metrics/vars/default_images.yml b/roles/openshift_metrics/vars/default_images.yml index 678c4104c..8704ddfa0 100644 --- a/roles/openshift_metrics/vars/default_images.yml +++ b/roles/openshift_metrics/vars/default_images.yml @@ -1,3 +1,3 @@ --- -__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('docker.io/openshift/origin-') }}" -__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default('latest') }}" +__openshift_metrics_image_prefix: "docker.io/openshift/origin-" +__openshift_metrics_image_version: "latest" diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml index f0bdac7d2..5a1728de5 100644 --- a/roles/openshift_metrics/vars/openshift-enterprise.yml +++ b/roles/openshift_metrics/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- -__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('v3.6') }}" +__openshift_metrics_image_prefix: "registry.access.redhat.com/openshift3/" +__openshift_metrics_image_version: "v3.7" diff --git a/roles/openshift_named_certificates/defaults/main.yml b/roles/openshift_named_certificates/defaults/main.yml new file mode 100644 index 000000000..a32e385ec --- /dev/null +++ b/roles/openshift_named_certificates/defaults/main.yml @@ -0,0 +1,6 @@ +--- +openshift_ca_config_dir: "{{ openshift.common.config_base 
}}/master" +openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt" +openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key" +openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt" +openshift_version: "{{ openshift_pkg_version | default('') }}" diff --git a/roles/openshift_named_certificates/tasks/named_certificates.yml b/roles/openshift_named_certificates/tasks/named_certificates.yml deleted file mode 100644 index 7b097b443..000000000 --- a/roles/openshift_named_certificates/tasks/named_certificates.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -- name: Clear named certificates - file: - path: "{{ named_certs_dir }}" - state: absent - when: overwrite_named_certs | bool - -- name: Ensure named certificate directory exists - file: - path: "{{ named_certs_dir }}" - state: directory - mode: 0700 - -- name: Land named certificates - copy: - src: "{{ item.certfile }}" - dest: "{{ named_certs_dir }}" - with_items: "{{ openshift_master_named_certificates | default([]) }}" - -- name: Land named certificate keys - copy: - src: "{{ item.keyfile }}" - dest: "{{ named_certs_dir }}" - mode: 0600 - with_items: "{{ openshift_master_named_certificates | default([]) }}" - -- name: Land named CA certificates - copy: - src: "{{ item }}" - dest: "{{ named_certs_dir }}" - mode: 0600 - with_items: "{{ openshift_master_named_certificates | default([]) | oo_collect('cafile') }}" diff --git a/roles/openshift_named_certificates/vars/main.yml b/roles/openshift_named_certificates/vars/main.yml index 368e9bdac..7f891441d 100644 --- a/roles/openshift_named_certificates/vars/main.yml +++ b/roles/openshift_named_certificates/vars/main.yml @@ -1,10 +1,4 @@ --- -openshift_ca_config_dir: "{{ openshift.common.config_base }}/master" -openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt" -openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key" -openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt" -openshift_version: "{{ openshift_pkg_version | default('') }}" - overwrite_named_certs: "{{ openshift_master_overwrite_named_certificates | default(false) }}" named_certs_dir: "{{ openshift.common.config_base }}/master/named_certificates/" internal_hostnames: "{{ openshift.common.internal_hostnames }}" diff --git a/roles/openshift_nfs/README.md b/roles/openshift_nfs/README.md new file mode 100644 index 000000000..36ea36385 --- /dev/null +++ b/roles/openshift_nfs/README.md @@ -0,0 +1,17 @@ +OpenShift NFS +============= + +Sets up basic NFS services on a cluster host. + +See [tasks/create_export.yml](tasks/create_export.yml) for +instructions on using the export creation tasks file. 
+ +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Tim Bielawa (tbielawa@redhat.com) diff --git a/roles/openshift_nfs/defaults/main.yml b/roles/openshift_nfs/defaults/main.yml new file mode 100644 index 000000000..ee94c7c57 --- /dev/null +++ b/roles/openshift_nfs/defaults/main.yml @@ -0,0 +1,8 @@ +--- +r_openshift_nfs_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_nfs_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" + +r_openshift_nfs_os_firewall_deny: [] +r_openshift_nfs_firewall_allow: +- service: nfs + port: "2049/tcp" diff --git a/roles/openshift_etcd_ca/meta/main.yml b/roles/openshift_nfs/meta/main.yml index f1d669d6b..d7b5910f2 100644 --- a/roles/openshift_etcd_ca/meta/main.yml +++ b/roles/openshift_nfs/meta/main.yml @@ -1,7 +1,7 @@ --- galaxy_info: author: Tim Bielawa - description: Meta role around the etcd_ca role + description: OpenShift Basic NFS Configuration company: Red Hat, Inc. license: Apache License, Version 2.0 min_ansible_version: 2.2 @@ -11,8 +11,6 @@ - 7 categories: - cloud - - system dependencies: -- role: openshift_etcd_facts -- role: etcd_ca - when: (etcd_ca_setup | default(True) | bool) +- role: lib_utils +- role: lib_os_firewall diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml new file mode 100644 index 000000000..b0b888d56 --- /dev/null +++ b/roles/openshift_nfs/tasks/create_export.yml @@ -0,0 +1,34 @@ +--- +# Makes a new NFS export +# +# Include signature +# +# include_role: +# role: openshift_nfs +# tasks_from: create_export +# vars: +# l_nfs_base_dir: Base dir for exports +# l_nfs_export_config: Name to prefix the .exports file with +# l_nfs_export_name: Name of the sub-directory to export +# l_nfs_options: Mount options + +- name: "Ensure {{ l_nfs_export_name }} NFS export directory exists" + file: + path: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }}" + state: directory + mode: 0777 + owner: nfsnobody + group: nfsnobody + +- name: "Create {{ l_nfs_export_name }} NFS export" + lineinfile: + path: "/etc/exports.d/{{ l_nfs_export_config }}.exports" + create: true + state: present + line: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }} {{ l_nfs_options }}" + register: created_export + +- name: Re-export NFS filesystems + command: exportfs -ar + when: + - created_export | changed diff --git a/roles/openshift_hosted/tasks/registry/firewall.yml b/roles/openshift_nfs/tasks/firewall.yml index 775b7d6d7..0898b2b5c 100644 --- a/roles/openshift_hosted/tasks/registry/firewall.yml +++ b/roles/openshift_nfs/tasks/firewall.yml @@ -1,5 +1,5 @@ --- -- when: r_openshift_hosted_registry_firewall_enabled | bool and not r_openshift_hosted_registry_use_firewalld | bool +- when: r_openshift_nfs_firewall_enabled | bool and not r_openshift_nfs_use_firewalld | bool block: - name: Add iptables allow rules os_firewall_manage_iptables: @@ -8,7 +8,7 @@ protocol: "{{ item.port.split('/')[1] }}" port: "{{ item.port.split('/')[0] }}" when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_registry_os_firewall_allow }}" + with_items: "{{ r_openshift_nfs_firewall_allow }}" - name: Remove iptables rules os_firewall_manage_iptables: @@ -17,9 +17,9 @@ protocol: "{{ item.port.split('/')[1] }}" port: "{{ item.port.split('/')[0] }}" when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_registry_os_firewall_deny }}" + with_items: "{{ r_openshift_nfs_os_firewall_deny }}" -- when:
r_openshift_hosted_registry_firewall_enabled | bool and r_openshift_hosted_registry_use_firewalld | bool +- when: r_openshift_nfs_firewall_enabled | bool and r_openshift_nfs_use_firewalld | bool block: - name: Add firewalld allow rules firewalld: @@ -28,7 +28,7 @@ immediate: true state: enabled when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_registry_os_firewall_allow }}" + with_items: "{{ r_openshift_nfs_firewall_allow }}" - name: Remove firewalld allow rules firewalld: @@ -37,4 +37,4 @@ immediate: true state: disabled when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_registry_os_firewall_deny }}" + with_items: "{{ r_openshift_nfs_os_firewall_deny }}" diff --git a/roles/openshift_nfs/tasks/setup.yml b/roles/openshift_nfs/tasks/setup.yml new file mode 100644 index 000000000..3070de495 --- /dev/null +++ b/roles/openshift_nfs/tasks/setup.yml @@ -0,0 +1,29 @@ +--- +- name: setup firewall + include: firewall.yml + static: yes + +- name: Install nfs-utils + package: name=nfs-utils state=present + +- name: Configure NFS + lineinfile: + dest: /etc/sysconfig/nfs + regexp: '^RPCNFSDARGS=.*$' + line: 'RPCNFSDARGS="-N 2 -N 3"' + register: nfs_config + +- name: Restart nfs-config + systemd: name=nfs-config state=restarted + when: nfs_config | changed + +- name: Ensure exports directory exists + file: + path: "{{ l_nfs_base_dir }}" + state: directory + +- name: Enable and start NFS services + systemd: + name: nfs-server + state: started + enabled: yes diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index fb0b494da..67f697924 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -1,4 +1,4 @@ -OpenShift/Atomic Enterprise Node +OpenShift Node ================================ Node service installation @@ -17,22 +17,12 @@ From this role: | Name | Default value | | |----------------------------|-----------------------|----------------------------------------------------------| -| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node | | oreg_url | UNDEF (Optional) | Default docker registry to use | | oreg_url_node | UNDEF (Optional) | Default docker registry to use, specifically on the node | -From openshift_common: - -| Name | Default Value | | -|-------------------------------|---------------------|---------------------| -| openshift_debug_level | 2 | Global openshift debug log verbosity | -| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | -| openshift_hostname | UNDEF (Required) | hostname to use for this instance | - Dependencies ------------ -openshift_common Example Playbook ---------------- diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index cc000496a..37f48e724 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -1,8 +1,64 @@ --- +openshift_node_debug_level: "{{ debug_level | default(2) }}" + r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" + +openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}" +openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}" + +openshift_image_tag: '' + +default_r_openshift_node_image_prep_packages: +- "{{ openshift_service_type }}-master" +- "{{ openshift_service_type }}-node" +- "{{ openshift_service_type }}-docker-excluder" 
+- "{{ openshift_service_type }}-sdn-ovs" +- ansible +- openvswitch +- docker +- etcd +- haproxy +- dnsmasq +- ntp +- logrotate +- httpd-tools +- bind +- firewalld +- libselinux-python +- conntrack-tools +- openssl +- cloud-init +- iproute +- python-dbus +- PyYAML +- yum-utils +# gluster +- glusterfs-fuse +# nfs +- nfs-utils +- flannel +- bash-completion +# cockpit +- cockpit-ws +- cockpit-system +- cockpit-bridge +- cockpit-docker +# iscsi +- iscsi-initiator-utils +# ceph +- ceph-common +# systemcontainer +# - runc +# - container-selinux +# - atomic +# +r_openshift_node_image_prep_packages: "{{ default_r_openshift_node_image_prep_packages | union(openshift_node_image_prep_packages | default([])) }}" + +openshift_node_bootstrap: False + r_openshift_node_os_firewall_deny: [] -r_openshift_node_os_firewall_allow: +default_r_openshift_node_os_firewall_allow: - service: Kubernetes kubelet port: 10250/tcp - service: http @@ -11,18 +67,48 @@ r_openshift_node_os_firewall_allow: port: 443/tcp - service: OpenShift OVS sdn port: 4789/udp - cond: openshift.common.use_openshift_sdn | default(true) | bool + cond: openshift_use_openshift_sdn | bool - service: Calico BGP Port port: 179/tcp - cond: "{{ openshift.common.use_calico | bool }}" + cond: "{{ openshift_node_use_calico }}" - service: Kubernetes service NodePort TCP port: "{{ openshift_node_port_range | default('') }}/tcp" cond: "{{ openshift_node_port_range is defined }}" - service: Kubernetes service NodePort UDP port: "{{ openshift_node_port_range | default('') }}/udp" cond: "{{ openshift_node_port_range is defined }}" +# Allow multiple port ranges to be added to the role +r_openshift_node_os_firewall_allow: "{{ default_r_openshift_node_os_firewall_allow | union(openshift_node_open_ports | default([])) }}" -oreg_url: '' -oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" -oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker" +# oreg_url is defined by user input +oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}" +oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker" oreg_auth_credentials_replace: False +l_bind_docker_reg_auth: False + +# NOTE +# r_openshift_node_*_default may be defined external to this role. +# openshift_use_*, if defined, may affect other roles or play behavior. 
+openshift_node_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}" +openshift_node_use_openshift_sdn: "{{ openshift_node_use_openshift_sdn_default }}" + +openshift_node_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}" +openshift_node_sdn_network_plugin_name: "{{ openshift_node_sdn_network_plugin_name_default }}" + +openshift_node_use_calico_default: "{{ openshift_use_calico | default(False) }}" +openshift_node_use_calico: "{{ openshift_node_use_calico_default }}" + +openshift_node_use_nuage_default: "{{ openshift_use_nuage | default(False) }}" +openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}" + +openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}" +openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}" + +openshift_node_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}" +openshift_node_use_kuryr: "{{ openshift_node_use_kuryr_default }}" + +openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}" +openshift_node_data_dir: "{{ openshift_node_data_dir_default }}" + +openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}" +openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}" diff --git a/roles/openshift_node/files/bootstrap.yml b/roles/openshift_node/files/bootstrap.yml new file mode 100644 index 000000000..ea280640f --- /dev/null +++ b/roles/openshift_node/files/bootstrap.yml @@ -0,0 +1,63 @@ +#!/usr/bin/ansible-playbook +--- +- hosts: localhost + gather_facts: yes + vars: + origin_dns: + file: /etc/dnsmasq.d/origin-dns.conf + lines: + - regex: ^listen-address + state: present + line: "listen-address={{ ansible_default_ipv4.address }}" + node_dns: + file: /etc/dnsmasq.d/node-dnsmasq.conf + lines: + - regex: "^server=/in-addr.arpa/127.0.0.1$" + line: server=/in-addr.arpa/127.0.0.1 + - regex: "^server=/cluster.local/127.0.0.1$" + line: server=/cluster.local/127.0.0.1 + + tasks: + - include_vars: openshift_settings.yaml + + - name: set the data for node_dns + lineinfile: + create: yes + insertafter: EOF + path: "{{ node_dns.file }}" + regexp: "{{ item.regex }}" + line: "{{ item.line | default(omit) }}" + with_items: "{{ node_dns.lines }}" + + - name: set the data for origin_dns + lineinfile: + create: yes + state: "{{ item.state | default('present') }}" + insertafter: "{{ item.after | default(omit) }}" + path: "{{ origin_dns.file }}" + regexp: "{{ item.regex }}" + line: "{{ item.line | default(omit)}}" + with_items: "{{ origin_dns.lines }}" + + - when: + - openshift_group_type is defined + - openshift_group_type != '' + - openshift_group_type != 'master' + block: + - name: determine the openshift_service_type + stat: + path: /etc/sysconfig/atomic-openshift-node + register: service_type_results + + - name: set openshift_service_type fact based on stat results + set_fact: + openshift_service_type: "{{ service_type_results.stat.exists | ternary('atomic-openshift', 'origin') }}" + + - name: update the sysconfig to have necessary variables + lineinfile: + dest: "/etc/sysconfig/{{ openshift_service_type }}-node" + line: "{{ item.line }}" + regexp: "{{ item.regexp }}" + with_items: + - line: "BOOTSTRAP_CONFIG_NAME=node-config-{{ openshift_group_type }}" + regexp: "^BOOTSTRAP_CONFIG_NAME=.*" diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index f2c45a4bd..b102c1b18 100644 --- 
a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -3,7 +3,11 @@ systemd: name: openvswitch state: restarted - when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | default(true) | bool + when: + - (not skip_node_svc_handlers | default(False) | bool) + - not (ovs_service_status_changed | default(false) | bool) + - openshift_node_use_openshift_sdn | bool + - not openshift_node_bootstrap register: l_openshift_node_stop_openvswitch_result until: not l_openshift_node_stop_openvswitch_result | failed retries: 3 @@ -11,10 +15,11 @@ notify: - restart openvswitch pause - - name: restart openvswitch pause pause: seconds=15 - when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool + when: + - (not skip_node_svc_handlers | default(False) | bool) + - openshift.common.is_containerized | bool - name: restart node systemd: @@ -27,9 +32,7 @@ when: - (not skip_node_svc_handlers | default(False) | bool) - not (node_service_status_changed | default(false) | bool) - -- name: reload sysctl.conf - command: /sbin/sysctl -p + - not openshift_node_bootstrap - name: reload systemd units command: systemctl daemon-reload diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index 06373de04..5bc7b9869 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -15,10 +15,7 @@ dependencies: - role: openshift_node_facts - role: lib_openshift - role: lib_os_firewall -- role: openshift_common - role: openshift_clock - role: openshift_docker -- role: openshift_node_certificates - role: openshift_cloud_provider - role: openshift_node_dnsmasq - when: openshift.common.use_dnsmasq | bool diff --git a/roles/openshift_node/tasks/aws.yml b/roles/openshift_node/tasks/aws.yml new file mode 100644 index 000000000..38c2b794d --- /dev/null +++ b/roles/openshift_node/tasks/aws.yml @@ -0,0 +1,21 @@ +--- +- name: Configure AWS Cloud Provider Settings + lineinfile: + dest: /etc/sysconfig/{{ openshift.common.service_type }}-node + regexp: "{{ item.regex }}" + line: "{{ item.line }}" + create: true + with_items: + - regex: '^AWS_ACCESS_KEY_ID=' + line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}" + - regex: '^AWS_SECRET_ACCESS_KEY=' + line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" + register: sys_env_update + no_log: True + when: + - openshift_cloudprovider_kind is defined + - openshift_cloudprovider_kind == 'aws' + - openshift_cloudprovider_aws_access_key is defined + - openshift_cloudprovider_aws_secret_key is defined + notify: + - restart node diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml new file mode 100644 index 000000000..cf22181a8 --- /dev/null +++ b/roles/openshift_node/tasks/bootstrap.yml @@ -0,0 +1,105 @@ +--- +- name: install needed rpm(s) + package: + name: "{{ item }}" + state: present + with_items: "{{ r_openshift_node_image_prep_packages }}" + +- name: create the directory for node + file: + state: directory + path: "/etc/systemd/system/{{ openshift_service_type }}-node.service.d" + +- name: laydown systemd override + copy: + dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service.d/override.conf" + content: | + [Unit] + After=cloud-init.service + +- name: update the sysconfig to have necessary variables + lineinfile: + dest: 
"/etc/sysconfig/{{ openshift_service_type }}-node" + line: "{{ item.line | default(omit) }}" + regexp: "{{ item.regexp }}" + state: "{{ item.state | default('present') }}" + with_items: + # add the kubeconfig + - line: "KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig" + regexp: "^KUBECONFIG=.*" + # remove the config file. This comes from openshift_facts + - regexp: "^CONFIG_FILE=.*" + state: absent + +- name: include aws sysconfig credentials + include: aws.yml + static: yes + +#- name: update the ExecStart to have bootstrap +# lineinfile: +# dest: "/usr/lib/systemd/system/{{ openshift_service_type }}-node.service" +# line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}" +# regexp: "^ExecStart=.*" + +- name: "disable {{ openshift_service_type }}-node and {{ openshift_service_type }}-master services" + systemd: + name: "{{ item }}" + enabled: no + with_items: + - "{{ openshift_service_type }}-node.service" + - "{{ openshift_service_type }}-master.service" + +- name: Check for RPM generated config marker file .config_managed + stat: + path: /etc/origin/.config_managed + register: rpmgenerated_config + +- name: create directories for bootstrapping + file: + state: directory + dest: "{{ item }}" + with_items: + - /root/openshift_bootstrap + - /var/lib/origin/openshift.local.config + - /var/lib/origin/openshift.local.config/node + - "/etc/docker/certs.d/docker-registry.default.svc:5000" + +- name: laydown the bootstrap.yml file for on boot configuration + copy: + src: bootstrap.yml + dest: /root/openshift_bootstrap/bootstrap.yml + +- name: symlink master ca for docker-registry + file: + src: "{{ item }}" + dest: "/etc/docker/certs.d/docker-registry.default.svc:5000/{{ item | basename }}" + state: link + force: yes + with_items: + - /var/lib/origin/openshift.local.config/node/node-client-ca.crt + +- when: rpmgenerated_config.stat.exists + block: + - name: Remove RPM generated config files if present + file: + path: "/etc/origin/{{ item }}" + state: absent + with_items: + - master + - .config_managed + + # with_fileglob doesn't work correctly due to a few issues. + # Could change this to fileglob when it gets fixed. 
+ - name: find all files in /etc/origin/node so we can remove them + find: + path: /etc/origin/node/ + register: find_results + + - name: Remove everything except the resolv.conf required for node + file: + path: "{{ item.path }}" + state: absent + when: + - "'resolv.conf' not in item.path" + - "'node-dnsmasq.conf' not in item.path" + with_items: "{{ find_results.files }}" diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml new file mode 100644 index 000000000..c08f43118 --- /dev/null +++ b/roles/openshift_node/tasks/config.yml @@ -0,0 +1,107 @@ +--- +- name: Install the systemd units + include: systemd_units.yml + +- name: Start and enable openvswitch service + systemd: + name: openvswitch.service + enabled: yes + state: started + daemon_reload: yes + when: + - openshift.common.is_containerized | bool + - openshift_node_use_openshift_sdn | default(true) | bool + register: ovs_start_result + until: not ovs_start_result | failed + retries: 3 + delay: 30 + +- set_fact: + ovs_service_status_changed: "{{ ovs_start_result | changed }}" + +- file: + dest: "{{ (openshift_node_kubelet_args|default({'config':None})).config}}" + state: directory + when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args + +# TODO: add the validate parameter when there is a validation command to run +- name: Create the Node config + template: + dest: "{{ openshift.common.config_base }}/node/node-config.yaml" + src: node.yaml.v1.j2 + backup: true + owner: root + group: root + mode: 0600 + notify: + - restart node + +- name: Configure Node Environment Variables + lineinfile: + dest: /etc/sysconfig/{{ openshift.common.service_type }}-node + regexp: "^{{ item.key }}=" + line: "{{ item.key }}={{ item.value }}" + create: true + with_dict: "{{ openshift.node.env_vars | default({}) }}" + notify: + - restart node + +- name: include aws provider credentials + include: aws.yml + static: yes + +# Necessary because when you're on a node that's also a master the master will be +# restarted after the node restarts docker and it will take up to 60 seconds for +# systemd to start the master again +- when: + - openshift.common.is_containerized | bool + - not openshift_node_bootstrap + block: + - name: Wait for master API to become available before proceeding + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. 
+ command: > + curl --silent --tlsv1.2 --cacert {{ openshift.common.config_base }}/node/ca.crt + {{ openshift_node_master_api_url }}/healthz/ready + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: api_available_output + until: api_available_output.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false + + - name: Start and enable node dep + systemd: + daemon_reload: yes + name: "{{ openshift.common.service_type }}-node-dep" + enabled: yes + state: started + +- when: not openshift_node_bootstrap + block: + - name: Start and enable node + systemd: + name: "{{ openshift.common.service_type }}-node" + enabled: yes + state: started + daemon_reload: yes + register: node_start_result + until: not node_start_result | failed + retries: 1 + delay: 30 + ignore_errors: true + + - name: Dump logs from node service if it failed + command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node + when: node_start_result | failed + + - name: Abort if node failed to start + fail: + msg: Node failed to start please inspect the logs and try again + when: node_start_result | failed + + - set_fact: + node_service_status_changed: "{{ node_start_result | changed }}" diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml index 1186062eb..527580481 100644 --- a/roles/openshift_node/tasks/config/configure-node-settings.yml +++ b/roles/openshift_node/tasks/config/configure-node-settings.yml @@ -7,7 +7,7 @@ create: true with_items: - regex: '^OPTIONS=' - line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}" + line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}" - regex: '^CONFIG_FILE=' line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml" - regex: '^IMAGE_VERSION=' diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml deleted file mode 100644 index f92ff79b5..000000000 --- a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Install Node docker service file - template: - dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" - src: openshift.docker.node.service - notify: - - reload systemd units - - restart node diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml new file mode 100644 index 000000000..6b7e40491 --- /dev/null +++ b/roles/openshift_node/tasks/install.yml @@ -0,0 +1,29 @@ +--- +- when: not openshift.common.is_containerized | bool + block: + - name: Install Node package + package: + name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}" + state: present + + - name: Install sdn-ovs package + package: + name: "{{ openshift.common.service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}" + state: present + when: + - openshift_node_use_openshift_sdn | bool + + - name: Install conntrack-tools package + package: + name: "conntrack-tools" + state: present + +- when: + - openshift.common.is_containerized | bool + - not openshift.common.is_node_system_container | bool + block: + - name: Pre-pull node image when containerized + command: > + docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }} + 
register: pull_result + changed_when: "'Downloaded newer image' in pull_result.stdout" diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 525dd1d1a..eae9ca7bc 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -1,43 +1,16 @@ --- -# TODO: allow for overriding default ports where possible - fail: msg: "SELinux is disabled, This deployment type requires that SELinux is enabled." when: - - (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise'] + - (not ansible_selinux or ansible_selinux.status != 'enabled') + - deployment_type == 'openshift-enterprise' - not openshift_use_crio | default(false) - name: setup firewall include: firewall.yml static: yes -- name: Set node facts - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - # Reset node labels to an empty dictionary. - - role: node - local_facts: - labels: {} - - role: node - local_facts: - annotations: "{{ openshift_node_annotations | default(none) }}" - debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" - iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}" - kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" - labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}" - registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}" - schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" - sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" - storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}" - set_node_ip: "{{ openshift_set_node_ip | default(None) }}" - node_image: "{{ osn_image | default(None) }}" - ovs_image: "{{ osn_ovs_image | default(None) }}" - proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}" - local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}" - dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}" - env_vars: "{{ openshift_node_env_vars | default(None) }}" - +#### Disable SWAP ##### # https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory - name: Check for swap usage command: grep "^[^#].*swap" /etc/fstab @@ -46,9 +19,10 @@ failed_when: false register: swap_result -# Disable Swap Block -- block: - +- when: + - swap_result.stdout_lines | length > 0 + - openshift_disable_swap | default(true) | bool + block: - name: Disable swap command: swapoff --all @@ -64,29 +38,10 @@ dest: /etc/fstab line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines' state: present +#### End Disable Swap Block #### - when: - - swap_result.stdout_lines | length > 0 - - openshift_disable_swap | default(true) | bool -# End Disable Swap Block - -- name: Install Node package - package: - name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" - state: present - when: not openshift.common.is_containerized | bool - -- name: setup tuned - include: tuned.yml - static: yes - -- name: Install sdn-ovs package - package: - name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}" - state: present - when: - - 
openshift.common.use_openshift_sdn | default(true) | bool - - not openshift.common.is_containerized | bool +- name: include node installer + include: install.yml - name: Restart cri-o systemd: @@ -95,102 +50,28 @@ state: restarted when: openshift_use_crio | default(false) -- name: Install conntrack-tools package - package: - name: "conntrack-tools" - state: present - when: not openshift.common.is_containerized | bool - -- name: Install the systemd units - include: systemd_units.yml +- name: restart NetworkManager to ensure resolv.conf is present + systemd: + name: NetworkManager + enabled: yes + state: restarted + when: openshift_node_bootstrap | bool # The atomic-openshift-node service will set this parameter on # startup, but if the network service is restarted this setting is # lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388 -# -# Use lineinfile w/ a handler for this task until -# https://github.com/ansible/ansible/pull/24277 is included in an -# ansible release and we can use the sysctl module. -- name: Persist net.ipv4.ip_forward sysctl entry - lineinfile: dest=/etc/sysctl.conf regexp='^net.ipv4.ip_forward' line='net.ipv4.ip_forward=1' - notify: - - reload sysctl.conf - -- name: Start and enable openvswitch service - systemd: - name: openvswitch.service - enabled: yes - state: started - daemon_reload: yes - when: - - openshift.common.is_containerized | bool - - openshift.common.use_openshift_sdn | default(true) | bool - register: ovs_start_result - until: not ovs_start_result | failed - retries: 3 - delay: 30 - -- set_fact: - ovs_service_status_changed: "{{ ovs_start_result | changed }}" - -- file: - dest: "{{ (openshift_node_kubelet_args|default({'config':None})).config}}" - state: directory - when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args - -# TODO: add the validate parameter when there is a validation command to run -- name: Create the Node config - template: - dest: "{{ openshift.common.config_base }}/node/node-config.yaml" - src: node.yaml.v1.j2 - backup: true - owner: root - group: root - mode: 0600 - notify: - - restart node - -- name: Check for credentials file for registry auth - stat: - path: "{{oreg_auth_credentials_path }}" - when: - - oreg_auth_user is defined - register: node_oreg_auth_credentials_stat - -- name: Create credentials for registry auth - command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" - when: - - oreg_auth_user is defined - - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool - notify: - - restart node +- sysctl: + name: net.ipv4.ip_forward + value: 1 + sysctl_file: "/etc/sysctl.d/99-openshift.conf" + reload: yes -- name: Configure AWS Cloud Provider Settings - lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-node - regexp: "{{ item.regex }}" - line: "{{ item.line }}" - create: true - with_items: - - regex: '^AWS_ACCESS_KEY_ID=' - line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}" - - regex: '^AWS_SECRET_ACCESS_KEY=' - line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" - no_log: True - when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined - notify: - - restart node +- include: registry_auth.yml -- name: Configure Node Environment Variables - 
lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-node - regexp: "^{{ item.key }}=" - line: "{{ item.key }}={{ item.value }}" - create: true - with_dict: "{{ openshift.node.env_vars | default({}) }}" - notify: - - restart node +- name: include standard node config + include: config.yml +#### Storage class plugins here #### - name: NFS storage plugin configuration include: storage_plugins/nfs.yml tags: @@ -208,55 +89,11 @@ include: storage_plugins/iscsi.yml when: "'iscsi' in openshift.node.storage_plugin_deps" -# Necessary because when you're on a node that's also a master the master will be -# restarted after the node restarts docker and it will take up to 60 seconds for -# systemd to start the master again -- name: Wait for master API to become available before proceeding - # Using curl here since the uri module requires python-httplib2 and - # wait_for port doesn't provide health information. - command: > - curl --silent --tlsv1.2 --cacert {{ openshift.common.config_base }}/node/ca.crt - {{ openshift_node_master_api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false - when: openshift.common.is_containerized | bool - -- name: Start and enable node dep - systemd: - daemon_reload: yes - name: "{{ openshift.common.service_type }}-node-dep" - enabled: yes - state: started - when: openshift.common.is_containerized | bool - - -- name: Start and enable node - systemd: - name: "{{ openshift.common.service_type }}-node" - enabled: yes - state: started - daemon_reload: yes - register: node_start_result - until: not node_start_result | failed - retries: 1 - delay: 30 - ignore_errors: true - -- name: Dump logs from node service if it failed - command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node - when: node_start_result | failed +##### END Storage ##### -- name: Abort if node failed to start - fail: - msg: Node failed to start please inspect the logs and try again - when: node_start_result | failed +- include: config/workaround-bz1331590-ovs-oom-fix.yml + when: openshift_node_use_openshift_sdn | default(true) | bool -- set_fact: - node_service_status_changed: "{{ node_start_result | changed }}" +- name: include bootstrap node config + include: bootstrap.yml + when: openshift_node_bootstrap diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml index b2dceedbe..164a79b39 100644 --- a/roles/openshift_node/tasks/node_system_container.yml +++ b/roles/openshift_node/tasks/node_system_container.yml @@ -1,4 +1,9 @@ --- +- name: Ensure proxies are in the atomic.conf + include_role: + name: openshift_atomic + tasks_from: proxy + - name: Pre-pull node system container image command: > atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }} @@ -9,4 +14,8 @@ oc_atomic_container: name: "{{ openshift.common.service_type }}-node" image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}" + values: + - "DNS_DOMAIN={{ openshift.common.dns_domain }}" + - "DOCKER_SERVICE={{ openshift.docker.service_name 
}}.service" + - "MASTER_SERVICE={{ openshift.common.service_type }}.service" state: latest diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml index e09063aa5..0f73ce454 100644 --- a/roles/openshift_node/tasks/openvswitch_system_container.yml +++ b/roles/openshift_node/tasks/openvswitch_system_container.yml @@ -10,6 +10,11 @@ l_service_name: "{{ openshift.docker.service_name }}" when: not l_use_crio +- name: Ensure proxies are in the atomic.conf + include_role: + name: openshift_atomic + tasks_from: proxy + - name: Pre-pull OpenVSwitch system container image command: > atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }} diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml new file mode 100644 index 000000000..5e5e4f94a --- /dev/null +++ b/roles/openshift_node/tasks/registry_auth.yml @@ -0,0 +1,27 @@ +--- +- name: Check for credentials file for registry auth + stat: + path: "{{ oreg_auth_credentials_path }}" + when: oreg_auth_user is defined + register: node_oreg_auth_credentials_stat + +- name: Create credentials for registry auth + command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" + when: + - oreg_auth_user is defined + - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + register: node_oreg_auth_credentials_create + retries: 3 + delay: 5 + until: node_oreg_auth_credentials_create.rc == 0 + notify: + - restart node + +# Container images may need the registry credentials +- name: Setup ro mount of /root/.docker for containerized hosts + set_fact: + l_bind_docker_reg_auth: True + when: + - openshift.common.is_containerized | bool + - oreg_auth_user is defined + - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index b86bb1549..9c182ade6 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -1,49 +1,31 @@ --- -# This file is included both in the openshift_master role and in the upgrade -# playbooks. 
- -- include: config/install-node-deps-docker-service-file.yml - when: openshift.common.is_containerized | bool - -- block: - - name: Pre-pull node image - command: > - docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }} - register: pull_result - changed_when: "'Downloaded newer image' in pull_result.stdout" - - - include: config/install-node-docker-service-file.yml - when: - - openshift.common.is_containerized | bool - - not openshift.common.is_node_system_container | bool - - name: Install Node service file template: dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" - src: "node.service.j2" - when: not openshift.common.is_containerized | bool + src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}" + when: not openshift.common.is_node_system_container | bool notify: - reload systemd units - restart node -- include: config/install-ovs-service-env-file.yml - when: openshift.common.is_containerized | bool +- when: openshift.common.is_containerized | bool + block: + - name: include node deps docker service file + include: config/install-node-deps-docker-service-file.yml -- name: Install Node system container - include: node_system_container.yml - when: - - openshift.common.is_containerized | bool - - openshift.common.is_node_system_container | bool + - name: include ovs service environment file + include: config/install-ovs-service-env-file.yml -- name: Install OpenvSwitch system containers - include: openvswitch_system_container.yml - when: - - openshift.common.use_openshift_sdn | default(true) | bool - - openshift.common.is_containerized | bool - - openshift.common.is_openvswitch_system_container | bool + - name: Install Node system container + include: node_system_container.yml + when: + - openshift.common.is_node_system_container | bool -- include: config/workaround-bz1331590-ovs-oom-fix.yml - when: openshift.common.use_openshift_sdn | default(true) | bool + - name: Install OpenvSwitch system containers + include: openvswitch_system_container.yml + when: + - openshift_node_use_openshift_sdn | bool + - openshift.common.is_openvswitch_system_container | bool - block: - name: Pre-pull openvswitch image @@ -55,7 +37,7 @@ - include: config/install-ovs-docker-service-file.yml when: - openshift.common.is_containerized | bool - - openshift.common.use_openshift_sdn | default(true) | bool + - openshift_node_use_openshift_sdn | bool - not openshift.common.is_openvswitch_system_container | bool - include: config/configure-node-settings.yml diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2 index 0856737f6..7602d8ee6 100644 --- a/roles/openshift_node/templates/node.service.j2 +++ b/roles/openshift_node/templates/node.service.j2 @@ -12,17 +12,17 @@ After=dnsmasq.service [Service] Type=notify -EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node +EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node Environment=GOTRACEBACK=crash ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/ ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1 ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers 
array:string: -ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/openshift start node {% if openshift_node_bootstrap %} --kubeconfig=${KUBECONFIG} --bootstrap-config-name=${BOOTSTRAP_CONFIG_NAME}{% endif %} --config=${CONFIG_FILE} $OPTIONS LimitNOFILE=65536 LimitCORE=infinity WorkingDirectory=/var/lib/origin/ -SyslogIdentifier={{ openshift.common.service_type }}-node +SyslogIdentifier={{ openshift_service_type }}-node Restart=always RestartSec=5s TimeoutStartSec=300 diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 711afcadb..718d35dca 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -13,7 +13,7 @@ dockerConfig: iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}" imageConfig: format: {{ openshift.node.registry_url }} - latest: false + latest: {{ openshift_node_image_config_latest }} kind: NodeConfig kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }} {% if openshift_use_crio | default(False) %} @@ -21,8 +21,6 @@ kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yam - remote container-runtime-endpoint: - /var/run/crio.sock - experimental-cri: - - 'true' image-service-endpoint: - /var/run/crio.sock node-labels: @@ -39,15 +37,15 @@ masterClientConnectionOverrides: qps: 100 {% endif %} masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig -{% if openshift.common.use_openshift_sdn | bool %} -networkPluginName: {{ openshift.common.sdn_network_plugin_name }} +{% if openshift_node_use_openshift_sdn | bool %} +networkPluginName: {{ openshift_node_sdn_network_plugin_name }} {% endif %} # networkConfig struct introduced in origin 1.0.6 and OSE 3.0.2 which # deprecates networkPluginName above. The two should match. 
networkConfig: mtu: {{ openshift.node.sdn_mtu }} -{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool or openshift.common.use_contiv | bool or openshift.common.sdn_network_plugin_name == 'cni' %} - networkPluginName: {{ openshift.common.sdn_network_plugin_name }} +{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_use_kuryr | bool or openshift_node_sdn_network_plugin_name == 'cni' %} + networkPluginName: {{ openshift_node_sdn_network_plugin_name }} {% endif %} {% if openshift.node.set_node_ip | bool %} nodeIP: {{ openshift.common.ip }} @@ -68,10 +66,12 @@ servingInfo: - {{ cipher_suite }} {% endfor %} {% endif %} -volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes +volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes +{% if not (openshift_node_use_kuryr | default(False)) | bool %} proxyArguments: proxy-mode: - {{ openshift.node.proxy_mode }} +{% endif %} volumeConfig: localQuota: perFSGroup: {{ openshift.node.local_quota_per_fsgroup }} diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service index 8734e7443..fa7238849 100644 --- a/roles/openshift_node/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node/templates/openshift.docker.node.dep.service @@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service {% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %} [Service] -ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" +ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" ExecStop= SyslogIdentifier={{ openshift.common.service_type }}-node-dep diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index 639b6f6c8..561aa01f4 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -4,8 +4,9 @@ After={{ openshift.docker.service_name }}.service After=openvswitch.service PartOf={{ openshift.docker.service_name }}.service Requires={{ openshift.docker.service_name }}.service -{% if openshift.common.use_openshift_sdn %} +{% if openshift_node_use_openshift_sdn %} Wants=openvswitch.service +PartOf=openvswitch.service After=ovsdb-server.service After=ovs-vswitchd.service {% endif %} @@ -21,7 +22,23 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/ ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq 
uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1 -ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION} +ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \ + --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \ + -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \ + -e HOST=/rootfs -e HOST_ETC=/host-etc \ + -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \ + -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \ + {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ + -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \ + -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \ + -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \ + -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \ + -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \ + -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \ + {% if openshift_use_nuage | default(false) -%} $NUAGE_ADDTL_BIND_MOUNTS {% endif -%} \ + -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \ + {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ + {{ openshift.node.node_image }}:${IMAGE_VERSION} ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf diff --git a/roles/openshift_node_certificates/defaults/main.yml b/roles/openshift_node_certificates/defaults/main.yml index 70a38b844..455f26f30 100644 --- a/roles/openshift_node_certificates/defaults/main.yml +++ b/roles/openshift_node_certificates/defaults/main.yml @@ -1,2 +1,3 @@ --- openshift_node_cert_expire_days: 730 +openshift_ca_host: '' diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml index 4abe8bcaf..ef66bf9ca 100644 --- a/roles/openshift_node_certificates/handlers/main.yml +++ 
b/roles/openshift_node_certificates/handlers/main.yml @@ -2,9 +2,21 @@ - name: update ca trust command: update-ca-trust notify: - - restart docker after updating ca trust + - check for container runtime after updating ca trust -- name: restart docker after updating ca trust +- name: check for container runtime after updating ca trust + command: > + systemctl -q is-active {{ openshift.docker.service_name }}.service + register: l_docker_installed + # An rc of 0 indicates that the container runtime service is + # running. We will restart it by notifying the restart handler since + # we have updated the system CA trust. + changed_when: l_docker_installed.rc == 0 + failed_when: false + notify: + - restart container runtime after updating ca trust + +- name: restart container runtime after updating ca trust systemd: name: "{{ openshift.docker.service_name }}" state: restarted diff --git a/roles/openshift_node_certificates/meta/main.yml b/roles/openshift_node_certificates/meta/main.yml index 93216c1d2..0440bf11a 100644 --- a/roles/openshift_node_certificates/meta/main.yml +++ b/roles/openshift_node_certificates/meta/main.yml @@ -12,5 +12,4 @@ galaxy_info: categories: - cloud - system -dependencies: -- role: openshift_facts +dependencies: [] diff --git a/roles/openshift_node_dnsmasq/README.md b/roles/openshift_node_dnsmasq/README.md new file mode 100644 index 000000000..4596190d7 --- /dev/null +++ b/roles/openshift_node_dnsmasq/README.md @@ -0,0 +1,27 @@ +OpenShift Node DNS resolver +=========================== + +Configure dnsmasq to act as a DNS resolver for an OpenShift node. + +Requirements +------------ + +Role Variables +-------------- + +From this role: + +| Name | Default value | Description | +|-----------------------------------------------------|---------------|-----------------------------------------------------------------------------------| +| openshift_node_dnsmasq_install_network_manager_hook | true | Install NetworkManager hook updating /etc/resolv.conf with local dnsmasq instance | + +Dependencies +------------ + +* openshift_common +* openshift_node_facts + +License +------- + +Apache License Version 2.0 diff --git a/roles/openshift_node_dnsmasq/defaults/main.yml b/roles/openshift_node_dnsmasq/defaults/main.yml index ed97d539c..ebcff46b5 100644 --- a/roles/openshift_node_dnsmasq/defaults/main.yml +++ b/roles/openshift_node_dnsmasq/defaults/main.yml @@ -1 +1,7 @@ --- +openshift_node_dnsmasq_install_network_manager_hook: true + +# lo must always be present in this list or dnsmasq will conflict with +# the node's dns service. +openshift_node_dnsmasq_except_interfaces: +- lo diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh index 61d2a5b51..f4e48b5b7 100755 --- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh +++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh @@ -54,6 +54,8 @@ domain-needed server=/cluster.local/172.30.0.1 server=/30.172.in-addr.arpa/172.30.0.1 enable-dbus +dns-forward-max=5000 +cache-size=5000 EOF # New config file, must restart NEEDS_RESTART=1 @@ -112,7 +114,9 @@ EOF fi sed -e '/^nameserver.*$/d' /etc/resolv.conf >> ${NEW_RESOLV_CONF} echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF} - if ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then + if ! grep -qw search ${NEW_RESOLV_CONF}; then + echo 'search cluster.local' >> ${NEW_RESOLV_CONF} + elif ! 
grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF} fi cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf diff --git a/roles/openshift_node_dnsmasq/handlers/main.yml b/roles/openshift_node_dnsmasq/handlers/main.yml index b4a0c3583..9f98126a0 100644 --- a/roles/openshift_node_dnsmasq/handlers/main.yml +++ b/roles/openshift_node_dnsmasq/handlers/main.yml @@ -3,6 +3,7 @@ systemd: name: NetworkManager state: restarted + enabled: True - name: restart dnsmasq systemd: diff --git a/roles/openshift_node_dnsmasq/meta/main.yml b/roles/openshift_node_dnsmasq/meta/main.yml index 84035b88c..d80ed1b72 100644 --- a/roles/openshift_node_dnsmasq/meta/main.yml +++ b/roles/openshift_node_dnsmasq/meta/main.yml @@ -12,5 +12,4 @@ galaxy_info: categories: - cloud dependencies: -- role: openshift_common - role: openshift_node_facts diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml index d0221a94b..9bbaafc29 100644 --- a/roles/openshift_node_dnsmasq/tasks/main.yml +++ b/roles/openshift_node_dnsmasq/tasks/main.yml @@ -14,6 +14,17 @@ package: name=dnsmasq state=installed when: not openshift.common.is_atomic | bool +- name: ensure origin/node directory exists + file: + state: directory + path: "{{ item }}" + owner: root + group: root + mode: '0700' + with_items: + - /etc/origin + - /etc/origin/node + # this file is copied to /etc/dnsmasq.d/ when the node starts and is removed # when the node stops. A dbus-message is sent to dnsmasq to add the same entries # so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or diff --git a/roles/openshift_node_dnsmasq/tasks/network-manager.yml b/roles/openshift_node_dnsmasq/tasks/network-manager.yml index dddcfc9da..e5a92a630 100644 --- a/roles/openshift_node_dnsmasq/tasks/network-manager.yml +++ b/roles/openshift_node_dnsmasq/tasks/network-manager.yml @@ -5,5 +5,6 @@ dest: /etc/NetworkManager/dispatcher.d/ mode: 0755 notify: restart NetworkManager + when: openshift_node_dnsmasq_install_network_manager_hook | default(true) | bool - meta: flush_handlers diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml index d5fda7bd0..8a7da66c2 100644 --- a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml +++ b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml @@ -1,2 +1,11 @@ --- - fail: msg="Currently, NetworkManager must be installed and enabled prior to installation." 
+ when: not openshift_node_bootstrap | bool + +- name: Install NetworkManager during node_bootstrap provisioning + package: + name: NetworkManager + state: present + notify: restart NetworkManager + +- include: ./network-manager.yml diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 index ef3ba2880..6543c7c3e 100644 --- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 +++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 @@ -3,5 +3,10 @@ domain-needed no-negcache max-cache-ttl=1 enable-dbus -bind-interfaces -listen-address={{ openshift.node.dns_ip }} +dns-forward-max=5000 +cache-size=5000 +bind-dynamic +{% for interface in openshift_node_dnsmasq_except_interfaces %} +except-interface={{ interface }} +{% endfor %} +# End of config diff --git a/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py new file mode 100644 index 000000000..69069f2dc --- /dev/null +++ b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py @@ -0,0 +1,32 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' +Custom filters for use in openshift-node +''' +from ansible import errors + + +class FilterModule(object): + ''' Custom ansible filters for use by openshift_node_facts role''' + + @staticmethod + def node_get_dns_ip(openshift_dns_ip, hostvars): + ''' Navigates the complicated logic of when to set dnsIP + + In all situations if they've set openshift_dns_ip use that + For 1.0/3.0 installs we use the openshift_master_cluster_vip, openshift_node_first_master_ip, else None + For 1.1/3.1 installs we use openshift_master_cluster_vip, else None (product will use kube svc ip) + For 1.2/3.2+ installs we set to the node's default interface ip + ''' + + if not issubclass(type(hostvars), dict): + raise errors.AnsibleFilterError("|failed expects hostvars is a dict") + + # We always use what they've specified if they've specified a value + if openshift_dns_ip is not None: + return openshift_dns_ip + return hostvars['ansible_default_ipv4']['address'] + + def filters(self): + ''' returns a mapping of filters to methods ''' + return {'node_get_dns_ip': self.node_get_dns_ip} diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml index c268c945e..b45130400 100644 --- a/roles/openshift_node_facts/tasks/main.yml +++ b/roles/openshift_node_facts/tasks/main.yml @@ -1,10 +1,4 @@ --- -- set_fact: - openshift_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}" - when: - - openshift_node_debug_level is not defined - - lookup('oo_option', 'openshift_node_debug_level') != "" - - name: Set node facts openshift_facts: role: "{{ item.role }}" @@ -17,10 +11,9 @@ - role: node local_facts: annotations: "{{ openshift_node_annotations | default(none) }}" - debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}" kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" - labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}" + labels: "{{ openshift_node_labels | default(None) }}" registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}" schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" sdn_mtu: "{{ openshift_node_sdn_mtu | 
default(None) }}" @@ -30,5 +23,5 @@ ovs_image: "{{ osn_ovs_image | default(None) }}" proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}" local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}" - dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}" + dns_ip: "{{ openshift_dns_ip | default(none) | node_get_dns_ip(hostvars[inventory_hostname])}}" env_vars: "{{ openshift_node_env_vars | default(None) }}" diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md index 4e6229bfb..73b98ad90 100644 --- a/roles/openshift_node_upgrade/README.md +++ b/roles/openshift_node_upgrade/README.md @@ -1,4 +1,4 @@ -OpenShift/Atomic Enterprise Node upgrade +OpenShift Node upgrade ========= Role responsible for a single node upgrade. @@ -32,14 +32,12 @@ From openshift.common: | Name | Default Value | | |------------------------------------|---------------------|---------------------| | openshift.common.config_base |---------------------|---------------------| -| openshift.common.data_dir |---------------------|---------------------| | openshift.common.hostname |---------------------|---------------------| | openshift.common.http_proxy |---------------------|---------------------| | openshift.common.is_atomic |---------------------|---------------------| | openshift.common.is_containerized |---------------------|---------------------| | openshift.common.portal_net |---------------------|---------------------| | openshift.common.service_type |---------------------|---------------------| -| openshift.common.use_openshift_sdn |---------------------|---------------------| From openshift.master: @@ -51,14 +49,13 @@ From openshift.node: | Name | Default Value | | |------------------------------------|---------------------|---------------------| -| openshift.node.debug_level |---------------------|---------------------| | openshift.node.node_image |---------------------|---------------------| | openshift.node.ovs_image |---------------------|---------------------| Dependencies ------------ -openshift_common + TODO diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml index ed97d539c..10b4c6977 100644 --- a/roles/openshift_node_upgrade/defaults/main.yml +++ b/roles/openshift_node_upgrade/defaults/main.yml @@ -1 +1,14 @@ --- +openshift_node_debug_level: "{{ debug_level | default(2) }}" + +openshift_use_openshift_sdn: True +os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet" + +openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}" +openshift_node_data_dir: "{{ openshift_node_data_dir_default }}" + +# oreg_url is defined by user input +oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' 
in oreg_url.split('/')[0]) else '' }}" +oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker" +oreg_auth_credentials_replace: False +l_bind_docker_reg_auth: False diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml index d31b899cf..90d80855e 100644 --- a/roles/openshift_node_upgrade/handlers/main.yml +++ b/roles/openshift_node_upgrade/handlers/main.yml @@ -6,7 +6,7 @@ when: - not skip_node_svc_handlers | default(False) | bool - not (ovs_service_status_changed | default(false) | bool) - - openshift.common.use_openshift_sdn | default(true) | bool + - openshift_use_openshift_sdn | bool register: l_openshift_node_upgrade_stop_openvswitch_result until: not l_openshift_node_upgrade_stop_openvswitch_result | failed retries: 3 diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml index 2a36d8945..a810b01dc 100644 --- a/roles/openshift_node_upgrade/meta/main.yml +++ b/roles/openshift_node_upgrade/meta/main.yml @@ -11,4 +11,3 @@ galaxy_info: - 7 dependencies: - role: lib_utils -- role: openshift_common diff --git a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml index 1186062eb..527580481 100644 --- a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml +++ b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml @@ -7,7 +7,7 @@ create: true with_items: - regex: '^OPTIONS=' - line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}" + line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}" - regex: '^CONFIG_FILE=' line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml" - regex: '^IMAGE_VERSION=' diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml index bc092c26c..6bcf3072d 100644 --- a/roles/openshift_node_upgrade/tasks/main.yml +++ b/roles/openshift_node_upgrade/tasks/main.yml @@ -10,6 +10,8 @@ # tasks file for openshift_node_upgrade +- include: registry_auth.yml + - name: Stop node and openvswitch services service: name: "{{ item }}" @@ -44,7 +46,7 @@ changed_when: "'Downloaded newer image' in pull_result.stdout" when: - openshift.common.is_containerized | bool - - openshift.common.use_openshift_sdn | default(true) | bool + - openshift_use_openshift_sdn | bool - include: docker/upgrade.yml vars: @@ -142,7 +144,7 @@ # End Disable Swap Block - name: Reset selinux context - command: restorecon -RF {{ openshift.common.data_dir }}/openshift.local.volumes + command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes when: - ansible_selinux is defined - ansible_selinux.status == 'enabled' diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml new file mode 100644 index 000000000..5e5e4f94a --- /dev/null +++ b/roles/openshift_node_upgrade/tasks/registry_auth.yml @@ -0,0 +1,27 @@ +--- +- name: Check for credentials file for registry auth + stat: + path: "{{ oreg_auth_credentials_path }}" + when: oreg_auth_user is defined + register: node_oreg_auth_credentials_stat + +- name: Create credentials for registry auth + command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" + when: + - oreg_auth_user is defined + - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + register: 
node_oreg_auth_credentials_create + retries: 3 + delay: 5 + until: node_oreg_auth_credentials_create.rc == 0 + notify: + - restart node + +# Container images may need the registry credentials +- name: Setup ro mount of /root/.docker for containerized hosts + set_fact: + l_bind_docker_reg_auth: True + when: + - openshift.common.is_containerized | bool + - oreg_auth_user is defined + - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml index 4e9550150..226f5290c 100644 --- a/roles/openshift_node_upgrade/tasks/systemd_units.yml +++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml @@ -4,9 +4,9 @@ # - openshift_image_tag # - openshift.common.is_containerized # - openshift.node.ovs_image -# - openshift.common.use_openshift_sdn +# - openshift_use_openshift_sdn # - openshift.common.service_type -# - openshift.node.debug_level +# - openshift_node_debug_level # - openshift.common.config_base # - openshift.common.http_proxy # - openshift.common.portal_net @@ -28,10 +28,10 @@ when: openshift.common.is_containerized | bool - include: config/workaround-bz1331590-ovs-oom-fix.yml - when: openshift.common.use_openshift_sdn | default(true) | bool + when: openshift_use_openshift_sdn | bool - include: config/install-ovs-docker-service-file.yml - when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | default(true) | bool + when: openshift.common.is_containerized | bool and openshift_use_openshift_sdn | bool - include: config/configure-node-settings.yml - include: config/configure-proxy-settings.yml diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service index 4c47f8c0d..aae35719c 100644 --- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service @@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service [Service] -ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" +ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" ExecStop= SyslogIdentifier={{ openshift.common.service_type }}-node-dep diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service index 639b6f6c8..07d1ebc3c 100644 --- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service +++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service @@ -4,8 +4,9 @@ After={{ openshift.docker.service_name }}.service After=openvswitch.service PartOf={{ 
openshift.docker.service_name }}.service Requires={{ openshift.docker.service_name }}.service -{% if openshift.common.use_openshift_sdn %} +{% if openshift_use_openshift_sdn %} Wants=openvswitch.service +PartOf=openvswitch.service After=ovsdb-server.service After=ovs-vswitchd.service {% endif %} @@ -21,7 +22,22 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/ ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1 -ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION} +ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \ + --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \ + -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \ + -e HOST=/rootfs -e HOST_ETC=/host-etc \ + -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \ + -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \ + {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ + -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \ + -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \ + -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \ + -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \ + -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \ + -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \ + -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \ + {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ + {{ openshift.node.node_image }}:${IMAGE_VERSION} ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf diff --git 
a/roles/openshift_persistent_volumes/README.md b/roles/openshift_persistent_volumes/README.md index 1489cb0bd..0407d6ef1 100644 --- a/roles/openshift_persistent_volumes/README.md +++ b/roles/openshift_persistent_volumes/README.md @@ -17,13 +17,6 @@ From this role: | persistent_volume_claims | [] | List of persistent volume claim dictionaries, keys: name, capacity, access_modes | -From openshift_common: - -| Name | Default Value | | -|-------------------------------|----------------|----------------------------------------| -| openshift_debug_level | 2 | Global openshift debug log verbosity | - - Dependencies ------------ diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml index 25e5a38dd..19e9a56b7 100644 --- a/roles/openshift_persistent_volumes/meta/main.yml +++ b/roles/openshift_persistent_volumes/meta/main.yml @@ -9,6 +9,4 @@ galaxy_info: - name: EL versions: - 7 -dependencies: -- role: openshift_common -- role: openshift_hosted_facts +dependencies: {} diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md new file mode 100644 index 000000000..92f74928c --- /dev/null +++ b/roles/openshift_prometheus/README.md @@ -0,0 +1,118 @@ +OpenShift Prometheus +==================== + +OpenShift Prometheus Installation + +Requirements +------------ + + +Role Variables +-------------- + +For default values, see [`defaults/main.yaml`](defaults/main.yaml). + +- `openshift_prometheus_state`: present - install/update. absent - uninstall. + +- `openshift_prometheus_namespace`: project (i.e. namespace) where the components will be + deployed. + +- `openshift_prometheus_node_selector`: Selector for the nodes prometheus will be deployed on. + +- `openshift_prometheus_<COMPONENT>_image_prefix`: specify image prefix for the component + +- `openshift_prometheus_<COMPONENT>_image_version`: specify image version for the component + +## PVC related variables +Each prometheus component (prometheus, alertmanager, alertbuffer) can request a PV claim by setting the corresponding role variables: +``` +openshift_prometheus_<COMPONENT>_storage_type: <VALUE> (pvc, emptydir) +openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE> +``` +e.g. +``` +openshift_prometheus_storage_type: pvc +openshift_prometheus_alertmanager_pvc_name: alertmanager +openshift_prometheus_alertbuffer_pvc_size: 10G +openshift_prometheus_pvc_access_modes: [ReadWriteOnce] +``` + +## NFS PV Storage variables +Each prometheus component (prometheus, alertmanager, alertbuffer) can request an NFS-backed PV by setting the corresponding variables: +``` +openshift_prometheus_<COMPONENT>_storage_kind=<VALUE> +openshift_prometheus_<COMPONENT>_storage_(access_modes|host|labels)=<VALUE> +openshift_prometheus_<COMPONENT>_storage_volume_(name|size)=<VALUE> +openshift_prometheus_<COMPONENT>_storage_nfs_(directory|options)=<VALUE> +``` +e.g. +``` +openshift_prometheus_storage_kind=nfs +openshift_prometheus_storage_access_modes=['ReadWriteOnce'] +openshift_prometheus_storage_host=nfs.example.com #for external host +openshift_prometheus_storage_nfs_directory=/exports +openshift_prometheus_storage_alertmanager_nfs_options='*(rw,root_squash)' +openshift_prometheus_storage_volume_name=prometheus +openshift_prometheus_storage_alertbuffer_volume_size=10Gi +openshift_prometheus_storage_labels={'storage': 'prometheus'} +``` + +NOTE: Setting `openshift_prometheus_<COMPONENT>_storage_labels` overrides `openshift_prometheus_<COMPONENT>_pvc_pv_selector` + +
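A minimal sketch of the note above (the label value is illustrative): because the role defaults wire each `pvc_pv_selector` to the matching `storage_labels` variable, setting the labels alone is enough to drive PV selection:

```
openshift_prometheus_storage_labels: {'storage': 'prometheus'}
# the role default then resolves the PVC's PV selector from it:
# openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}"
```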
+## Additional Alert Rules file variable +An external file with alert rules can be added by setting the path in the additional rules variable: +``` +openshift_prometheus_additional_rules_file: <PATH> +``` + +The file content should be in the prometheus alert rules format. +The following example sets a rule that fires an alert when one of the cluster nodes is down: + +``` +groups: +- name: example-rules + interval: 30s # defaults to global interval + rules: + - alert: Node Down + expr: up{job="kubernetes-nodes"} == 0 + annotations: + miqTarget: "ContainerNode" + severity: "HIGH" + message: "{{ '{{' }}{{ '$labels.instance' }}{{ '}}' }} is down" +``` + + +## Additional variables to control resource limits +Each prometheus component (prometheus, alertmanager, alert-buffer, oauth-proxy) can specify cpu and memory limits and requests by setting +the corresponding role variable: +``` +openshift_prometheus_<COMPONENT>_(limits|requests)_(memory|cpu): <VALUE> +``` +e.g. +``` +openshift_prometheus_alertmanager_limits_memory: 1Gi +openshift_prometheus_oauth_proxy_requests_cpu: 100 +``` + +Dependencies +------------ + +openshift_facts + + +Example Playbook +---------------- + +``` +- name: Configure openshift-prometheus + hosts: oo_first_master + roles: + - role: openshift_prometheus +``` + +License +------- + +Apache License, Version 2.0 +
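To make the `<COMPONENT>` naming concrete, here is a minimal inventory sketch (the size is illustrative, not a recommendation) that leaves prometheus and alertbuffer on their emptydir defaults while backing alertmanager with a PVC:

```
openshift_prometheus_alertmanager_storage_type: pvc
openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
openshift_prometheus_alertmanager_pvc_size: 20Gi
openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
```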
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml new file mode 100644 index 000000000..00995eee6 --- /dev/null +++ b/roles/openshift_prometheus/defaults/main.yaml @@ -0,0 +1,60 @@ +--- +# defaults file for openshift_prometheus +openshift_prometheus_state: present + +openshift_prometheus_namespace: prometheus + +openshift_prometheus_node_selector: {"region":"infra"} + +# image defaults +openshift_prometheus_image_prefix: "openshift/" +openshift_prometheus_image_version: "v2.0.0-dev.3" +openshift_prometheus_proxy_image_prefix: "openshift/" +openshift_prometheus_proxy_image_version: "v1.0.0" +openshift_prometheus_alertmanager_image_prefix: "openshift/" +openshift_prometheus_alertmanager_image_version: "v0.9.1" +openshift_prometheus_alertbuffer_image_prefix: "openshift/" +openshift_prometheus_alertbuffer_image_version: "v0.0.2" + +# additional prometheus rules file +openshift_prometheus_additional_rules_file: null + +# storage +# One of ['emptydir', 'pvc'] +openshift_prometheus_storage_type: "emptydir" +openshift_prometheus_pvc_name: prometheus +openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}" +openshift_prometheus_pvc_access_modes: [ReadWriteOnce] +openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}" + +# One of ['emptydir', 'pvc'] +openshift_prometheus_alertmanager_storage_type: "emptydir" +openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager +openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}" +openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce] +openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}" + +# One of ['emptydir', 'pvc'] +openshift_prometheus_alertbuffer_storage_type: "emptydir" +openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer +openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}" +openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce] +openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default({}) }}" + +# container resources +openshift_prometheus_cpu_limit: null +openshift_prometheus_memory_limit: null +openshift_prometheus_cpu_requests: null +openshift_prometheus_memory_requests: null +openshift_prometheus_alertmanager_cpu_limit: null +openshift_prometheus_alertmanager_memory_limit: null +openshift_prometheus_alertmanager_cpu_requests: null +openshift_prometheus_alertmanager_memory_requests: null +openshift_prometheus_alertbuffer_cpu_limit: null +openshift_prometheus_alertbuffer_memory_limit: null +openshift_prometheus_alertbuffer_cpu_requests: null +openshift_prometheus_alertbuffer_memory_requests: null +openshift_prometheus_oauth_proxy_cpu_limit: null +openshift_prometheus_oauth_proxy_memory_limit: null +openshift_prometheus_oauth_proxy_cpu_requests: null +openshift_prometheus_oauth_proxy_memory_requests: null diff --git a/roles/openshift_prometheus/meta/main.yaml b/roles/openshift_prometheus/meta/main.yaml new file mode 100644 index 000000000..33188bb7e --- /dev/null +++ b/roles/openshift_prometheus/meta/main.yaml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: OpenShift Development <dev@lists.openshift.redhat.com> + description: Deploy OpenShift prometheus integration for the cluster + company: Red Hat, Inc. + license: license (Apache) + min_ansible_version: 2.2 + platforms: + - name: EL + versions: + - 7 + - name: Fedora + versions: + - all + categories: + - openshift +dependencies: +- { role: lib_openshift } +- { role: openshift_facts } diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml new file mode 100644 index 000000000..00c3c1987 --- /dev/null +++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml @@ -0,0 +1,238 @@ +--- + +# namespace +- name: Add prometheus project + oc_project: + state: "{{ state }}" + name: "{{ openshift_prometheus_namespace }}" + node_selector: "{{ openshift_prometheus_node_selector | oo_selector_to_string_list() }}" + description: Prometheus + +# secrets +- name: Set alert and prometheus secrets + oc_secret: + state: "{{ state }}" + name: "{{ item }}-proxy" + namespace: "{{ openshift_prometheus_namespace }}" + contents: + - path: session_secret + data: "{{ 43 | oo_random_word }}=" + with_items: + - prometheus + - alerts + +# serviceaccount +- name: create prometheus serviceaccount + oc_serviceaccount: + state: "{{ state }}" + name: prometheus + namespace: "{{ openshift_prometheus_namespace }}" + # TODO add annotations when supported + # annotations: + # serviceaccounts.openshift.io/oauth-redirectreference.prom: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}' + # serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' + + secrets: + - prometheus-secrets + changed_when: no + +# TODO remove this when annotations are supported by oc_serviceaccount +- name: annotate serviceaccount + command: > + {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} + serviceaccount prometheus + serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}' + serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' + +
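For reference, this is roughly the ServiceAccount the workaround produces, reconstructed from the annotate command above (only the relevant fields are shown; a sketch, not role output):

```
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  annotations:
    serviceaccounts.openshift.io/oauth-redirectreference.prom: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
    serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
```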
+# create clusterrolebinding for prometheus serviceaccount +- name: Set cluster-reader permissions for prometheus + oc_adm_policy_user: + state: "{{ state }}" + namespace: "{{ openshift_prometheus_namespace }}" + resource_kind: cluster-role + resource_name: cluster-reader + user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus" + +# create prometheus and alerts services +# TODO join into 1 task with loop +- name: Create prometheus service + oc_service: + state: "{{ state }}" + name: "{{ item.name }}" + namespace: "{{ openshift_prometheus_namespace }}" + selector: + app: prometheus + labels: + name: "{{ item.name }}" + # TODO add annotations when supported + # annotations: + # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls" + ports: + - port: 443 + targetPort: 8443 + with_items: + - name: prometheus + +- name: Create alerts service + oc_service: + state: "{{ state }}" + name: "{{ item.name }}" + namespace: "{{ openshift_prometheus_namespace }}" + selector: + app: prometheus + labels: + name: "{{ item.name }}" + # TODO add annotations when supported + # annotations: + # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls" + ports: + - port: 443 + targetPort: 9443 + with_items: + - name: alerts + + +# Annotate services with scrape settings and the serving-cert secret names +# TODO remove this when annotations are supported by oc_service +- name: annotate prometheus service + command: > + {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} + service prometheus + prometheus.io/scrape='true' + prometheus.io/scheme=https + service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls + +- name: annotate alerts service + command: > + {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} + service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls' + +# create prometheus and alerts routes +- name: create prometheus and alerts routes + oc_route: + state: "{{ state }}" + name: "{{ item.name }}" + namespace: "{{ openshift_prometheus_namespace }}" + service_name: "{{ item.name }}" + tls_termination: reencrypt + with_items: + - name: prometheus + - name: alerts + +# Storage +- name: create prometheus pvc + oc_pvc: + namespace: "{{ openshift_prometheus_namespace }}" + name: "{{ openshift_prometheus_pvc_name }}" + access_modes: "{{ openshift_prometheus_pvc_access_modes }}" + volume_capacity: "{{ openshift_prometheus_pvc_size }}" + selector: "{{ openshift_prometheus_pvc_pv_selector }}" + when: openshift_prometheus_storage_type == 'pvc' + +- name: create alertmanager pvc + oc_pvc: + namespace: "{{ openshift_prometheus_namespace }}" + name: "{{ openshift_prometheus_alertmanager_pvc_name }}" + access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}" + volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}" + selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}" + when: openshift_prometheus_alertmanager_storage_type == 'pvc' + +- name: create alertbuffer pvc + oc_pvc: + namespace: "{{ openshift_prometheus_namespace }}" + name: "{{ openshift_prometheus_alertbuffer_pvc_name }}" + access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}" + volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}" + selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}" + when: openshift_prometheus_alertbuffer_storage_type == 'pvc' + +# create prometheus stateful set +- name: Set prometheus template + template: 
+ src: prometheus.j2 + dest: "{{ tempdir }}/templates/prometheus.yaml" + vars: + namespace: "{{ openshift_prometheus_namespace }}" +# prom_replicas: "{{ openshift_prometheus_replicas }}" + +- name: Set prometheus stateful set + oc_obj: + state: "{{ state }}" + name: "prometheus" + namespace: "{{ openshift_prometheus_namespace }}" + kind: statefulset + files: + - "{{ tempdir }}/templates/prometheus.yaml" + delete_after: true + +# prometheus configmap +# Copy the additional rules file if it is defined +- name: Copy additional rules file to host + copy: + src: "{{ openshift_prometheus_additional_rules_file }}" + dest: "{{ tempdir }}/prometheus.additional.rules" + when: + - openshift_prometheus_additional_rules_file is defined + - openshift_prometheus_additional_rules_file is not none + - openshift_prometheus_additional_rules_file | trim | length > 0 + +- stat: + path: "{{ tempdir }}/prometheus.additional.rules" + register: additional_rules_stat + +# The kubernetes version impacts the prometheus scraping endpoint +# so we gather it before constructing the configmap +- name: get oc version + oc_version: + register: oc_version + +- set_fact: + kubernetes_version: "{{ oc_version.results.kubernetes_short | float }}" + +- template: + src: prometheus.yml.j2 + dest: "{{ tempdir }}/prometheus.yml" + changed_when: no + +- template: + src: prometheus.rules.j2 + dest: "{{ tempdir }}/prometheus.rules" + changed_when: no + +# In prometheus configmap create "additional.rules" section if file exists +- name: Set prometheus configmap + oc_configmap: + state: "{{ state }}" + name: "prometheus" + namespace: "{{ openshift_prometheus_namespace }}" + from_file: + prometheus.rules: "{{ tempdir }}/prometheus.rules" + prometheus.additional.rules: "{{ tempdir }}/prometheus.additional.rules" + prometheus.yml: "{{ tempdir }}/prometheus.yml" + when: additional_rules_stat.stat.exists + +- name: Set prometheus configmap + oc_configmap: + state: "{{ state }}" + name: "prometheus" + namespace: "{{ openshift_prometheus_namespace }}" + from_file: + prometheus.rules: "{{ tempdir }}/prometheus.rules" + prometheus.yml: "{{ tempdir }}/prometheus.yml" + when: not additional_rules_stat.stat.exists + +# alertmanager configmap +- template: + src: alertmanager.yml.j2 + dest: "{{ tempdir }}/alertmanager.yml" + changed_when: no + +- name: Set alertmanager configmap + oc_configmap: + state: "{{ state }}" + name: "prometheus-alerts" + namespace: "{{ openshift_prometheus_namespace }}" + from_file: + alertmanager.yml: "{{ tempdir }}/alertmanager.yml" diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml new file mode 100644 index 000000000..523a64334 --- /dev/null +++ b/roles/openshift_prometheus/tasks/main.yaml @@ -0,0 +1,26 @@ +--- + +- name: Create temp directory for doing work in on the target + command: mktemp -td openshift-prometheus-ansible-XXXXXX + register: mktemp + changed_when: False + +- set_fact: + tempdir: "{{ mktemp.stdout }}" + +- name: Create templates subdirectory + file: + state: directory + path: "{{ tempdir }}/templates" + mode: 0755 + changed_when: False + +- include: install_prometheus.yaml + vars: + state: "{{ openshift_prometheus_state }}" + +- name: Delete temp directory + file: + name: "{{ tempdir }}" + state: absent + changed_when: False
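Because main.yaml forwards `openshift_prometheus_state` into the install tasks as `state`, uninstalling reuses the same flow with the state flipped; a sketch mirroring the README's example playbook:

```
- name: Uninstall openshift-prometheus
  hosts: oo_first_master
  roles:
  - role: openshift_prometheus
    openshift_prometheus_state: absent
```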
diff --git a/roles/openshift_prometheus/templates/alertmanager.yml.j2 b/roles/openshift_prometheus/templates/alertmanager.yml.j2 new file mode 100644 index 000000000..6c432a3d0 --- /dev/null +++ b/roles/openshift_prometheus/templates/alertmanager.yml.j2 @@ -0,0 +1,20 @@ +global: + +# The root route on which each incoming alert enters. +route: + # default route if none match + receiver: alert-buffer-wh + + # The labels by which incoming alerts are grouped together. For example, + # multiple alerts coming in for cluster=A and alertname=LatencyHigh would + # be batched into a single group. + # TODO: + group_by: [] + + # All the above attributes are inherited by all child routes and can be + # overwritten on each. + +receivers: +- name: alert-buffer-wh + webhook_configs: + - url: http://localhost:9099/topics/alerts diff --git a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2 new file mode 100644 index 000000000..916c57aa2 --- /dev/null +++ b/roles/openshift_prometheus/templates/prometheus.j2 @@ -0,0 +1,247 @@ +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: prometheus + namespace: {{ namespace }} + labels: + app: prometheus +spec: + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + selector: + provider: openshift + matchLabels: + app: prometheus + template: + metadata: + name: prometheus + labels: + app: prometheus + spec: + serviceAccountName: prometheus +{% if openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %} + nodeSelector: +{% for key, value in openshift_prometheus_node_selector.iteritems() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + # Deploy Prometheus behind an oauth proxy + - name: prom-proxy + image: "{{openshift_prometheus_proxy_image_prefix}}oauth-proxy:{{openshift_prometheus_proxy_image_version}}" + imagePullPolicy: IfNotPresent + resources: + requests: +{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %} + memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}" +{% endif %} +{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %} + cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}" +{% endif %} + limits: +{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %} + memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}" +{% endif %} +{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %} + cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}" +{% endif %} + ports: + - containerPort: 8443 + name: web + args: + - -provider=openshift + - -https-address=:8443 + - -http-address= + - -email-domain=* + - -upstream=http://localhost:9090 + - -client-id=system:serviceaccount:{{ namespace }}:prometheus + - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}' + - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}' + - -tls-cert=/etc/tls/private/tls.crt + - -tls-key=/etc/tls/private/tls.key + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret-file=/etc/proxy/secrets/session_secret + - -openshift-ca=/etc/pki/tls/cert.pem + - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - -skip-auth-regex=^/metrics + volumeMounts: + - mountPath: /etc/tls/private + name: prometheus-tls + - mountPath: 
/etc/proxy/secrets + name: prometheus-secrets + - mountPath: /prometheus + name: prometheus-data + + - name: prometheus + args: + - --storage.tsdb.retention=6h + - --storage.tsdb.min-block-duration=2m + - --config.file=/etc/prometheus/prometheus.yml + - --web.listen-address=localhost:9090 + image: "{{openshift_prometheus_image_prefix}}prometheus:{{openshift_prometheus_image_version}}" + imagePullPolicy: IfNotPresent + resources: + requests: +{% if openshift_prometheus_memory_requests is defined and openshift_prometheus_memory_requests is not none %} + memory: "{{openshift_prometheus_memory_requests}}" +{% endif %} +{% if openshift_prometheus_cpu_requests is defined and openshift_prometheus_cpu_requests is not none %} + cpu: "{{openshift_prometheus_cpu_requests}}" +{% endif %} + limits: +{% if openshift_prometheus_memory_limit is defined and openshift_prometheus_memory_limit is not none %} + memory: "{{ openshift_prometheus_memory_limit }}" +{% endif %} +{% if openshift_prometheus_cpu_limit is defined and openshift_prometheus_cpu_limit is not none %} + cpu: "{{openshift_prometheus_cpu_limit}}" +{% endif %} + + volumeMounts: + - mountPath: /etc/prometheus + name: prometheus-config + - mountPath: /prometheus + name: prometheus-data + + # Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy + - name: alerts-proxy + image: "{{openshift_prometheus_proxy_image_prefix}}oauth-proxy:{{openshift_prometheus_proxy_image_version}}" + imagePullPolicy: IfNotPresent + resources: + requests: +{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %} + memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}" +{% endif %} +{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %} + cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}" +{% endif %} + limits: +{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %} + memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}" +{% endif %} +{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %} + cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}" +{% endif %} + ports: + - containerPort: 9443 + name: web + args: + - -provider=openshift + - -https-address=:9443 + - -http-address= + - -email-domain=* + - -upstream=http://localhost:9099 + - -client-id=system:serviceaccount:{{ namespace }}:prometheus + - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}' + - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}' + - -tls-cert=/etc/tls/private/tls.crt + - -tls-key=/etc/tls/private/tls.key + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret-file=/etc/proxy/secrets/session_secret + - -openshift-ca=/etc/pki/tls/cert.pem + - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + volumeMounts: + - mountPath: /etc/tls/private + name: alerts-tls + - mountPath: /etc/proxy/secrets + name: alerts-secrets + + - name: alert-buffer + args: + - --storage-path=/alert-buffer/messages.db + image: "{{openshift_prometheus_alertbuffer_image_prefix}}prometheus-alert-buffer:{{openshift_prometheus_alertbuffer_image_version}}" + imagePullPolicy: IfNotPresent + 
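All of the request/limit conditionals in this template are driven by optional inventory variables; a hedged sketch of how a few of them might be set (the values are examples only, not recommendations):

    # inventory snippet (illustrative values)
    openshift_prometheus_memory_requests: 2Gi
    openshift_prometheus_cpu_requests: 500m
    openshift_prometheus_memory_limit: 4Gi
    openshift_prometheus_cpu_limit: "1"
    openshift_prometheus_oauth_proxy_memory_requests: 100Mi
    openshift_prometheus_oauth_proxy_memory_limit: 200Mi
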
resources: + requests: +{% if openshift_prometheus_alertbuffer_memory_requests is defined and openshift_prometheus_alertbuffer_memory_requests is not none %} + memory: "{{openshift_prometheus_alertbuffer_memory_requests}}" +{% endif %} +{% if openshift_prometheus_alertbuffer_cpu_requests is defined and openshift_prometheus_alertbuffer_cpu_requests is not none %} + cpu: "{{openshift_prometheus_alertbuffer_cpu_requests}}" +{% endif %} + limits: +{% if openshift_prometheus_alertbuffer_memory_limit is defined and openshift_prometheus_alertbuffer_memory_limit is not none %} + memory: "{{openshift_prometheus_alertbuffer_memory_limit}}" +{% endif %} +{% if openshift_prometheus_alertbuffer_cpu_limit is defined and openshift_prometheus_alertbuffer_cpu_limit is not none %} + cpu: "{{openshift_prometheus_alertbuffer_cpu_limit}}" +{% endif %} + volumeMounts: + - mountPath: /alert-buffer + name: alert-buffer-data + ports: + - containerPort: 9099 + name: alert-buf + + - name: alertmanager + args: + - -config.file=/etc/alertmanager/alertmanager.yml + image: "{{openshift_prometheus_alertmanager_image_prefix}}prometheus-alertmanager:{{openshift_prometheus_alertmanager_image_version}}" + imagePullPolicy: IfNotPresent + resources: + requests: +{% if openshift_prometheus_alertmanager_memory_requests is defined and openshift_prometheus_alertmanager_memory_requests is not none %} + memory: "{{openshift_prometheus_alertmanager_memory_requests}}" +{% endif %} +{% if openshift_prometheus_alertmanager_cpu_requests is defined and openshift_prometheus_alertmanager_cpu_requests is not none %} + cpu: "{{openshift_prometheus_alertmanager_cpu_requests}}" +{% endif %} + limits: +{% if openshift_prometheus_alertmanager_memory_limit is defined and openshift_prometheus_alertmanager_memory_limit is not none %} + memory: "{{openshift_prometheus_alertmanager_memory_limit}}" +{% endif %} +{% if openshift_prometheus_alertmanager_cpu_limit is defined and openshift_prometheus_alertmanager_cpu_limit is not none %} + cpu: "{{openshift_prometheus_alertmanager_cpu_limit}}" +{% endif %} + ports: + - containerPort: 9093 + name: web + volumeMounts: + - mountPath: /etc/alertmanager + name: alertmanager-config + - mountPath: /alertmanager + name: alertmanager-data + + restartPolicy: Always + volumes: + - name: prometheus-config + configMap: + defaultMode: 420 + name: prometheus + - name: prometheus-secrets + secret: + secretName: prometheus-proxy + - name: prometheus-tls + secret: + secretName: prometheus-tls + - name: prometheus-data +{% if openshift_prometheus_storage_type == 'pvc' %} + persistentVolumeClaim: + claimName: {{ openshift_prometheus_pvc_name }} +{% else %} + emptyDir: {} +{% endif %} + - name: alertmanager-config + configMap: + defaultMode: 420 + name: prometheus-alerts + - name: alerts-secrets + secret: + secretName: alerts-proxy + - name: alerts-tls + secret: + secretName: prometheus-alerts-tls + - name: alertmanager-data +{% if openshift_prometheus_alertmanager_storage_type == 'pvc' %} + persistentVolumeClaim: + claimName: {{ openshift_prometheus_alertmanager_pvc_name }} +{% else %} + emptyDir: {} +{% endif %} + - name: alert-buffer-data +{% if openshift_prometheus_alertbuffer_storage_type == 'pvc' %} + persistentVolumeClaim: + claimName: {{ openshift_prometheus_alertbuffer_pvc_name }} +{% else %} + emptyDir: {} +{% endif %}
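The volume stanzas above fall back to an ephemeral emptyDir unless the matching *_storage_type variable selects a PVC; a sketch, assuming the inventory also chooses the claim names (the names here are hypothetical):

    openshift_prometheus_storage_type: pvc
    openshift_prometheus_pvc_name: prometheus
    openshift_prometheus_alertmanager_storage_type: pvc
    openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
    openshift_prometheus_alertbuffer_storage_type: pvc
    openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer

diff --git a/roles/openshift_prometheus/templates/prometheus.rules.j2 b/roles/openshift_prometheus/templates/prometheus.rules.j2 new file mode 100644 index 000000000..e861dc127 --- /dev/null +++ 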
b/roles/openshift_prometheus/templates/prometheus.rules.j2 @@ -0,0 +1,4 @@ +groups: +- name: example-rules + interval: 30s # defaults to global interval + rules: diff --git a/roles/openshift_prometheus/templates/prometheus.yml.j2 b/roles/openshift_prometheus/templates/prometheus.yml.j2 new file mode 100644 index 000000000..63430f834 --- /dev/null +++ b/roles/openshift_prometheus/templates/prometheus.yml.j2 @@ -0,0 +1,174 @@ +rule_files: + - 'prometheus.rules' +{% if openshift_prometheus_additional_rules_file is defined and openshift_prometheus_additional_rules_file is not none %} + - 'prometheus.additional.rules' +{% endif %} + + + +# A scrape configuration for running Prometheus on a Kubernetes cluster. +# This uses separate scrape configs for cluster components (i.e. API server, node) +# and services to allow each to use different authentication configs. +# +# Kubernetes labels will be added as Prometheus labels on metrics via the +# `labelmap` relabeling action. + +# Scrape config for API servers. +# +# Kubernetes exposes API servers as endpoints to the default/kubernetes +# service so this uses `endpoints` role and uses relabelling to only keep +# the endpoints associated with the default/kubernetes service using the +# default named port `https`. This works for single API server deployments as +# well as HA API server deployments. +scrape_configs: +- job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + +# Scrape config for nodes. +# +# Each node exposes a /metrics endpoint that contains operational metrics for +# the Kubelet and other components. +- job_name: 'kubernetes-nodes' + + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + +# Scrape config for controllers. +# +# Each master node exposes a /metrics endpoint on :8444 that contains operational metrics for +# the controllers. +# +# TODO: move this to a pure endpoints based metrics gatherer when controllers are exposed via +# endpoints. +- job_name: 'kubernetes-controllers' + + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: endpoints + + # Keep only the default/kubernetes service endpoints for the https port, and then + # set the port to 8444. This is the default configuration for the controllers on OpenShift + # masters. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + - source_labels: [__address__] + action: replace + target_label: __address__ + regex: (.+)(?::\d+) + replacement: $1:8444 + +# Scrape config for cAdvisor. 
+# +# Beginning in Kube 1.7, each node exposes a /metrics/cadvisor endpoint that +# reports container metrics for each running pod. Scrape those by default. +- job_name: 'kubernetes-cadvisor' + + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + +{% if kubernetes_version | float >= 1.7 %} + metrics_path: /metrics/cadvisor +{% else %} + metrics_path: /metrics +{% endif %} + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + +# Scrape config for service endpoints. +# +# The relabeling allows the actual service scrape endpoint to be configured +# via the following annotations: +# +# * `prometheus.io/scrape`: Only scrape services that have a value of `true` +# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need +# to set this to `https` & most likely set the `tls_config` of the scrape config. +# * `prometheus.io/path`: If the metrics path is not `/metrics` override this. +# * `prometheus.io/port`: If the metrics are exposed on a different port to the +# service then set this appropriately. +- job_name: 'kubernetes-service-endpoints' + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # TODO: this should be per target + insecure_skip_verify: true + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+)(?::\d+);(\d+) + replacement: $1:$2 + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_username] + action: replace + target_label: __basic_auth_username__ + regex: (.+) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_password] + action: replace + target_label: __basic_auth_password__ + regex: (.+) + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + +alerting: + alertmanagers: + - scheme: http + static_configs: + - targets: + - "localhost:9093" diff --git a/roles/openshift_prometheus/tests/inventory b/roles/openshift_prometheus/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/roles/openshift_prometheus/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/roles/openshift_prometheus/tests/test.yaml b/roles/openshift_prometheus/tests/test.yaml new file mode 100644 index 000000000..37baf573c --- /dev/null +++ b/roles/openshift_prometheus/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - openshift_prometheus
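The kubernetes-service-endpoints job above is entirely annotation-driven; a minimal sketch of a Service that those relabel rules would pick up (the name, selector, and port are hypothetical):

    apiVersion: v1
    kind: Service
    metadata:
      name: myapp-metrics
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/scheme: "http"
        prometheus.io/path: "/metrics"
        prometheus.io/port: "8080"
    spec:
      selector:
        app: myapp
      ports:
      - port: 8080

diff --git a/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml index ac21a5e37..1e6aafd00 100644 --- 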
a/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml +++ b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml @@ -1,6 +1,8 @@ --- - name: Generate ClusterRoleBindings - template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-clusterrolebinding.yaml + template: + src: clusterrolebinding.j2 + dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-clusterrolebinding.yaml" vars: acct_name: provisioners-{{item}} obj_name: run-provisioners-{{item}} diff --git a/roles/openshift_provisioners/tasks/generate_secrets.yaml b/roles/openshift_provisioners/tasks/generate_secrets.yaml index e6cbb1bbf..fe5ff9f18 100644 --- a/roles/openshift_provisioners/tasks/generate_secrets.yaml +++ b/roles/openshift_provisioners/tasks/generate_secrets.yaml @@ -1,6 +1,8 @@ --- - name: Generate secret for efs - template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-secret.yaml + template: + src: secret.j2 + dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-secret.yaml" vars: name: efs obj_name: "provisioners-efs" diff --git a/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml index 4fe0583ee..000f19994 100644 --- a/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml +++ b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml @@ -1,6 +1,8 @@ --- - name: Generating serviceaccounts - template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-sa.yaml + template: + src: serviceaccount.j2 + dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-sa.yaml" vars: obj_name: provisioners-{{item}} labels: diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml index 4a6e00513..6e8792446 100644 --- a/roles/openshift_provisioners/tasks/install_efs.yaml +++ b/roles/openshift_provisioners/tasks/install_efs.yaml @@ -9,7 +9,9 @@ changed_when: no - name: Generate efs PersistentVolumeClaim - template: src=pvc.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pvc.yaml + template: + src: pvc.j2 + dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-pvc.yaml" vars: obj_name: "provisioners-efs" size: "1Mi" @@ -21,7 +23,9 @@ changed_when: no - name: Generate efs PersistentVolume - template: src=pv.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pv.yaml + template: + src: pv.j2 + dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-pv.yaml" vars: obj_name: "provisioners-efs" size: "1Mi" diff --git a/roles/openshift_provisioners/tasks/install_support.yaml b/roles/openshift_provisioners/tasks/install_support.yaml index ba472f1c9..d6db81ab9 100644 --- a/roles/openshift_provisioners/tasks/install_support.yaml +++ b/roles/openshift_provisioners/tasks/install_support.yaml @@ -1,16 +1,9 @@ --- -- name: Check for provisioners project already exists - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_provisioners_project}} --no-headers - register: provisioners_project_result - ignore_errors: yes - when: not ansible_check_mode - changed_when: no - -- name: Create provisioners project - command: > - {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_provisioners_project}} - when: not ansible_check_mode and "not found" in provisioners_project_result.stderr +- name: Set provisioners project + oc_project: + state: present + kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" + name: "{{ 
openshift_provisioners_project }}" - name: Create temp directory for all our templates file: path={{mktemp.stdout}}/templates state=directory mode=0755 diff --git a/roles/openshift_provisioners/templates/pv.j2 b/roles/openshift_provisioners/templates/pv.j2 index f4128f9f0..f81b1617a 100644 --- a/roles/openshift_provisioners/templates/pv.j2 +++ b/roles/openshift_provisioners/templates/pv.j2 @@ -30,3 +30,4 @@ spec: name: {{claim_name}} namespace: {{openshift_provisioners_project}} {% endif %} + storageClassName: "" diff --git a/roles/openshift_provisioners/templates/pvc.j2 b/roles/openshift_provisioners/templates/pvc.j2 index 83d503056..0dd8772eb 100644 --- a/roles/openshift_provisioners/templates/pvc.j2 +++ b/roles/openshift_provisioners/templates/pvc.j2 @@ -23,4 +23,5 @@ spec: resources: requests: storage: {{size}} + storageClassName: "" diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md index abd1997dd..ce3b51454 100644 --- a/roles/openshift_repos/README.md +++ b/roles/openshift_repos/README.md @@ -1,4 +1,4 @@ -OpenShift Repos +OpenShift Repos ================ Configures repositories for an OpenShift installation @@ -12,10 +12,10 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos. Role Variables -------------- -| Name | Default value | | -|-------------------------------|---------------|------------------------------------| -| openshift_deployment_type | None | Possible values enterprise, origin | -| openshift_additional_repos | {} | TODO | +| Name | Default value | | +|-------------------------------|---------------|----------------------------------------------| +| openshift_deployment_type | None | Possible values openshift-enterprise, origin | +| openshift_additional_repos | {} | TODO | Dependencies ------------ diff --git a/roles/openshift_repos/tasks/centos_repos.yml b/roles/openshift_repos/tasks/centos_repos.yml new file mode 100644 index 000000000..7dc15af2a --- /dev/null +++ b/roles/openshift_repos/tasks/centos_repos.yml @@ -0,0 +1,25 @@ +--- +# Note: OpenShift repositories under CentOS may be shipped through the +# "centos-release-openshift-origin" package, which configures the repository. +# This task matches the file names provided by that package so that the +# repositories are not configured twice in different files, and it remains idempotent. + +- name: Configure origin gpg keys + copy: + src: "origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS" + dest: "/etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS" + notify: refresh cache + +# openshift_release is formatted to a standard string in openshift_version role. +# openshift_release is expected to be in format 'x.y.z...' here. +# Here, we drop the '.' characters and try to match the correct repo template +# for our corresponding openshift_release. 
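The with_first_found lookup in the next task resolves from the most to the least specific template; for example, assuming openshift_release is "3.6.0":

    # "3.6.0" -> "360" (dots dropped)
    #   1. CentOS-OpenShift-Origin360.repo.j2  (full release; usually absent)
    #   2. CentOS-OpenShift-Origin36.repo.j2   (first two digits; matches)
    #   3. CentOS-OpenShift-Origin.repo.j2     (generic fallback)
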
+- name: Configure correct origin release repository + template: + src: "{{ item }}" + dest: "/etc/yum.repos.d/{{ (item | basename | splitext)[0] }}" + with_first_found: + - "CentOS-OpenShift-Origin{{ (openshift_release | default('')).split('.') | join('') }}.repo.j2" + - "CentOS-OpenShift-Origin{{ ((openshift_release | default('')).split('.') | join(''))[0:2] }}.repo.j2" + - "CentOS-OpenShift-Origin.repo.j2" + notify: refresh cache diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 7458db87e..d41245093 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -6,53 +6,37 @@ - when: not ostree_booted.stat.exists block: + # TODO: This needs to be removed and placed into a role - name: Ensure libselinux-python is installed package: name=libselinux-python state=present - name: Create any additional repos that are defined - template: - src: yum_repo.j2 - dest: /etc/yum.repos.d/openshift_additional.repo - when: - - openshift_additional_repos | length > 0 - notify: refresh cache - - - name: Remove the additional repos if no longer defined - file: - dest: /etc/yum.repos.d/openshift_additional.repo - state: absent - when: - - openshift_additional_repos | length == 0 + yum_repository: + description: "{{ item.description | default(item.name | default(item.id)) }}" + name: "{{ item.name | default(item.id) }}" + baseurl: "{{ item.baseurl }}" + gpgkey: "{{ item.gpgkey | default(omit) }}" + gpgcheck: "{{ item.gpgcheck | default(1) }}" + sslverify: "{{ item.sslverify | default(1) }}" + sslclientkey: "{{ item.sslclientkey | default(omit) }}" + sslclientcert: "{{ item.sslclientcert | default(omit) }}" + file: "{{ item.name | default(item.id) }}" + enabled: "{{ item.enabled | default('no') }}" + with_items: "{{ openshift_additional_repos }}" + when: openshift_additional_repos | length > 0 notify: refresh cache # Singleton block - when: r_openshift_repos_has_run is not defined block: - # Note: OpenShift repositories under CentOS may be shipped through the - # "centos-release-openshift-origin" package which configures the repository. - # This task matches the file names provided by the package so that they are - # not installed twice in different files and remains idempotent. 
- - name: Configure origin repositories and gpg keys if needed - copy: - src: "{{ item.src }}" - dest: "{{ item.dest }}" - with_items: - - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS - dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS - - src: origin/repos/openshift-ansible-centos-paas-sig.repo - dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo - notify: refresh cache + - include: centos_repos.yml when: - ansible_os_family == "RedHat" - ansible_distribution != "Fedora" - openshift_deployment_type == 'origin' - openshift_enable_origin_repo | default(true) | bool - - name: Enable centos-openshift-origin-testing repository - command: yum-config-manager --enable centos-openshift-origin-testing - when: openshift_repos_enable_testing | bool - - name: Ensure clean repo cache in the event repos have been changed manually debug: msg: "First run of openshift_repos" diff --git a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo b/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2 index 09364c26f..b0c036e7c 100644 --- a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2 @@ -8,7 +8,7 @@ gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS [centos-openshift-origin-testing] name=CentOS OpenShift Origin Testing baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin/ -enabled=0 +enabled={{ 1 if openshift_repos_enable_testing else 0 }} gpgcheck=0 gpgkey=file:///etc/pki/rpm-gpg/openshift-ansible-CentOS-SIG-PaaS diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 new file mode 100644 index 000000000..97e855d58 --- /dev/null +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 @@ -0,0 +1,27 @@ +[centos-openshift-origin14] +name=CentOS OpenShift Origin +baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin14/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin14-testing] +name=CentOS OpenShift Origin Testing +baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin14/ +enabled={{ 1 if openshift_repos_enable_testing else 0 }} +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin14-debuginfo] +name=CentOS OpenShift Origin DebugInfo +baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin14-source] +name=CentOS OpenShift Origin Source +baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin14/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 new file mode 100644 index 000000000..5e756e680 --- /dev/null +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 @@ -0,0 +1,27 @@ +[centos-openshift-origin15] +name=CentOS OpenShift Origin +baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin15/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin15-testing] +name=CentOS OpenShift Origin Testing +baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin15/ +enabled={{ 1 if 
openshift_repos_enable_testing else 0 }} +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin15-debuginfo] +name=CentOS OpenShift Origin DebugInfo +baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin15-source] +name=CentOS OpenShift Origin Source +baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin15/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 new file mode 100644 index 000000000..7050c95f5 --- /dev/null +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 @@ -0,0 +1,27 @@ +[centos-openshift-origin36] +name=CentOS OpenShift Origin +baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin36/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin36-testing] +name=CentOS OpenShift Origin Testing +baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin36/ +enabled={{ 1 if openshift_repos_enable_testing else 0 }} +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin36-debuginfo] +name=CentOS OpenShift Origin DebugInfo +baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin36-source] +name=CentOS OpenShift Origin Source +baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin36/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2 deleted file mode 100644 index ef2cd6603..000000000 --- a/roles/openshift_repos/templates/yum_repo.j2 +++ /dev/null @@ -1,14 +0,0 @@ -{% for repo in openshift_additional_repos %} -[{{ repo.id }}] -name={{ repo.name | default(repo.id) }} -baseurl={{ repo.baseurl }} -{% set enable_repo = repo.enabled | default(1) %} -enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }} -{% set enable_gpg_check = repo.gpgcheck | default(1) %} -gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }} -{% for key, value in repo.iteritems() %} -{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined %} -{{ key }}={{ value }} -{% endif %} -{% endfor %} -{% endfor %} diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py new file mode 100644 index 000000000..72c47b8ee --- /dev/null +++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py @@ -0,0 +1,44 @@ +''' + Filters used by the openshift_sanitize_inventory role. 
+''' + + +import re + + +# This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml +def map_from_pairs(source, delim="="): + ''' Returns a dict given the source and delim delimited ''' + if source == '': + return dict() + + return dict(item.split(delim) for item in source.split(",")) + + +def vars_with_pattern(source, pattern=""): + ''' Returns a list of variables whose name matches the given pattern ''' + if source == '': + return list() + + var_list = list() + + var_pattern = re.compile(pattern) + + for item in source: + if var_pattern.match(item): + var_list.append(item) + + return var_list + + +# pylint: disable=too-few-public-methods +class FilterModule(object): + ''' OpenShift Logging Filters ''' + + # pylint: disable=no-self-use, too-few-public-methods + def filters(self): + ''' Returns the names of the filters provided by this class ''' + return { + 'map_from_pairs': map_from_pairs, + 'vars_with_pattern': vars_with_pattern + } diff --git a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py new file mode 100644 index 000000000..f61801714 --- /dev/null +++ b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py @@ -0,0 +1,68 @@ +#!/usr/bin/python + +""" Ansible module to help with setting facts conditionally based on other facts """ + +from ansible.module_utils.basic import AnsibleModule + + +DOCUMENTATION = ''' +--- +module: conditional_set_fact + +short_description: This will set a fact if the value is defined + +description: + - "To avoid constant set_fact & when conditions for each var we can use this" + +author: + - Eric Wolinetz ewolinet@redhat.com +''' + + +EXAMPLES = ''' +- name: Conditionally set fact + conditional_set_fact: + fact1: not_defined_variable + +- name: Conditionally set fact + conditional_set_fact: + fact1: not_defined_variable + fact2: defined_variable + +''' + + +def run_module(): + """ The body of the module, we check if the variable name specified as the value + for the key is defined. 
If it is, we use that value as the value for the original key """ + + module = AnsibleModule( + argument_spec=dict( + facts=dict(type='dict', required=True), + vars=dict(required=False, type='dict', default={}) + ), + supports_check_mode=True + ) + + local_facts = dict() + is_changed = False + + for param in module.params['vars']: + other_var = module.params['vars'][param] + + if other_var in module.params['facts']: + local_facts[param] = module.params['facts'][other_var] + is_changed = True + + module.exit_json(changed=is_changed, + ansible_facts=local_facts) + + +def main(): + """ main """ + run_module() + + +if __name__ == '__main__': + main() diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml new file mode 100644 index 000000000..7c1573096 --- /dev/null +++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml @@ -0,0 +1,48 @@ +--- +# this is used to set the logging variables from deprecated values to the current variable names +# this file should be deleted once variables are no longer honored + +- conditional_set_fact: + facts: "{{ hostvars[inventory_hostname] }}" + vars: + logging_hostname: openshift_hosted_logging_hostname + logging_ops_hostname: openshift_hosted_logging_ops_hostname + logging_elasticsearch_cluster_size: openshift_hosted_logging_elasticsearch_cluster_size + logging_elasticsearch_ops_cluster_size: openshift_hosted_logging_elasticsearch_ops_cluster_size + openshift_logging_storage_kind: openshift_hosted_logging_storage_kind + openshift_logging_storage_host: openshift_hosted_logging_storage_host + openshift_logging_storage_labels: openshift_hosted_logging_storage_labels + openshift_logging_storage_volume_size: openshift_hosted_logging_storage_volume_size + openshift_loggingops_storage_kind: openshift_hosted_loggingops_storage_kind + openshift_loggingops_storage_host: openshift_hosted_loggingops_storage_host + openshift_loggingops_storage_labels: openshift_hosted_loggingops_storage_labels + openshift_loggingops_storage_volume_size: openshift_hosted_loggingops_storage_volume_size + openshift_logging_use_ops: openshift_hosted_logging_enable_ops_cluster + openshift_logging_image_pull_secret: openshift_hosted_logging_image_pull_secret + openshift_logging_kibana_hostname: openshift_hosted_logging_hostname + openshift_logging_kibana_ops_hostname: openshift_hosted_logging_ops_hostname + openshift_logging_kibana_nodeselector: openshift_hosted_logging_kibana_nodeselector + openshift_logging_kibana_ops_nodeselector: openshift_hosted_logging_kibana_ops_nodeselector + openshift_logging_fluentd_journal_source: openshift_hosted_logging_journal_source + openshift_logging_fluentd_journal_read_from_head: openshift_hosted_logging_journal_read_from_head + openshift_logging_fluentd_nodeselector: openshift_hosted_logging_fluentd_nodeselector_label + openshift_logging_es_memory_limit: openshift_hosted_logging_elasticsearch_instance_ram + openshift_logging_es_nodeselector: openshift_hosted_logging_elasticsearch_nodeselector + openshift_logging_es_ops_nodeselector: openshift_hosted_logging_elasticsearch_ops_nodeselector + openshift_logging_es_ops_memory_limit: openshift_hosted_logging_elasticsearch_ops_instance_ram + openshift_logging_storage_access_modes: openshift_hosted_logging_storage_access_modes + openshift_logging_master_public_url: openshift_hosted_logging_master_public_url + openshift_logging_image_prefix: openshift_hosted_logging_deployer_prefix + openshift_logging_image_version: openshift_hosted_logging_deployer_version + openshift_logging_install_logging: openshift_hosted_logging_deploy + openshift_logging_curator_nodeselector: openshift_hosted_logging_curator_nodeselector + openshift_logging_curator_ops_nodeselector: openshift_hosted_logging_curator_ops_nodeselector + + +- set_fact: + openshift_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_storage_volume_size | default('10Gi') if openshift_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" + openshift_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_logging_elasticsearch_ops_pvc_size: "{{ openshift_loggingops_storage_volume_size | default('10Gi') if openshift_loggingops_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" + openshift_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
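In effect, each deprecated openshift_hosted_* variable that is defined in the inventory is copied onto its replacement fact, and the trailing set_fact derives the PVC facts from the storage kind. A hedged sketch of the translation for one hypothetical inventory:

    # old inventory value                 -> resulting fact
    # openshift_hosted_logging_hostname: kibana.example.com
    #   -> openshift_logging_kibana_hostname: kibana.example.com
    # openshift_hosted_logging_storage_kind: dynamic
    #   -> openshift_logging_storage_kind: dynamic
    #   -> openshift_logging_elasticsearch_pvc_dynamic: "true"
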
diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml new file mode 100644 index 000000000..279646981 --- /dev/null +++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml @@ -0,0 +1,17 @@ +--- +# this is used to set the metrics variables from deprecated values to the current variable names +# this file should be deleted once variables are no longer honored + +- conditional_set_fact: + facts: "{{ hostvars[inventory_hostname] }}" + vars: + openshift_metrics_storage_access_modes: openshift_hosted_metrics_storage_access_modes + openshift_metrics_storage_host: openshift_hosted_metrics_storage_host + openshift_metrics_storage_nfs_directory: openshift_hosted_metrics_storage_nfs_directory + openshift_metrics_storage_volume_name: openshift_hosted_metrics_storage_volume_name + openshift_metrics_storage_volume_size: openshift_hosted_metrics_storage_volume_size + openshift_metrics_storage_labels: openshift_hosted_metrics_storage_labels + openshift_metrics_image_prefix: openshift_hosted_metrics_deployer_prefix + openshift_metrics_image_version: openshift_hosted_metrics_deployer_version + openshift_metrics_install_metrics: openshift_hosted_metrics_deploy + openshift_metrics_storage_kind: openshift_hosted_metrics_storage_kind diff --git a/roles/openshift_sanitize_inventory/tasks/deprecations.yml b/roles/openshift_sanitize_inventory/tasks/deprecations.yml new file mode 100644 index 000000000..94d3acffc --- /dev/null +++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml @@ -0,0 +1,21 @@ +--- + +- name: Check for usage of deprecated variables + set_fact: + __deprecation_message: "{{ __deprecation_message | default([]) }} + ['{{ __deprecation_header }} {{ item }} is a deprecated variable and will no longer be used in the next minor release. 
Please update your inventory accordingly.']" + when: + - hostvars[inventory_hostname][item] is defined + with_items: "{{ __warn_deprecated_vars }}" + +- block: + - debug: msg="{{ __deprecation_message }}" + - pause: + seconds: 10 + when: + - __deprecation_message | default('') | length > 0 + +# for with_fileglob Ansible resolves the path relative to the roles/<rolename>/files directory +- name: Assign deprecated variables to correct counterparts + include: "{{ item }}" + with_fileglob: + - "../tasks/__deprecations_*.yml" diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index 59ce505d3..a6c168bc7 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -1,4 +1,8 @@ --- +# We should print out deprecations prior to any failures so that if a play does fail for other reasons +# the user is still made aware of any deprecated variables they should plan to adjust +- include: deprecations.yml + - name: Abort when conflicting deployment type variables are set when: - deployment_type is defined @@ -19,6 +23,8 @@ # TODO: once this is well-documented, add deprecation notice if using old name. deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}" openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}" + deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}" + openshift_deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}" - name: Abort when deployment type is invalid # this variable is required; complain early and clearly if it is invalid. @@ -41,7 +47,7 @@ - name: Abort when openshift_release is invalid when: - openshift_release is defined - - not openshift_release | match('\d+(\.\d+){1,3}$') + - not openshift_release | match('^\d+(\.\d+){1,3}$') fail: msg: |- openshift_release is "{{ openshift_release }}" which is not a valid version string. @@ -50,3 +56,16 @@ - include: unsupported.yml when: - not openshift_enable_unsupported_configurations | default(false) | bool + +- name: Ensure clusterid is set along with the cloudprovider + fail: + msg: > + Ensure that openshift_clusterid is set and that all infrastructure has the required tags. + + For dynamic provisioning when using multiple clusters in different zones, tag each node with Key=kubernetes.io/cluster/xxxx,Value=clusterid where xxxx and clusterid are unique per cluster. In versions prior to 3.6, this was Key=KubernetesCluster,Value=clusterid. + + https://github.com/openshift/openshift-docs/blob/master/install_config/persistent_storage/dynamically_provisioning_pvs.adoc#available-dynamically-provisioned-plug-ins + when: + - openshift_clusterid is not defined + - openshift_cloudprovider_kind is defined + - openshift_cloudprovider_kind == 'aws' diff --git a/roles/openshift_sanitize_inventory/tasks/unsupported.yml b/roles/openshift_sanitize_inventory/tasks/unsupported.yml index 24e44ea85..b70ab90a1 100644 --- a/roles/openshift_sanitize_inventory/tasks/unsupported.yml +++ b/roles/openshift_sanitize_inventory/tasks/unsupported.yml @@ -10,3 +10,33 @@ Starting in 3.6 openshift_use_dnsmasq must be true or critical features will not function. This also means that NetworkManager must be installed, enabled, and responsible for management of the primary interface. 
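For the clusterid check above, a sketch of a matching inventory and the node tag the failure message describes (the cluster name "mycluster" is hypothetical):

    # inventory
    openshift_cloudprovider_kind: aws
    openshift_clusterid: mycluster

    # corresponding AWS resource tag (3.6+ convention)
    #   Key:   kubernetes.io/cluster/mycluster
    #   Value: mycluster
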
+ +- name: Ensure that openshift_node_dnsmasq_install_network_manager_hook is true + when: + - not openshift_node_dnsmasq_install_network_manager_hook | default(true) | bool + fail: + msg: |- + The NetworkManager hook is considered a critical part of the DNS + infrastructure. + +- set_fact: + __using_dynamic: True + when: + - hostvars[inventory_hostname][item] == 'dynamic' + with_items: + - "{{ hostvars[inventory_hostname] | vars_with_pattern(pattern='openshift_.*_storage_kind') }}" + +- name: Ensure that dynamic provisioning is set if using dynamic storage + when: + - dynamic_volumes_check | default(true) | bool + - not openshift_master_dynamic_provisioning_enabled | default(false) | bool + - openshift_cloudprovider_kind is not defined + - __using_dynamic is defined and __using_dynamic | bool + fail: + msg: |- + Using a storage kind of 'dynamic' without enabling dynamic provisioning or + setting a cloud provider will cause generated PVCs not to bind as + intended. Either update your inventory to avoid dynamic storage, or set + openshift_master_dynamic_provisioning_enabled to True and set an + openshift_cloudprovider_kind. You can disable this check with + 'dynamic_volumes_check=False'.
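To pass the dynamic-storage guard above without disabling it, an inventory pairs the storage kind with a cloud provider and the master provisioning flag; a hedged sketch:

    openshift_logging_storage_kind: dynamic
    openshift_master_dynamic_provisioning_enabled: true
    openshift_cloudprovider_kind: aws
    # or, to skip the guard explicitly:
    # dynamic_volumes_check: false
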
diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml index da48e42c1..0fc2372d2 100644 --- a/roles/openshift_sanitize_inventory/vars/main.yml +++ b/roles/openshift_sanitize_inventory/vars/main.yml @@ -1,7 +1,78 @@ --- # origin uses community packages named 'origin' -# online currently uses 'openshift' packages -# enterprise is used for OSE 3.0 < 3.1 which uses packages named 'openshift' -# atomic-enterprise uses Red Hat packages named 'atomic-openshift' -# openshift-enterprise uses Red Hat packages named 'atomic-openshift' starting with OSE 3.1 -known_openshift_deployment_types: ['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise'] +# openshift-enterprise uses Red Hat packages named 'atomic-openshift' +known_openshift_deployment_types: ['origin', 'openshift-enterprise'] + +__deprecation_header: "[DEPRECATION WARNING]:" + +# this is a list of variables that we will be deprecating within the next minor release; expect it to change from release to release +__warn_deprecated_vars: + # logging + - 'openshift_hosted_logging_deploy' + - 'openshift_hosted_logging_hostname' + - 'openshift_hosted_logging_ops_hostname' + - 'openshift_hosted_logging_master_public_url' + - 'openshift_hosted_logging_elasticsearch_cluster_size' + - 'openshift_hosted_logging_elasticsearch_ops_cluster_size' + - 'openshift_hosted_logging_image_pull_secret' + - 'openshift_hosted_logging_enable_ops_cluster' + - 'openshift_hosted_logging_curator_nodeselector' + - 'openshift_hosted_logging_curator_ops_nodeselector' + - 'openshift_hosted_logging_kibana_nodeselector' + - 'openshift_hosted_logging_kibana_ops_nodeselector' + - 'openshift_hosted_logging_fluentd_nodeselector_label' + - 'openshift_hosted_logging_journal_source' + - 'openshift_hosted_logging_journal_read_from_head' + - 'openshift_hosted_logging_elasticsearch_instance_ram' + - 'openshift_hosted_logging_storage_labels' + - 'openshift_hosted_logging_elasticsearch_pvc_dynamic' + - 'openshift_hosted_logging_elasticsearch_pvc_size' + - 'openshift_hosted_logging_elasticsearch_pvc_prefix' + - 'openshift_hosted_logging_elasticsearch_storage_group' + - 'openshift_hosted_logging_elasticsearch_nodeselector' + - 'openshift_hosted_logging_elasticsearch_ops_instance_ram' + - 'openshift_hosted_loggingops_storage_labels' + - 'openshift_hosted_logging_elasticsearch_ops_pvc_dynamic' + - 'openshift_hosted_logging_elasticsearch_ops_pvc_size' + - 'openshift_hosted_logging_elasticsearch_ops_pvc_prefix' + - 'openshift_hosted_logging_elasticsearch_ops_nodeselector' + - 'openshift_hosted_logging_storage_access_modes' + - 'openshift_hosted_logging_storage_kind' + - 'openshift_hosted_loggingops_storage_kind' + - 'openshift_hosted_logging_storage_host' + - 'openshift_hosted_loggingops_storage_host' + - 'openshift_hosted_logging_storage_nfs_directory' + - 'openshift_hosted_loggingops_storage_nfs_directory' + - 'openshift_hosted_logging_storage_volume_name' + - 'openshift_hosted_loggingops_storage_volume_name' + - 'openshift_hosted_logging_storage_volume_size' + - 'openshift_hosted_loggingops_storage_volume_size' + - 'openshift_hosted_logging_deployer_prefix' + - 'openshift_hosted_logging_deployer_version' + # metrics + - 'openshift_hosted_metrics_deploy' + - 'openshift_hosted_metrics_storage_kind' + - 'openshift_hosted_metrics_storage_access_modes' + - 'openshift_hosted_metrics_storage_host' + - 'openshift_hosted_metrics_storage_nfs_directory' + - 'openshift_hosted_metrics_storage_volume_name' + - 'openshift_hosted_metrics_storage_volume_size' + - 'openshift_hosted_metrics_storage_labels' + - 'openshift_hosted_metrics_deployer_prefix' + - 'openshift_hosted_metrics_deployer_version' diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml index 01ee2544d..7c848cb12 100644 --- a/roles/openshift_service_catalog/defaults/main.yml +++ b/roles/openshift_service_catalog/defaults/main.yml @@ -1,3 +1,7 @@ --- openshift_service_catalog_remove: false openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"} + +openshift_use_openshift_sdn: True +# os_sdn_network_plugin_name: "{% if openshift_use_openshift_sdn %}redhat/openshift-ovs-subnet{% else %}{% endif %}" +os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet" diff --git a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml index 71e21a269..f449fba2b 100644 --- a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml +++ b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml @@ -1,25 +1,26 @@ apiVersion: v1 kind: Template metadata: - name: service-catalog + name: service-catalog-role-bindings objects: -- kind: ClusterRole - apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 + kind: ClusterRole metadata: name: servicecatalog-serviceclass-viewer rules: - 
apiGroups: - servicecatalog.k8s.io resources: - - serviceclasses + - clusterserviceclasses + - clusterserviceplans verbs: - list - watch - get -- kind: ClusterRoleBinding - apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 + kind: ClusterRoleBinding metadata: name: servicecatalog-serviceclass-viewer-binding roleRef: @@ -37,8 +38,8 @@ objects: metadata: name: service-catalog-apiserver -- kind: ClusterRole - apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 + kind: ClusterRole metadata: name: sar-creator rules: @@ -49,17 +50,19 @@ objects: verbs: - create -- kind: ClusterRoleBinding - apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 + kind: ClusterRoleBinding metadata: name: service-catalog-sar-creator-binding roleRef: name: sar-creator - userNames: - - system:serviceaccount:kube-service-catalog:service-catalog-apiserver + subjects: + - kind: ServiceAccount + name: service-catalog-apiserver + namespace: kube-service-catalog -- kind: ClusterRole - apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 + kind: ClusterRole metadata: name: namespace-viewer rules: @@ -72,26 +75,30 @@ objects: - watch - get -- kind: ClusterRoleBinding - apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 + kind: ClusterRoleBinding metadata: name: service-catalog-namespace-viewer-binding roleRef: name: namespace-viewer - userNames: - - system:serviceaccount:kube-service-catalog:service-catalog-apiserver + subjects: + - kind: ServiceAccount + name: service-catalog-apiserver + namespace: kube-service-catalog -- kind: ClusterRoleBinding - apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 + kind: ClusterRoleBinding metadata: name: service-catalog-controller-namespace-viewer-binding roleRef: name: namespace-viewer - userNames: - - system:serviceaccount:kube-service-catalog:service-catalog-controller + subjects: + - kind: ServiceAccount + name: service-catalog-controller + namespace: kube-service-catalog -- kind: ClusterRole - apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 + kind: ClusterRole metadata: name: service-catalog-controller rules: @@ -102,6 +109,7 @@ objects: verbs: - create - update + - patch - delete - get - list @@ -109,19 +117,22 @@ objects: - apiGroups: - servicecatalog.k8s.io resources: - - brokers/status - - instances/status - - bindings/status + - clusterservicebrokers/status + - serviceinstances/status + - servicebindings/status + - servicebindings/finalizers + - serviceinstances/reference verbs: - update - apiGroups: - servicecatalog.k8s.io resources: - - brokers - - instances - - bindings + - clusterservicebrokers + - serviceinstances + - servicebindings verbs: - list + - get - watch - apiGroups: - "" @@ -133,7 +144,8 @@ objects: - apiGroups: - servicecatalog.k8s.io resources: - - serviceclasses + - clusterserviceclasses + - clusterserviceplans verbs: - create - delete @@ -154,17 +166,19 @@ objects: - list - watch -- kind: ClusterRoleBinding - apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 + kind: ClusterRoleBinding metadata: name: service-catalog-controller-binding roleRef: name: service-catalog-controller - userNames: - - system:serviceaccount:kube-service-catalog:service-catalog-controller - -- kind: Role - apiVersion: v1 + subjects: + - kind: ServiceAccount + name: service-catalog-controller + namespace: kube-service-catalog + +- apiVersion: authorization.openshift.io/v1 + kind: Role metadata: name: endpoint-accessor rules: @@ -179,21 +193,25 @@ objects: - create - update -- kind: RoleBinding - apiVersion: v1 +- 
apiVersion: authorization.openshift.io/v1 + kind: RoleBinding metadata: name: endpoint-accessor-binding roleRef: name: endpoint-accessor namespace: kube-service-catalog - userNames: - - system:serviceaccount:kube-service-catalog:service-catalog-controller + subjects: + - kind: ServiceAccount + namespace: kube-service-catalog + name: service-catalog-controller -- kind: ClusterRoleBinding - apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 + kind: ClusterRoleBinding metadata: name: system:auth-delegator-binding roleRef: name: system:auth-delegator - userNames: - - system:serviceaccount:kube-service-catalog:service-catalog-apiserver + subjects: + - kind: ServiceAccount + name: service-catalog-apiserver + namespace: kube-service-catalog diff --git a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml index f6ee0955d..f563ae42e 100644 --- a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml +++ b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml @@ -1,11 +1,11 @@ apiVersion: v1 kind: Template metadata: - name: kube-system-service-catalog + name: kube-system-service-catalog-role-bindings objects: -- kind: Role - apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 + kind: Role metadata: name: extension-apiserver-authentication-reader namespace: ${KUBE_SYSTEM_NAMESPACE} @@ -19,16 +19,18 @@ objects: verbs: - get -- kind: RoleBinding - apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 + kind: RoleBinding metadata: name: extension-apiserver-authentication-reader-binding namespace: ${KUBE_SYSTEM_NAMESPACE} roleRef: name: extension-apiserver-authentication-reader - namespace: kube-system - userNames: - - system:serviceaccount:kube-service-catalog:service-catalog-apiserver + namespace: ${KUBE_SYSTEM_NAMESPACE} + subjects: + - kind: ServiceAccount + name: service-catalog-apiserver + namespace: kube-service-catalog parameters: - description: Do not change this value. 
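These templates are instantiated by the install tasks through oc_obj and oc_process (see install.yml below); done by hand, the equivalent would be roughly the following, relying on the template's KUBE_SYSTEM_NAMESPACE default (the local file path is hypothetical):

    - name: Process and apply the kube-system role bindings (illustrative)
      shell: >
        oc process -f kubesystem_roles_bindings.yml | oc apply -f -
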
diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js deleted file mode 100644 index 1f25cc39f..000000000 --- a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js +++ /dev/null @@ -1,2 +0,0 @@ -window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.service_catalog_landing_page = true; -window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.pod_presets = true; diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml index cc897b032..9d55185c8 100644 --- a/roles/openshift_service_catalog/tasks/generate_certs.yml +++ b/roles/openshift_service_catalog/tasks/generate_certs.yml @@ -16,6 +16,16 @@ --key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt --serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer +- name: Delete old apiserver.crt + file: + path: "{{ generated_certs_dir }}/apiserver.crt" + state: absent + +- name: Delete old apiserver.key + file: + path: "{{ generated_certs_dir }}/apiserver.key" + state: absent + - name: Generating server keys oc_adm_ca_server_cert: cert: "{{ generated_certs_dir }}/apiserver.crt" @@ -36,19 +46,28 @@ - name: tls.key path: "{{ generated_certs_dir }}/apiserver.key" +- name: Create service-catalog-ssl secret + oc_secret: + state: present + name: service-catalog-ssl + namespace: kube-service-catalog + files: + - name: tls.crt + path: "{{ generated_certs_dir }}/apiserver.crt" + - slurp: src: "{{ generated_certs_dir }}/ca.crt" register: apiserver_ca - shell: > - oc get apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" + oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" register: get_apiservices changed_when: no - name: Create api service oc_obj: state: present - name: v1alpha1.servicecatalog.k8s.io + name: v1beta1.servicecatalog.k8s.io kind: apiservices.apiregistration.k8s.io namespace: "kube-service-catalog" content: @@ -57,10 +76,10 @@ apiVersion: apiregistration.k8s.io/v1beta1 kind: APIService metadata: - name: v1alpha1.servicecatalog.k8s.io + name: v1beta1.servicecatalog.k8s.io spec: group: servicecatalog.k8s.io - version: v1alpha1 + version: v1beta1 service: namespace: "kube-service-catalog" name: apiserver diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index 686857d94..aa3ec5724 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -6,8 +6,6 @@ register: mktemp changed_when: False -- include: wire_aggregator.yml - - name: Set default image variables based on deployment_type include_vars: "{{ item }}" with_first_found: @@ -25,10 +23,22 @@ name: "kube-service-catalog" node_selector: "" -- name: Make kube-service-catalog project network global - command: > - oc adm pod-network make-projects-global kube-service-catalog - when: os_sdn_network_plugin_name | default('') == 'redhat/openshift-ovs-multitenant' +- when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant' + block: + - name: Waiting for netnamespace kube-service-catalog to be ready + oc_obj: + kind: netnamespace + name: kube-service-catalog + state: list + register: get_output + until: not get_output.results.stderr is defined + retries: 30 + delay: 1 + changed_when: false + + 
- name: Make kube-service-catalog project network global + command: > + oc adm pod-network make-projects-global kube-service-catalog - include: generate_certs.yml @@ -37,16 +47,15 @@ dest: "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml" - oc_obj: - name: service-catalog + name: service-catalog-role-bindings kind: template namespace: "kube-service-catalog" files: - "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml" - delete_after: yes - oc_process: create: True - template_name: service-catalog + template_name: service-catalog-role-bindings namespace: "kube-service-catalog" - copy: @@ -54,16 +63,15 @@ dest: "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml" - oc_obj: - name: kube-system-service-catalog + name: kube-system-service-catalog-role-bindings kind: template namespace: kube-system files: - "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml" - delete_after: yes - oc_process: create: True - template_name: kube-system-service-catalog + template_name: kube-system-service-catalog-role-bindings namespace: kube-system - oc_obj: @@ -80,14 +88,14 @@ vars: original_content: "{{ edit_yaml.results.results[0] | to_yaml }}" when: - - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) + - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) # only do this if we don't already have the updated role info - name: update edit role for service catalog and pod preset access command: > oc replace -f {{ mktemp.stdout }}/edit_sc_patch.yml when: - - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) + - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - oc_obj: name: admin @@ -103,23 +111,14 @@ vars: original_content: "{{ admin_yaml.results.results[0] | to_yaml }}" when: - - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) + - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) # only do this if we don't already have the updated role info - name: update admin role for service catalog and pod preset access command: > 
oc replace -f {{ mktemp.stdout }}/admin_sc_patch.yml when: - - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -- shell: > - oc get policybindings/kube-system:default -n kube-system || echo "not found" - register: get_kube_system - changed_when: no - -- command: > - oc create policybinding kube-system -n kube-system - when: "'not found' in get_kube_system.stdout" + - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - oc_adm_policy_user: namespace: kube-service-catalog @@ -168,19 +167,19 @@ - "{{ mktemp.stdout }}/service_catalog_api_server.yml" delete_after: yes -- template: - src: api_server_service.j2 - dest: "{{ mktemp.stdout }}/service_catalog_api_service.yml" - - name: Set Service Catalog API Server service - oc_obj: - state: present - namespace: "kube-service-catalog" - kind: service + oc_service: name: apiserver - files: - - "{{ mktemp.stdout }}/service_catalog_api_service.yml" - delete_after: yes + namespace: kube-service-catalog + state: present + ports: + - name: secure + port: 443 + protocol: TCP + targetPort: 6443 + selector: + app: apiserver + session_affinity: None - template: src: api_server_route.j2 @@ -216,19 +215,19 @@ - "{{ mktemp.stdout }}/controller_manager.yml" delete_after: yes -- template: - src: controller_manager_service.j2 - dest: "{{ mktemp.stdout }}/controller_manager_service.yml" - - name: Set Controller Manager service - oc_obj: - state: present - namespace: "kube-service-catalog" - kind: service + oc_service: name: controller-manager - files: - - "{{ mktemp.stdout }}/controller_manager_service.yml" - delete_after: yes + namespace: kube-service-catalog + state: present + ports: + - port: 6443 + protocol: TCP + targetPort: 6443 + selector: + app: controller-manager + session_affinity: None + service_type: ClusterIP - include: start_api_server.yml diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml index 2fb1ec440..ca9844e79 100644 --- a/roles/openshift_service_catalog/tasks/remove.yml +++ b/roles/openshift_service_catalog/tasks/remove.yml @@ -1,11 +1,7 @@ --- - name: Remove Service Catalog APIServer command: > - oc delete apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog - -- name: Remove Policy Binding - command: > - oc delete policybindings/kube-system:default -n kube-system --ignore-not-found + oc delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog # TODO: this module doesn't currently remove this #- name: Remove service catalog api service # state: absent # namespace: "kube-service-catalog" # kind: apiservices.apiregistration.k8s.io -# name: v1alpha1.servicecatalog.k8s.io +# name: v1beta1.servicecatalog.k8s.io - name: Remove Service Catalog API Server route oc_obj: @@ -50,6 +46,26 @@ kind: deployment name: controller-manager +- name: Remove Service Catalog kube-system Role Bindings + shell: > + oc process
kube-system-service-catalog-role-bindings -n kube-system | oc delete --ignore-not-found -f - + +- oc_obj: + kind: template + name: "kube-system-service-catalog-role-bindings" + namespace: kube-system + state: absent + +- name: Remove Service Catalog kube-service-catalog Role Bindings + shell: > + oc process service-catalog-role-bindings -n kube-service-catalog | oc delete --ignore-not-found -f - + +- oc_obj: + kind: template + name: "service-catalog-role-bindings" + namespace: kube-service-catalog + state: absent + - name: Remove Service Catalog namespace oc_project: state: absent diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml deleted file mode 100644 index 1c788470a..000000000 --- a/roles/openshift_service_catalog/tasks/wire_aggregator.yml +++ /dev/null @@ -1,198 +0,0 @@ ---- -- name: Make temp cert dir - command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX - register: certtemp - changed_when: False - -- name: Check for First Master Aggregator Signer cert - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: first_proxy_ca_crt - changed_when: false - delegate_to: "{{ first_master }}" - -- name: Check for First Master Aggregator Signer key - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: first_proxy_ca_key - changed_when: false - delegate_to: "{{ first_master }}" - - -# TODO: this currently has a bug where hostnames are required -- name: Creating First Master Aggregator signer certs - command: > - oc adm ca create-signer-cert - --cert=/etc/origin/master/front-proxy-ca.crt - --key=/etc/origin/master/front-proxy-ca.key - --serial=/etc/origin/master/ca.serial.txt - delegate_to: "{{ first_master }}" - when: - - not first_proxy_ca_crt.stat.exists - - not first_proxy_ca_key.stat.exists - -- name: Check for Aggregator Signer cert - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: proxy_ca_crt - changed_when: false - -- name: Check for Aggregator Signer key - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: proxy_ca_key - changed_when: false - -- name: Copy Aggregator Signer certs from first master - fetch: - src: "/etc/origin/master/{{ item }}" - dest: "{{ certtemp.stdout }}/{{ item }}" - flat: yes - with_items: - - front-proxy-ca.crt - - front-proxy-ca.key - delegate_to: "{{ first_master }}" - when: - - not proxy_ca_key.stat.exists - - not proxy_ca_crt.stat.exists - -- name: Copy Aggregator Signer certs to host - copy: - src: "{{ certtemp.stdout }}/{{ item }}" - dest: "/etc/origin/master/{{ item }}" - with_items: - - front-proxy-ca.crt - - front-proxy-ca.key - when: - - not proxy_ca_key.stat.exists - - not proxy_ca_crt.stat.exists - -# oc_adm_ca_server_cert: -# cert: /etc/origin/master/front-proxy-ca.crt -# key: /etc/origin/master/front-proxy-ca.key - -- name: Check for first master api-client config - stat: - path: /etc/origin/master/aggregator-front-proxy.kubeconfig - register: first_front_proxy_kubeconfig - delegate_to: "{{ first_master }}" - -- name: Create first master api-client config for Aggregator - command: > - oc adm create-api-client-config - --certificate-authority=/etc/origin/master/front-proxy-ca.crt - --signer-cert=/etc/origin/master/front-proxy-ca.crt - --signer-key=/etc/origin/master/front-proxy-ca.key - --user aggregator-front-proxy - --client-dir=/etc/origin/master - --signer-serial=/etc/origin/master/ca.serial.txt - delegate_to: "{{ first_master }}" - when: - - not first_front_proxy_kubeconfig.stat.exists - -- name: Check
for api-client config - stat: - path: /etc/origin/master/aggregator-front-proxy.kubeconfig - register: front_proxy_kubeconfig - -- name: Copy api-client config from first master - fetch: - src: "/etc/origin/master/{{ item }}" - dest: "{{ certtemp.stdout }}/{{ item }}" - flat: yes - delegate_to: "{{ first_master }}" - with_items: - - aggregator-front-proxy.crt - - aggregator-front-proxy.key - - aggregator-front-proxy.kubeconfig - when: - - not front_proxy_kubeconfig.stat.exists - -- name: Copy api-client config to host - copy: - src: "{{ certtemp.stdout }}/{{ item }}" - dest: "/etc/origin/master/{{ item }}" - with_items: - - aggregator-front-proxy.crt - - aggregator-front-proxy.key - - aggregator-front-proxy.kubeconfig - when: - - not front_proxy_kubeconfig.stat.exists - -- name: copy tech preview extension file for service console UI - copy: - src: openshift-ansible-catalog-console.js - dest: /etc/origin/master/openshift-ansible-catalog-console.js - -- name: Update master config - yedit: - state: present - src: /etc/origin/master/master-config.yaml - edits: - - key: aggregatorConfig.proxyClientInfo.certFile - value: aggregator-front-proxy.crt - - key: aggregatorConfig.proxyClientInfo.keyFile - value: aggregator-front-proxy.key - - key: authConfig.requestHeader.clientCA - value: front-proxy-ca.crt - - key: authConfig.requestHeader.clientCommonNames - value: [aggregator-front-proxy] - - key: authConfig.requestHeader.usernameHeaders - value: [X-Remote-User] - - key: authConfig.requestHeader.groupHeaders - value: [X-Remote-Group] - - key: authConfig.requestHeader.extraHeaderPrefixes - value: [X-Remote-Extra-] - - key: assetConfig.extensionScripts - value: [/etc/origin/master/openshift-ansible-catalog-console.js] - - key: kubernetesMasterConfig.apiServerArguments.runtime-config - value: [apis/settings.k8s.io/v1alpha1=true] - - key: admissionConfig.pluginConfig.PodPreset.configuration.kind - value: DefaultAdmissionConfig - - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion - value: v1 - - key: admissionConfig.pluginConfig.PodPreset.configuration.disable - value: false - register: yedit_output - -#restart master serially here -- name: restart master api - systemd: name={{ openshift.common.service_type }}-master-api state=restarted - when: - - yedit_output.changed - - openshift.master.cluster_method == 'native' - -- name: restart master controllers - systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted - when: - - yedit_output.changed - - openshift.master.cluster_method == 'native' - -- name: Verify API Server - # Using curl here since the uri module requires python-httplib2 and - # wait_for port doesn't provide health information. 
- command: > - curl --silent --tlsv1.2 - {% if openshift.common.version_gte_3_2_or_1_2 | bool %} - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {% else %} - --cacert {{ openshift.common.config_base }}/master/ca.crt - {% endif %} - {{ openshift.master.api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false - when: - - yedit_output.changed - -- name: Delete temp directory - file: - name: "{{ certtemp.stdout }}" - state: absent - changed_when: False diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2 index c09834fd4..0e5bb7230 100644 --- a/roles/openshift_service_catalog/templates/api_server.j2 +++ b/roles/openshift_service_catalog/templates/api_server.j2 @@ -24,6 +24,7 @@ spec: {% endfor %} containers: - args: + - apiserver - --storage-type - etcd - --secure-port @@ -41,9 +42,11 @@ spec: - --cors-allowed-origins - {{ cors_allowed_origin }} - --admission-control - - "KubernetesNamespaceLifecycle" + - KubernetesNamespaceLifecycle,DefaultServicePlan,ServiceBindingsLifecycle,ServicePlanChangeValidator,BrokerAuthSarCheck + - --feature-gates + - OriginatingIdentity=true image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }} - command: ["/usr/bin/apiserver"] + command: ["/usr/bin/service-catalog"] imagePullPolicy: Always name: apiserver ports: diff --git a/roles/openshift_service_catalog/templates/api_server_service.j2 b/roles/openshift_service_catalog/templates/api_server_service.j2 deleted file mode 100644 index bae337201..000000000 --- a/roles/openshift_service_catalog/templates/api_server_service.j2 +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: apiserver -spec: - ports: - - name: secure - port: 443 - protocol: TCP - targetPort: 6443 - selector: - app: apiserver - sessionAffinity: None diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2 index 1bbc0fa2c..e5e5f6b50 100644 --- a/roles/openshift_service_catalog/templates/controller_manager.j2 +++ b/roles/openshift_service_catalog/templates/controller_manager.j2 @@ -29,11 +29,17 @@ spec: fieldRef: fieldPath: metadata.namespace args: + - controller-manager - -v - "5" - - "--leader-election-namespace=$(K8S_NAMESPACE)" + - --leader-election-namespace + - kube-service-catalog + - --broker-relist-interval + - "5m" + - --feature-gates + - OriginatingIdentity=true image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }} - command: ["/usr/bin/controller-manager"] + command: ["/usr/bin/service-catalog"] imagePullPolicy: Always name: controller-manager ports: @@ -41,7 +47,19 @@ spec: protocol: TCP resources: {} terminationMessagePath: /dev/termination-log + volumeMounts: + - mountPath: /var/run/kubernetes-service-catalog + name: service-catalog-ssl + readOnly: true dnsPolicy: ClusterFirst restartPolicy: Always securityContext: {} terminationGracePeriodSeconds: 30 + volumes: + - name: service-catalog-ssl + secret: + defaultMode: 420 + items: + - key: tls.crt + path: apiserver.crt + secretName: apiserver-ssl diff --git a/roles/openshift_service_catalog/templates/controller_manager_service.j2 
b/roles/openshift_service_catalog/templates/controller_manager_service.j2 deleted file mode 100644 index 2bac645fc..000000000 --- a/roles/openshift_service_catalog/templates/controller_manager_service.j2 +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: controller-manager -spec: - ports: - - port: 6443 - protocol: TCP - targetPort: 6443 - selector: - app: controller-manager - sessionAffinity: None - type: ClusterIP diff --git a/roles/openshift_service_catalog/templates/sc_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_role_patching.j2 index 69b062b3f..4629d5bb3 100644 --- a/roles/openshift_service_catalog/templates/sc_role_patching.j2 +++ b/roles/openshift_service_catalog/templates/sc_role_patching.j2 @@ -3,8 +3,8 @@ - "servicecatalog.k8s.io" attributeRestrictions: null resources: - - instances - - bindings + - serviceinstances + - servicebindings verbs: - create - update diff --git a/roles/openshift_service_catalog/vars/openshift-enterprise.yml b/roles/openshift_service_catalog/vars/openshift-enterprise.yml index 4df60e9a8..cab9cc7d8 100644 --- a/roles/openshift_service_catalog/vars/openshift-enterprise.yml +++ b/roles/openshift_service_catalog/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- __openshift_service_catalog_image_prefix: "registry.access.redhat.com/openshift3/ose-" -__openshift_service_catalog_image_version: "v3.6" +__openshift_service_catalog_image_version: "v3.7" diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index a059745a6..d0bc0e028 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -76,10 +76,11 @@ GlusterFS cluster into a new or existing OpenShift cluster: | Name | Default value | Description | |--------------------------------------------------|-------------------------|-----------------------------------------| | openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready -| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace in which to create GlusterFS resources +| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources | openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized | openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names | openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name +| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels. 
| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster | openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' | openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods @@ -91,7 +92,7 @@ GlusterFS cluster into a new or existing OpenShift cluster: | openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin | openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes | openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi -| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service. +| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service. | openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode | openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes | openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index 8d21a3f27..148549887 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -3,6 +3,7 @@ openshift_storage_glusterfs_timeout: 300 openshift_storage_glusterfs_is_native: True openshift_storage_glusterfs_name: 'storage' openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host" +openshift_storage_glusterfs_use_default_selector: False openshift_storage_glusterfs_storageclass: True openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" openshift_storage_glusterfs_version: 'latest' @@ -31,6 +32,7 @@ openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.na openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}" openshift_storage_glusterfs_registry_name: 'registry' openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host" +openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}" openshift_storage_glusterfs_registry_storageclass: False openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}" openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}" @@ -52,15 +54,15 @@ openshift_storage_glusterfs_registry_heketi_ssh_port: "{{ openshift_storage_glus openshift_storage_glusterfs_registry_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}" openshift_storage_glusterfs_registry_heketi_ssh_sudo: "{{ 
openshift_storage_glusterfs_heketi_ssh_sudo }}" openshift_storage_glusterfs_registry_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile | default(omit) }}" -r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" -r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" +r_openshift_storage_glusterfs_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_storage_glusterfs_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" r_openshift_storage_glusterfs_os_firewall_deny: [] r_openshift_storage_glusterfs_os_firewall_allow: - service: glusterfs_sshd port: "2222/tcp" -- service: glusterfs_daemon - port: "24007/tcp" - service: glusterfs_management + port: "24007/tcp" +- service: glusterfs_rdma port: "24008/tcp" - service: glusterfs_bricks port: "49152-49251/tcp" diff --git a/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml new file mode 100644 index 000000000..7b705c2d4 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml @@ -0,0 +1,135 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: deploy-heketi + labels: + glusterfs: heketi-template + deploy-heketi: support + annotations: + description: Bootstrap Heketi installation + tags: glusterfs,heketi,installation +objects: +- kind: Service + apiVersion: v1 + metadata: + name: deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-service + deploy-heketi: support + annotations: + description: Exposes Heketi service + spec: + ports: + - name: deploy-heketi-${CLUSTER_NAME} + port: 8080 + targetPort: 8080 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-route + deploy-heketi: support + spec: + to: + kind: Service + name: deploy-heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-dc + deploy-heketi: support + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: deploy-heketi + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + deploy-heketi: support + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: 
Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs diff --git a/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml new file mode 100644 index 000000000..8c5e1ded3 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml @@ -0,0 +1,136 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: glusterfs + labels: + glusterfs: template + annotations: + description: GlusterFS DaemonSet template + tags: glusterfs +objects: +- kind: DaemonSet + apiVersion: extensions/v1beta1 + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: + glusterfs: ${CLUSTER_NAME}-daemonset + annotations: + description: GlusterFS DaemonSet + tags: glusterfs + spec: + selector: + matchLabels: + glusterfs: ${CLUSTER_NAME}-pod + template: + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: + glusterfs: ${CLUSTER_NAME}-pod + glusterfs-node: pod + spec: + nodeSelector: "${{NODE_LABELS}}" + hostNetwork: true + containers: + - name: glusterfs + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + volumeMounts: + - name: glusterfs-heketi + mountPath: "/var/lib/heketi" + - name: glusterfs-run + mountPath: "/run" + - name: glusterfs-lvm + mountPath: "/run/lvm" + - name: glusterfs-etc + mountPath: "/etc/glusterfs" + - name: glusterfs-logs + mountPath: "/var/log/glusterfs" + - name: glusterfs-config + mountPath: "/var/lib/glusterd" + - name: glusterfs-dev + mountPath: "/dev" + - name: glusterfs-misc + mountPath: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + mountPath: "/sys/fs/cgroup" + readOnly: true + - name: glusterfs-ssl + mountPath: "/etc/ssl" + readOnly: true + securityContext: + capabilities: {} + privileged: true + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + resources: {} + terminationMessagePath: "/dev/termination-log" + volumes: + - name: glusterfs-heketi + hostPath: + path: "/var/lib/heketi" + - name: glusterfs-run + emptyDir: {} + - name: glusterfs-lvm + hostPath: + path: "/run/lvm" + - name: glusterfs-etc + hostPath: + path: "/etc/glusterfs" + - name: glusterfs-logs + hostPath: + path: "/var/log/glusterfs" + - name: glusterfs-config + hostPath: + path: 
"/var/lib/glusterd" + - name: glusterfs-dev + hostPath: + path: "/dev" + - name: glusterfs-misc + hostPath: + path: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + hostPath: + path: "/sys/fs/cgroup" + - name: glusterfs-ssl + hostPath: + path: "/etc/ssl" + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} +parameters: +- name: NODE_LABELS + displayName: Daemonset Node Labels + description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\' + value: '{ "glusterfs": "storage-host" }' +- name: IMAGE_NAME + displayName: GlusterFS container image name + required: True +- name: IMAGE_VERSION + displayName: GlusterFS container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances + value: storage diff --git a/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml new file mode 100644 index 000000000..61b6a8c13 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml @@ -0,0 +1,134 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: heketi + labels: + glusterfs: heketi-template + annotations: + description: Heketi service deployment template + tags: glusterfs,heketi +objects: +- kind: Service + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-service + annotations: + description: Exposes Heketi service + spec: + ports: + - name: heketi + port: 8080 + targetPort: 8080 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: heketi-${CLUSTER_NAME}-route + spec: + to: + kind: Service + name: heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-dc + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-pod + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + glusterfs: + endpoints: heketi-db-${CLUSTER_NAME}-endpoints + path: heketidbstorage + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + 
displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml index 9ebb0d5ec..7b705c2d4 100644 --- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml +++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml @@ -85,8 +85,6 @@ objects: volumeMounts: - name: db mountPath: /var/lib/heketi - - name: topology - mountPath: ${TOPOLOGY_PATH} - name: config mountPath: /etc/heketi readinessProbe: @@ -103,9 +101,6 @@ objects: port: 8080 volumes: - name: db - - name: topology - secret: - secretName: heketi-${CLUSTER_NAME}-topology-secret - name: config secret: secretName: heketi-${CLUSTER_NAME}-config-secret @@ -138,6 +133,3 @@ parameters: displayName: GlusterFS cluster name description: A unique name to identify this heketi service, useful for running multiple heketi instances value: glusterfs -- name: TOPOLOGY_PATH - displayName: heketi topology file location - required: True diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml new file mode 100644 index 000000000..7b705c2d4 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml @@ -0,0 +1,135 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: deploy-heketi + labels: + glusterfs: heketi-template + deploy-heketi: support + annotations: + description: Bootstrap Heketi installation + tags: glusterfs,heketi,installation +objects: +- kind: Service + apiVersion: v1 + metadata: + name: deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-service + deploy-heketi: support + annotations: + description: Exposes Heketi service + spec: + ports: + - name: deploy-heketi-${CLUSTER_NAME} + port: 8080 + targetPort: 8080 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-route + deploy-heketi: support + spec: + to: + kind: Service + name: deploy-heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-dc + deploy-heketi: support + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: 
Recreate + template: + metadata: + name: deploy-heketi + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + deploy-heketi: support + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml new file mode 100644 index 000000000..8c5e1ded3 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml @@ -0,0 +1,136 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: glusterfs + labels: + glusterfs: template + annotations: + description: GlusterFS DaemonSet template + tags: glusterfs +objects: +- kind: DaemonSet + apiVersion: extensions/v1beta1 + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: + glusterfs: ${CLUSTER_NAME}-daemonset + annotations: + description: GlusterFS DaemonSet + tags: glusterfs + spec: + selector: + matchLabels: + glusterfs: ${CLUSTER_NAME}-pod + template: + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: + glusterfs: ${CLUSTER_NAME}-pod + glusterfs-node: pod + spec: + nodeSelector: "${{NODE_LABELS}}" + hostNetwork: true + containers: + - name: glusterfs + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + volumeMounts: + - name: glusterfs-heketi + mountPath: "/var/lib/heketi" + - name: glusterfs-run + mountPath: "/run" + - name: glusterfs-lvm + mountPath: "/run/lvm" + - name: glusterfs-etc + mountPath: "/etc/glusterfs" + - name: glusterfs-logs + mountPath: "/var/log/glusterfs" + - name: glusterfs-config + mountPath: 
"/var/lib/glusterd" + - name: glusterfs-dev + mountPath: "/dev" + - name: glusterfs-misc + mountPath: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + mountPath: "/sys/fs/cgroup" + readOnly: true + - name: glusterfs-ssl + mountPath: "/etc/ssl" + readOnly: true + securityContext: + capabilities: {} + privileged: true + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + resources: {} + terminationMessagePath: "/dev/termination-log" + volumes: + - name: glusterfs-heketi + hostPath: + path: "/var/lib/heketi" + - name: glusterfs-run + emptyDir: {} + - name: glusterfs-lvm + hostPath: + path: "/run/lvm" + - name: glusterfs-etc + hostPath: + path: "/etc/glusterfs" + - name: glusterfs-logs + hostPath: + path: "/var/log/glusterfs" + - name: glusterfs-config + hostPath: + path: "/var/lib/glusterd" + - name: glusterfs-dev + hostPath: + path: "/dev" + - name: glusterfs-misc + hostPath: + path: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + hostPath: + path: "/sys/fs/cgroup" + - name: glusterfs-ssl + hostPath: + path: "/etc/ssl" + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} +parameters: +- name: NODE_LABELS + displayName: Daemonset Node Labels + description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\' + value: '{ "glusterfs": "storage-host" }' +- name: IMAGE_NAME + displayName: GlusterFS container image name + required: True +- name: IMAGE_VERSION + displayName: GlusterFS container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances + value: storage diff --git a/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml new file mode 100644 index 000000000..61b6a8c13 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml @@ -0,0 +1,134 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: heketi + labels: + glusterfs: heketi-template + annotations: + description: Heketi service deployment template + tags: glusterfs,heketi +objects: +- kind: Service + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-service + annotations: + description: Exposes Heketi service + spec: + ports: + - name: heketi + port: 8080 + targetPort: 8080 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: heketi-${CLUSTER_NAME}-route + spec: + to: + kind: Service + name: heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-dc + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: 
heketi-${CLUSTER_NAME}-pod + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + glusterfs: + endpoints: heketi-db-${CLUSTER_NAME}-endpoints + path: heketidbstorage + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index a31c5bd5e..51724f979 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -15,8 +15,20 @@ oc_project: state: present name: "{{ glusterfs_namespace }}" + node_selector: "{% if glusterfs_use_default_selector %}{{ omit }}{% endif %}" when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass +- name: Add namespace service accounts to privileged SCC + oc_adm_policy_user: + user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}" + resource_kind: scc + resource_name: privileged + state: present + with_items: + - 'default' + - 'router' + when: glusterfs_is_native or glusterfs_heketi_is_native + - name: Delete pre-existing heketi resources oc_obj: namespace: "{{ glusterfs_namespace }}" @@ -50,8 +62,8 @@ kind: pod state: list selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod" - register: heketi_pod - until: "heketi_pod.results.results[0]['items'] | count == 0" + register: deploy_heketi_pod + until: "deploy_heketi_pod.results.results[0]['items'] | count == 0" delay: 10 retries: "{{ (glusterfs_timeout | int / 10) | int }}" when: glusterfs_heketi_wipe @@ -102,7 +114,7 @@ state: list kind: pod selector: 
"glusterfs=deploy-heketi-{{ glusterfs_name }}-pod" - register: heketi_pod + register: deploy_heketi_pod when: glusterfs_heketi_is_native - name: Check if need to deploy deploy-heketi @@ -110,9 +122,9 @@ glusterfs_heketi_deploy_is_missing: False when: - "glusterfs_heketi_is_native" - - "heketi_pod.results.results[0]['items'] | count > 0" + - "deploy_heketi_pod.results.results[0]['items'] | count > 0" # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True - - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" - name: Check for existing heketi pod oc_obj: @@ -146,6 +158,21 @@ when: - glusterfs_heketi_is_native +- name: Get heketi admin secret + oc_secret: + state: list + namespace: "{{ glusterfs_namespace }}" + name: "heketi-{{ glusterfs_name }}-admin-secret" + decode: True + register: glusterfs_heketi_admin_secret + +- name: Set heketi admin key + set_fact: + glusterfs_heketi_admin_key: "{{ glusterfs_heketi_admin_secret.results.decoded.key }}" + when: + - glusterfs_is_native + - glusterfs_heketi_admin_secret.results.results[0] + - name: Generate heketi admin key set_fact: glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}" @@ -189,14 +216,37 @@ - glusterfs_heketi_deploy_is_missing - glusterfs_heketi_is_missing +- name: Wait for deploy-heketi pod + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: pod + state: list + selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod" + register: deploy_heketi_pod + until: + - "deploy_heketi_pod.results.results[0]['items'] | count > 0" + # Pod's 'Ready' status must be True + - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" + delay: 10 + retries: "{{ (glusterfs_timeout | int / 10) | int }}" + when: + - glusterfs_heketi_is_native + - not glusterfs_heketi_deploy_is_missing + - glusterfs_heketi_is_missing + - name: Set heketi-cli command set_fact: - glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}" + glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}" - name: Verify heketi service command: "{{ 
glusterfs_heketi_client }} cluster list" changed_when: False +- name: Place heketi topology on heketi Pod + shell: "{{ openshift.common.client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json" + when: + - glusterfs_heketi_is_native + - name: Load heketi topology command: "{{ glusterfs_heketi_client }} topology load --json={{ mktemp.stdout }}/topology.json 2>&1" register: topology_load diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml index 7a2987883..012c722ff 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -5,6 +5,7 @@ glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}" glusterfs_name: "{{ openshift_storage_glusterfs_name }}" glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}" + glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}" glusterfs_image: "{{ openshift_storage_glusterfs_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_version }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml index 8c3e31fc9..932d06038 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml @@ -55,16 +55,6 @@ - glusterfs_wipe - item.stdout_lines | count > 0 -- name: Add service accounts to privileged SCC - oc_adm_policy_user: - user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}" - resource_kind: scc - resource_name: privileged - state: present - with_items: - - 'default' - - 'router' - - name: Label GlusterFS nodes oc_label: name: "{{ hostvars[item].openshift.node.nodename }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 17f87578d..1bcab8e49 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -5,6 +5,7 @@ glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}" glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}" glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}" + glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}" glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml index 3ba1eb2d2..73396c9af 100644 --- 
a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml @@ -6,16 +6,6 @@ with_items: - "deploy-heketi-template.yml" -- name: Create heketi topology secret - oc_secret: - namespace: "{{ glusterfs_namespace }}" - state: present - name: "heketi-{{ glusterfs_name }}-topology-secret" - force: True - files: - - name: topology.json - path: "{{ mktemp.stdout }}/topology.json" - - name: Create deploy-heketi template oc_obj: namespace: "{{ glusterfs_namespace }}" @@ -39,18 +29,7 @@ HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}" HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}" CLUSTER_NAME: "{{ glusterfs_name }}" - TOPOLOGY_PATH: "{{ mktemp.stdout }}" -- name: Wait for deploy-heketi pod - oc_obj: - namespace: "{{ glusterfs_namespace }}" - kind: pod - state: list - selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod" - register: heketi_pod - until: - - "heketi_pod.results.results[0]['items'] | count > 0" - # Pod's 'Ready' status must be True - - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" - delay: 10 - retries: "{{ (glusterfs_timeout | int / 10) | int }}" +- name: Set heketi Deployed fact + set_fact: + glusterfs_heketi_deploy_is_missing: False diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index afc04a537..54a6dd7c3 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -4,7 +4,7 @@ register: setup_storage - name: Copy heketi-storage list - shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json" + shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json" # This is used in the subsequent task - name: Copy the admin client config diff --git a/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml new file mode 100644 index 000000000..030fa81c9 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml @@ -0,0 +1,12 @@ +--- +- name: Ensure device mapper modules loaded + template: + src: glusterfs.conf + dest: /etc/modules-load.d/glusterfs.conf + register: km + +- name: load kernel modules + systemd: + name: systemd-modules-load.service + state: restarted + when: km | changed diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf new file mode 100644 index 000000000..dd4d6e6f7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf @@ -0,0 +1,4 @@ +#{{ ansible_managed }} +dm_thin_pool +dm_snapshot +dm_mirror
\ No newline at end of file diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 new file mode 100644 index 000000000..11c9195bb --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: glusterfs-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} + - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} +{% endfor %} + ports: + - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 new file mode 100644 index 000000000..3f869d2b7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: glusterfs-{{ glusterfs_name }}-endpoints +spec: + ports: + - port: 1 +status: + loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 new file mode 100644 index 000000000..454e84aaf --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: storage.k8s.io/v1beta1 +kind: StorageClass +metadata: + name: glusterfs-{{ glusterfs_name }} +provisioner: kubernetes.io/glusterfs +parameters: + resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" + restuser: "admin" +{% if glusterfs_heketi_admin_key is defined %} + secretNamespace: "{{ glusterfs_namespace }}" + secretName: "heketi-{{ glusterfs_name }}-admin-secret" +{%- endif -%} diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 new file mode 100644 index 000000000..99cbdf748 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: heketi-db-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} + - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} +{% endfor %} + ports: + - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 new file mode 100644 index 000000000..dcb896441 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: heketi-db-{{ glusterfs_name }}-endpoints +spec: + ports: + - port: 1 +status: + loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 new file mode 100644 index 000000000..579b11bb7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 @@ -0,0 +1,36 @@ +{ + "_port_comment": "Heketi Server Port Number", + "port" : "8080", + + "_use_auth": "Enable JWT authorization. 
Please enable for deployment", + "use_auth" : false, + + "_jwt" : "Private keys for access", + "jwt" : { + "_admin" : "Admin has access to all APIs", + "admin" : { + "key" : "My Secret" + }, + "_user" : "User only has access to /volumes endpoint", + "user" : { + "key" : "My Secret" + } + }, + + "_glusterfs_comment": "GlusterFS Configuration", + "glusterfs" : { + + "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh", + "executor" : "{{ glusterfs_heketi_executor }}", + + "_db_comment": "Database file name", + "db" : "/var/lib/heketi/heketi.db", + + "sshexec" : { + "keyfile" : "/etc/heketi/private_key", + "port" : "{{ glusterfs_heketi_ssh_port }}", + "user" : "{{ glusterfs_heketi_ssh_user }}", + "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }} + } + } +} diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 new file mode 100644 index 000000000..d6c28f6dd --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 @@ -0,0 +1,49 @@ +{ + "clusters": [ +{%- set clusters = {} -%} +{%- for node in glusterfs_nodes -%} + {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%} + {%- if cluster in clusters -%} + {%- set _dummy = clusters[cluster].append(node) -%} + {%- else -%} + {%- set _dummy = clusters.update({cluster: [ node, ]}) -%} + {%- endif -%} +{%- endfor -%} +{%- for cluster in clusters -%} + { + "nodes": [ +{%- for node in clusters[cluster] -%} + { + "node": { + "hostnames": { + "manage": [ +{%- if 'glusterfs_hostname' in hostvars[node] -%} + "{{ hostvars[node].glusterfs_hostname }}" +{%- elif 'openshift' in hostvars[node] -%} + "{{ hostvars[node].openshift.node.nodename }}" +{%- else -%} + "{{ node }}" +{%- endif -%} + ], + "storage": [ +{%- if 'glusterfs_ip' in hostvars[node] -%} + "{{ hostvars[node].glusterfs_ip }}" +{%- else -%} + "{{ hostvars[node].openshift.common.ip }}" +{%- endif -%} + ] + }, + "zone": {{ hostvars[node].glusterfs_zone | default(1) }} + }, + "devices": [ +{%- for device in hostvars[node].glusterfs_devices -%} + "{{ device }}"{% if not loop.last %},{% endif %} +{%- endfor -%} + ] + }{% if not loop.last %},{% endif %} +{%- endfor -%} + ] + }{% if not loop.last %},{% endif %} +{%- endfor -%} + ] +} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 new file mode 100644 index 000000000..11c9195bb --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: glusterfs-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} + - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} +{% endfor %} + ports: + - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 new file mode 100644 index 000000000..3f869d2b7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: glusterfs-{{ glusterfs_name }}-endpoints +spec: + ports: + - port: 1 +status: + loadBalancer: {}
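Note: the Endpoints/Service pairs above carry no real service. GlusterFS-backed persistent volumes only read the peer IPs out of the Endpoints object, and the dummy "port: 1" exists to satisfy the API schema. As a rough illustration (assuming glusterfs_name is 'registry' and two inventory hosts that set glusterfs_ip to 192.0.2.11 and 192.0.2.12; both values are hypothetical), the endpoints template would render to:

    ---
    apiVersion: v1
    kind: Endpoints
    metadata:
      name: glusterfs-registry-endpoints
    subsets:
    - addresses:
      - ip: 192.0.2.11
      - ip: 192.0.2.12
      ports:
      - port: 1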
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 new file mode 100644 index 000000000..095fb780f --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: glusterfs-{{ glusterfs_name }} +provisioner: kubernetes.io/glusterfs +parameters: + resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" + restuser: "admin" +{% if glusterfs_heketi_admin_key is defined %} + secretNamespace: "{{ glusterfs_namespace }}" + secretName: "heketi-{{ glusterfs_name }}-admin-secret" +{%- endif -%} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 new file mode 100644 index 000000000..99cbdf748 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: heketi-db-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} + - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} +{% endfor %} + ports: + - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 new file mode 100644 index 000000000..dcb896441 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: heketi-db-{{ glusterfs_name }}-endpoints +spec: + ports: + - port: 1 +status: + loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 new file mode 100644 index 000000000..579b11bb7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 @@ -0,0 +1,36 @@ +{ + "_port_comment": "Heketi Server Port Number", + "port" : "8080", + + "_use_auth": "Enable JWT authorization. Please enable for deployment", + "use_auth" : false, + + "_jwt" : "Private keys for access", + "jwt" : { + "_admin" : "Admin has access to all APIs", + "admin" : { + "key" : "My Secret" + }, + "_user" : "User only has access to /volumes endpoint", + "user" : { + "key" : "My Secret" + } + }, + + "_glusterfs_comment": "GlusterFS Configuration", + "glusterfs" : { + + "_executor_comment": "Execute plugin. 
Possible choices: mock, kubernetes, ssh", + "executor" : "{{ glusterfs_heketi_executor }}", + + "_db_comment": "Database file name", + "db" : "/var/lib/heketi/heketi.db", + + "sshexec" : { + "keyfile" : "/etc/heketi/private_key", + "port" : "{{ glusterfs_heketi_ssh_port }}", + "user" : "{{ glusterfs_heketi_ssh_user }}", + "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }} + } + } +} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 new file mode 100644 index 000000000..d6c28f6dd --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 @@ -0,0 +1,49 @@ +{ + "clusters": [ +{%- set clusters = {} -%} +{%- for node in glusterfs_nodes -%} + {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%} + {%- if cluster in clusters -%} + {%- set _dummy = clusters[cluster].append(node) -%} + {%- else -%} + {%- set _dummy = clusters.update({cluster: [ node, ]}) -%} + {%- endif -%} +{%- endfor -%} +{%- for cluster in clusters -%} + { + "nodes": [ +{%- for node in clusters[cluster] -%} + { + "node": { + "hostnames": { + "manage": [ +{%- if 'glusterfs_hostname' in hostvars[node] -%} + "{{ hostvars[node].glusterfs_hostname }}" +{%- elif 'openshift' in hostvars[node] -%} + "{{ hostvars[node].openshift.node.nodename }}" +{%- else -%} + "{{ node }}" +{%- endif -%} + ], + "storage": [ +{%- if 'glusterfs_ip' in hostvars[node] -%} + "{{ hostvars[node].glusterfs_ip }}" +{%- else -%} + "{{ hostvars[node].openshift.common.ip }}" +{%- endif -%} + ] + }, + "zone": {{ hostvars[node].glusterfs_zone | default(1) }} + }, + "devices": [ +{%- for device in hostvars[node].glusterfs_devices -%} + "{{ device }}"{% if not loop.last %},{% endif %} +{%- endfor -%} + ] + }{% if not loop.last %},{% endif %} +{%- endfor -%} + ] + }{% if not loop.last %},{% endif %} +{%- endfor -%} + ] +}
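Note: both topology.json.j2 templates group nodes into heketi clusters keyed by the optional glusterfs_cluster host variable (every node lands in cluster '1' when it is unset), take the management hostname from glusterfs_hostname or the OpenShift node name, the storage address from glusterfs_ip or the host's default IP, and a failure-domain zone that defaults to 1. A sketch of the per-host variables the template consumes, with purely illustrative values:

    glusterfs_cluster: '2'          # optional; hosts sharing a value form one heketi cluster
    glusterfs_hostname: node1.example.com
    glusterfs_ip: 192.0.2.11
    glusterfs_zone: 2               # heketi failure domain, defaults to 1
    glusterfs_devices:              # raw block devices handed over to heketi
    - /dev/xvdc
    - /dev/xvdd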
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml index 51f8f4e0e..c4e023c1e 100644 --- a/roles/openshift_storage_nfs/tasks/main.yml +++ b/roles/openshift_storage_nfs/tasks/main.yml @@ -31,10 +31,13 @@ group: nfsnobody with_items: - "{{ openshift.hosted.registry }}" - - "{{ openshift.hosted.metrics }}" - - "{{ openshift.hosted.logging }}" - - "{{ openshift.hosted.loggingops }}" + - "{{ openshift.metrics }}" + - "{{ openshift.logging }}" + - "{{ openshift.loggingops }}" - "{{ openshift.hosted.etcd }}" + - "{{ openshift.prometheus }}" + - "{{ openshift.prometheus.alertmanager }}" + - "{{ openshift.prometheus.alertbuffer }}" - name: Configure exports template: diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2 index 7e8f70b23..c2a741035 100644 --- a/roles/openshift_storage_nfs/templates/exports.j2 +++ b/roles/openshift_storage_nfs/templates/exports.j2 @@ -1,5 +1,8 @@ {{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }} -{{ openshift.hosted.metrics.storage.nfs.directory }}/{{ openshift.hosted.metrics.storage.volume.name }} {{ openshift.hosted.metrics.storage.nfs.options }} -{{ openshift.hosted.logging.storage.nfs.directory }}/{{ openshift.hosted.logging.storage.volume.name }} {{ openshift.hosted.logging.storage.nfs.options }} -{{ openshift.hosted.loggingops.storage.nfs.directory }}/{{ openshift.hosted.loggingops.storage.volume.name }} {{ openshift.hosted.loggingops.storage.nfs.options }} +{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ openshift.metrics.storage.nfs.options }} +{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }} +{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }} {{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }} +{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }} +{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }} +{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }} diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index c0ea00f34..1c8b9046c 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -156,7 +156,9 @@ - set_fact: openshift_pkg_version: -{{ openshift_version }} - when: openshift_pkg_version is not defined + when: + - openshift_pkg_version is not defined + - openshift_upgrade_target is not defined - fail: msg: openshift_version role was unable to set openshift_version @@ -171,7 +173,10 @@ - fail: msg: openshift_version role was unable to set openshift_pkg_version name: Abort if openshift_pkg_version was not set - when: openshift_pkg_version is not defined + when: + - openshift_pkg_version is not defined + - openshift_upgrade_target is not defined + - fail: msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories." diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml index a2a579e9d..b727eb74d 100644 --- a/roles/openshift_version/tasks/set_version_containerized.yml +++ b/roles/openshift_version/tasks/set_version_containerized.yml @@ -1,6 +1,6 @@ --- - set_fact: - l_use_crio: "{{ openshift_use_crio | default(false) }}" + l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}" - name: Set containerized version to configure if openshift_image_tag specified set_fact: @@ -22,7 +22,9 @@ command: > docker run --rm {{ openshift.common.cli_image }}:latest version register: cli_image_version - when: openshift_version is not defined + when: + - openshift_version is not defined + - not l_use_crio_only # Origin latest = pre-release version (i.e. 
v1.3.0-alpha.1-321-gb095e3a) - set_fact: @@ -31,6 +33,7 @@ - openshift_version is not defined - openshift.common.deployment_type == 'origin' - cli_image_version.stdout_lines[0].split('-') | length > 1 + - not l_use_crio_only - set_fact: openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}" @@ -45,14 +48,14 @@ when: - openshift_version is defined - openshift_version.split('.') | length == 2 - - not l_use_crio + - not l_use_crio_only - set_fact: openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}" when: - openshift_version is defined - openshift_version.split('.') | length == 2 - - not l_use_crio + - not l_use_crio_only # TODO: figure out a way to check for the openshift_version when using CRI-O. # We should do that using the images in the ostree storage so we don't have diff --git a/roles/os_firewall/tasks/iptables.yml b/roles/os_firewall/tasks/iptables.yml index 0af5abf38..2d74f2e48 100644 --- a/roles/os_firewall/tasks/iptables.yml +++ b/roles/os_firewall/tasks/iptables.yml @@ -33,7 +33,7 @@ register: result delegate_to: "{{item}}" run_once: true - with_items: "{{ ansible_play_hosts }}" + with_items: "{{ ansible_play_batch }}" - name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail pause: diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml index 39d59db70..fa74c9953 100644 --- a/roles/rhel_subscribe/tasks/enterprise.yml +++ b/roles/rhel_subscribe/tasks/enterprise.yml @@ -3,20 +3,17 @@ command: subscription-manager repos --disable="*" - set_fact: - default_ose_version: '3.0' - when: deployment_type == 'enterprise' - -- set_fact: default_ose_version: '3.6' - when: deployment_type in ['atomic-enterprise', 'openshift-enterprise'] + when: deployment_type == 'openshift-enterprise' - set_fact: - ose_version: "{{ lookup('oo_option', 'ose_version') | default(default_ose_version, True) }}" + ose_version: "{{ lookup('env', 'ose_version') | default(default_ose_version, True) }}" - fail: msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type" - when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or - ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6'] ) + when: + - deployment_type == 'openshift-enterprise' + - ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6'] - name: Enable RHEL repositories command: subscription-manager repos \
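Note: the switch from the oo_option lookup to lookup('env', ...) keeps the same fallback behavior only because of default()'s second argument: an env lookup returns an empty string for an unset variable, and default(fallback, True) substitutes the fallback for empty values as well as undefined ones. A minimal sketch of the pattern, with an illustrative variable name:

    - set_fact:
        demo_version: "{{ lookup('env', 'DEMO_OSE_VERSION') | default('3.6', True) }}"

The same pattern carries over to the subscription variables in tasks/main.yml below.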
"{{ lookup('env', 'rhel_subscription_pool') | default(rhsub_pool | default('Red Hat OpenShift Container Platform, Premium*')) }}" + rhel_subscription_user: "{{ lookup('env', 'rhel_subscription_user') | default(rhsub_user | default(omit, True)) }}" + rhel_subscription_pass: "{{ lookup('env', 'rhel_subscription_pass') | default(rhsub_pass | default(omit, True)) }}" + rhel_subscription_server: "{{ lookup('env', 'rhel_subscription_server') | default(rhsub_server | default(omit, True)) }}" - fail: msg: "This role is only supported for Red Hat hosts" @@ -41,15 +41,19 @@ redhat_subscription: username: "{{ rhel_subscription_user }}" password: "{{ rhel_subscription_pass }}" + register: rh_subscription + until: rh_subscription | succeeded - name: Retrieve the OpenShift Pool ID command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only register: openshift_pool_id + until: openshift_pool_id | succeeded changed_when: False - name: Determine if OpenShift Pool Already Attached command: subscription-manager list --consumed --matches="{{ rhel_subscription_pool }}" --pool-only register: openshift_pool_attached + until: openshift_pool_attached | succeeded changed_when: False when: openshift_pool_id.stdout == '' @@ -58,10 +62,12 @@ when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == '' - name: Attach to OpenShift Pool - command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }} + command: subscription-manager attach --pool {{ openshift_pool_id.stdout_lines[0] }} + register: subscribe_pool + until: subscribe_pool | succeeded when: openshift_pool_id.stdout != '' - include: enterprise.yml when: - - deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ] + - deployment_type == 'openshift-enterprise' - not ostree_booted.stat.exists | bool diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml new file mode 100644 index 000000000..a92a138b0 --- /dev/null +++ b/roles/template_service_broker/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# placeholder file? 
+template_service_broker_remove: False +template_service_broker_install: False +openshift_template_service_broker_namespaces: ['openshift'] diff --git a/roles/template_service_broker/files/openshift-ansible-catalog-console.js b/roles/template_service_broker/files/openshift-ansible-catalog-console.js new file mode 100644 index 000000000..622afb6bd --- /dev/null +++ b/roles/template_service_broker/files/openshift-ansible-catalog-console.js @@ -0,0 +1 @@ +window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = true; diff --git a/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js b/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js new file mode 100644 index 000000000..d0a9f11dc --- /dev/null +++ b/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js @@ -0,0 +1,2 @@ +// empty file so that the master-config can still point to a file that exists +// this file will be replaced by the template service broker role if enabled diff --git a/roles/etcd_ca/meta/main.yml b/roles/template_service_broker/meta/main.yml index e3e2f7781..ab5a0cf08 100644 --- a/roles/etcd_ca/meta/main.yml +++ b/roles/template_service_broker/meta/main.yml @@ -1,7 +1,7 @@ --- galaxy_info: - author: Jason DeTiberus - description: Etcd CA + author: OpenShift Red Hat + description: OpenShift Template Service Broker company: Red Hat, Inc. license: Apache License, Version 2.0 min_ansible_version: 2.1 @@ -11,6 +11,3 @@ galaxy_info: - 7 categories: - cloud - - system -dependencies: -- role: etcd_common diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml new file mode 100644 index 000000000..6a532a206 --- /dev/null +++ b/roles/template_service_broker/tasks/install.yml @@ -0,0 +1,91 @@ +--- +# Fact setting +- name: Set default image variables based on deployment type + include_vars: "{{ item }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + +- name: set template_service_broker facts + set_fact: + template_service_broker_prefix: "{{ template_service_broker_prefix | default(__template_service_broker_prefix) }}" + template_service_broker_version: "{{ template_service_broker_version | default(__template_service_broker_version) }}" + template_service_broker_image_name: "{{ template_service_broker_image_name | default(__template_service_broker_image_name) }}" + +- oc_project: + name: openshift-template-service-broker + state: present + +- command: mktemp -d /tmp/tsb-ansible-XXXXXX + register: mktemp + changed_when: False + become: no + +- copy: + src: "{{ __tsb_files_location }}/{{ item }}" + dest: "{{ mktemp.stdout }}/{{ item }}" + with_items: + - "{{ __tsb_template_file }}" + - "{{ __tsb_rbac_file }}" + - "{{ __tsb_broker_file }}" + - "{{ __tsb_config_file }}" + +- yedit: + src: "{{ mktemp.stdout }}/{{ __tsb_config_file }}" + key: templateNamespaces + value: "{{ openshift_template_service_broker_namespaces }}" + value_type: list + +- slurp: + src: "{{ mktemp.stdout }}/{{ __tsb_config_file }}" + register: config + +- name: Apply template file + shell: > + oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" + --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}" + --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}" + | kubectl apply -f - + +# reconcile with rbac +- name: Reconcile with RBAC file + shell: > + oc process -f "{{ 
mktemp.stdout }}/{{ __tsb_rbac_file }}" | oc auth reconcile -f - + +- name: copy tech preview extension file for service console UI + copy: + src: openshift-ansible-catalog-console.js + dest: /etc/origin/master/openshift-ansible-catalog-console.js + +# Check that the TSB is running +- name: Verify that TSB is running + command: > + curl -k https://apiserver.openshift-template-service-broker.svc/healthz + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: api_health + until: api_health.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false + +- set_fact: + openshift_master_config_dir: "{{ openshift.common.config_base }}/master" + when: openshift_master_config_dir is undefined + +- slurp: + src: "{{ openshift_master_config_dir }}/service-signer.crt" + register: __ca_bundle + +# Register with broker +- name: Register TSB with broker + shell: > + oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | oc apply -f - + +- file: + state: absent + name: "{{ mktemp.stdout }}" + changed_when: False + become: no diff --git a/roles/template_service_broker/tasks/main.yml b/roles/template_service_broker/tasks/main.yml new file mode 100644 index 000000000..d7ca970c7 --- /dev/null +++ b/roles/template_service_broker/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# do any asserts here + +- include: install.yml + when: template_service_broker_install | default(false) | bool + +- include: remove.yml + when: template_service_broker_remove | default(false) | bool diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml new file mode 100644 index 000000000..28836f97f --- /dev/null +++ b/roles/template_service_broker/tasks/remove.yml @@ -0,0 +1,35 @@ +--- +- command: mktemp -d /tmp/tsb-ansible-XXXXXX + register: mktemp + changed_when: False + become: no + +- copy: + src: "{{ __tsb_files_location }}/{{ item }}" + dest: "{{ mktemp.stdout }}/{{ item }}" + with_items: + - "{{ __tsb_template_file }}" + - "{{ __tsb_broker_file }}" + +- name: Delete TSB broker + shell: > + oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | oc delete --ignore-not-found -f - + +- name: Delete TSB objects + shell: > + oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | oc delete --ignore-not-found -f - + +- name: empty out tech preview extension file for service console UI + copy: + src: remove-openshift-ansible-catalog-console.js + dest: /etc/origin/master/openshift-ansible-catalog-console.js + +- oc_project: + name: openshift-template-service-broker + state: absent + +- file: + state: absent + name: "{{ mktemp.stdout }}" + changed_when: False + become: no diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml new file mode 100644 index 000000000..77afe1f43 --- /dev/null +++ b/roles/template_service_broker/vars/default_images.yml @@ -0,0 +1,4 @@ +--- +__template_service_broker_prefix: "docker.io/openshift/" +__template_service_broker_version: "latest" +__template_service_broker_image_name: "origin"
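Note: the install and remove tasks above follow one pattern throughout: process a template, then pipe the result into a declarative verb (oc apply, oc delete --ignore-not-found, oc auth reconcile), which keeps reruns idempotent without tracking object state in Ansible. The curl-based health check could equally be written with the uri module; a hedged sketch, mirroring curl -k with validate_certs: no:

    - name: Verify that TSB is running
      uri:
        url: https://apiserver.openshift-template-service-broker.svc/healthz
        validate_certs: no
        return_content: yes
      register: api_health
      until: api_health.content == 'ok'
      retries: 120
      delay: 1
      changed_when: false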
"apiserver-config.yaml" +__tsb_rbac_file: "rbac-template.yaml" +__tsb_broker_file: "template-service-broker-registration.yaml" diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml new file mode 100644 index 000000000..dfab1e01b --- /dev/null +++ b/roles/template_service_broker/vars/openshift-enterprise.yml @@ -0,0 +1,4 @@ +--- +__template_service_broker_prefix: "registry.access.redhat.com/openshift3/" +__template_service_broker_version: "v3.7" +__template_service_broker_image_name: "ose" diff --git a/roles/tuned/defaults/main.yml b/roles/tuned/defaults/main.yml new file mode 100644 index 000000000..418a4b521 --- /dev/null +++ b/roles/tuned/defaults/main.yml @@ -0,0 +1,3 @@ +--- +tuned_etc_directory: '/etc/tuned' +tuned_templates_source: '../templates' diff --git a/roles/etcd_client_certificates/meta/main.yml b/roles/tuned/meta/main.yml index efebdb599..833d94c13 100644 --- a/roles/etcd_client_certificates/meta/main.yml +++ b/roles/tuned/meta/main.yml @@ -1,16 +1,13 @@ --- galaxy_info: - author: Jason DeTiberus - description: Etcd Client Certificates + author: Jiri Mencak + description: Restart the tuned daemon if present and make it use the recommended profile company: Red Hat, Inc. license: Apache License, Version 2.0 - min_ansible_version: 2.1 + min_ansible_version: 2.3 platforms: - name: EL versions: - 7 categories: - cloud - - system -dependencies: -- role: etcd_common diff --git a/roles/openshift_node/tasks/tuned.yml b/roles/tuned/tasks/main.yml index 425bf6a26..e95d274d5 100644 --- a/roles/openshift_node/tasks/tuned.yml +++ b/roles/tuned/tasks/main.yml @@ -12,8 +12,6 @@ - name: Set tuned OpenShift variables set_fact: openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}" - tuned_etc_directory: '/etc/tuned' - tuned_templates_source: '../templates/tuned' - name: Ensure directory structure exists file: diff --git a/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf b/roles/tuned/templates/openshift-control-plane/tuned.conf index f22f21065..f22f21065 100644 --- a/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf +++ b/roles/tuned/templates/openshift-control-plane/tuned.conf diff --git a/roles/openshift_node/templates/tuned/openshift-node/tuned.conf b/roles/tuned/templates/openshift-node/tuned.conf index 78c7d19c9..78c7d19c9 100644 --- a/roles/openshift_node/templates/tuned/openshift-node/tuned.conf +++ b/roles/tuned/templates/openshift-node/tuned.conf diff --git a/roles/openshift_node/templates/tuned/openshift/tuned.conf b/roles/tuned/templates/openshift/tuned.conf index 68ac5dadb..68ac5dadb 100644 --- a/roles/openshift_node/templates/tuned/openshift/tuned.conf +++ b/roles/tuned/templates/openshift/tuned.conf diff --git a/roles/openshift_node/templates/tuned/recommend.conf b/roles/tuned/templates/recommend.conf index 5fa765798..086e5673d 100644 --- a/roles/openshift_node/templates/tuned/recommend.conf +++ b/roles/tuned/templates/recommend.conf @@ -1,8 +1,11 @@ -[openshift-node] -/etc/origin/node/node-config.yaml=.*region=primary - [openshift-control-plane,master] /etc/origin/master/master-config.yaml=.* [openshift-control-plane,node] /etc/origin/node/node-config.yaml=.*region=infra + +[openshift-control-plane,lb] +/etc/haproxy/haproxy.cfg=.* + +[openshift-node] +/etc/origin/node/node-config.yaml=.* |