 openshift-ansible.spec                                           |   2
 playbooks/common/openshift-master/config.yml                     |  21
 roles/openshift_loadbalancer/defaults/main.yml                   |   2
 roles/openshift_loadbalancer/tasks/main.yml                      |  25
 roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 |  17
 roles/openshift_logging/README.md                                |   3
 roles/openshift_logging/files/generate-jks.sh                    |  12
 roles/openshift_logging/tasks/generate_certs.yaml                |  78
 roles/openshift_logging/tasks/generate_jks.yaml                  | 111
 roles/openshift_logging/tasks/install_logging.yaml               |  21
 roles/openshift_logging/tasks/main.yaml                          |   1
 roles/openshift_metrics/README.md                                |   4
 roles/openshift_metrics/tasks/install_metrics.yaml               |  16
 roles/openshift_metrics/tasks/install_support.yaml               |  18
 roles/openshift_node/meta/main.yml                               |   4
 15 files changed, 219 insertions(+), 116 deletions(-)
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index a2940e001..0b7c44660 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -18,6 +18,8 @@ Requires: python2
 Requires: python-six
 Requires: tar
 Requires: openshift-ansible-docs = %{version}-%{release}
+Requires: java-1.8.0-openjdk-headless
+Requires: httpd-tools
 
 %description
 Openshift and Atomic Enterprise Ansible
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index de36fd263..7a334e771 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -75,17 +75,6 @@
         ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
         master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
 
-- name: Create temp directory for syncing certs
-  hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-  tasks:
-  - name: Create local temp directory for syncing certs
-    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
-    register: g_master_mktemp
-    changed_when: False
-
 - name: Determine if session secrets must be generated
   hosts: oo_first_master
   roles:
@@ -117,7 +106,6 @@
   hosts: oo_masters_to_config
   any_errors_fatal: true
   vars:
-    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
    openshift_master_ha: "{{ openshift.master.ha }}"
    openshift_master_count: "{{ openshift.master.master_count }}"
    openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
@@ -144,12 +132,3 @@
   - name: Create group for deployment type
     group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
     changed_when: False
-
-- name: Delete temporary directory on localhost
-  hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-  tasks:
-  - file: name={{ g_master_mktemp.stdout }} state=absent
-    changed_when: False
diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml
index d096019af..6190383b6 100644
--- a/roles/openshift_loadbalancer/defaults/main.yml
+++ b/roles/openshift_loadbalancer/defaults/main.yml
@@ -2,7 +2,7 @@
 haproxy_frontends:
 - name: main
   binds:
-  - "*:8443"
+  - "*:{{ openshift_master_api_port | default(8443) }}"
   default_backend: default
 
 haproxy_backends:
diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
index 400f80715..e9bc8b4ab 100644
--- a/roles/openshift_loadbalancer/tasks/main.yml
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -1,14 +1,31 @@
 ---
-- fail: msg="Cannot use containerized=true for load balancer hosts."
-  when: openshift.common.is_containerized | bool
-
 - name: Install haproxy
   package: name=haproxy state=present
+  when: not openshift.common.is_containerized | bool
+
+- name: Pull haproxy image
+  command: >
+    docker pull {{ openshift.common.router_image }}:{{ openshift_image_tag }}
+  when: openshift.common.is_containerized | bool
+
+- name: Create config directory for haproxy
+  file:
+    path: /etc/haproxy
+    state: directory
+  when: openshift.common.is_containerized | bool
+
+- name: Create the systemd unit files
+  template:
+    src: "haproxy.docker.service.j2"
+    dest: "{{ containerized_svc_dir }}/haproxy.service"
+  when: openshift.common.is_containerized | bool
+  notify: restart haproxy
 
 - name: Configure systemd service directory for haproxy
   file:
     path: /etc/systemd/system/haproxy.service.d
     state: directory
+  when: not openshift.common.is_containerized | bool
 
 # Work around ini_file create option in 2.2 which defaults to no
 - name: Create limits.conf file
@@ -19,6 +36,7 @@
     owner: root
     group: root
   changed_when: false
+  when: not openshift.common.is_containerized | bool
 
 - name: Configure the nofile limits for haproxy
   ini_file:
@@ -27,6 +45,7 @@
     option: LimitNOFILE
     value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}"
   notify: restart haproxy
+  when: not openshift.common.is_containerized | bool
 
 - name: Configure haproxy
   template:
diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
new file mode 100644
index 000000000..624876ab0
--- /dev/null
+++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
@@ -0,0 +1,17 @@
+[Unit]
+After=docker.service
+Requires=docker.service
+PartOf=docker.service
+
+[Service]
+ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer
+ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer -p {{ openshift_master_api_port | default(8443) }}:{{ openshift_master_api_port | default(8443) }} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint="haproxy -f /etc/haproxy/haproxy.cfg" {{ openshift.common.router_image }}:{{ openshift_image_tag }}
+ExecStartPost=/usr/bin/sleep 10
+ExecStop=/usr/bin/docker stop openshift_loadbalancer
+LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }}
+LimitCORE=infinity
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=docker.service
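Both the frontend bind in the role defaults and the port mapping in the containerized unit above now derive from the same variable, so the load balancer follows a non-default API port with a single inventory setting. A minimal sketch of the relevant variables, written as group_vars for the load balancer host; the values shown are just the defaults the templates fall back to:

    # group_vars sketch for the lb host -- both values are optional overrides
    openshift_master_api_port: 8443
    openshift_loadbalancer_limit_nofile: 100000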
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 2cc2c48ee..9b71dc676 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -6,6 +6,9 @@ This role is used for installing the Aggregated Logging stack. It should be run against
 a single host, it will create any missing certificates and API objects that the current
 [logging deployer](https://github.com/openshift/origin-aggregated-logging/tree/master/deployer) does.
 
+This role requires that the control host it is run on has Java installed as part of keystore
+generation for Elasticsearch (it uses JKS) as well as openssl to sign certificates.
+
 As part of the installation, it is recommended that you add the Fluentd node
 selector label to the list of persisted
 [node labels](https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-node-host-labels).
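Nothing in the logging role enforces these control-host prerequisites yet, but a pre-flight check in the spirit of the one the metrics role gains later in this commit could look roughly like the sketch below. The task names are invented for illustration; `keytool` and `openssl` are the binaries the role's certificate generation actually shells out to.

    - name: Check control node for logging certificate prerequisites
      local_action: command which {{ item }}
      register: logging_prereq_check
      failed_when: no
      changed_when: no
      with_items:
      - keytool
      - openssl

    - fail: msg="'{{ item.item }}' is unavailable on the control node"
      when: item.rc != 0
      with_items: "{{ logging_prereq_check.results }}"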
diff --git a/roles/openshift_logging/files/generate-jks.sh b/roles/openshift_logging/files/generate-jks.sh
index 995ec0b98..9fe557f83 100644
--- a/roles/openshift_logging/files/generate-jks.sh
+++ b/roles/openshift_logging/files/generate-jks.sh
@@ -1,6 +1,10 @@
 #! /bin/sh
 set -ex
 
+function usage() {
+  echo Usage: `basename $0` cert_directory [logging_namespace] 1>&2
+}
+
 function generate_JKS_chain() {
   dir=${SCRATCH_DIR:-_output}
   ADD_OID=$1
@@ -147,8 +151,14 @@ function createTruststore() {
         -noprompt -alias sig-ca
 }
 
-dir="$CERT_DIR"
+if [ $# -lt 1 ]; then
+  usage
+  exit 1
+fi
+
+dir=$1
 SCRATCH_DIR=$dir
+PROJECT=${2:-logging}
 
 if [[ ! -f $dir/system.admin.jks || -z "$(keytool -list -keystore $dir/system.admin.jks -storepass kspass | grep sig-ca)" ]]; then
   generate_JKS_client_cert "system.admin"
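The script no longer relies on an exported `CERT_DIR`; it now takes the certificate directory as its first argument and an optional namespace (defaulting to `logging`) as its second, matching the `Usage: generate-jks.sh cert_directory [logging_namespace]` message above. An illustrative invocation, assuming a temp directory of the kind the role creates and passes in:

    sh generate-jks.sh /tmp/openshift-logging-ansible-XXXXXX logging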
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index e16071e46..20e50482e 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -85,82 +85,8 @@
   loop_control:
     loop_var: node_name
 
-- name: Check for jks-generator service account
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get serviceaccount/jks-generator --no-headers -n {{openshift_logging_namespace}}
-  register: serviceaccount_result
-  ignore_errors: yes
-  when: not ansible_check_mode
-  changed_when: no
-
-- name: Create jks-generator service account
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create serviceaccount jks-generator -n {{openshift_logging_namespace}}
-  when: not ansible_check_mode and "not found" in serviceaccount_result.stderr
-
-- name: Check for hostmount-anyuid scc entry
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc hostmount-anyuid -o jsonpath='{.users}'
-  register: scc_result
-  when: not ansible_check_mode
-  changed_when: no
-
-- name: Add to hostmount-anyuid scc
-  command: >
-    {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user hostmount-anyuid -z jks-generator -n {{openshift_logging_namespace}}
-  when:
-    - not ansible_check_mode
-    - scc_result.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:jks-generator") == -1
-
-- name: Copy JKS generation script
-  copy:
-    src: generate-jks.sh
-    dest: "{{generated_certs_dir}}/generate-jks.sh"
-  check_mode: no
-
-- name: Generate JKS pod template
-  template:
-    src: jks_pod.j2
-    dest: "{{mktemp.stdout}}/jks_pod.yaml"
-  check_mode: no
-  changed_when: no
-
-# check if pod generated files exist -- if they all do don't run the pod
-- name: Checking for elasticsearch.jks
-  stat: path="{{generated_certs_dir}}/elasticsearch.jks"
-  register: elasticsearch_jks
-  check_mode: no
-
-- name: Checking for logging-es.jks
-  stat: path="{{generated_certs_dir}}/logging-es.jks"
-  register: logging_es_jks
-  check_mode: no
-
-- name: Checking for system.admin.jks
-  stat: path="{{generated_certs_dir}}/system.admin.jks"
-  register: system_admin_jks
-  check_mode: no
-
-- name: Checking for truststore.jks
-  stat: path="{{generated_certs_dir}}/truststore.jks"
-  register: truststore_jks
-  check_mode: no
-
-- name: create JKS generation pod
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_pod.yaml -n {{openshift_logging_namespace}} -o name
-  register: podoutput
-  check_mode: no
-  when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
-
-- command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{podoutput.stdout}} -o jsonpath='{.status.phase}' -n {{openshift_logging_namespace}}
-  register: result
-  until: result.stdout.find("Succeeded") != -1
-  retries: 5
-  delay: 10
-  changed_when: no
-  when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
+- name: Creating necessary JKS certs
+  include: generate_jks.yaml
 
 # check for secret/logging-kibana-proxy
 - command: >
diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml
new file mode 100644
index 000000000..adb6c2b2d
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_jks.yaml
@@ -0,0 +1,111 @@
+---
+# check if pod generated files exist -- if they all do don't run the pod
+- name: Checking for elasticsearch.jks
+  stat: path="{{generated_certs_dir}}/elasticsearch.jks"
+  register: elasticsearch_jks
+  check_mode: no
+
+- name: Checking for logging-es.jks
+  stat: path="{{generated_certs_dir}}/logging-es.jks"
+  register: logging_es_jks
+  check_mode: no
+
+- name: Checking for system.admin.jks
+  stat: path="{{generated_certs_dir}}/system.admin.jks"
+  register: system_admin_jks
+  check_mode: no
+
+- name: Checking for truststore.jks
+  stat: path="{{generated_certs_dir}}/truststore.jks"
+  register: truststore_jks
+  check_mode: no
+
+- name: Create temp directory for doing work in
+  local_action: command mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+  register: local_tmp
+  changed_when: False
+  check_mode: no
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+  file:
+    path: "{{local_tmp.stdout}}/elasticsearch.jks"
+    state: touch
+    mode: "u=rw,g=r,o=r"
+  when: elasticsearch_jks.stat.exists
+  changed_when: False
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+  file:
+    path: "{{local_tmp.stdout}}/logging-es.jks"
+    state: touch
+    mode: "u=rw,g=r,o=r"
+  when: logging_es_jks.stat.exists
+  changed_when: False
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+  file:
+    path: "{{local_tmp.stdout}}/system.admin.jks"
+    state: touch
+    mode: "u=rw,g=r,o=r"
+  when: system_admin_jks.stat.exists
+  changed_when: False
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+  file:
+    path: "{{local_tmp.stdout}}/truststore.jks"
+    state: touch
+    mode: "u=rw,g=r,o=r"
+  when: truststore_jks.stat.exists
+  changed_when: False
+
+- name: pulling down signing items from host
+  fetch:
+    src: "{{generated_certs_dir}}/{{item}}"
+    dest: "{{local_tmp.stdout}}/{{item}}"
+    flat: yes
+  with_items:
+    - ca.crt
+    - ca.key
+    - ca.serial.txt
+    - ca.crl.srl
+    - ca.db
+
+- local_action: template src=signing.conf.j2 dest={{local_tmp.stdout}}/signing.conf
+  vars:
+    - top_dir: "{{local_tmp.stdout}}"
+
+- name: Run JKS generation script
+  local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}}
+  check_mode: no
+  become: yes
+  when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+  copy:
+    src: "{{local_tmp.stdout}}/elasticsearch.jks"
+    dest: "{{generated_certs_dir}}/elasticsearch.jks"
+  when: not elasticsearch_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+  copy:
+    src: "{{local_tmp.stdout}}/logging-es.jks"
+    dest: "{{generated_certs_dir}}/logging-es.jks"
+  when: not logging_es_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+  copy:
+    src: "{{local_tmp.stdout}}/system.admin.jks"
+    dest: "{{generated_certs_dir}}/system.admin.jks"
+  when: not system_admin_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+  copy:
+    src: "{{local_tmp.stdout}}/truststore.jks"
+    dest: "{{generated_certs_dir}}/truststore.jks"
+  when: not truststore_jks.stat.exists
+
+- name: Cleaning up temp dir
+  file:
+    path: "{{local_tmp.stdout}}"
+    state: absent
+  changed_when: False
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index af03e9371..a9699adb8 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -23,23 +23,30 @@
   loop_control:
     loop_var: install_component
 
+- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+  register: object_def_files
+  changed_when: no
+
+- slurp: src={{item}}
+  register: object_defs
+  with_items: "{{object_def_files.files | map(attribute='path') | list | sort}}"
+  changed_when: no
+
 - name: Create objects
   include: oc_apply.yaml
   vars:
   - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
   - namespace: "{{ openshift_logging_namespace }}"
-  - file_name: "{{ file }}"
-  - file_content: "{{ lookup('file', file) | from_yaml }}"
-  with_fileglob:
-  - "{{ mktemp.stdout }}/templates/*.yaml"
+  - file_name: "{{ file.source }}"
+  - file_content: "{{ file.content | b64decode | from_yaml }}"
+  with_items: "{{ object_defs.results }}"
   loop_control:
     loop_var: file
   when: not ansible_check_mode
 
 - name: Printing out objects to create
-  debug: msg="{{lookup('file', file)|quote}}"
-  with_fileglob:
-  - "{{mktemp.stdout}}/templates/*.yaml"
+  debug: msg={{file.content | b64decode }}
+  with_items: "{{ object_defs.results }}"
   loop_control:
     loop_var: file
   when: ansible_check_mode
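The object-creation change above swaps `with_fileglob`/`lookup('file', ...)`, which read files on the Ansible control machine, for `find` plus `slurp`, which read the rendered templates from the remote host where they were generated. `slurp` hands the file body back base64-encoded, which is why the `b64decode` filter now appears before `from_yaml`. A minimal standalone sketch of that pattern, with an illustrative path:

    - slurp:
        src: /tmp/example-object.yaml   # illustrative path on the managed host
      register: obj

    - debug:
        msg: "{{ obj.content | b64decode | from_yaml }}"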
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index c4ec1b255..4c718805e 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -3,7 +3,6 @@
     msg: Only one Fluentd nodeselector key pair should be provided
   when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
 
-
 - name: Create temp directory for doing work in
   command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
   register: mktemp
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index 0f287e944..a61b0db5e 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -5,6 +5,10 @@ OpenShift Metrics Installation
 Requirements
 ------------
 
+This role has the following dependencies:
+
+- Java is required on the control node to generate keystores for the Java components
+- httpd-tools is required on the control node to generate various passwords for the metrics components
 
 The following variables need to be set and will be validated:
 
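The spec file change at the top of this commit adds both packages as RPM dependencies, so they normally arrive with openshift-ansible itself; for a control node that is not using the RPM, the equivalent manual step is a sketch like the following (package names taken from the new `Requires:` lines):

    - name: Install keystore and password tooling on the control node
      package: name={{ item }} state=present
      with_items:
      - java-1.8.0-openjdk-headless
      - httpd-tools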
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index bab37dbfb..ddaa54438 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -20,15 +20,23 @@
   loop_control:
     loop_var: include_file
 
+- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+  register: object_def_files
+  changed_when: no
+
+- slurp: src={{item.path}}
+  register: object_defs
+  with_items: "{{object_def_files.files}}"
+  changed_when: no
+
 - name: Create objects
   include: oc_apply.yaml
   vars:
     kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
     namespace: "{{ openshift_metrics_project }}"
-    file_name: "{{ item }}"
-    file_content: "{{ lookup('file',item) | from_yaml }}"
-  with_fileglob:
-  - "{{ mktemp.stdout }}/templates/*.yaml"
+    file_name: "{{ item.source }}"
+    file_content: "{{ item.content | b64decode | from_yaml }}"
+  with_items: "{{ object_defs.results }}"
 
 - name: Scaling up cluster
   include: start_metrics.yaml
diff --git a/roles/openshift_metrics/tasks/install_support.yaml b/roles/openshift_metrics/tasks/install_support.yaml
index b0e4bec80..cc5acc6e5 100644
--- a/roles/openshift_metrics/tasks/install_support.yaml
+++ b/roles/openshift_metrics/tasks/install_support.yaml
@@ -1,4 +1,22 @@
 ---
+- name: Check control node to see if htpasswd is installed
+  local_action: command which htpasswd
+  register: htpasswd_check
+  failed_when: no
+  changed_when: no
+
+- fail: msg="'htpasswd' is unavailable. Please install httpd-tools on the control node"
+  when: htpasswd_check.rc == 1
+
+- name: Check control node to see if keytool is installed
+  local_action: command which keytool
+  register: keytool_check
+  failed_when: no
+  changed_when: no
+
+- fail: msg="'keytool' is unavailable. Please install java-1.8.0-openjdk-headless on the control node"
+  when: keytool_check.rc == 1
+
 - include: generate_certificates.yaml
 - include: generate_serviceaccounts.yaml
 - include: generate_services.yaml
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 91f118191..10036abed 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -17,8 +17,6 @@ dependencies:
 - role: openshift_docker
 - role: openshift_node_certificates
 - role: openshift_cloud_provider
-- role: openshift_node_dnsmasq
-  when: openshift.common.use_dnsmasq | bool
 - role: os_firewall
   os_firewall_allow:
   - service: Kubernetes kubelet
@@ -43,3 +41,5 @@ dependencies:
   - service: Kubernetes service NodePort UDP
     port: "{{ openshift_node_port_range | default('') }}/udp"
     when: openshift_node_port_range is defined
+- role: openshift_node_dnsmasq
+  when: openshift.common.use_dnsmasq | bool