author    Scott Dodson <sdodson@redhat.com>    2017-01-17 22:37:11 -0500
committer GitHub <noreply@github.com>          2017-01-17 22:37:11 -0500
commit    7b512bf5fc36ee9ad2df65d8e129aa52c939d98e (patch)
tree      0d31786e7a29af19b2bfbabbe49b2ed72c862a54 /roles/openshift_logging/files
parent    a2d9da8c95511c968a6b7d4c1247f017df14a5ce (diff)
parent    598b2652ac9bfe94622cbe6324d4f121bf996c70 (diff)
Merge pull request #2640 from ewolinetz/logging_deployer_tasks
Logging deployer tasks
Diffstat (limited to 'roles/openshift_logging/files')
-rw-r--r--  roles/openshift_logging/files/curator.yml                    18
-rw-r--r--  roles/openshift_logging/files/elasticsearch-logging.yml      72
-rw-r--r--  roles/openshift_logging/files/es_migration.sh                79
-rw-r--r--  roles/openshift_logging/files/fluent.conf                    34
-rw-r--r--  roles/openshift_logging/files/fluentd-throttle-config.yaml    7
-rw-r--r--  roles/openshift_logging/files/generate-jks.sh               168
-rw-r--r--  roles/openshift_logging/files/logging-deployer-sa.yaml        6
-rw-r--r--  roles/openshift_logging/files/secure-forward.conf            24
-rw-r--r--  roles/openshift_logging/files/server-tls.json                 5
9 files changed, 413 insertions, 0 deletions
diff --git a/roles/openshift_logging/files/curator.yml b/roles/openshift_logging/files/curator.yml
new file mode 100644
index 000000000..8d62d8e7d
--- /dev/null
+++ b/roles/openshift_logging/files/curator.yml
@@ -0,0 +1,18 @@
+# Logging example curator config file
+
+# uncomment and use this to override the defaults from env vars
+#.defaults:
+#  delete:
+#    days: 30
+#  runhour: 0
+#  runminute: 0
+
+# to keep ops logs for a different duration:
+#.operations:
+#  delete:
+#    weeks: 8
+
+# example for a normal project
+#myapp:
+#  delete:
+#    weeks: 1
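For reference, curator reads this file as a YAML map from index-name prefixes (.defaults, .operations, or a project name) to retention settings. A minimal sketch of an active config, written as a shell heredoc — the project name myapp-devel, the output path, and the values are invented for illustration, not part of this role:

    # hypothetical: an uncommented curator config built from the template above
    cat <<'EOF' > /tmp/curator-config.yaml
    .defaults:
      delete:
        days: 30
      runhour: 0
      runminute: 0
    .operations:
      delete:
        weeks: 8
    myapp-devel:
      delete:
        days: 7
    EOF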
diff --git a/roles/openshift_logging/files/elasticsearch-logging.yml b/roles/openshift_logging/files/elasticsearch-logging.yml
new file mode 100644
index 000000000..377abe21f
--- /dev/null
+++ b/roles/openshift_logging/files/elasticsearch-logging.yml
@@ -0,0 +1,72 @@
+# you can override this by setting a system property, for example -Des.logger.level=DEBUG
+es.logger.level: INFO
+rootLogger: ${es.logger.level}, console, file
+logger:
+  # log action execution errors for easier debugging
+  action: WARN
+  # reduce the logging for aws, too much is logged under the default INFO
+  com.amazonaws: WARN
+  io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL}
+  io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL}
+
+  # gateway
+  #gateway: DEBUG
+  #index.gateway: DEBUG
+
+  # peer shard recovery
+  #indices.recovery: DEBUG
+
+  # discovery
+  #discovery: TRACE
+
+  index.search.slowlog: TRACE, index_search_slow_log_file
+  index.indexing.slowlog: TRACE, index_indexing_slow_log_file
+
+  # search-guard
+  com.floragunn.searchguard: WARN
+
+additivity:
+  index.search.slowlog: false
+  index.indexing.slowlog: false
+
+appender:
+  console:
+    type: console
+    layout:
+      type: consolePattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  file:
+    type: dailyRollingFile
+    file: ${path.logs}/${cluster.name}.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
+  # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
+  #file:
+    #type: extrasRollingFile
+    #file: ${path.logs}/${cluster.name}.log
+    #rollingPolicy: timeBased
+    #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
+    #layout:
+      #type: pattern
+      #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  index_search_slow_log_file:
+    type: dailyRollingFile
+    file: ${path.logs}/${cluster.name}_index_search_slowlog.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  index_indexing_slow_log_file:
+    type: dailyRollingFile
+    file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
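As the file's first comment notes, any of these levels can be overridden at startup with an es.logger.* system property. A sketch of such an override — the exact launch wrapper used inside the Elasticsearch image is an assumption here:

    # hypothetical: raise the root log level to DEBUG for a single run
    ./bin/elasticsearch -Des.logger.level=DEBUG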
diff --git a/roles/openshift_logging/files/es_migration.sh b/roles/openshift_logging/files/es_migration.sh
new file mode 100644
index 000000000..339b5a1b2
--- /dev/null
+++ b/roles/openshift_logging/files/es_migration.sh
@@ -0,0 +1,79 @@
+CA=${1:-/etc/openshift/logging/ca.crt}
+KEY=${2:-/etc/openshift/logging/system.admin.key}
+CERT=${3:-/etc/openshift/logging/system.admin.crt}
+openshift_logging_es_host=${4:-logging-es}
+openshift_logging_es_port=${5:-9200}
+namespace=${6:-logging}
+
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# skip indices that contain a uuid
+# get a list of unique project names
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+function get_list_of_indices() {
+ curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
+ awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
+ '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
+ sort -u
+}
+
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# get a list of unique project.uuid
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+function get_list_of_proj_uuid_indices() {
+ curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
+ awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
+ '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
+ sort -u
+}
+
+if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then
+ echo "No Elasticsearch pods found running. Cannot update common data model."
+ exit 1
+fi
+
+count=$(get_list_of_indices | wc -l)
+if [ $count -eq 0 ]; then
+ echo No matching indices found - skipping update_for_uuid
+else
+ echo Creating aliases for $count index patterns . . .
+ {
+ echo '{"actions":['
+ get_list_of_indices | \
+ while IFS=. read proj ; do
+ # e.g. make test.uuid.* an alias of test.* so we can search for
+ # /test.uuid.*/_search and get both the test.uuid.* and
+ # the test.* indices
+ uid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null)
+ [ -n "$uid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uuid.*\"}}"
+ done
+ echo ']}'
+ } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
+fi
+
+count=$(get_list_of_proj_uuid_indices | wc -l)
+if [ $count -eq 0 ] ; then
+ echo No matching indices found - skipping update_for_common_data_model
+ exit 0
+fi
+
+echo Creating aliases for $count index patterns . . .
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# get a list of unique project.uuid
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+{
+ echo '{"actions":['
+ get_list_of_proj_uuid_indices | \
+ while IFS=. read proj uuid ; do
+ # e.g. make project.test.uuid.* an alias of test.uuid.* so we can search for
+ # /project.test.uuid.*/_search and get both the test.uuid.* and
+ # the project.test.uuid.* indices
+ echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}"
+ done
+ echo ']}'
+} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
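All six positional arguments are optional and fall back to the defaults at the top of the script. A hedged usage sketch — the paths and names below simply restate those defaults, except the ops cluster hostname, which is an example:

    # hypothetical: run the migration against the ops ES service in the logging namespace
    bash es_migration.sh \
      /etc/openshift/logging/ca.crt \
      /etc/openshift/logging/system.admin.key \
      /etc/openshift/logging/system.admin.crt \
      logging-es-ops 9200 logging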
diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging/files/fluent.conf
new file mode 100644
index 000000000..aa843e983
--- /dev/null
+++ b/roles/openshift_logging/files/fluent.conf
@@ -0,0 +1,34 @@
+# This file is the fluentd configuration entrypoint. Edit with care.
+
+@include configs.d/openshift/system.conf
+
+# In each section below, pre- and post- includes don't include anything initially;
+# they exist to enable future additions to openshift conf as needed.
+
+## sources
+## ordered so that syslog always runs last...
+@include configs.d/openshift/input-pre-*.conf
+@include configs.d/dynamic/input-docker-*.conf
+@include configs.d/dynamic/input-syslog-*.conf
+@include configs.d/openshift/input-post-*.conf
+##
+
+<label @INGRESS>
+## filters
+ @include configs.d/openshift/filter-pre-*.conf
+ @include configs.d/openshift/filter-retag-journal.conf
+ @include configs.d/openshift/filter-k8s-meta.conf
+ @include configs.d/openshift/filter-kibana-transform.conf
+ @include configs.d/openshift/filter-k8s-flatten-hash.conf
+ @include configs.d/openshift/filter-k8s-record-transform.conf
+ @include configs.d/openshift/filter-syslog-record-transform.conf
+ @include configs.d/openshift/filter-post-*.conf
+##
+
+## matches
+ @include configs.d/openshift/output-pre-*.conf
+ @include configs.d/openshift/output-operations.conf
+ @include configs.d/openshift/output-applications.conf
+ # no post - applications.conf matches everything left
+##
+</label>
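The pre-/post- include globs are the extension points: any file dropped into configs.d/openshift/ that matches them is loaded in order. A sketch of a custom filter added via the filter-pre-* glob — the filename and grep pattern are invented for illustration:

    # hypothetical: keep only warn/error records ahead of the stock filters
    cat <<'EOF' > configs.d/openshift/filter-pre-level.conf
    <filter **>
      @type grep
      regexp1 level (warn|error)
    </filter>
    EOF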
diff --git a/roles/openshift_logging/files/fluentd-throttle-config.yaml b/roles/openshift_logging/files/fluentd-throttle-config.yaml
new file mode 100644
index 000000000..375621ff1
--- /dev/null
+++ b/roles/openshift_logging/files/fluentd-throttle-config.yaml
@@ -0,0 +1,7 @@
+# Logging example fluentd throttling config file
+
+#example-project:
+#  read_lines_limit: 10
+#
+#.operations:
+#  read_lines_limit: 100
diff --git a/roles/openshift_logging/files/generate-jks.sh b/roles/openshift_logging/files/generate-jks.sh
new file mode 100644
index 000000000..995ec0b98
--- /dev/null
+++ b/roles/openshift_logging/files/generate-jks.sh
@@ -0,0 +1,168 @@
+#! /bin/sh
+set -ex
+
+function generate_JKS_chain() {
+ dir=${SCRATCH_DIR:-_output}
+ ADD_OID=$1
+ NODE_NAME=$2
+ CERT_NAMES=${3:-$NODE_NAME}
+ ks_pass=${KS_PASS:-kspass}
+ ts_pass=${TS_PASS:-tspass}
+ rm -rf $NODE_NAME
+
+ extension_names=""
+ for name in ${CERT_NAMES//,/ }; do
+ extension_names="${extension_names},dns:${name}"
+ done
+
+ if [ "$ADD_OID" = true ]; then
+ extension_names="${extension_names},oid:1.2.3.4.5.5"
+ fi
+
+ echo Generating keystore and certificate for node $NODE_NAME
+
+ keytool -genkey \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -keypass $ks_pass \
+ -storepass $ks_pass \
+ -keyalg RSA \
+ -keysize 2048 \
+ -validity 712 \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \
+ -ext san=dns:localhost,ip:127.0.0.1"${extension_names}"
+
+ echo Generating certificate signing request for node $NODE_NAME
+
+ keytool -certreq \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -file $dir/$NODE_NAME.csr \
+ -keyalg rsa \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \
+ -ext san=dns:localhost,ip:127.0.0.1"${extension_names}"
+
+ echo Signing certificate request with CA
+
+ openssl ca \
+ -in $dir/$NODE_NAME.csr \
+ -notext \
+ -out $dir/$NODE_NAME.crt \
+ -config $dir/signing.conf \
+ -extensions v3_req \
+ -batch \
+ -extensions server_ext
+
+ echo "Import back to keystore (including CA chain)"
+
+ keytool \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt -alias sig-ca
+
+ keytool \
+ -import \
+ -file $dir/$NODE_NAME.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt \
+ -alias $NODE_NAME
+
+ echo All done for $NODE_NAME
+}
+
+function generate_JKS_client_cert() {
+ NODE_NAME="$1"
+ ks_pass=${KS_PASS:-kspass}
+ ts_pass=${TS_PASS:-tspass}
+ dir=${SCRATCH_DIR:-_output} # for writing files to bundle into secrets
+
+ echo Generating keystore and certificate for node ${NODE_NAME}
+
+ keytool -genkey \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -keyalg RSA \
+ -keysize 2048 \
+ -validity 712 \
+ -keypass $ks_pass \
+ -storepass $ks_pass \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging"
+
+ echo Generating certificate signing request for node $NODE_NAME
+
+ keytool -certreq \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -file $dir/$NODE_NAME.jks.csr \
+ -keyalg rsa \
+ -keypass $ks_pass \
+ -storepass $ks_pass \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging"
+
+ echo Signing certificate request with CA
+ openssl ca \
+ -in "$dir/$NODE_NAME.jks.csr" \
+ -notext \
+ -out "$dir/$NODE_NAME.jks.crt" \
+ -config $dir/signing.conf \
+ -extensions v3_req \
+ -batch \
+ -extensions server_ext
+
+ echo "Import back to keystore (including CA chain)"
+
+ keytool \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt -alias sig-ca
+
+ keytool \
+ -import \
+ -file $dir/$NODE_NAME.jks.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt \
+ -alias $NODE_NAME
+
+ echo All done for $NODE_NAME
+}
+
+function join { local IFS="$1"; shift; echo "$*"; }
+
+function createTruststore() {
+
+ echo "Import CA to truststore for validating client certs"
+
+ keytool \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/truststore.jks \
+ -storepass $ts_pass \
+ -noprompt -alias sig-ca
+}
+
+dir="$CERT_DIR"
+SCRATCH_DIR=$dir
+
+if [[ ! -f $dir/system.admin.jks || -z "$(keytool -list -keystore $dir/system.admin.jks -storepass kspass | grep sig-ca)" ]]; then
+ generate_JKS_client_cert "system.admin"
+fi
+
+if [[ ! -f $dir/elasticsearch.jks || -z "$(keytool -list -keystore $dir/elasticsearch.jks -storepass kspass | grep sig-ca)" ]]; then
+ generate_JKS_chain true elasticsearch "$(join , logging-es{,-ops})"
+fi
+
+if [[ ! -f $dir/logging-es.jks || -z "$(keytool -list -keystore $dir/logging-es.jks -storepass kspass | grep sig-ca)" ]]; then
+ generate_JKS_chain false logging-es "$(join , logging-es{,-ops}{,-cluster}{,.${PROJECT}.svc.cluster.local})"
+fi
+
+[ ! -f $dir/truststore.jks ] && createTruststore
+
+# necessary so that the job knows it completed successfully
+exit 0
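The script takes no arguments; it reads everything from the environment and the scratch directory. CERT_DIR must already contain the signing CA (ca.crt) and an openssl signing.conf, and PROJECT feeds the SAN list for the logging-es keystore. A usage sketch — the paths are examples only:

    # hypothetical invocation from the certificate-generation tasks
    CERT_DIR=/etc/origin/logging PROJECT=logging sh generate-jks.sh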
diff --git a/roles/openshift_logging/files/logging-deployer-sa.yaml b/roles/openshift_logging/files/logging-deployer-sa.yaml
new file mode 100644
index 000000000..334c9402b
--- /dev/null
+++ b/roles/openshift_logging/files/logging-deployer-sa.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: logging-deployer
+secrets:
+- name: logging-deployer
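This service account is created like any other manifest; the secrets list pre-links it to a logging-deployer secret. A minimal sketch, assuming the stack lives in the logging namespace used elsewhere in this role:

    # hypothetical: create the deployer service account
    oc create -f logging-deployer-sa.yaml -n logging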
diff --git a/roles/openshift_logging/files/secure-forward.conf b/roles/openshift_logging/files/secure-forward.conf
new file mode 100644
index 000000000..f4483df79
--- /dev/null
+++ b/roles/openshift_logging/files/secure-forward.conf
@@ -0,0 +1,24 @@
+# @type secure_forward
+
+# self_hostname ${HOSTNAME}
+# shared_key <SECRET_STRING>
+
+# secure yes
+# enable_strict_verification yes
+
+# ca_cert_path /etc/fluent/keys/your_ca_cert
+# ca_private_key_path /etc/fluent/keys/your_private_key
+ # for private CA secret key
+# ca_private_key_passphrase passphrase
+
+# <server>
+ # or IP
+# host server.fqdn.example.com
+# port 24284
+# </server>
+# <server>
+ # ip address to connect
+# host 203.0.113.8
+ # specify hostlabel for FQDN verification if an IP address is used for host
+# hostlabel server.fqdn.example.com
+# </server>
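Uncommented, the template becomes a working @type secure_forward block. A sketch with the placeholders filled in — the shared key and server host are invented values:

    # hypothetical: an active secure-forward config built from the template above
    cat <<'EOF' > secure-forward.conf
    @type secure_forward
    self_hostname ${HOSTNAME}
    shared_key example_shared_key
    secure yes
    enable_strict_verification yes
    ca_cert_path /etc/fluent/keys/your_ca_cert
    <server>
      host server.fqdn.example.com
      port 24284
    </server>
    EOF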
diff --git a/roles/openshift_logging/files/server-tls.json b/roles/openshift_logging/files/server-tls.json
new file mode 100644
index 000000000..86deb23e3
--- /dev/null
+++ b/roles/openshift_logging/files/server-tls.json
@@ -0,0 +1,5 @@
+// See https://nodejs.org/api/tls.html#tls_tls_createserver_options_secureconnectionlistener for available options
+tls_options = {
+ ciphers: 'kEECDH:+kEECDH+SHA:kEDH:+kEDH+SHA:+kEDH+CAMELLIA:kECDH:+kECDH+SHA:kRSA:+kRSA+SHA:+kRSA+CAMELLIA:!aNULL:!eNULL:!SSLv2:!RC4:!DES:!EXP:!SEED:!IDEA:+3DES',
+ honorCipherOrder: true
+}