author     Matt Bruzek <mbruzek@gmail.com>    2018-01-18 15:27:13 -0600
committer  Matt Bruzek <mbruzek@gmail.com>    2018-01-18 15:27:13 -0600
commit     cb581bfb67a53f887c4705d45fc7b0024a6816f9 (patch)
tree       9c351ddd9282f5d3d37c1189af0ac2ad444c0125 /roles/openshift_logging
parent     c7a1c448cbd64de98e1f097d14b58ee9f6ccf511 (diff)
parent     1a2a895356df638756d2117e3d324710167737db (diff)
Merge branch 'master' into mbruzek-openshift-openstack
Diffstat (limited to 'roles/openshift_logging')
16 files changed, 313 insertions, 43 deletions
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 27cfc17d6..a192bd67e 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -177,6 +177,9 @@ Elasticsearch OPS too, if using an OPS cluster:
   clients will use to connect to mux, and will be used in the TLS server cert
   subject.
 - `openshift_logging_mux_port`: 24284
+- `openshift_logging_mux_external_address`: The IP address that mux will listen
+  on for connections from *external* clients. Default is the default ipv4
+  interface as reported by the `ansible_default_ipv4` fact.
 - `openshift_logging_mux_cpu_request`: 100m
 - `openshift_logging_mux_memory_limit`: 512Mi
 - `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index e1a5ea726..247c7e4df 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -79,14 +79,6 @@ def entry_from_named_pair(register_pairs, key):
     raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key))
 
 
-def map_from_pairs(source, delim="="):
-    ''' Returns a dict given the source and delim delimited '''
-    if source == '':
-        return dict()
-
-    return dict(item.split(delim) for item in source.split(","))
-
-
 def serviceaccount_name(qualified_sa):
     ''' Returns the simple name from a fully qualified name '''
     return qualified_sa.split(":")[-1]
@@ -102,6 +94,28 @@ def serviceaccount_namespace(qualified_sa, default=None):
     return seg[-1]
 
 
+def flatten_dict(data, parent_key=None):
+    """ This filter plugin will flatten a dict and its sublists into a single dict
+    """
+    if not isinstance(data, dict):
+        raise RuntimeError("flatten_dict failed, expects to flatten a dict")
+
+    merged = dict()
+
+    for key in data:
+        if parent_key is not None:
+            insert_key = '.'.join((parent_key, key))
+        else:
+            insert_key = key
+
+        if isinstance(data[key], dict):
+            merged.update(flatten_dict(data[key], insert_key))
+        else:
+            merged[insert_key] = data[key]
+
+    return merged
+
+
 # pylint: disable=too-few-public-methods
 class FilterModule(object):
     ''' OpenShift Logging Filters '''
@@ -112,10 +126,10 @@ class FilterModule(object):
         return {
             'random_word': random_word,
             'entry_from_named_pair': entry_from_named_pair,
-            'map_from_pairs': map_from_pairs,
             'min_cpu': min_cpu,
             'es_storage': es_storage,
             'serviceaccount_name': serviceaccount_name,
             'serviceaccount_namespace': serviceaccount_namespace,
-            'walk': walk
+            'walk': walk,
+            "flatten_dict": flatten_dict
         }
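The new `flatten_dict` filter collapses a nested dict into dotted keys, which is what lets `elasticsearch.yml` settings pulled from a configmap be addressed as facts like `index.number_of_shards`. A quick standalone illustration of its behavior; the sample configuration values below are invented:

    # Same logic as the filter added above; the input dict is illustrative only.
    def flatten_dict(data, parent_key=None):
        if not isinstance(data, dict):
            raise RuntimeError("flatten_dict failed, expects to flatten a dict")
        merged = dict()
        for key in data:
            insert_key = '.'.join((parent_key, key)) if parent_key is not None else key
            if isinstance(data[key], dict):
                merged.update(flatten_dict(data[key], insert_key))
            else:
                merged[insert_key] = data[key]
        return merged

    es_config = {'index': {'number_of_shards': 1, 'number_of_replicas': 0},
                 'cluster': {'name': 'logging-es'}}
    print(flatten_dict(es_config))
    # {'index.number_of_shards': 1, 'index.number_of_replicas': 0, 'cluster.name': 'logging-es'}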
diff --git a/roles/openshift_logging/library/logging_patch.py b/roles/openshift_logging/library/logging_patch.py
new file mode 100644
index 000000000..d2c0bc456
--- /dev/null
+++ b/roles/openshift_logging/library/logging_patch.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+
+""" Ansible module to help with creating context patch file with whitelisting for logging """
+
+import difflib
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+DOCUMENTATION = '''
+---
+module: logging_patch
+
+short_description: This will create a context patch file while giving ability
+                   to whitelist some lines (excluding them from comparison)
+
+description:
+    - "To create configmap patches for logging"
+
+author:
+    - Eric Wolinetz ewolinet@redhat.com
+'''
+
+
+EXAMPLES = '''
+- logging_patch:
+    original_file: "{{ tempdir }}/current.yml"
+    new_file: "{{ configmap_new_file }}"
+    whitelist: "{{ configmap_protected_lines | default([]) }}"
+
+'''
+
+
+def account_for_whitelist(file_contents, white_list=None):
+    """ This method will remove lines that contain whitelist values from the content
+        of the file so that we aren't build a patch based on that line
+
+        Usage:
+
+        for file_contents:
+
+        index:
+          number_of_shards: {{ es_number_of_shards | default ('1') }}
+          number_of_replicas: {{ es_number_of_replicas | default ('0') }}
+          unassigned.node_left.delayed_timeout: 2m
+          translog:
+            flush_threshold_size: 256mb
+            flush_threshold_period: 5m
+
+
+        and white_list:
+
+        ['number_of_shards', 'number_of_replicas']
+
+
+        We would end up with:
+
+        index:
+          unassigned.node_left.delayed_timeout: 2m
+          translog:
+            flush_threshold_size: 256mb
+            flush_threshold_period: 5m
+
+    """
+
+    for line in white_list:
+        file_contents = re.sub(r".*%s:.*\n" % line, "", file_contents)
+
+    return file_contents
+
+
+def run_module():
+    """ The body of the module, we check if the variable name specified as the value
+        for the key is defined. If it is then we use that value as for the original key """
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            original_file=dict(type='str', required=True),
+            new_file=dict(type='str', required=True),
+            whitelist=dict(required=False, type='list', default=[])
+        ),
+        supports_check_mode=True
+    )
+
+    original_fh = open(module.params['original_file'], "r")
+    original_contents = original_fh.read()
+    original_fh.close()
+
+    original_contents = account_for_whitelist(original_contents, module.params['whitelist'])
+
+    new_fh = open(module.params['new_file'], "r")
+    new_contents = new_fh.read()
+    new_fh.close()
+
+    new_contents = account_for_whitelist(new_contents, module.params['whitelist'])
+
+    uni_diff = difflib.unified_diff(new_contents.splitlines(),
+                                    original_contents.splitlines(),
+                                    lineterm='')
+
+    return module.exit_json(changed=False,  # noqa: F405
+                            raw_patch="\n".join(uni_diff))
+
+
+def main():
+    """ main """
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
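Note that the module diffs the files "backwards" (new file first, existing contents second), so the resulting patch, when applied to the role's template, re-applies whatever the user had customized, minus the whitelisted lines. A rough standalone sketch of that behavior with invented file contents:

    import difflib
    import re

    def account_for_whitelist(file_contents, white_list=None):
        # Drop any line that mentions a whitelisted key so it never appears in the patch.
        for line in white_list or []:
            file_contents = re.sub(r".*%s:.*\n" % line, "", file_contents)
        return file_contents

    current = "index:\n  number_of_shards: 3\n  refresh_interval: 30s\n"  # deployed configmap (invented)
    new = "index:\n  number_of_shards: 1\n  refresh_interval: 5s\n"       # role-provided template (invented)
    whitelist = ["number_of_shards"]

    current_filtered = account_for_whitelist(current, whitelist)
    new_filtered = account_for_whitelist(new, whitelist)
    patch = "\n".join(difflib.unified_diff(new_filtered.splitlines(),
                                           current_filtered.splitlines(),
                                           lineterm=''))
    print(patch)  # applied to the new file, this restores 'refresh_interval: 30s'
                  # while number_of_shards stays at the role-provided (whitelisted) value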
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index 98d0d1c4f..37ffb0204 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -204,6 +204,14 @@ class OpenshiftLoggingFacts(OCBaseCommand):
             if comp is not None:
                 self.add_facts_for(comp, "services", name, dict())
 
+    # pylint: disable=too-many-arguments
+    def facts_from_configmap(self, comp, kind, name, config_key, yaml_file=None):
+        '''Extracts facts in logging namespace from configmap'''
+        if yaml_file is not None:
+            config_facts = yaml.load(yaml_file)
+            self.facts[comp][kind][name][config_key] = config_facts
+            self.facts[comp][kind][name]["raw"] = yaml_file
+
     def facts_for_configmaps(self, namespace):
         ''' Gathers facts for configmaps in logging namespace '''
         self.default_keys_for("configmaps")
@@ -214,7 +222,10 @@
             name = item["metadata"]["name"]
             comp = self.comp(name)
             if comp is not None:
-                self.add_facts_for(comp, "configmaps", name, item["data"])
+                self.add_facts_for(comp, "configmaps", name, dict(item["data"]))
+                if comp in ["elasticsearch", "elasticsearch_ops"]:
+                    for config_key in item["data"]:
+                        self.facts_from_configmap(comp, "configmaps", name, config_key, item["data"][config_key])
 
     def facts_for_oauthclients(self, namespace):
         ''' Gathers facts for oauthclients used with logging '''
@@ -265,7 +276,7 @@ class OpenshiftLoggingFacts(OCBaseCommand):
             return
         for item in role["subjects"]:
             comp = self.comp(item["name"])
-            if comp is not None and namespace == item["namespace"]:
+            if comp is not None and namespace == item.get("namespace"):
                 self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict())
 
     # this needs to end up nested under the service account...
@@ -277,7 +288,7 @@ class OpenshiftLoggingFacts(OCBaseCommand):
             return
         for item in role["subjects"]:
            comp = self.comp(item["name"])
-            if comp is not None and namespace == item["namespace"]:
+            if comp is not None and namespace == item.get("namespace"):
                 self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict())
 
     # pylint: disable=no-self-use, too-many-return-statements
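With this change the Elasticsearch configmap data ends up in the facts twice: parsed YAML under each data key and the untouched string under "raw". A minimal sketch of what `facts_from_configmap` does for one data entry; the sample configmap content is invented:

    import yaml

    facts = {"elasticsearch": {"configmaps": {"logging-elasticsearch": {}}}}

    # One data entry of the logging-elasticsearch configmap, invented for illustration.
    configmap_data = {"elasticsearch.yml": "index:\n  number_of_shards: 1\n  number_of_replicas: 0\n"}

    for config_key, yaml_file in configmap_data.items():
        config_facts = yaml.load(yaml_file)  # the module calls yaml.load; yaml.safe_load works equally well here
        entry = facts["elasticsearch"]["configmaps"]["logging-elasticsearch"]
        entry[config_key] = config_facts
        entry["raw"] = yaml_file

    print(facts["elasticsearch"]["configmaps"]["logging-elasticsearch"]["elasticsearch.yml"])
    # {'index': {'number_of_shards': 1, 'number_of_replicas': 0}}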
diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml
index 9c480f73a..01ed4918f 100644
--- a/roles/openshift_logging/meta/main.yaml
+++ b/roles/openshift_logging/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
 dependencies:
 - role: lib_openshift
 - role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
index 59d6098d4..4a2ee64f0 100644
--- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml
+++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
@@ -1,6 +1,6 @@
 ---
 - command: >
-    {{ openshift.common.client_binary }}
+    {{ openshift_client_binary }}
     --config={{ openshift.common.config_base }}/master/admin.kubeconfig
     get namespaces -o jsonpath={.items[*].metadata.name} {{ __default_logging_ops_projects | join(' ') }}
   register: __logging_ops_projects
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index af36d67c6..fbc3e3fd1 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -109,14 +109,14 @@
 
 # remove annotations added by logging
 - command: >
-    {{ openshift.common.client_binary }}
+    {{ openshift_client_binary }}
     --config={{ openshift.common.config_base }}/master/admin.kubeconfig
     get namespaces -o name {{ __default_logging_ops_projects | join(' ') }}
   register: __logging_ops_projects
 
 - name: Remove Annotation of Operations Projects
   command: >
-    {{ openshift.common.client_binary }}
+    {{ openshift_client_binary }}
     --config={{ openshift.common.config_base }}/master/admin.kubeconfig
     annotate {{ project }} openshift.io/logging.ui.hostname-
   with_items: "{{ __logging_ops_projects.stdout_lines }}"
@@ -126,7 +126,18 @@
     - __logging_ops_projects.stderr | length == 0
 
 ## EventRouter
-- include_role:
+- import_role:
     name: openshift_logging_eventrouter
   when:
     not openshift_logging_install_eventrouter | default(false) | bool
+
+# Update asset config in openshift-web-console namespace
+- name: Remove Kibana route information from web console asset config
+  include_role:
+    name: openshift_web_console
+    tasks_from: update_asset_config.yml
+  vars:
+    asset_config_edits:
+      - key: loggingPublicURL
+        value: ""
+  when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 082c0128f..0d7f8c056 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -17,7 +17,7 @@
 
 - name: Generate certificates
   command: >
-    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+    {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
     --key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
     --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
   check_mode: no
@@ -139,10 +139,10 @@
 
 # TODO: make idempotent
 - name: Generate proxy session
-  set_fact: session_secret={{ 200 | oo_random_word}}
+  set_fact: session_secret={{ 200 | lib_utils_oo_random_word}}
   check_mode: no
 
 # TODO: make idempotent
 - name: Generate oauth client secret
-  set_fact: oauth_secret={{ 64 | oo_random_word}}
+  set_fact: oauth_secret={{ 64 | lib_utils_oo_random_word}}
   check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml
index d6ac88dcc..6e3204589 100644
--- a/roles/openshift_logging/tasks/generate_jks.yaml
+++ b/roles/openshift_logging/tasks/generate_jks.yaml
@@ -24,25 +24,21 @@
   local_action: file path="{{local_tmp.stdout}}/elasticsearch.jks" state=touch mode="u=rw,g=r,o=r"
   when: elasticsearch_jks.stat.exists
   changed_when: False
-  become: no
 
 - name: Create placeholder for previously created JKS certs to prevent recreating...
   local_action: file path="{{local_tmp.stdout}}/logging-es.jks" state=touch mode="u=rw,g=r,o=r"
   when: logging_es_jks.stat.exists
   changed_when: False
-  become: no
 
 - name: Create placeholder for previously created JKS certs to prevent recreating...
   local_action: file path="{{local_tmp.stdout}}/system.admin.jks" state=touch mode="u=rw,g=r,o=r"
   when: system_admin_jks.stat.exists
   changed_when: False
-  become: no
 
 - name: Create placeholder for previously created JKS certs to prevent recreating...
   local_action: file path="{{local_tmp.stdout}}/truststore.jks" state=touch mode="u=rw,g=r,o=r"
   when: truststore_jks.stat.exists
   changed_when: False
-  become: no
 
 - name: pulling down signing items from host
   fetch:
@@ -61,12 +57,10 @@
   vars:
     - top_dir: "{{local_tmp.stdout}}"
   when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
-  become: no
 
 - name: Run JKS generation script
   local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}}
   check_mode: no
-  become: no
   when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
 
 - name: Pushing locally generated JKS certs to remote host...
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index bb8ebec6b..f82e55b98 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -1,9 +1,12 @@
 ---
 - name: Gather OpenShift Logging Facts
   openshift_logging_facts:
-    oc_bin: "{{openshift.common.client_binary}}"
+    oc_bin: "{{openshift_client_binary}}"
     openshift_logging_namespace: "{{openshift_logging_namespace}}"
 
+## This is include vs import because we need access to group/inventory variables
+- include_tasks: set_defaults_from_current.yml
+
 - name: Set logging project
   oc_project:
     state: present
@@ -84,14 +87,14 @@
     openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
     openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
-    openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
+    openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
     openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
     openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"
     _es_containers: "{{ outer_item.0.containers}}"
     _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
 
   with_together:
-  - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
+  - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() | list }}"
   - "{{ openshift_logging_facts.elasticsearch.pvcs }}"
   - "{{ es_indices }}"
   loop_control:
@@ -111,7 +114,7 @@
 
     openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
     openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
-    openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
+    openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
 
   with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
   loop_control:
@@ -148,7 +151,7 @@
     openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
     openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
     openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
-    openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
+    openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}"
     openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
     openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
     openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
@@ -166,7 +169,7 @@
     _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
 
   with_together:
-  - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
+  - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() | list }}"
  - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}"
   - "{{ es_ops_indices }}"
   loop_control:
@@ -190,7 +193,7 @@
     openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
     openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
     openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
-    openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
+    openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}"
     openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
     openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
     openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
@@ -210,7 +213,7 @@
 
 ## Kibana
-- include_role:
+- import_role:
     name: openshift_logging_kibana
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -223,7 +226,7 @@
     openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
 
 
-- include_role:
+- import_role:
     name: openshift_logging_kibana
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -253,7 +256,7 @@
 - include_tasks: annotate_ops_projects.yaml
 
 ## Curator
-- include_role:
+- import_role:
     name: openshift_logging_curator
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -263,7 +266,7 @@
     openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
     openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
 
-- include_role:
+- import_role:
     name: openshift_logging_curator
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -281,7 +284,7 @@
   - openshift_logging_use_ops | bool
 
 ## Mux
-- include_role:
+- import_role:
     name: openshift_logging_mux
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -294,7 +297,7 @@
 
 
 ## Fluentd
-- include_role:
+- import_role:
     name: openshift_logging_fluentd
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -305,10 +308,27 @@
 
 ## EventRouter
-- include_role:
+- import_role:
     name: openshift_logging_eventrouter
   when: openshift_logging_install_eventrouter | default(false) | bool
 
+# TODO: Remove when asset config is removed from master-config.yaml
 - include_tasks: update_master_config.yaml
+
+# Update asset config in openshift-web-console namespace
+- name: Add Kibana route information to web console asset config
+  include_role:
+    name: openshift_web_console
+    tasks_from: update_console_config.yml
+  vars:
+    console_config_edits:
+      - key: clusterInfo#loggingPublicURL
+        value: "https://{{ openshift_logging_kibana_hostname }}"
+      # Continue to set the old deprecated property until the
+      # origin-web-console image is updated for the new name.
+      # This will be removed in a future pull.
+      - key: loggingPublicURL
+        value: "https://{{ openshift_logging_kibana_hostname }}"
+  when: openshift_web_console_install | default(true) | bool
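The `_es_configmap` variables above rely on the role's existing `walk` filter to pull one nested value out of the gathered facts with a safe default. The following is only a sketch of the idea behind that delimiter-separated lookup, not the filter's actual source:

    def walk(source, path, default, delimiter='.'):
        # Follow 'path' through nested dicts; return 'default' as soon as a key is missing.
        current = source
        for key in path.split(delimiter):
            if isinstance(current, dict) and key in current:
                current = current[key]
            else:
                return default
        return current

    facts = {"elasticsearch": {"configmaps": {"logging-elasticsearch":
             {"elasticsearch.yml": "index:\n  number_of_shards: 1\n"}}}}
    print(walk(facts, 'elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#'))
    # -> the raw elasticsearch.yml string, or '{}' if the configmap has not been created yet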
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index 9949bb95d..60cc399fa 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -17,7 +17,11 @@
   register: local_tmp
   changed_when: False
   check_mode: no
-  become: no
+
+- name: Chmod local temp directory for doing work in
+  local_action: command chmod 777 "{{ local_tmp.stdout }}"
+  changed_when: False
+  check_mode: no
 
 - include_tasks: install_logging.yaml
   when:
@@ -31,4 +35,3 @@
   local_action: file path="{{local_tmp.stdout}}" state=absent
   tags: logging_cleanup
   changed_when: False
-  become: no
diff --git a/roles/openshift_logging/tasks/patch_configmap_file.yaml b/roles/openshift_logging/tasks/patch_configmap_file.yaml
new file mode 100644
index 000000000..30087fe6a
--- /dev/null
+++ b/roles/openshift_logging/tasks/patch_configmap_file.yaml
@@ -0,0 +1,35 @@
+---
+## The purpose of this task file is to get a patch that is based on the diff
+## between configmap_current_file and configmap_new_file. The module
+## logging_patch takes the paths of two files to compare and also a list of
+## variables whose line we exclude from the diffs.
+## We then patch the new configmap file so that we can build a configmap
+## using that file later. We then use oc apply to idempotenly modify any
+## existing configmap.
+
+## The following variables are expected to be provided when including this task:
+# __configmap_output -- This is provided to us from patch_configmap_files.yaml
+#   it is a dict of the configmap where configmap_current_file exists
+# configmap_current_file -- The name of the data file in the __configmap_output
+# configmap_new_file -- The path to the file that we intend to oc apply later
+#   we apply our generated patch to this file.
+# configmap_protected_lines -- The list of variables to exclude from the diff
+
+- copy:
+    content: "{{ __configmap_output.results.results[0]['data'][configmap_current_file] }}"
+    dest: "{{ tempdir }}/current.yml"
+
+- logging_patch:
+    original_file: "{{ tempdir }}/current.yml"
+    new_file: "{{ configmap_new_file }}"
+    whitelist: "{{ configmap_protected_lines | default([]) }}"
+  register: patch_output
+
+- copy:
+    content: "{{ patch_output.raw_patch }}\n"
+    dest: "{{ tempdir }}/patch.patch"
+  when: patch_output.raw_patch | length > 0
+
+- command: >
+    patch --force --quiet -u "{{ configmap_new_file }}" "{{ tempdir }}/patch.patch"
+  when: patch_output.raw_patch | length > 0
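Taken together, this task file dumps the live configmap data to a temp file, asks logging_patch for a whitelist-filtered patch, and runs the patch utility against the role-provided file. A simplified Python sketch of that flow; the paths and whitelist values are invented, and the real tasks drive this through Ansible rather than a script:

    import difflib
    import re
    import subprocess

    def generate_patch(original_path, new_path, whitelist):
        """Mirror of the logging_patch module: diff new -> original with whitelisted keys removed."""
        def filtered(path):
            with open(path) as f:
                contents = f.read()
            for key in whitelist:
                contents = re.sub(r".*%s:.*\n" % key, "", contents)
            return contents
        return "\n".join(difflib.unified_diff(filtered(new_path).splitlines(),
                                              filtered(original_path).splitlines(),
                                              lineterm=''))

    # Invented paths standing in for {{ tempdir }}/current.yml and the role-provided file.
    current_file = "/tmp/logging/current.yml"     # data copied out of the live configmap
    new_file = "/tmp/logging/elasticsearch.yml"   # file the role will later 'oc apply'
    patch_file = "/tmp/logging/patch.patch"

    raw_patch = generate_patch(current_file, new_file, ["number_of_shards", "number_of_replicas"])
    if raw_patch:
        with open(patch_file, "w") as f:
            f.write(raw_patch + "\n")
        # Same invocation the task uses: fold the user's customizations back into the new file.
        subprocess.check_call(["patch", "--force", "--quiet", "-u", new_file, patch_file])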
diff --git a/roles/openshift_logging/tasks/patch_configmap_files.yaml b/roles/openshift_logging/tasks/patch_configmap_files.yaml
new file mode 100644
index 000000000..74a9cc287
--- /dev/null
+++ b/roles/openshift_logging/tasks/patch_configmap_files.yaml
@@ -0,0 +1,31 @@
+---
+## The purpose of this task file is to take in a list of configmap files provided
+## in the variable configmap_file_names, which correspond to the data sections
+## within a configmap. We iterate over each of these files and create a patch
+## from the diff between current_file and new_file to try to maintain any custom
+## changes that a user may have made to a currently deployed configmap while
+## trying to idempotently update with any role provided files.
+
+## The following variables are expected to be provided when including this task:
+# configmap_name -- This is the name of the configmap that the files exist in
+# configmap_namespace -- The namespace that the configmap lives in
+# configmap_file_names -- This is expected to be passed in as a dict
+#   current_file -- The name of the data entry within the configmap
+#   new_file -- The file path to the file we are comparing to current_file
+#   protected_lines -- List of variables whose line will be excluded when creating a diff
+
+- oc_configmap:
+    name: "{{ configmap_name }}"
+    state: list
+    namespace: "{{ configmap_namespace }}"
+  register: __configmap_output
+
+- when: __configmap_output.results.stderr is undefined
+  include_tasks: patch_configmap_file.yaml
+  vars:
+    configmap_current_file: "{{ configmap_files.current_file }}"
+    configmap_new_file: "{{ configmap_files.new_file }}"
+    configmap_protected_lines: "{{ configmap_files.protected_lines | default([]) }}"
+  with_items: "{{ configmap_file_names }}"
+  loop_control:
+    loop_var: configmap_files
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
index 00de0ca06..bc817075d 100644
--- a/roles/openshift_logging/tasks/procure_server_certs.yaml
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -27,7 +27,7 @@
 
 - name: Creating signed server cert and key for {{ cert_info.procure_component }}
   command: >
-    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+    {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
     --key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
     --hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
     --signer-serial={{generated_certs_dir}}/ca.serial.txt
diff --git a/roles/openshift_logging/tasks/set_defaults_from_current.yml b/roles/openshift_logging/tasks/set_defaults_from_current.yml
new file mode 100644
index 000000000..dde362abe
--- /dev/null
+++ b/roles/openshift_logging/tasks/set_defaults_from_current.yml
@@ -0,0 +1,34 @@
+---
+
+## We are pulling default values from configmaps if they exist already
+## Using conditional_set_fact allows us to set the value of a variable based on
+## the value of another one, if it is already defined. Else we don't set the
+## left hand side (it stays undefined as well).
+
+## conditional_set_fact allows us to specify a fact source, so first we try to
+## set variables in the logging-elasticsearch & logging-elasticsearch-ops configmaps
+## afterwards we set the value of the variable based on the value in the inventory
+## but fall back to using the value from a configmap as a default. If neither is set
+## then the variable remains undefined and the role default will be used.
+
+- conditional_set_fact:
+    facts: "{{ openshift_logging_facts['elasticsearch']['configmaps']['logging-elasticsearch']['elasticsearch.yml'] | flatten_dict }}"
+  vars:
+    __openshift_logging_es_number_of_shards: index.number_of_shards
+    __openshift_logging_es_number_of_replicas: index.number_of_replicas
+  when: openshift_logging_facts['elasticsearch']['configmaps']['logging-elasticsearch'] is defined
+
+- conditional_set_fact:
+    facts: "{{ openshift_logging_facts['elasticsearch_ops']['configmaps']['logging-elasticsearch-ops']['elasticsearch.yml'] | flatten_dict }}"
+  vars:
+    __openshift_logging_es_ops_number_of_shards: index.number_of_shards
+    __openshift_logging_es_ops_number_of_replicas: index.number_of_replicas
+  when: openshift_logging_facts['elasticsearch_ops']['configmaps']['logging-elasticsearch-ops'] is defined
+
+- conditional_set_fact:
+    facts: "{{ hostvars[inventory_hostname] }}"
+  vars:
+    openshift_logging_es_number_of_shards: openshift_logging_es_number_of_shards | __openshift_logging_es_number_of_shards
+    openshift_logging_es_number_of_replicas: openshift_logging_es_number_of_replicas | __openshift_logging_es_number_of_replicas
+    openshift_logging_es_ops_number_of_shards: openshift_logging_es_ops_number_of_shards | __openshift_logging_es_ops_number_of_shards
+    openshift_logging_es_ops_number_of_replicas: openshift_logging_es_ops_number_of_replicas | __openshift_logging_es_ops_number_of_replicas
diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml
index b96b8e29d..c0f42ba97 100644
--- a/roles/openshift_logging/tasks/update_master_config.yaml
+++ b/roles/openshift_logging/tasks/update_master_config.yaml
@@ -1,4 +1,5 @@
 ---
+# TODO: Remove when asset config is removed from master-config.yaml
 - name: Adding Kibana route information to loggingPublicURL
   modify_yaml:
     dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
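The net effect of the three conditional_set_fact tasks is a precedence chain: an inventory value wins, otherwise the value recovered from the existing configmap is used, and only if neither exists does the role default apply. A sketch of that fallback logic, illustrating the behavior described in the file's comments rather than the conditional_set_fact module's source:

    def resolve(fact_sources, candidates, default=None):
        # Return the first candidate key that is defined in fact_sources, else the default.
        for name in candidates:
            if name in fact_sources and fact_sources[name] is not None:
                return fact_sources[name]
        return default

    # Invented example: the inventory did not set the variable, but the live
    # logging-elasticsearch configmap had index.number_of_shards flattened into a fact.
    hostvars = {"__openshift_logging_es_number_of_shards": "3"}

    shards = resolve(hostvars,
                     ["openshift_logging_es_number_of_shards",
                      "__openshift_logging_es_number_of_shards"],
                     default="1")   # role default used only if neither source is set
    print(shards)  # -> "3", the value preserved from the existing configmap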