34 files changed, 320 insertions, 231 deletions
diff --git a/HOOKS.md b/HOOKS.md new file mode 100644 index 000000000..9c5f80054 --- /dev/null +++ b/HOOKS.md @@ -0,0 +1,70 @@ +# Hooks + +The ansible installer allows operators to execute custom tasks during +specific operations through a system called hooks. Hooks allow operators to +provide files defining tasks to execute before and/or after specific areas +during installations and upgrades. This can be very helpful to validate +or modify custom infrastructure when installing/upgrading OpenShift. + +It is important to remember that when a hook fails, the operation fails and +may be re-run, so a good hook can run multiple times and provide the same +results. A great hook is idempotent. + +**Note**: There is currently no standard interface for hooks. In the future +a standard interface will be defined and any hooks that existed previously will +need to be updated to meet the new standard. + +## Using Hooks + +Hooks are defined in the ``hosts`` inventory file under the ``OSEv3:vars`` +section. + +Each hook should point to a YAML file which defines Ansible tasks. This file +is used as an include, meaning that it cannot be a playbook, only +a set of tasks. Best practice suggests using absolute paths to the hook file to avoid any ambiguity. + +### Example ```ini [OSEv3:vars] # <snip> openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml openshift_master_upgrade_hook=/usr/share/custom/master.yml openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml # <snip> ``` + +Hook files must be YAML-formatted files that define a set of Ansible tasks. +The file may **not** be a playbook. + +### Example ```yaml --- # Trivial example forcing an operator to ack the start of an upgrade # file=/usr/share/custom/pre_master.yml + +- name: note the start of a master upgrade + debug: + msg: "Master upgrade of {{ inventory_hostname }} is about to start" + +- name: require an operator to agree to start an upgrade + pause: + prompt: "Hit enter to start the master upgrade" ``` + +## Upgrade Hooks + +### openshift_master_upgrade_pre_hook +- Runs **before** each master is upgraded. +- This hook runs against **each master** in serial. +- If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation). + +### openshift_master_upgrade_hook +- Runs **after** each master is upgraded but **before** its service/system restart. +- This hook runs against **each master** in serial. +- If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation). + + +### openshift_master_upgrade_post_hook +- Runs **after** each master is upgraded and has had its service/system restarted. +- This hook runs against **each master** in serial. +- If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation). @@ -74,6 +74,12 @@ you are not running a stable release. - [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html) - [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html) + +## Installer Hooks + +See the [hooks documentation](HOOKS.md). + + ## Contributing See the [contribution guide](CONTRIBUTING.md). 
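Since every hook runs in serial against the master currently being upgraded, a task that must act from a different machine needs the ``delegate_to`` or ``local_action`` mechanism noted in the bullets above. A minimal sketch of such a hook file (reusing the example path from the docs; the port assumes the default master API port of 8443, and the check is idempotent so the hook can safely run more than once):

```yaml
---
# Hypothetical hook file: /usr/share/custom/pre_master.yml
# Each task runs once per master; delegate_to runs it from another host.
- name: confirm the master API is reachable before upgrading
  wait_for:
    host: "{{ inventory_hostname }}"
    port: 8443
    timeout: 30
  delegate_to: localhost
```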
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index dde172c4a..0a1b8c5c4 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -89,6 +89,25 @@ openshift_release=v1.4 # Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone. # docker_upgrade=False + +# Upgrade Hooks +# +# Hooks are available to run custom tasks at various points during a cluster +# upgrade. Each hook should point to a file with Ansible tasks defined. Absolute paths +# are suggested; otherwise the path will be treated as relative to the file where the +# hook is actually used. +# +# Tasks to run before each master is upgraded. +# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml +# +# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible +# upgrade steps, but before we restart system/services. +# openshift_master_upgrade_hook=/usr/share/custom/master.yml +# +# Tasks to run after each master is upgraded and system/services have been restarted. +# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml + + # Alternate image format string, useful if you've got your own registry mirror #oreg_url=example.com/openshift3/ose-${component}:${version} # If oreg_url points to a registry other than registry.access.redhat.com we can diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index c0dd8a1e8..89b9d7e48 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -89,6 +89,25 @@ openshift_release=v3.4 # Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone. # docker_upgrade=False + +# Upgrade Hooks +# +# Hooks are available to run custom tasks at various points during a cluster +# upgrade. Each hook should point to a file with Ansible tasks defined. Absolute paths +# are suggested; otherwise the path will be treated as relative to the file where the +# hook is actually used. +# +# Tasks to run before each master is upgraded. +# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml +# +# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible +# upgrade steps, but before we restart system/services. +# openshift_master_upgrade_hook=/usr/share/custom/master.yml +# +# Tasks to run after each master is upgraded and system/services have been restarted. 
+# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml + + # Alternate image format string, useful if you've got your own registry mirror #oreg_url=example.com/openshift3/ose-${component}:${version} # If oreg_url points to a registry other than registry.access.redhat.com we can diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 0f226f5f9..a95cb68b7 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -38,6 +38,9 @@ - set_fact: openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" when: openshift_docker_log_options is not defined + - set_fact: + openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}" + when: openshift_docker_selinux_enabled is not defined - include: ../openshift-etcd/config.yml tags: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 7f738ea0f..77b37cdc2 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -51,6 +51,8 @@ roles: - openshift_master_facts +# The main master upgrade play. Should handle all changes to the system in one pass, with +# support for optional hooks to be defined. - name: Upgrade master hosts: oo_masters_to_config vars: @@ -62,6 +64,14 @@ roles: - openshift_facts post_tasks: + + # Run the pre-upgrade hook if defined: + - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}" + when: openshift_master_upgrade_pre_hook is defined + + - include: "{{ openshift_master_upgrade_pre_hook }}" + when: openshift_master_upgrade_pre_hook is defined + - include: rpm_upgrade.yml component=master when: not openshift.common.is_containerized | bool @@ -102,12 +112,26 @@ state: link when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists + # Run the upgrade hook prior to restarting services/system if defined: + - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}" + when: openshift_master_upgrade_hook is defined + + - include: "{{ openshift_master_upgrade_hook }}" + when: openshift_master_upgrade_hook is defined + - include: ../../openshift-master/restart_hosts.yml when: openshift.common.rolling_restart_mode == 'system' - include: ../../openshift-master/restart_services.yml when: openshift.common.rolling_restart_mode == 'services' + # Run the post-upgrade hook if defined: + - debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}" + when: openshift_master_upgrade_post_hook is defined + + - include: "{{ openshift_master_upgrade_post_hook }}" + when: openshift_master_upgrade_post_hook is defined + - set_fact: master_update_complete: True diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 86b344d7a..2bb460815 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -87,6 +87,19 @@ - name: Restart rpm node service service: name="{{ openshift.common.service_type }}-node" state=restarted when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool + + - name: Wait for node to be ready + command: > + {{ 
hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }} --no-headers + register: node_output + delegate_to: "{{ groups.oo_first_master.0 }}" + when: inventory_hostname in groups.oo_nodes_to_upgrade + until: node_output.stdout.split()[1].startswith('Ready') + # Give the node two minutes to come back online. Note that we pre-pull images now + # so containerized services should restart quickly as well. + retries: 24 + delay: 5 + - name: Set node schedulability command: > {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index a8935370a..66c9cfa0f 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -96,7 +96,7 @@ dest: /etc/sysconfig/docker regexp: '^OPTIONS=.*$' line: "OPTIONS='\ - {% if ansible_selinux and ansible_selinux.status == '''enabled''' %} --selinux-enabled{% endif %}\ + {% if ansible_selinux.status | default(None) == '''enabled''' and docker_selinux_enabled | default(true) %} --selinux-enabled {% endif %}\ {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\ {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\ {% if docker_options is defined %} {{ docker_options }}{% endif %}\ diff --git a/roles/openshift_builddefaults/vars/main.yml b/roles/openshift_builddefaults/vars/main.yml index c9ec3b82f..fe6069ea9 100644 --- a/roles/openshift_builddefaults/vars/main.yml +++ b/roles/openshift_builddefaults/vars/main.yml @@ -23,7 +23,6 @@ builddefaults_yaml: imageLabels: "{{ openshift_builddefaults_image_labels | default(None) }}" nodeSelector: "{{ openshift_builddefaults_nodeselectors | default(None) }}" annotations: "{{ openshift_builddefaults_annotations | default(None) }}" - #resources: "{{ openshift.builddefaults.resources | default(None) }}" resources: requests: cpu: "{{ openshift_builddefaults_resources_requests_cpu | default(None) }}" diff --git a/roles/openshift_buildoverrides/tasks/main.yml b/roles/openshift_buildoverrides/tasks/main.yml index 82fce1c5b..87d0e6f21 100644 --- a/roles/openshift_buildoverrides/tasks/main.yml +++ b/roles/openshift_buildoverrides/tasks/main.yml @@ -1,13 +1,4 @@ --- -#- name: Set buildoverrides -# openshift_facts: -# role: buildoverrides -# local_facts: -# force_pull: "{{ openshift_buildoverrides_force_pull | default(None) }}" -# image_labels: "{{ openshift_buildoverrides_image_labels | default(None) }}" -# nodeselectors: "{{ openshift_buildoverrides_nodeselectors | default(None) }}" -# annotations: "{{ openshift_buildoverrides_annotations | default(None) }}" - - name: Set buildoverrides config structure openshift_facts: role: buildoverrides diff --git a/roles/openshift_buildoverrides/vars/main.yml b/roles/openshift_buildoverrides/vars/main.yml index f0f9c255b..cf49a6ebf 100644 --- a/roles/openshift_buildoverrides/vars/main.yml +++ b/roles/openshift_buildoverrides/vars/main.yml @@ -1,10 +1,11 @@ --- +force_pull: "{{ openshift_buildoverrides_force_pull | default('') }}" buildoverrides_yaml: BuildOverrides: configuration: apiVersion: v1 kind: BuildOverridesConfig - forcePull: "{{ openshift_buildoverrides_force_pull | default('', true) }}" + forcePull: "{{ '' if force_pull == '' else force_pull | bool }}" imageLabels: "{{ openshift_buildoverrides_image_labels | default(None) }}" 
nodeSelector: "{{ openshift_buildoverrides_nodeselectors | default(None) }}" annotations: "{{ openshift_buildoverrides_annotations | default(None) }}" diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml index 613c237a3..049ceffe0 100644 --- a/roles/openshift_docker_facts/tasks/main.yml +++ b/roles/openshift_docker_facts/tasks/main.yml @@ -9,6 +9,7 @@ additional_registries: "{{ openshift_docker_additional_registries | default(None) }}" blocked_registries: "{{ openshift_docker_blocked_registries | default(None) }}" insecure_registries: "{{ openshift_docker_insecure_registries | default(None) }}" + selinux_enabled: "{{ openshift_docker_selinux_enabled | default(None) }}" log_driver: "{{ openshift_docker_log_driver | default(None) }}" log_options: "{{ openshift_docker_log_options | default(None) }}" options: "{{ openshift_docker_options | default(None) }}" @@ -23,6 +24,7 @@ | default(omit) }}" docker_insecure_registries: "{{ openshift.docker.insecure_registries | default(omit) }}" + docker_selinux_enabled: "{{ openshift.docker.selinux_enabled | default(omit) }}" docker_log_driver: "{{ openshift.docker.log_driver | default(omit) }}" docker_log_options: "{{ openshift.docker.log_options | default(omit) }}" docker_push_dockerhub: "{{ openshift.docker.disable_push_dockerhub diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 10121f82a..f7506bd63 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1032,6 +1032,8 @@ def set_nodename(facts): if 'node' in facts and 'common' in facts: if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack': facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '') + elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce': + facts['node']['nodename'] = facts['provider']['metadata']['hostname'].split('.')[0] else: facts['node']['nodename'] = facts['common']['hostname'].lower() return facts @@ -1690,9 +1692,38 @@ def set_builddefaults_facts(facts): if 'admission_plugin_config' not in facts['master']: facts['master']['admission_plugin_config'] = dict() facts['master']['admission_plugin_config'].update(builddefaults['config']) + # if the user didn't actually provide proxy values, delete the proxy env variable defaults. + delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env']) + return facts +def delete_empty_keys(keylist): + """ Delete dictionary elements from keylist where "value" is empty. + + Args: + keylist(list): A list of builddefault configuration envs. 
+ + Returns: + none + + Example: + keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}, + {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}, + {'name': 'NO_PROXY', 'value': ''}] + + After calling delete_empty_keys the provided list is modified to become: + + [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}, + {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}] + """ + count = 0 + for i in range(0, len(keylist)): + if len(keylist[i - count]['value']) == 0: + del keylist[i - count] + count += 1 + + def set_buildoverrides_facts(facts): """ Set build overrides diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml index e9bc8b4ab..68bb4ace8 100644 --- a/roles/openshift_loadbalancer/tasks/main.yml +++ b/roles/openshift_loadbalancer/tasks/main.yml @@ -17,7 +17,7 @@ - name: Create the systemd unit files template: src: "haproxy.docker.service.j2" - dest: "{{ containerized_svc_dir }}/haproxy.service" + dest: "/etc/systemd/system/haproxy.service" when: openshift.common.is_containerized | bool notify: restart haproxy diff --git a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 index 79e695001..24fd635ec 100644 --- a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 +++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 @@ -1,16 +1,20 @@ # Global settings #--------------------------------------------------------------------- global + maxconn {{ openshift_loadbalancer_global_maxconn | default(20000) }} + log /dev/log local0 info +{% if openshift.common.is_containerized | bool %} + stats socket /var/lib/haproxy/run/haproxy.sock mode 600 level admin +{% else %} chroot /var/lib/haproxy pidfile /var/run/haproxy.pid - maxconn {{ openshift_loadbalancer_global_maxconn | default(20000) }} user haproxy group haproxy daemon - log /dev/log local0 info # turn on stats unix socket stats socket /var/lib/haproxy/stats +{% endif %} #--------------------------------------------------------------------- # common defaults that all the 'listen' and 'backend' sections will diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 index 624876ab0..5385df3b7 100644 --- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 +++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 @@ -5,7 +5,7 @@ PartOf=docker.service [Service] ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer -ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer -p {{ openshift_master_api_port | default(8443) }}:{{ openshift_master_api_port | default(8443) }} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint="haproxy -f /etc/haproxy/haproxy.cfg" {{ openshift.common.router_image }}:{{ openshift_image_tag }} +ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer -p {{ openshift_master_api_port | default(8443) }}:{{ openshift_master_api_port | default(8443) }} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop openshift_loadbalancer LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }} diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 9b71dc676..856cfa2b9 100644 --- 
a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -35,6 +35,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log - `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'. - `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'. - `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified. +- `openshift_logging_curator_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the curator pod will land. - `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'. - `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified. @@ -43,6 +44,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log - `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified. - `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified. - `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1. +- `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land. - `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'. - `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'. @@ -67,6 +69,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log - `openshift_logging_es_pvc_prefix`: The prefix for the generated PVCs. Defaults to 'logging-es'. - `openshift_logging_es_recover_after_time`: The amount of time ES will wait before it tries to recover. Defaults to '5m'. - `openshift_logging_es_storage_group`: The storage group used for ES. Defaults to '65534'. +- `openshift_logging_es_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land. When `openshift_logging_use_ops` is `True`, there are some additional vars. 
These work the same as above for their non-ops counterparts, but apply to the OPS cluster instance: diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index 20e50482e..740e490e1 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -88,56 +88,12 @@ - name: Creating necessary JKS certs include: generate_jks.yaml -# check for secret/logging-kibana-proxy -- command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.oauth-secret}' - register: kibana_secret_oauth_check - ignore_errors: yes - changed_when: no - check_mode: no - -- command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.session-secret}' - register: kibana_secret_session_check - ignore_errors: yes - changed_when: no - check_mode: no - -# check for oauthclient secret -- command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get oauthclient/kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.secret}' - register: oauth_secret_check - ignore_errors: yes - changed_when: no - check_mode: no - -# set or generate as needed +# TODO: make idempotent - name: Generate proxy session set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}} check_mode: no - when: - - kibana_secret_session_check.stdout is not defined or kibana_secret_session_check.stdout == '' - -- name: Generate proxy session - set_fact: session_secret={{kibana_secret_session_check.stdout | b64decode }} - check_mode: no - when: - - kibana_secret_session_check.stdout is defined - - kibana_secret_session_check.stdout != '' +# TODO: make idempotent - name: Generate oauth client secret set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}} check_mode: no - when: kibana_secret_oauth_check.stdout is not defined or kibana_secret_oauth_check.stdout == '' - or oauth_secret_check.stdout is not defined or oauth_secret_check.stdout == '' - or kibana_secret_oauth_check.stdout | b64decode != oauth_secret_check.stdout - -- name: Generate oauth client secret - set_fact: oauth_secret={{kibana_secret_oauth_check.stdout | b64decode}} - check_mode: no - when: - - kibana_secret_oauth_check is defined - - kibana_secret_oauth_check.stdout != '' - - oauth_secret_check.stdout is defined - - oauth_secret_check.stdout != '' - - kibana_secret_oauth_check.stdout | b64decode == oauth_secret_check.stdout diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml index adb6c2b2d..c6e2ccbc0 100644 --- a/roles/openshift_logging/tasks/generate_jks.yaml +++ b/roles/openshift_logging/tasks/generate_jks.yaml @@ -27,34 +27,22 @@ check_mode: no - name: Create placeholder for previously created JKS certs to prevent recreating... - file: - path: "{{local_tmp.stdout}}/elasticsearch.jks" - state: touch - mode: "u=rw,g=r,o=r" + local_action: file path="{{local_tmp.stdout}}/elasticsearch.jks" state=touch mode="u=rw,g=r,o=r" when: elasticsearch_jks.stat.exists changed_when: False - name: Create placeholder for previously created JKS certs to prevent recreating... 
- file: - path: "{{local_tmp.stdout}}/logging-es.jks" - state: touch - mode: "u=rw,g=r,o=r" + local_action: file path="{{local_tmp.stdout}}/logging-es.jks" state=touch mode="u=rw,g=r,o=r" when: logging_es_jks.stat.exists changed_when: False - name: Create placeholder for previously created JKS certs to prevent recreating... - file: - path: "{{local_tmp.stdout}}/system.admin.jks" - state: touch - mode: "u=rw,g=r,o=r" + local_action: file path="{{local_tmp.stdout}}/system.admin.jks" state=touch mode="u=rw,g=r,o=r" when: system_admin_jks.stat.exists changed_when: False - name: Create placeholder for previously created JKS certs to prevent recreating... - file: - path: "{{local_tmp.stdout}}/truststore.jks" - state: touch - mode: "u=rw,g=r,o=r" + local_action: file path="{{local_tmp.stdout}}/truststore.jks" state=touch mode="u=rw,g=r,o=r" when: truststore_jks.stat.exists changed_when: False @@ -69,15 +57,16 @@ - ca.serial.txt - ca.crl.srl - ca.db + when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - local_action: template src=signing.conf.j2 dest={{local_tmp.stdout}}/signing.conf vars: - top_dir: "{{local_tmp.stdout}}" + when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - name: Run JKS generation script local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}} check_mode: no - become: yes when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - name: Pushing locally generated JKS certs to remote host... @@ -105,7 +94,5 @@ when: not truststore_jks.stat.exists - name: Cleaning up temp dir - file: - path: "{{local_tmp.stdout}}" - state: absent + local_action: file path="{{local_tmp.stdout}}" state=absent changed_when: False diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml index 8f2825552..fcfce4e1e 100644 --- a/roles/openshift_logging/tasks/install_curator.yaml +++ b/roles/openshift_logging/tasks/install_curator.yaml @@ -31,6 +31,7 @@ curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}" curator_memory_limit: "{{openshift_logging_curator_memory_limit }}" replicas: "{{curator_replica_count.stdout | default (0)}}" + curator_node_selector: "{{openshift_logging_curator_nodeselector | default({}) }}" check_mode: no changed_when: no @@ -46,6 +47,7 @@ curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}" curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}" replicas: "{{curator_ops_replica_count.stdout | default (0)}}" + curator_node_selector: "{{openshift_logging_curator_ops_nodeselector | default({}) }}" when: openshift_logging_use_ops check_mode: no changed_when: no diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml index fbba46a35..9b1c004f2 100644 --- a/roles/openshift_logging/tasks/install_elasticsearch.yaml +++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml @@ -33,6 +33,7 @@ volume_names: "{{es_pvc_pool | default([])}}" pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}" deploy_name: "{{item.1}}" + es_node_selector: "{{openshift_logging_es_nodeselector | default({})}}" with_indexed_items: - "{{es_dc_pool | default([])}}" check_mode: no @@ -98,6 +99,7 @@ 
es_recover_after_nodes: "{{es_ops_recover_after_nodes}}" es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}" openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}" + es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({})}}" with_indexed_items: - "{{es_dc_pool_ops | default([])}}" when: diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml index de4b018dd..f4df7de0c 100644 --- a/roles/openshift_logging/tasks/install_kibana.yaml +++ b/roles/openshift_logging/tasks/install_kibana.yaml @@ -35,6 +35,7 @@ kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}" kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}" replicas: "{{kibana_replica_count.stdout | default (0)}}" + kibana_node_selector: "{{openshift_logging_kibana_nodeselector | default({}) }}" check_mode: no changed_when: no @@ -53,6 +54,7 @@ kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}" kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}" replicas: "{{kibana_ops_replica_count.stdout | default (0)}}" + kibana_node_selector: "{{openshift_logging_kibana_ops_nodeselector | default({}) }}" when: openshift_logging_use_ops check_mode: no changed_when: no diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml index aecb5d81b..bd5073381 100644 --- a/roles/openshift_logging/tasks/label_node.yaml +++ b/roles/openshift_logging/tasks/label_node.yaml @@ -1,11 +1,34 @@ --- - command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}} + -o jsonpath='{.metadata.labels}' + register: node_labels + when: not ansible_check_mode + changed_when: no + +- command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} + register: label_result + failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr + when: + - value is defined + - node_labels.stdout is defined + - label not in node_labels.stdout + - unlabel is not defined or not unlabel + - not ansible_check_mode + +- command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}} -o jsonpath='{.metadata.labels.{{ label }}}' register: label_value - failed_when: label_value.rc == 1 and 'exists' not in label_value.stderr - when: not ansible_check_mode + ignore_errors: yes changed_when: no + when: + - value is defined + - node_labels.stdout is defined + - label in node_labels.stdout + - unlabel is not defined or not unlabel + - not ansible_check_mode - command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2 index d3b5d33a2..de6258eaa 100644 --- a/roles/openshift_logging/templates/curator.j2 +++ b/roles/openshift_logging/templates/curator.j2 @@ -28,6 +28,12 @@ spec: spec: terminationGracePeriod: 600 serviceAccountName: aggregated-logging-curator +{% if curator_node_selector is iterable and curator_node_selector | length > 0 %} + nodeSelector: +{% for key, value in curator_node_selector.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} containers: - name: "curator" diff --git a/roles/openshift_logging/templates/es.j2 
b/roles/openshift_logging/templates/es.j2 index 291589690..ec84c6b76 100644 --- a/roles/openshift_logging/templates/es.j2 +++ b/roles/openshift_logging/templates/es.j2 @@ -30,6 +30,12 @@ spec: securityContext: supplementalGroups: - {{openshift_logging_es_storage_group}} +{% if es_node_selector is iterable and es_node_selector | length > 0 %} + nodeSelector: +{% for key, value in es_node_selector.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} containers: - name: "elasticsearch" diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2 index 1ec97701a..b42f62850 100644 --- a/roles/openshift_logging/templates/kibana.j2 +++ b/roles/openshift_logging/templates/kibana.j2 @@ -27,6 +27,12 @@ spec: component: "{{component}}" spec: serviceAccountName: aggregated-logging-kibana +{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %} + nodeSelector: +{% for key, value in kibana_node_selector.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} containers: - name: "kibana" diff --git a/roles/openshift_metrics/files/import_jks_certs.sh b/roles/openshift_metrics/files/import_jks_certs.sh index bb046df87..f4315ef34 100755 --- a/roles/openshift_metrics/files/import_jks_certs.sh +++ b/roles/openshift_metrics/files/import_jks_certs.sh @@ -114,5 +114,3 @@ function import_certs() { } import_certs - -exit 0 diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml index f6bf6c1a6..f5192b005 100644 --- a/roles/openshift_metrics/tasks/import_jks_certs.yaml +++ b/roles/openshift_metrics/tasks/import_jks_certs.yaml @@ -1,76 +1,4 @@ --- -- name: Check for jks-generator service account - command: > - {{ openshift.common.client_binary }} - --config={{ mktemp.stdout }}/admin.kubeconfig - -n {{openshift_metrics_project}} - get serviceaccount/jks-generator --no-headers - register: serviceaccount_result - ignore_errors: yes - when: not ansible_check_mode - changed_when: no - -- name: Create jks-generator service account - command: > - {{ openshift.common.client_binary }} - --config={{ mktemp.stdout }}/admin.kubeconfig - -n {{openshift_metrics_project}} - create serviceaccount jks-generator - when: not ansible_check_mode and "not found" in serviceaccount_result.stderr - -- name: Check for hostmount-anyuid scc entry - command: > - {{ openshift.common.client_binary }} - --config={{ mktemp.stdout }}/admin.kubeconfig - get scc hostmount-anyuid - -o jsonpath='{.users}' - register: scc_result - when: not ansible_check_mode - changed_when: no - -- name: Add to hostmount-anyuid scc - command: > - {{ openshift.common.admin_binary }} - --config={{ mktemp.stdout }}/admin.kubeconfig - -n {{openshift_metrics_project}} - policy add-scc-to-user hostmount-anyuid - -z jks-generator - when: - - not ansible_check_mode - - scc_result.stdout.find("system:serviceaccount:{{openshift_metrics_project}}:jks-generator") == -1 - -- name: Copy JKS generation script - copy: - src: import_jks_certs.sh - dest: "{{openshift_metrics_certs_dir}}/import_jks_certs.sh" - check_mode: no - -- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-keystore.pwd - register: metrics_keystore_password - -- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-cassandra-keystore.pwd - register: cassandra_keystore_password - -- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-jgroups-keystore.pwd - register: jgroups_keystore_password - -- name: Generate JKS pod template - template: - src: jks_pod.j2 - 
dest: "{{mktemp.stdout}}/jks_pod.yaml" - vars: - metrics_keystore_passwd: "{{metrics_keystore_password.content}}" - cassandra_keystore_passwd: "{{cassandra_keystore_password.content}}" - metrics_truststore_passwd: "{{hawkular_truststore_password.content}}" - cassandra_truststore_passwd: "{{cassandra_truststore_password.content}}" - jgroups_passwd: "{{jgroups_keystore_password.content}}" - check_mode: no - changed_when: no - -- stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.keystore" - register: metrics_keystore - check_mode: no - - stat: path="{{openshift_metrics_certs_dir}}/hawkular-cassandra.keystore" register: cassandra_keystore check_mode: no @@ -79,6 +7,10 @@ register: cassandra_truststore check_mode: no +- stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.keystore" + register: metrics_keystore + check_mode: no + - stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.truststore" register: metrics_truststore check_mode: no @@ -87,32 +19,52 @@ register: jgroups_keystore check_mode: no -- name: create JKS pod - command: > - {{ openshift.common.client_binary }} - --config={{ mktemp.stdout }}/admin.kubeconfig - -n {{openshift_metrics_project}} - create -f {{mktemp.stdout}}/jks_pod.yaml - -o name - register: podoutput - check_mode: no - when: not metrics_keystore.stat.exists or - not metrics_truststore.stat.exists or - not cassandra_keystore.stat.exists or - not cassandra_truststore.stat.exists or - not jgroups_keystore.stat.exists +- block: + - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-keystore.pwd + register: metrics_keystore_password + + - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-cassandra-keystore.pwd + register: cassandra_keystore_password + + - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-jgroups-keystore.pwd + register: jgroups_keystore_password + + - local_action: command mktemp -d + register: local_tmp + changed_when: False + + - fetch: + dest: "{{local_tmp.stdout}}/" + src: "{{ openshift_metrics_certs_dir }}/{{item}}" + flat: yes + changed_when: False + with_items: + - hawkular-metrics.pkcs12 + - hawkular-cassandra.pkcs12 + - hawkular-metrics.crt + - hawkular-cassandra.crt + - ca.crt + + - local_action: command {{role_path}}/files/import_jks_certs.sh + environment: + CERT_DIR: "{{local_tmp.stdout}}" + METRICS_KEYSTORE_PASSWD: "{{metrics_keystore_password.content}}" + CASSANDRA_KEYSTORE_PASSWD: "{{cassandra_keystore_password.content}}" + METRICS_TRUSTSTORE_PASSWD: "{{hawkular_truststore_password.content}}" + CASSANDRA_TRUSTSTORE_PASSWD: "{{cassandra_truststore_password.content}}" + JGROUPS_PASSWD: "{{jgroups_keystore_password.content}}" + changed_when: False + + - copy: + dest: "{{openshift_metrics_certs_dir}}/" + src: "{{item}}" + with_fileglob: "{{local_tmp.stdout}}/*.*store" + + - file: + path: "{{local_tmp.stdout}}" + state: absent + changed_when: False -- command: > - {{ openshift.common.client_binary }} - --config={{ mktemp.stdout }}/admin.kubeconfig - -n {{openshift_metrics_project}} - get {{podoutput.stdout}} - -o jsonpath='{.status.phase}' - register: result - until: result.stdout.find("Succeeded") != -1 - retries: 5 - delay: 10 - changed_when: no when: not metrics_keystore.stat.exists or not metrics_truststore.stat.exists or not cassandra_keystore.stat.exists or diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index c42440130..1808db5d5 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -7,6 +7,7 @@ - 
name: Create temp directory for all our templates file: path={{mktemp.stdout}}/templates state=directory mode=0755 changed_when: False + when: openshift_metrics_install_metrics | bool - name: Copy the admin client config(s) command: > @@ -15,8 +16,4 @@ check_mode: no tags: metrics_init -- include: install_metrics.yaml - when: openshift_metrics_install_metrics | default(false) | bool - -- include: uninstall_metrics.yaml - when: not openshift_metrics_install_metrics | default(false) | bool +- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}" diff --git a/roles/openshift_metrics/templates/jks_pod.j2 b/roles/openshift_metrics/templates/jks_pod.j2 deleted file mode 100644 index e86fe38a4..000000000 --- a/roles/openshift_metrics/templates/jks_pod.j2 +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - labels: - metrics-infra: support - generateName: jks-cert-gen- -spec: - containers: - - name: jks-cert-gen - image: {{openshift_metrics_image_prefix}}metrics-deployer:{{openshift_metrics_image_version}} - imagePullPolicy: Always - command: ["sh", "{{openshift_metrics_certs_dir}}/import_jks_certs.sh"] - securityContext: - runAsUser: 0 - volumeMounts: - - mountPath: {{openshift_metrics_certs_dir}} - name: certmount - env: - - name: CERT_DIR - value: {{openshift_metrics_certs_dir}} - - name: METRICS_KEYSTORE_PASSWD - value: {{metrics_keystore_passwd}} - - name: CASSANDRA_KEYSTORE_PASSWD - value: {{cassandra_keystore_passwd}} - - name: METRICS_TRUSTSTORE_PASSWD - value: {{metrics_truststore_passwd}} - - name: CASSANDRA_TRUSTSTORE_PASSWD - value: {{cassandra_truststore_passwd}} - - name: hawkular_cassandra_alias - value: {{cassandra_keystore_passwd}} - - name: JGROUPS_PASSWD - value: {{jgroups_passwd}} - restartPolicy: Never - serviceAccount: jks-generator - volumes: - - hostPath: - path: "{{openshift_metrics_certs_dir}}" - name: certmount diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py index 8ba650994..8d4878fa7 100755 --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/os_firewall/library/os_firewall_manage_iptables.py @@ -223,7 +223,9 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes def gen_cmd(self): cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables' - return ["/usr/sbin/%s" % cmd] + # Include -w (wait for xtables lock) in default arguments. 
+ default_args = ['-w'] + return ["/usr/sbin/%s" % cmd] + default_args def gen_save_cmd(self): # pylint: disable=no-self-use return ['/usr/libexec/iptables/iptables.init', 'save'] diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml index 1101870be..c4db197ca 100644 --- a/roles/os_firewall/tasks/firewall/firewalld.yml +++ b/roles/os_firewall/tasks/firewall/firewalld.yml @@ -1,7 +1,8 @@ --- - name: Install firewalld packages - package: name=firewalld state=present - when: not openshift.common.is_containerized | bool + package: + name: firewalld + state: present - name: Ensure iptables services are not enabled systemd: diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml index 291df6822..41673ee40 100644 --- a/roles/rhel_subscribe/tasks/enterprise.yml +++ b/roles/rhel_subscribe/tasks/enterprise.yml @@ -7,7 +7,7 @@ when: deployment_type == 'enterprise' - set_fact: - default_ose_version: '3.3' + default_ose_version: '3.4' when: deployment_type in ['atomic-enterprise', 'openshift-enterprise'] - set_fact: @@ -16,7 +16,7 @@ - fail: msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type" when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or - ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3'] ) + ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4'] ) - name: Enable RHEL repositories command: subscription-manager repos \ diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt index f6a7bde10..aebfe7c39 100644 --- a/utils/test-requirements.txt +++ b/utils/test-requirements.txt @@ -13,3 +13,5 @@ pyOpenSSL yamllint tox detox +# Temporary work-around for flake8 vs mccabe version conflict +mccabe==0.5.3
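For reference, the ``nodeSelector`` blocks added to curator.j2, es.j2, and kibana.j2 earlier in this diff all render the configured label map directly into the pod spec. An illustrative fragment of what kibana.j2 would produce, assuming ``openshift_logging_kibana_nodeselector={"region": "infra"}``:

```yaml
# Illustrative rendering of kibana.j2 with the assumed selector map.
spec:
  serviceAccountName: aggregated-logging-kibana
  nodeSelector:
    region: infra
  containers:
  - name: "kibana"
```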