Diffstat (limited to 'roles')
38 files changed, 673 insertions, 51 deletions
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml index c4d5efa14..b4dea4a07 100644 --- a/roles/etcd_ca/tasks/main.yml +++ b/roles/etcd_ca/tasks/main.yml @@ -60,7 +60,8 @@    delegate_to: "{{ etcd_ca_host }}"    run_once: true -- command: > +- name: Create etcd CA certificate +  command: >      openssl req -config {{ etcd_openssl_conf }} -newkey rsa:4096      -keyout {{ etcd_ca_key }} -new -out {{ etcd_ca_cert }}      -x509 -extensions {{ etcd_ca_exts_self }} -batch -nodes diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd_server_certificates/tasks/main.yml index 1acdf1c85..242c1e997 100644 --- a/roles/etcd_server_certificates/tasks/main.yml +++ b/roles/etcd_server_certificates/tasks/main.yml @@ -58,6 +58,7 @@                   ~ etcd_cert_prefix ~ 'server.crt' }}"    environment:      SAN: "IP:{{ etcd_ip }}" +  when: etcd_server_certs_missing | bool    delegate_to: "{{ etcd_ca_host }}"  - name: Create the peer csr diff --git a/roles/lib_openshift/tasks/main.yml b/roles/lib_openshift/tasks/main.yml new file mode 100644 index 000000000..2980c8a8d --- /dev/null +++ b/roles/lib_openshift/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- name: lib_openshift ensure python-ruamel-yaml package is on target +  package: +    name: python-ruamel-yaml +    state: present diff --git a/roles/lib_utils/tasks/main.yml b/roles/lib_utils/tasks/main.yml new file mode 100644 index 000000000..8a350da88 --- /dev/null +++ b/roles/lib_utils/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- name: lib_utils ensure python-ruamel-yaml package is on target +  package: +    name: python-ruamel-yaml +    state: present diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml index e21397170..4efc77f11 100644 --- a/roles/openshift_ca/tasks/main.yml +++ b/roles/openshift_ca/tasks/main.yml @@ -41,10 +41,9 @@    run_once: true  - set_fact: -    master_ca_missing: "{{ true if openshift_certificates_redeploy | default(false) | bool -                           else False in (g_master_ca_stat_result.results -                                         | oo_collect(attribute='stat.exists') -                                         | list) }}" +    master_ca_missing: "{{ False in (g_master_ca_stat_result.results +                                     | oo_collect(attribute='stat.exists') +                                     | list) }}"    run_once: true  - name: Retain original serviceaccount keys @@ -61,7 +60,6 @@    copy:      src: "{{ item.src }}"      dest: "{{ openshift_ca_config_dir }}/{{ item.dest }}" -    force: "{{ true if openshift_certificates_redeploy_ca | default(false) | bool else false }}"    with_items:    - src: "{{ (openshift_master_ca_certificate | default({'certfile':none})).certfile }}"      dest: ca.crt @@ -73,25 +71,35 @@  - name: Create ca serial    copy: -    content: "1" +    content: "00"      dest: "{{ openshift_ca_config_dir }}/ca.serial.txt" -    force: "{{ true if openshift_certificates_redeploy | default(false) | bool else false }}" +    force: "{{ openshift_certificates_redeploy | default(false) | bool }}"    when: openshift_master_ca_certificate is defined    delegate_to: "{{ openshift_ca_host }}"    run_once: true +- find: +    paths: "{{ openshift.common.config_base }}/master/legacy-ca/" +    patterns: ".*-ca.crt" +    use_regex: true +  register: g_master_legacy_ca_result + +# This should NOT replace the CA due to --overwrite=false when a CA already exists.  
- name: Create the master certificates if they do not already exist    command: >      {{ openshift.common.client_binary }} adm create-master-certs      {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}      --certificate-authority {{ named_ca_certificate }}      {% endfor %} +    {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %} +    --certificate-authority {{ legacy_ca_certificate }} +    {% endfor %}      --hostnames={{ openshift.common.all_hostnames | join(',') }}      --master={{ openshift.master.api_url }}      --public-master={{ openshift.master.public_api_url }}      --cert-dir={{ openshift_ca_config_dir }}      --overwrite=false -  when: master_ca_missing | bool +  when: master_ca_missing | bool or openshift_certificates_redeploy | default(false) | bool    delegate_to: "{{ openshift_ca_host }}"    run_once: true diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 7c61da950..e72ab26fc 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -2027,7 +2027,7 @@ class OpenShiftFacts(object):          if 'docker' in roles:              docker = dict(disable_push_dockerhub=False, -                          options='--log-driver=json-file --log-opt max-size=50m') +                          options='--log-driver=journald')              # NOTE: This is a workaround for a dnf output racecondition that can occur in              # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184              if self.system_facts['ansible_pkg_mgr'] == 'dnf': diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 856cfa2b9..8651e06e7 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -36,6 +36,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log  - `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'.  - `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified.  - `openshift_logging_curator_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the curator pod will land. +- `openshift_logging_image_pull_secret`: The name of an existing pull secret to link to the logging service accounts  - `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'.  - `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified. 
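The `openshift_logging_image_pull_secret` variable documented in the README hunk above is consumed by the new "Link Pull Secrets With Service Accounts" loop added to install_logging.yaml further down, which includes oc_secret.yaml once per logging service account. A rough, self-contained sketch of what that amounts to — the secret name, namespace and kubeconfig path are placeholders, and it assumes the stock `oc secrets link` subcommand:

```yaml
# Sketch only: "registry-pull-secret", the "logging" namespace and the
# kubeconfig path are illustrative; the secret must already exist there.
- name: Link an existing pull secret to each logging service account
  command: >
    oc --config=/tmp/admin.kubeconfig
    secrets link {{ sa_account }} registry-pull-secret --for=pull
    -n logging
  with_items:
  - default
  - aggregated-logging-elasticsearch
  - aggregated-logging-kibana
  - aggregated-logging-fluentd
  - aggregated-logging-curator
  loop_control:
    loop_var: sa_account
```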
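Most of the logging-role hunks below do one thing: they add an explicit `| bool` cast to `openshift_logging_use_ops` conditionals. Inventory values usually arrive as strings, and in a Jinja2 expression a non-empty string such as `"false"` is truthy, so a compound `when:` test can fire even though the operator set the flag to false. A minimal demonstration, with a made-up `use_ops` variable standing in for the real flag:

```yaml
---
# Minimal sketch: `use_ops` arrives as the string "false", as it would from
# an INI inventory or `-e use_ops=false` on the command line.
- hosts: localhost
  gather_facts: no
  vars:
    use_ops: "false"
  tasks:
  - name: Show the difference between a raw truth test and an explicit cast
    debug:
      msg: "raw: {{ true if use_ops else false }}, cast: {{ use_ops | bool }}"
    # prints "raw: True, cast: False" - only the cast reflects the intent

  - name: Would still run, because the non-empty string is truthy
    debug:
      msg: "ops path taken by mistake"
    when: inventory_hostname == 'localhost' and use_ops

  - name: Runs only when the value really means true
    debug:
      msg: "ops path taken intentionally"
    when: inventory_hostname == 'localhost' and use_ops | bool
```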
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml index 25877ebff..60694f67e 100644 --- a/roles/openshift_logging/tasks/generate_routes.yaml +++ b/roles/openshift_logging/tasks/generate_routes.yaml @@ -17,5 +17,5 @@      - {name: logging-kibana-ops, host: "{{openshift_logging_kibana_ops_hostname}}"}    loop_control:      loop_var: route_info -  when: (route_info.name == 'logging-kibana-ops' and openshift_logging_use_ops) or route_info.name == 'logging-kibana' +  when: (route_info.name == 'logging-kibana-ops' and openshift_logging_use_ops | bool) or route_info.name == 'logging-kibana'    changed_when: no diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml index 8eaac76c4..5091c1209 100644 --- a/roles/openshift_logging/tasks/generate_services.yaml +++ b/roles/openshift_logging/tasks/generate_services.yaml @@ -52,7 +52,7 @@      selector:        provider: openshift        component: es-ops -  when: openshift_logging_use_ops +  when: openshift_logging_use_ops | bool    check_mode: no    changed_when: no @@ -67,7 +67,7 @@      selector:        provider: openshift        component: es-ops -  when: openshift_logging_use_ops +  when: openshift_logging_use_ops | bool    check_mode: no    changed_when: no @@ -82,6 +82,6 @@      selector:        provider: openshift        component: kibana-ops -  when: openshift_logging_use_ops +  when: openshift_logging_use_ops | bool    check_mode: no    changed_when: no diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml index fcfce4e1e..5b474ff39 100644 --- a/roles/openshift_logging/tasks/install_curator.yaml +++ b/roles/openshift_logging/tasks/install_curator.yaml @@ -15,7 +15,7 @@    register: curator_ops_replica_count    when:      - not ansible_check_mode -    - openshift_logging_use_ops +    - openshift_logging_use_ops | bool    ignore_errors: yes    changed_when: no @@ -48,6 +48,6 @@      curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}"      replicas: "{{curator_ops_replica_count.stdout | default (0)}}"      curator_node_selector: "{{openshift_logging_curator_ops_nodeselector | default({}) }}" -  when: openshift_logging_use_ops +  when: openshift_logging_use_ops | bool    check_mode: no    changed_when: no diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml index 64e983557..1d6e55e44 100644 --- a/roles/openshift_logging/tasks/install_elasticsearch.yaml +++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml @@ -56,7 +56,7 @@      es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}"      cluster_size: "{{openshift_logging_es_ops_cluster_size}}"    when: -    - openshift_logging_use_ops +    - openshift_logging_use_ops | bool      - "{{es_dcs | length - openshift_logging_es_ops_cluster_size | abs > 1}}"    check_mode: no @@ -71,7 +71,7 @@      openshift_logging_es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic}}"      openshift_logging_es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"    when: -    - openshift_logging_use_ops +    - openshift_logging_use_ops | bool    check_mode: no  - name: Init pool of DeploymentConfig names for Elasticsearch Ops @@ -80,7 +80,7 @@    loop_control:      loop_var: deploy_name    when: -    - openshift_logging_use_ops +    - openshift_logging_use_ops | bool  - name: Create new 
DeploymentConfig names for Elasticsearch Ops    set_fact: es_ops_dc_pool={{es_ops_dc_pool | default([]) + [deploy_name]}} @@ -92,7 +92,7 @@      cluster_size: "{{openshift_logging_es_ops_cluster_size}}"    with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_current_es_ops_size | int }}    when: -    - openshift_logging_use_ops +    - openshift_logging_use_ops | bool    check_mode: no  - name: Generate Elasticsearch DeploymentConfig for Ops @@ -116,6 +116,6 @@    with_indexed_items:      - "{{ es_ops_dc_pool | default([]) }}"    when: -    - openshift_logging_use_ops +    - openshift_logging_use_ops | bool    check_mode: no    changed_when: no diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml index f4df7de0c..3aeff2cac 100644 --- a/roles/openshift_logging/tasks/install_kibana.yaml +++ b/roles/openshift_logging/tasks/install_kibana.yaml @@ -15,7 +15,7 @@    register: kibana_ops_replica_count    when:      - not ansible_check_mode -    - openshift_logging_use_ops +    - openshift_logging_use_ops | bool    ignore_errors: yes    changed_when: no @@ -55,6 +55,6 @@      kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}"      replicas: "{{kibana_ops_replica_count.stdout | default (0)}}"      kibana_node_selector: "{{openshift_logging_kibana_ops_nodeselector | default({}) }}" -  when: openshift_logging_use_ops +  when: openshift_logging_use_ops | bool    check_mode: no    changed_when: no diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 00c79ee5e..d52429f03 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -57,6 +57,28 @@      loop_var: file    when: ansible_check_mode +  # TODO replace task with oc_secret module that supports +  # linking when available +- name: Link Pull Secrets With Service Accounts +  include: oc_secret.yaml +  vars: +    kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" +    subcommand: link +    service_account: "{{sa_account}}" +    secret_name: "{{openshift_logging_image_pull_secret}}" +    add_args: "--for=pull" +  with_items: +    - default +    - aggregated-logging-elasticsearch +    - aggregated-logging-kibana +    - aggregated-logging-fluentd +    - aggregated-logging-curator +  register: link_pull_secret +  loop_control: +    loop_var: sa_account +  when: openshift_logging_image_pull_secret is defined +  failed_when: link_pull_secret.rc != 0 +  - name: Scaling up cluster    include: start_cluster.yaml    when: start_cluster | default(true) | bool diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 36fb827c3..4c718805e 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -12,10 +12,6 @@  - debug: msg="Created temp dir {{mktemp.stdout}}" -- name: Ensuring ruamel.yaml package is on target -  command: yum install -y ruamel.yaml -  check_mode: no -  - name: Copy the admin client config(s)    command: >      cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml index c362b7fca..cb9509de1 100644 --- a/roles/openshift_logging/tasks/oc_apply.yaml +++ b/roles/openshift_logging/tasks/oc_apply.yaml @@ -1,12 +1,13 @@  ---  - name: Checking generation of 
{{file_content.kind}} {{file_content.metadata.name}} -  shell: > +  command: >      {{ openshift.common.client_binary }}      --config={{ kubeconfig }}      get {{file_content.kind}} {{file_content.metadata.name}}      -o jsonpath='{.metadata.resourceVersion}' -    -n {{namespace}} || echo 0 +    -n {{namespace}}    register: generation_init +  failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"    changed_when: no  - name: Applying {{file_name}} @@ -19,11 +20,33 @@    changed_when: no  - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}} -  shell: > +  command: >      {{ openshift.common.client_binary }} --config={{ kubeconfig }}      get {{file_content.kind}} {{file_content.metadata.name}}      -o jsonpath='{.metadata.resourceVersion}' -    -n {{namespace}} || echo 0 +    -n {{namespace}}    register: generation_changed -  failed_when: "'error' in generation_changed.stderr" -  changed_when: generation_changed.stdout | int  > generation_init.stdout | int +  failed_when: "'not found' not in generation_changed.stderr and generation_changed.stdout == ''" +  changed_when: generation_changed.stdout | default (0) | int  > generation_init.stdout | default(0) | int +  when: +    - "'field is immutable' not in generation_apply.stderr" + +- name: Removing previous {{file_name}} +  command: > +    {{ openshift.common.client_binary }} --config={{ kubeconfig }} +    delete -f {{ file_name }} +    -n {{ namespace }} +  register: generation_delete +  failed_when: "'error' in generation_delete.stderr" +  changed_when: generation_delete.rc == 0 +  when: "'field is immutable' in generation_apply.stderr" + +- name: Recreating {{file_name}} +  command: > +    {{ openshift.common.client_binary }} --config={{ kubeconfig }} +    apply -f {{ file_name }} +    -n {{ namespace }} +  register: generation_apply +  failed_when: "'error' in generation_apply.stderr" +  changed_when: generation_apply.rc == 0 +  when: "'field is immutable' in generation_apply.stderr" diff --git a/roles/openshift_logging/tasks/oc_secret.yaml b/roles/openshift_logging/tasks/oc_secret.yaml new file mode 100644 index 000000000..de37e4f6d --- /dev/null +++ b/roles/openshift_logging/tasks/oc_secret.yaml @@ -0,0 +1,7 @@ +--- +- command: > +    {{ openshift.common.client_binary }} +    --config={{ kubeconfig }} +    secret {{subcommand}} {{service_account}} {{secret_name}} +    {{add_args}} +    -n {{openshift_logging_namespace}} diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml index 07489ae79..69d2b2b6b 100644 --- a/roles/openshift_logging/tasks/start_cluster.yaml +++ b/roles/openshift_logging/tasks/start_cluster.yaml @@ -86,7 +86,7 @@    with_items: "{{es_dc.stdout_lines}}"    loop_control:      loop_var: object -  when: openshift_logging_use_ops +  when: openshift_logging_use_ops | bool  - command: >      {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}} @@ -104,7 +104,7 @@    with_items: "{{kibana_dc.stdout_lines}}"    loop_control:      loop_var: object -  when: openshift_logging_use_ops +  when: openshift_logging_use_ops | bool  - command: >      {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}} @@ -122,4 +122,4 @@    with_items: "{{curator_dc.stdout_lines}}"    loop_control:      loop_var: object -  
when: openshift_logging_use_ops +  when: openshift_logging_use_ops | bool diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml index 8e0df8344..7826efabe 100644 --- a/roles/openshift_logging/tasks/stop_cluster.yaml +++ b/roles/openshift_logging/tasks/stop_cluster.yaml @@ -81,7 +81,7 @@    with_items: "{{es_dc.stdout_lines}}"    loop_control:      loop_var: object -  when: openshift_logging_use_ops +  when: openshift_logging_use_ops | bool  - command: >      {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}} @@ -98,7 +98,7 @@    with_items: "{{kibana_dc.stdout_lines}}"    loop_control:      loop_var: object -  when: openshift_logging_use_ops +  when: openshift_logging_use_ops | bool  - command: >      {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}} @@ -115,4 +115,4 @@    with_items: "{{curator_dc.stdout_lines}}"    loop_control:      loop_var: object -  when: openshift_logging_use_ops +  when: openshift_logging_use_ops | bool diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index cf7ceacff..9ae54dac1 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -102,7 +102,11 @@ imagePolicyConfig:{{ openshift.master.image_policy_config | to_padded_yaml(level  kind: MasterConfig  kubeletClientInfo:  {# TODO: allow user specified kubelet port #} +{% if openshift.common.version_gte_3_2_or_1_2 | bool %} +  ca: ca-bundle.crt +{% else %}    ca: ca.crt +{% endif %}    certFile: master.kubelet-client.crt    keyFile: master.kubelet-client.key    port: 10250 @@ -221,7 +225,11 @@ servingInfo:    bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }}    bindNetwork: tcp4    certFile: master.server.crt +{% if openshift.common.version_gte_3_2_or_1_2 | bool %} +  clientCA: ca-bundle.crt +{% else %}    clientCA: ca.crt +{% endif %}    keyFile: master.server.key    maxRequestsInFlight: {{ openshift.master.max_requests_inflight }}    requestTimeoutSeconds: 3600 diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 4620dd877..7a5ed51ec 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -38,12 +38,22 @@    when: master_certs_missing | bool and inventory_hostname != openshift_ca_host    delegate_to: "{{ openshift_ca_host }}" +- find: +    paths: "{{ openshift_master_config_dir }}/legacy-ca/" +    patterns: ".*-ca.crt" +    use_regex: true +  register: g_master_legacy_ca_result +  delegate_to: "{{ openshift_ca_host }}" +  - name: Create the master server certificate    command: >      {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert      {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}      --certificate-authority {{ named_ca_certificate }}      {% endfor %} +    {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %} +    --certificate-authority {{ legacy_ca_certificate }} +    {% endfor %}      --hostnames={{ hostvars[item].openshift.common.all_hostnames | join(',') }}      --cert={{ openshift_generated_configs_dir 
}}/master-{{ hostvars[item].openshift.common.hostname }}/master.server.crt      --key={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/master.server.key diff --git a/roles/openshift_metrics/tasks/install_support.yaml b/roles/openshift_metrics/tasks/install_support.yaml index cc5acc6e5..5cefb273d 100644 --- a/roles/openshift_metrics/tasks/install_support.yaml +++ b/roles/openshift_metrics/tasks/install_support.yaml @@ -9,7 +9,7 @@    when: htpasswd_check.rc  == 1  - name: Check control node to see if keytool is installed -  local_action: command which htpasswd +  local_action: command which keytool    register: keytool_check    failed_when: no    changed_when: no diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml index a74668b13..1aa826c09 100644 --- a/roles/openshift_node_certificates/handlers/main.yml +++ b/roles/openshift_node_certificates/handlers/main.yml @@ -8,3 +8,4 @@    systemd:      name: docker      state: restarted +  when: not openshift_certificates_redeploy | default(false) | bool diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml index a263f4f3a..4cb89aba2 100644 --- a/roles/openshift_node_certificates/tasks/main.yml +++ b/roles/openshift_node_certificates/tasks/main.yml @@ -42,20 +42,30 @@    when: node_certs_missing | bool    delegate_to: "{{ openshift_ca_host }}" +- find: +    paths: "{{ openshift.common.config_base }}/master/legacy-ca/" +    patterns: ".*-ca.crt" +    use_regex: true +  register: g_master_legacy_ca_result +  delegate_to: "{{ openshift_ca_host }}" +  - name: Generate the node client config    command: >      {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config -      {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | oo_collect('cafile') %} -      --certificate-authority {{ named_ca_certificate }} -      {% endfor %} -      --certificate-authority={{ openshift_ca_cert }} -      --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }} -      --groups=system:nodes -      --master={{ hostvars[openshift_ca_host].openshift.master.api_url }} -      --signer-cert={{ openshift_ca_cert }} -      --signer-key={{ openshift_ca_key }} -      --signer-serial={{ openshift_ca_serial }} -      --user=system:node:{{ hostvars[item].openshift.common.hostname }} +    {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | oo_collect('cafile') %} +    --certificate-authority {{ named_ca_certificate }} +    {% endfor %} +    {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %} +    --certificate-authority {{ legacy_ca_certificate }} +    {% endfor %} +    --certificate-authority={{ openshift_ca_cert }} +    --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }} +    --groups=system:nodes +    --master={{ hostvars[openshift_ca_host].openshift.master.api_url }} +    --signer-cert={{ openshift_ca_cert }} +    --signer-key={{ openshift_ca_key }} +    --signer-serial={{ openshift_ca_serial }} +    --user=system:node:{{ hostvars[item].openshift.common.hostname }}    args:      creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}"    with_items: "{{ hostvars diff --git 
a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md new file mode 100644 index 000000000..e21bee412 --- /dev/null +++ b/roles/openshift_node_upgrade/README.md @@ -0,0 +1,108 @@ +OpenShift/Atomic Enterprise Node upgrade +========= + +Role responsible for a single node upgrade. +It is expected a node is functioning and a part of an OpenShift cluster. + +Requirements +------------ + +TODO + +Role Variables +-------------- +From this role: + +| Name                           | Default value         |                                                        | +|--------------------------------|-----------------------|--------------------------------------------------------| +| deployment_type                |                       | Inventory var                                          | +| docker_upgrade_nuke_images     |                       | Optional inventory var                                 | +| docker_version                 |                       | Optional inventory var                                 | +| l_docker_upgrade               |                       |                                                        | +| node_config_hook               |                       |                                                        | +| openshift.docker.gte_1_10      |                       |                                                        | +| openshift_image_tag            |                       | Set by openshift_version role                          | +| openshift_pkg_version          |                       | Set by openshift_version role                          | +| openshift_release              |                       | Set by openshift_version role                          | +| skip_docker_restart            |                       |                                                        | +| openshift_cloudprovider_kind   |                       |                                                        | + +From openshift.common: + +| Name                               |  Default Value      |                     | +|------------------------------------|---------------------|---------------------| +| openshift.common.config_base       |---------------------|---------------------| +| openshift.common.data_dir          |---------------------|---------------------| +| openshift.common.hostname          |---------------------|---------------------| +| openshift.common.http_proxy        |---------------------|---------------------| +| openshift.common.is_atomic         |---------------------|---------------------| +| openshift.common.is_containerized  |---------------------|---------------------| +| openshift.common.portal_net        |---------------------|---------------------| +| openshift.common.service_type      |---------------------|---------------------| +| openshift.common.use_openshift_sdn |---------------------|---------------------| + +From openshift.master: + +| Name                               |  Default Value      |                     | +|------------------------------------|---------------------|---------------------| +| openshift.master.api_port          |---------------------|---------------------| + +From openshift.node: + +| Name                               |  Default Value      |                     | +|------------------------------------|---------------------|---------------------| +| openshift.node.debug_level         |---------------------|---------------------| +| openshift.node.node_image          
|---------------------|---------------------| +| openshift.node.ovs_image           |---------------------|---------------------| + + +Dependencies +------------ +openshift_common + +TODO + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + +``` +--- +- name: Upgrade nodes +  hosts: oo_nodes_to_upgrade +  serial: 1 +  any_errors_fatal: true + +  pre_tasks: +  - name: Mark unschedulable +    command: > +      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false +    delegate_to: "{{ groups.oo_first_master.0 }}" + +  - name: Drain Node for Kubelet upgrade +    command: > +      {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data +    delegate_to: "{{ groups.oo_first_master.0 }}" + +  roles: +  - openshift_facts +  - docker +  - openshift_node_upgrade + +  post_tasks: +  - name: Set node schedulability +    command: > +      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true +    delegate_to: "{{ groups.oo_first_master.0 }}" +``` + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +TODO diff --git a/roles/openshift_node_upgrade/files/nuke_images.sh b/roles/openshift_node_upgrade/files/nuke_images.sh new file mode 100644 index 000000000..8635eab0d --- /dev/null +++ b/roles/openshift_node_upgrade/files/nuke_images.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# Stop any running containers +running_container_ids=`docker ps -q` +if test -n "$running_container_ids" +then +    docker stop $running_container_ids +fi + +# Delete all containers +container_ids=`docker ps -a -q` +if test -n "$container_ids" +then +    docker rm -f -v $container_ids +fi + +# Delete all images (forcefully) +image_ids=`docker images -aq` +if test -n "$image_ids" +then +    # Some layers are deleted recursively and are no longer present +    # when docker goes to remove them: +    docker rmi -f `docker images -aq` || true +fi + diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml new file mode 100644 index 000000000..cb51416d4 --- /dev/null +++ b/roles/openshift_node_upgrade/handlers/main.yml @@ -0,0 +1,14 @@ +--- +- name: restart openvswitch +  systemd: name=openvswitch state=restarted +  when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool +  notify: +  - restart openvswitch pause + +- name: restart openvswitch pause +  pause: seconds=15 +  when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool + +- name: restart node +  systemd: name={{ openshift.common.service_type }}-node state=restarted +  when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool) diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml new file mode 100644 index 000000000..cd2f362aa --- /dev/null +++ b/roles/openshift_node_upgrade/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: +  author: your name +  description: OpenShift Node upgrade +  company: Red Hat, Inc. 
+  license: Apache License, Version 2.0 +  min_ansible_version: 2.1 +  platforms: +  - name: EL +    versions: +    - 7 +dependencies: +- role: openshift_common diff --git a/roles/openshift_node_upgrade/tasks/containerized_node_upgrade.yml b/roles/openshift_node_upgrade/tasks/containerized_node_upgrade.yml new file mode 100644 index 000000000..07b0ac715 --- /dev/null +++ b/roles/openshift_node_upgrade/tasks/containerized_node_upgrade.yml @@ -0,0 +1,14 @@ +--- +# This is a hack to allow us to use systemd_units.yml, but skip the handlers which +# restart services. We will unconditionally restart all containerized services +# because we have to unconditionally restart Docker: +- set_fact: +    skip_node_svc_handlers: True + +- name: Update systemd units +  include: systemd_units.yml + +# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of +# play when the node has already been marked schedulable again. (this would look strange +# in logs otherwise) +- meta: flush_handlers diff --git a/roles/openshift_node_upgrade/tasks/docker/restart.yml b/roles/openshift_node_upgrade/tasks/docker/restart.yml new file mode 100644 index 000000000..176fc3c0b --- /dev/null +++ b/roles/openshift_node_upgrade/tasks/docker/restart.yml @@ -0,0 +1,33 @@ +--- +# input variables: +# - openshift.common.service_type +# - openshift.common.is_containerized +# - openshift.common.hostname +# - openshift.master.api_port + +- name: Restart docker +  service: name=docker state=restarted + +- name: Update docker facts +  openshift_facts: +    role: docker + +- name: Restart containerized services +  service: name={{ item }} state=started +  with_items: +    - etcd_container +    - openvswitch +    - "{{ openshift.common.service_type }}-master" +    - "{{ openshift.common.service_type }}-master-api" +    - "{{ openshift.common.service_type }}-master-controllers" +    - "{{ openshift.common.service_type }}-node" +  failed_when: false +  when: openshift.common.is_containerized | bool + +- name: Wait for master API to come back online +  wait_for: +    host: "{{ openshift.common.hostname }}" +    state: started +    delay: 10 +    port: "{{ openshift.master.api_port }}" +  when: inventory_hostname in groups.oo_masters_to_config diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml new file mode 100644 index 000000000..e91891ca9 --- /dev/null +++ b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml @@ -0,0 +1,49 @@ +--- +# input variables: +# - openshift.common.service_type +# - openshift.common.is_containerized +# - docker_upgrade_nuke_images +# - docker_version +# - skip_docker_restart + +# We need docker service up to remove all the images, but these services will keep +# trying to re-start and thus re-pull the images we're trying to delete. 
+- name: Stop containerized services +  service: name={{ item }} state=stopped +  with_items: +    - "{{ openshift.common.service_type }}-master" +    - "{{ openshift.common.service_type }}-master-api" +    - "{{ openshift.common.service_type }}-master-controllers" +    - "{{ openshift.common.service_type }}-node" +    - etcd_container +    - openvswitch +  failed_when: false +  when: openshift.common.is_containerized | bool + +- name: Check Docker image count +  shell: "docker images -aq | wc -l" +  register: docker_image_count + +- debug: var=docker_image_count.stdout + +# TODO(jchaloup): put all docker_upgrade_nuke_images into a block with only one condition +- name: Remove all containers and images +  script: nuke_images.sh +  register: nuke_images_result +  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool + +- name: Check Docker image count +  shell: "docker images -aq | wc -l" +  register: docker_image_count +  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool + +- debug: var=docker_image_count.stdout +  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool + +- service: name=docker state=stopped + +- name: Upgrade Docker +  package: name=docker{{ '-' + docker_version }} state=present + +- include: restart.yml +  when: not skip_docker_restart | default(False) | bool diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml new file mode 100644 index 000000000..b1d5f0e0f --- /dev/null +++ b/roles/openshift_node_upgrade/tasks/main.yml @@ -0,0 +1,77 @@ +--- +# input variables: +# - l_docker_upgrade +# - openshift.common.is_atomic +# - node_config_hook +# - openshift_pkg_version +# - openshift.common.is_containerized +# - deployment_type +# - openshift_release + +# tasks file for openshift_node_upgrade +- include: docker/upgrade.yml +  vars: +    # We will restart Docker ourselves after everything is ready: +    skip_docker_restart: True +  when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool + +- include: "{{ node_config_hook }}" +  when: node_config_hook is defined + +- include: rpm_upgrade.yml +  vars: +    component: "node" +    openshift_version: "{{ openshift_pkg_version | default('') }}" +  when: not openshift.common.is_containerized | bool + +- name: Remove obsolete docker-sdn-ovs.conf +  file: path=/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf state=absent +  when: (deployment_type == 'openshift-enterprise' and openshift_release | version_compare('3.4', '>=')) or (deployment_type == 'origin' and openshift_release | version_compare('1.4', '>=')) + +- include: containerized_node_upgrade.yml +  when: openshift.common.is_containerized | bool + +- name: Ensure containerized services stopped before Docker restart +  service: name={{ item }} state=stopped +  with_items: +  - etcd_container +  - openvswitch +  - "{{ openshift.common.service_type }}-master" +  - "{{ openshift.common.service_type }}-master-api" +  - "{{ openshift.common.service_type }}-master-controllers" +  - "{{ openshift.common.service_type }}-node" +  failed_when: false +  when: openshift.common.is_containerized | bool + +- name: Upgrade openvswitch +  package: +    name: openvswitch +    state: latest +  register: ovs_pkg +  when: not openshift.common.is_containerized | bool + +- name: Restart openvswitch +  systemd: +    name: openvswitch +    state: restarted +  when: +  - not openshift.common.is_containerized | bool + 
 - ovs_pkg | changed + +# Mandatory Docker restart, ensure all containerized services are running: +- include: docker/restart.yml + +- name: Restart rpm node service +  service: name="{{ openshift.common.service_type }}-node" state=restarted +  when: not openshift.common.is_containerized | bool + +- name: Wait for node to be ready +  command: > +    {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }} --no-headers +  register: node_output +  delegate_to: "{{ groups.oo_first_master.0 }}" +  until: "{{ node_output.stdout.split()[1].startswith('Ready')}}" +  # Give the node two minutes to come back online. Note that we pre-pull images now +  # so containerized services should restart quickly as well. +  retries: 24 +  delay: 5 diff --git a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml b/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml new file mode 100644 index 000000000..480e87d58 --- /dev/null +++ b/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml @@ -0,0 +1,14 @@ +--- +# input variables: +# - openshift.common.service_type +# - component +# - openshift_pkg_version +# - openshift.common.is_atomic + +# We verified latest rpm available is suitable, so just yum update. +- name: Upgrade packages +  package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present" + +- name: Ensure python-yaml present for config upgrade +  package: name=PyYAML state=present +  when: not openshift.common.is_atomic | bool diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml new file mode 100644 index 000000000..862cd19c4 --- /dev/null +++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml @@ -0,0 +1,119 @@ +--- +# input variables +# - openshift.node.node_image +# - openshift_image_tag +# - openshift.common.is_containerized +# - openshift.node.ovs_image +# - openshift.common.use_openshift_sdn +# - openshift.common.service_type +# - openshift.node.debug_level +# - openshift.common.config_base +# - openshift.common.http_proxy +# - openshift.common.portal_net +# - openshift.common +# - openshift.common.http_proxy +# notify: +# - restart openvswitch +# - restart node + +# This file is included both in the openshift_master role and in the upgrade +# playbooks. 
+ +- name: Pre-pull node image +  command: > +    docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }} +  register: pull_result +  changed_when: "'Downloaded newer image' in pull_result.stdout" +  when: openshift.common.is_containerized | bool + +- name: Pre-pull openvswitch image +  command: > +    docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }} +  register: pull_result +  changed_when: "'Downloaded newer image' in pull_result.stdout" +  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool + +- name: Install Node dependencies docker service file +  template: +    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service" +    src: openshift.docker.node.dep.service +  register: install_node_dep_result +  when: openshift.common.is_containerized | bool + +- name: Install Node docker service file +  template: +    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" +    src: openshift.docker.node.service +  register: install_node_result +  when: openshift.common.is_containerized | bool + +- name: Create the openvswitch service env file +  template: +    src: openvswitch.sysconfig.j2 +    dest: /etc/sysconfig/openvswitch +  when: openshift.common.is_containerized | bool +  register: install_ovs_sysconfig +  notify: +  - restart openvswitch + +# May be a temporary workaround. +# https://bugzilla.redhat.com/show_bug.cgi?id=1331590 +- name: Create OpenvSwitch service.d directory +  file: path=/etc/systemd/system/openvswitch.service.d/ state=directory +  when: openshift.common.use_openshift_sdn | default(true) | bool + +- name: Install OpenvSwitch service OOM fix +  template: +    dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf" +    src: openvswitch-avoid-oom.conf +  when: openshift.common.use_openshift_sdn | default(true) | bool +  register: install_oom_fix_result +  notify: +  - restart openvswitch + +- name: Install OpenvSwitch docker service file +  template: +    dest: "/etc/systemd/system/openvswitch.service" +    src: openvswitch.docker.service +  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | default(true) | bool +  notify: +  - restart openvswitch + +- name: Configure Node settings +  lineinfile: +    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node +    regexp: "{{ item.regex }}" +    line: "{{ item.line }}" +    create: true +  with_items: +  - regex: '^OPTIONS=' +    line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}" +  - regex: '^CONFIG_FILE=' +    line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml" +  - regex: '^IMAGE_VERSION=' +    line: "IMAGE_VERSION={{ openshift_image_tag }}" +  notify: +  - restart node + +- name: Configure Proxy Settings +  lineinfile: +    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node +    regexp: "{{ item.regex }}" +    line: "{{ item.line }}" +    create: true +  with_items: +  - regex: '^HTTP_PROXY=' +    line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}" +  - regex: '^HTTPS_PROXY=' +    line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}" +  - regex: '^NO_PROXY=' +    line: "NO_PROXY={{ openshift.common.no_proxy | default([]) }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}" +  when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '') +  notify: +  - restart node + +- name: 
Reload systemd units +  command: systemctl daemon-reload +  when: (openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed)) or install_oom_fix_result | changed +  notify: +  - restart node diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service new file mode 100644 index 000000000..0fb34cffd --- /dev/null +++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service @@ -0,0 +1,11 @@ +[Unit] +Requires=docker.service +After=docker.service +PartOf={{ openshift.common.service_type }}-node.service +Before={{ openshift.common.service_type }}-node.service + + +[Service] +ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" +ExecStop= +SyslogIdentifier={{ openshift.common.service_type }}-node-dep diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service new file mode 100644 index 000000000..e33d5d497 --- /dev/null +++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service @@ -0,0 +1,26 @@ +[Unit] +After={{ openshift.common.service_type }}-master.service +After=docker.service +After=openvswitch.service +PartOf=docker.service +Requires=docker.service +{% if openshift.common.use_openshift_sdn %} +Requires=openvswitch.service +{% endif %} +Wants={{ openshift.common.service_type }}-master.service +Requires={{ openshift.common.service_type }}-node-dep.service +After={{ openshift.common.service_type }}-node-dep.service + +[Service] +EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node +EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep +ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node +ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION} +ExecStartPost=/usr/bin/sleep 10 +ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node +SyslogIdentifier={{ openshift.common.service_type 
}}-node +Restart=always +RestartSec=5s + +[Install] +WantedBy=docker.service diff --git a/roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf b/roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf new file mode 100644 index 000000000..3229bc56b --- /dev/null +++ b/roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf @@ -0,0 +1,3 @@ +# Avoid the OOM killer for openvswitch and it's children: +[Service] +OOMScoreAdjust=-1000 diff --git a/roles/openshift_node_upgrade/templates/openvswitch.docker.service b/roles/openshift_node_upgrade/templates/openvswitch.docker.service new file mode 100644 index 000000000..1e1f8967d --- /dev/null +++ b/roles/openshift_node_upgrade/templates/openvswitch.docker.service @@ -0,0 +1,17 @@ +[Unit] +After=docker.service +Requires=docker.service +PartOf=docker.service + +[Service] +EnvironmentFile=/etc/sysconfig/openvswitch +ExecStartPre=-/usr/bin/docker rm -f openvswitch +ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ openshift.node.ovs_image }}:${IMAGE_VERSION} +ExecStartPost=/usr/bin/sleep 5 +ExecStop=/usr/bin/docker stop openvswitch +SyslogIdentifier=openvswitch +Restart=always +RestartSec=5s + +[Install] +WantedBy=docker.service diff --git a/roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2 b/roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2 new file mode 100644 index 000000000..da7c3742a --- /dev/null +++ b/roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2 @@ -0,0 +1 @@ +IMAGE_VERSION={{ openshift_image_tag }}
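One behavioural change worth calling out from the oc_apply.yaml hunk earlier in this diff: instead of masking lookup failures with `|| echo 0`, the generation checks now rely on `failed_when`, and objects whose update is rejected with "field is immutable" are deleted and re-applied. A stripped-down sketch of that recovery path, with placeholder kubeconfig, namespace and file names:

```yaml
# Sketch only: the kubeconfig path, namespace and file name are placeholders.
- name: Apply the object definition
  command: >
    oc --config=/tmp/admin.kubeconfig apply -f /tmp/es-dc.yaml -n logging
  register: generation_apply
  # Tolerate the immutable-field rejection here; it is handled below.
  failed_when: generation_apply.rc != 0 and 'field is immutable' not in generation_apply.stderr

- name: Delete the object when the update was rejected as immutable
  command: >
    oc --config=/tmp/admin.kubeconfig delete -f /tmp/es-dc.yaml -n logging
  when: "'field is immutable' in generation_apply.stderr"

- name: Recreate it from the same definition
  command: >
    oc --config=/tmp/admin.kubeconfig apply -f /tmp/es-dc.yaml -n logging
  when: "'field is immutable' in generation_apply.stderr"
```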
