From afa4fd5799b0ff43d625d061e4c2bde66b5fb86a Mon Sep 17 00:00:00 2001
From: Michael Gugino <mgugino@redhat.com>
Date: Thu, 9 Nov 2017 17:00:00 -0500
Subject: Combine openshift_node and openshift_node_upgrade

Currently, having openshift_node and openshift_node_upgrade
as two distinct roles has created duplication across
handlers, templates, and some tasks.

This commit combines the roles to reduce duplication
and to prevent bugs caused by updating one role but not the other.
---
 .../config/install-node-docker-service-file.yml    |   8 +
 roles/openshift_node/tasks/docker/upgrade.yml      |  40 +++++
 roles/openshift_node/tasks/upgrade.yml             | 183 +++++++++++++++++++++
 .../tasks/upgrade/containerized_node_upgrade.yml   |  14 ++
 roles/openshift_node/tasks/upgrade/restart.yml     |  46 ++++++
 roles/openshift_node/tasks/upgrade/rpm_upgrade.yml |  29 ++++
 6 files changed, 320 insertions(+)
 create mode 100644 roles/openshift_node/tasks/config/install-node-docker-service-file.yml
 create mode 100644 roles/openshift_node/tasks/docker/upgrade.yml
 create mode 100644 roles/openshift_node/tasks/upgrade.yml
 create mode 100644 roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
 create mode 100644 roles/openshift_node/tasks/upgrade/restart.yml
 create mode 100644 roles/openshift_node/tasks/upgrade/rpm_upgrade.yml

(limited to 'roles/openshift_node/tasks')

diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
new file mode 100644
index 000000000..f92ff79b5
--- /dev/null
+++ b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
@@ -0,0 +1,10 @@
+---
+# Lay down the systemd unit file for the Docker-based node service;
+# a change notifies handlers to reload systemd and restart the node.
+- name: Install Node docker service file
+  template:
+    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+    src: openshift.docker.node.service
+  notify:
+  - reload systemd units
+  - restart node
diff --git a/roles/openshift_node/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml
new file mode 100644
index 000000000..ebe87d6fd
--- /dev/null
+++ b/roles/openshift_node/tasks/docker/upgrade.yml
@@ -0,0 +1,40 @@
+---
+# input variables:
+# - openshift.common.service_type
+# - openshift.common.is_containerized
+# - docker_upgrade_nuke_images
+# - docker_version
+# - skip_docker_restart
+
+- name: Check Docker image count
+  shell: "docker images -aq | wc -l"
+  register: docker_image_count
+
+- debug: var=docker_image_count.stdout
+
+- block:
+
+  - name: Remove all containers and images
+    script: nuke_images.sh
+    register: nuke_images_result
+
+  - name: Check Docker image count
+    shell: "docker images -aq | wc -l"
+    register: docker_image_count
+
+  - debug: var=docker_image_count.stdout
+
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- service:
+    name: docker
+    state: stopped
+  register: l_openshift_node_upgrade_docker_stop_result
+  until: not l_openshift_node_upgrade_docker_stop_result | failed
+  retries: 3
+  delay: 30
+
+- name: Upgrade Docker
+  package: name=docker{{ '-' + docker_version }} state=present
+
+# Docker is started again by upgrade/restart.yml, included at the end of upgrade.yml
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
new file mode 100644
index 000000000..2bca1e974
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -0,0 +1,183 @@
+---
+# input variables:
+# - l_docker_upgrade
+# - openshift.common.is_atomic
+# - node_config_hook
+# - openshift_pkg_version
+# - openshift.common.is_containerized
+# - deployment_type
+# - openshift_release
+
+# Upgrade tasks for the openshift_node role (absorbed from openshift_node_upgrade)
+
+- include: registry_auth.yml
+
+- name: Stop node and openvswitch services
+  service:
+    name: "{{ item }}"
+    state: stopped
+  with_items:
+  - "{{ openshift.common.service_type }}-node"
+  - openvswitch
+  failed_when: false
+
+- name: Stop additional containerized services
+  service:
+    name: "{{ item }}"
+    state: stopped
+  with_items:
+  - "{{ openshift.common.service_type }}-master-controllers"
+  - "{{ openshift.common.service_type }}-master-api"
+  - etcd_container
+  failed_when: false
+  when: openshift.common.is_containerized | bool
+
+- name: Pre-pull node image
+  command: >
+    docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+  register: pull_result
+  changed_when: "'Downloaded newer image' in pull_result.stdout"
+  when: openshift.common.is_containerized | bool
+
+- name: Pre-pull openvswitch image
+  command: >
+    docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+  register: pull_result
+  changed_when: "'Downloaded newer image' in pull_result.stdout"
+  when:
+  - openshift.common.is_containerized | bool
+  - openshift_use_openshift_sdn | default(true) | bool
+
+- include: docker/upgrade.yml
+  vars:
+    # We will restart Docker ourselves after everything is ready:
+    skip_docker_restart: true
+  when:
+  - l_docker_upgrade is defined
+  - l_docker_upgrade | bool
+
+- include: "{{ node_config_hook }}"
+  when: node_config_hook is defined
+
+- include: upgrade/rpm_upgrade.yml
+  vars:
+    component: "node"
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  when: not openshift.common.is_containerized | bool
+
+- name: Remove obsolete docker-sdn-ovs.conf
+  file:
+    path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
+    state: absent
+
+- include: upgrade/containerized_node_upgrade.yml
+  when: openshift.common.is_containerized | bool
+
+- name: Ensure containerized services stopped before Docker restart
+  service:
+    name: "{{ item }}"
+    state: stopped
+  with_items:
+  - etcd_container
+  - openvswitch
+  - "{{ openshift.common.service_type }}-master-api"
+  - "{{ openshift.common.service_type }}-master-controllers"
+  - "{{ openshift.common.service_type }}-node"
+  failed_when: false
+  when: openshift.common.is_containerized | bool
+
+- name: Stop rpm based services
+  service:
+    name: "{{ item }}"
+    state: stopped
+  with_items:
+  - "{{ openshift.common.service_type }}-node"
+  - openvswitch
+  failed_when: false
+  when: not openshift.common.is_containerized | bool
+
+# https://bugzilla.redhat.com/show_bug.cgi?id=1513054
+- name: Clean up dockershim data
+  file:
+    path: "/var/lib/dockershim/sandbox/"
+    state: absent
+
+- name: Upgrade openvswitch
+  package:
+    name: openvswitch
+    state: latest
+  when: not openshift.common.is_containerized | bool
+
+- name: Update oreg value
+  yedit:
+    src: "{{ openshift.common.config_base }}/node/node-config.yaml"
+    key: 'imageConfig.format'
+    value: "{{ oreg_url | default(oreg_url_node) }}"
+  when: oreg_url is defined or oreg_url_node is defined
+
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+- name: Check for swap usage
+  command: grep "^[^#].*swap" /etc/fstab
+  # grep: match any lines which don't begin with '#' and contain 'swap'
+  changed_when: false
+  failed_when: false
+  register: swap_result
+
+  # Disable Swap Block
+- block:
+
+  - name: Disable swap
+    command: swapoff --all
+
+  - name: Remove swap entries from /etc/fstab
+    replace:
+      dest: /etc/fstab
+      regexp: '(^[^#].*swap.*)'
+      replace: '# \1'
+      backup: yes
+
+  - name: Add notice about disabling swap
+    lineinfile:
+      dest: /etc/fstab
+      line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
+      state: present
+
+  when:
+  - swap_result.stdout_lines | length > 0
+  - openshift_disable_swap | default(true) | bool
+  # End Disable Swap Block
+
+- name: Reset selinux context
+  command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
+  when:
+  - ansible_selinux is defined
+  - ansible_selinux.status == 'enabled'
+
+- name: Apply 3.6 dns config changes
+  yedit:
+    src: /etc/origin/node/node-config.yaml
+    key: "{{ item.key }}"
+    value: "{{ item.value }}"
+  with_items:
+  - key: "dnsBindAddress"
+    value: "127.0.0.1:53"
+  - key: "dnsRecursiveResolvConf"
+    value: "/etc/origin/node/resolv.conf"
+
+# Restart all services
+- include: upgrade/restart.yml
+
+- name: Wait for node to be ready
+  oc_obj:
+    state: list
+    kind: node
+    name: "{{ openshift.common.hostname | lower }}"
+  register: node_output
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+  until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool
+  # Give the node two minutes to come back online.
+  retries: 24
+  delay: 5
+
+- include_role:
+    name: openshift_node_dnsmasq
diff --git a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
new file mode 100644
index 000000000..96b94d8b6
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
@@ -0,0 +1,14 @@
+---
+# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
+# restart services. We will unconditionally restart all containerized services
+# because we have to unconditionally restart Docker:
+- set_fact:
+    skip_node_svc_handlers: true
+
+- name: Update systemd units
+  include: ../systemd_units.yml
+
+# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
+# play when the node has already been marked schedulable again. (this would look strange
+# in logs otherwise)
+- meta: flush_handlers
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
new file mode 100644
index 000000000..a4fa51172
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -0,0 +1,48 @@
+---
+# input variables:
+# - openshift.common.service_type
+# - openshift.common.is_containerized
+# - openshift.common.hostname
+# - openshift.master.api_port
+
+# NOTE: This is needed to make sure we are using the correct set
+#       of systemd unit files. The RPMs lay down defaults but
+#       the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+#       require a service to be part of the call.
+- name: Reload systemd to ensure latest unit files
+  command: systemctl daemon-reload
+
+- name: Restart docker
+  service:
+    name: "{{ openshift.docker.service_name }}"
+    state: started
+  register: docker_start_result
+  until: not docker_start_result | failed
+  retries: 3
+  delay: 30
+
+- name: Update docker facts
+  openshift_facts:
+    role: docker
+
+- name: Start services
+  service:
+    name: "{{ item }}"
+    state: started
+  with_items:
+  - etcd_container
+  - openvswitch
+  - "{{ openshift.common.service_type }}-master-api"
+  - "{{ openshift.common.service_type }}-master-controllers"
+  - "{{ openshift.common.service_type }}-node"
+  failed_when: false
+
+- name: Wait for master API to come back online
+  wait_for:
+    host: "{{ openshift.common.hostname }}"
+    state: started
+    delay: 10
+    port: "{{ openshift.master.api_port }}"
+    timeout: 600
+  when: inventory_hostname in groups.oo_masters_to_config
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
new file mode 100644
index 000000000..a998acf21
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -0,0 +1,29 @@
+---
+# input variables:
+# - openshift.common.service_type
+# - component
+# - openshift_pkg_version (optional; when unset, the latest available rpm is used)
+# - openshift.common.is_atomic
+
+# We verified latest rpm available is suitable, so just yum update.
+- name: Upgrade packages
+  package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version | default('') }} state=present"
+
+- name: Ensure python-yaml present for config upgrade
+  package: name=PyYAML state=present
+  when: not openshift.common.is_atomic | bool
+
+- name: Install Node service file
+  template:
+    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+    src: "node.service.j2"
+  register: l_node_unit
+
+# NOTE: This is needed to make sure we are using the correct set
+#       of systemd unit files. The RPMs lay down defaults but
+#       the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+#       require a service to be part of the call.
+- name: Reload systemd units
+  command: systemctl daemon-reload
+  when: l_node_unit | changed
-- 
cgit v1.2.3