Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.j2                              23
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.yml                             81
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml      31
-rw-r--r--  playbooks/byo/openshift-preflight/check.yml                             39
-rw-r--r--  playbooks/certificate_expiry/default.yaml                               10
-rw-r--r--  playbooks/certificate_expiry/easy-mode.yaml                             18
-rw-r--r--  playbooks/certificate_expiry/html_and_json_default_paths.yaml           12
-rw-r--r--  playbooks/certificate_expiry/longer-warning-period-json-results.yaml    13
-rw-r--r--  playbooks/certificate_expiry/longer_warning_period.yaml                 12
l---------  playbooks/certificate_expiry/roles                                       1
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/registry.yml   15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml   45
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml           45
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml                     16
14 files changed, 163 insertions, 198 deletions
diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
deleted file mode 100644
index 10454ad11..000000000
--- a/playbooks/adhoc/s3_registry/s3_registry.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-version: 0.1
-log:
- level: debug
-http:
- addr: :5000
-storage:
- cache:
- layerinfo: inmemory
- s3:
- accesskey: {{ aws_access_key }}
- secretkey: {{ aws_secret_key }}
- region: {{ aws_bucket_region }}
- bucket: {{ aws_bucket_name }}
- encrypt: true
- secure: true
- v4auth: true
- rootdirectory: /registry
-auth:
- openshift:
- realm: openshift
-middleware:
- repository:
- - name: openshift
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
deleted file mode 100644
index d6758dae5..000000000
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ /dev/null
@@ -1,81 +0,0 @@
----
-# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
-# Usage:
-# ansible-playbook s3_registry.yml -e clusterid="mycluster" -e aws_bucket="clusterid-docker" -e aws_region="us-east-1"
-#
-# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
-# The 'clusterid' is the short name of your cluster.
-
-- hosts: tag_clusterid_{{ clusterid }}:&tag_host-type_openshift-master
- remote_user: root
- gather_facts: False
-
- vars:
- aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
- aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
- aws_bucket_name: "{{ aws_bucket | default(clusterid ~ '-docker') }}"
- aws_bucket_region: "{{ aws_region | default(lookup('env', 'S3_REGION') | default('us-east-1', true)) }}"
- aws_create_bucket: "{{ aws_create | default(True) }}"
- aws_tmp_path: "{{ aws_tmp_pathfile | default('/root/config.yml')}}"
- aws_delete_tmp_file: "{{ aws_delete_tmp | default(True) }}"
-
- tasks:
-
- - name: Check for AWS creds
- fail:
- msg: "Couldn't find {{ item }} creds in ENV"
- when: "{{ item }} == ''"
- with_items:
- - aws_access_key
- - aws_secret_key
-
- - name: Scale down registry
- command: oc scale --replicas=0 dc/docker-registry
-
- - name: Create S3 bucket
- when: aws_create_bucket | bool
- local_action:
- module: s3 bucket="{{ aws_bucket_name }}" mode=create
-
- - name: Set up registry environment variable
- command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
-
- - name: Generate docker registry config
- template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
-
- - name: Determine if new secrets are needed
- command: oc get secrets
- register: secrets
-
- - name: Create registry secrets
- command: oc secrets new dockerregistry /root/config.yml
- when: "'dockerregistry' not in secrets.stdout"
-
- - name: Load lib_openshift modules
- include_role:
- name: lib_openshift
-
- - name: Add secrets to registry service account
- oc_serviceaccount_secret:
- service_account: registry
- secret: dockerregistry
- namespace: default
- state: present
-
- - name: Determine if deployment config contains secrets
- command: oc volume dc/docker-registry --list
- register: dc
-
- - name: Add secrets to registry deployment config
- command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry
- when: "'dockersecrets' not in dc.stdout"
-
- - name: Wait for deployment config to take effect before scaling up
- pause: seconds=30
-
- - name: Scale up registry
- command: oc scale --replicas=1 dc/docker-registry
-
- - name: Delete temporary config file
- file: path={{ aws_tmp_path }} state=absent
- when: aws_delete_tmp_file | bool
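Note: the bucket-creation step that the deleted playbook performed with the legacy "s3" module (mode=create) can also be expressed with Ansible's s3_bucket module. A minimal, hypothetical sketch follows; the bucket name and region are illustrative assumptions, not values taken from this repository.

---
# Hypothetical sketch only; not part of the removed playbook.
- hosts: localhost
  gather_facts: no
  tasks:
    - name: Create the S3 bucket used as docker-registry backend storage
      s3_bucket:
        name: mycluster-docker   # assumed: "<clusterid>-docker"
        region: us-east-1        # assumed default region
        state: present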
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 13e1da961..5d3280328 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -22,12 +22,24 @@
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
serial: 1
any_errors_fatal: true
+
+ roles:
+ - lib_openshift
+
tasks:
- - name: Prepare for Node draining
- command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
+ - name: Mark node unschedulable
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+ - inventory_hostname in groups.oo_nodes_to_upgrade
- name: Drain Node for Kubelet upgrade
command: >
@@ -39,7 +51,12 @@
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
- command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
+ when: node_unschedulable|changed
diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml
index 32673d01d..c5f05d0f0 100644
--- a/playbooks/byo/openshift-preflight/check.yml
+++ b/playbooks/byo/openshift-preflight/check.yml
@@ -1,31 +1,12 @@
---
- hosts: OSEv3
- roles:
- - openshift_preflight/init
-
-- hosts: OSEv3
- name: checks that apply to all hosts
- gather_facts: no
- ignore_errors: yes
- roles:
- - openshift_preflight/common
-
-- hosts: masters
- name: checks that apply to masters
- gather_facts: no
- ignore_errors: yes
- roles:
- - openshift_preflight/masters
-
-- hosts: nodes
- name: checks that apply to nodes
- gather_facts: no
- ignore_errors: yes
- roles:
- - openshift_preflight/nodes
-
-- hosts: OSEv3
- name: verify check results
- gather_facts: no
- roles:
- - openshift_preflight/verify_status
+ name: run OpenShift health checks
+ roles:
+ - openshift_health_checker
+ post_tasks:
+ # NOTE: we need to use the old "action: name" syntax until
+ # https://github.com/ansible/ansible/issues/20513 is fixed.
+ - action: openshift_health_check
+ args:
+ checks:
+ - '@preflight'
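Note: the openshift_health_check action shown above takes a list of checks, so the '@preflight' group can be swapped for individual check names. A minimal sketch using the same action-plugin syntax follows; the check names are assumptions for illustration and are not taken from this diff.

---
# Hypothetical sketch following the same syntax as check.yml above.
- hosts: OSEv3
  name: run selected OpenShift health checks
  roles:
    - openshift_health_checker
  post_tasks:
    # Same workaround as above: keep the old "action: name" syntax until
    # https://github.com/ansible/ansible/issues/20513 is fixed.
    - action: openshift_health_check
      args:
        checks:
          - package_availability   # assumed check name
          - package_version        # assumed check name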
diff --git a/playbooks/certificate_expiry/default.yaml b/playbooks/certificate_expiry/default.yaml
new file mode 100644
index 000000000..630135cae
--- /dev/null
+++ b/playbooks/certificate_expiry/default.yaml
@@ -0,0 +1,10 @@
+---
+# Default behavior, you will need to ensure you run ansible with the
+# -v option to see report results:
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/playbooks/certificate_expiry/easy-mode.yaml b/playbooks/certificate_expiry/easy-mode.yaml
new file mode 100644
index 000000000..ae41c7c14
--- /dev/null
+++ b/playbooks/certificate_expiry/easy-mode.yaml
@@ -0,0 +1,18 @@
+---
+# This example playbook is great if you're just wanting to try the
+# role out.
+#
+# This example enables HTML and JSON reports
+#
+# All certificates (healthy or not) are included in the results
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/playbooks/certificate_expiry/html_and_json_default_paths.yaml b/playbooks/certificate_expiry/html_and_json_default_paths.yaml
new file mode 100644
index 000000000..d80cb6ff4
--- /dev/null
+++ b/playbooks/certificate_expiry/html_and_json_default_paths.yaml
@@ -0,0 +1,12 @@
+---
+# Generate HTML and JSON artifacts in their default paths:
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_save_json_results: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/playbooks/certificate_expiry/longer-warning-period-json-results.yaml b/playbooks/certificate_expiry/longer-warning-period-json-results.yaml
new file mode 100644
index 000000000..87a0f3be4
--- /dev/null
+++ b/playbooks/certificate_expiry/longer-warning-period-json-results.yaml
@@ -0,0 +1,13 @@
+---
+# Change the expiration warning window to 1500 days (good for testing
+# the module out) and save the results as a JSON file:
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ openshift_certificate_expiry_save_json_results: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/playbooks/certificate_expiry/longer_warning_period.yaml b/playbooks/certificate_expiry/longer_warning_period.yaml
new file mode 100644
index 000000000..960457c4b
--- /dev/null
+++ b/playbooks/certificate_expiry/longer_warning_period.yaml
@@ -0,0 +1,12 @@
+---
+# Change the expiration warning window to 1500 days (good for testing
+# the module out):
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/playbooks/certificate_expiry/roles b/playbooks/certificate_expiry/roles
new file mode 120000
index 000000000..b741aa3db
--- /dev/null
+++ b/playbooks/certificate_expiry/roles
@@ -0,0 +1 @@
+../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
index 999e4af65..6771cc98d 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
@@ -48,12 +48,15 @@
# Replace dc/docker-registry certificate secret contents if set.
- block:
+ - name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
- name: Retrieve registry service IP
- command: >
- {{ openshift.common.client_binary }} get service docker-registry
- -o jsonpath='{.spec.clusterIP}'
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n default
+ oc_service:
+ namespace: default
+ name: docker-registry
+ state: list
register: docker_registry_service_ip
changed_when: false
@@ -67,7 +70,7 @@
--signer-cert={{ openshift.common.config_base }}/master/ca.crt
--signer-key={{ openshift.common.config_base }}/master/ca.key
--signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
- --hostnames="{{ docker_registry_service_ip.stdout }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
+ --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift.common.config_base }}/master/registry.crt
--key={{ openshift.common.config_base }}/master/registry.key
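Note: the registered oc_service result is consumed above as docker_registry_service_ip.results.clusterip. A quick way to confirm that value before it is interpolated into --hostnames is a debug task; the standalone play below is a hypothetical sketch, assuming lib_openshift is available on the role path.

---
# Hypothetical sketch, not part of the patch.
- hosts: oo_first_master
  gather_facts: no
  roles:
    - lib_openshift
  tasks:
    - name: Look up the docker-registry service
      oc_service:
        namespace: default
        name: docker-registry
        state: list
      register: docker_registry_service_ip

    - name: Show the cluster IP used in the certificate hostnames
      debug:
        var: docker_registry_service_ip.results.clusterip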
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index db2c27919..a4aefcdac 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -238,29 +238,22 @@
any_errors_fatal: true
pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- - name: Determine if node is currently scheduleable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- changed_when: false
-
- - set_fact:
- was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-
- name: Mark node unschedulable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
- # NOTE: There is a transient "object has been modified" error here, allow a couple
- # retries for a more reliable upgrade.
- register: node_unsched
- until: node_unsched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
- name: Drain Node for Kubelet upgrade
command: >
@@ -268,17 +261,19 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
roles:
+ - lib_openshift
- openshift_facts
- docker
- openshift_node_upgrade
post_tasks:
- name: Set node schedulability
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: was_schedulable | bool
- register: node_sched
- until: node_sched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
+ when: node_unschedulable|changed
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index e45b635f7..e3a98fd9b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -7,29 +7,22 @@
any_errors_fatal: true
pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- - name: Determine if node is currently scheduleable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- changed_when: false
-
- - set_fact:
- was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-
- name: Mark node unschedulable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
- # NOTE: There is a transient "object has been modified" error here, allow a couple
- # retries for a more reliable upgrade.
- register: node_unsched
- until: node_unsched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
- name: Drain Node for Kubelet upgrade
command: >
@@ -37,20 +30,22 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
roles:
+ - lib_openshift
- openshift_facts
- docker
- openshift_node_upgrade
post_tasks:
- name: Set node schedulability
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: was_schedulable | bool
- register: node_sched
- until: node_sched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
+ when: node_unschedulable|changed
- include: ../reset_excluder.yml
tags:
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index a9750e40f..67ba0aa2e 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -7,14 +7,26 @@
ignore_errors: true
become: yes
+# WARNING: This process is riddled with weird behavior.
+
+# Workaround for https://github.com/ansible/ansible/issues/21269
+- set_fact:
+ wait_for_host: "{{ ansible_host }}"
+
+# Ansible's blog documents this *without* the port, which appears to now
+# just wait until the timeout value and then proceed without checking anything.
+# port is now required.
+#
+# However neither ansible_ssh_port or ansible_port are reliably defined, likely
+# only if overridden. Assume a default of 22.
- name: Wait for master to restart
local_action:
module: wait_for
- host="{{ ansible_host }}"
+ host="{{ wait_for_host }}"
state=started
delay=10
timeout=600
- port="{{ ansible_ssh_port }}"
+ port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}"
become: no
# Now that ssh is back up we can wait for API on the remote system,