Diffstat (limited to 'playbooks')
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml                               4
-rw-r--r--  playbooks/aws/provisioning-inventory.example.ini                         1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml   14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml     35
-rw-r--r--  playbooks/init/evaluate_groups.yml                                        7
-rw-r--r--  playbooks/openshift-etcd/private/migrate.yml                              4
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_backup.yml                       1
-rw-r--r--  playbooks/openshift-node/private/additional_config.yml                    1
-rw-r--r--  playbooks/openstack/advanced-configuration.md                            21
9 files changed, 41 insertions, 47 deletions
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index cab2f1e40..e7bed4f6e 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -18,7 +18,7 @@
name: lib_openshift
- name: fetch masters
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
@@ -30,7 +30,7 @@
until: "'instances' in mastersout and mastersout.instances|length > 0"
- name: fetch new node instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
diff --git a/playbooks/aws/provisioning-inventory.example.ini b/playbooks/aws/provisioning-inventory.example.ini
index 238a7eb2f..cf76c9d10 100644
--- a/playbooks/aws/provisioning-inventory.example.ini
+++ b/playbooks/aws/provisioning-inventory.example.ini
@@ -11,6 +11,7 @@ etcd
openshift_deployment_type=origin
openshift_master_bootstrap_enabled=True
+openshift_master_api_port=443
openshift_hosted_router_wait=False
openshift_hosted_registry_wait=False
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 37fc8a0f6..7b82fe05b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,20 +2,6 @@
###############################################################################
# Upgrade Masters
###############################################################################
-
-# If facts cache were for some reason deleted, this fact may not be set, and if not set
-# it will always default to true. This causes problems for the etcd data dir fact detection
-# so we must first make sure this is set correctly before attempting the backup.
-- name: Set master embedded_etcd fact
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
-
- name: Backup and upgrade etcd
import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index 47410dff3..4fc897a57 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -11,25 +11,19 @@
msg: "Ensure that new scale groups were provisioned before proceeding to update."
when:
- "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+ - "'oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes|length == 0"
+ - groups.oo_sg_current_nodes == groups.oo_sg_new_nodes
- name: initialize upgrade bits
import_playbook: init.yml
-- name: Drain and upgrade nodes
+- name: unschedule nodes
hosts: oo_sg_current_nodes
- # This var must be set with -e on invocation, as it is not a per-host inventory var
- # and is evaluated early. Values such as "20%" can also be used.
- serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
- pre_tasks:
+ tasks:
- name: Load lib_openshift modules
- include_role:
+ import_role:
name: ../roles/lib_openshift
- # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
- # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
- # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Mark node unschedulable
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -40,14 +34,27 @@
register: node_unschedulable
until: node_unschedulable|succeeded
+- name: Drain nodes
+ hosts: oo_sg_current_nodes
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+ tasks:
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
- retries: 60
- delay: 60
+ retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_nodes_drain_result | failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
# Alright, let's clean up!
- name: clean up the old scale group
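The comments in the hunk above note that `openshift_upgrade_nodes_serial` and the related batch variables must be passed with `-e` at invocation time rather than set as per-host inventory vars. As a hedged illustration only (the file name and values below are assumptions, not part of this commit), the extra vars could be collected in a small YAML file and passed with `-e @upgrade-extra-vars.yml`:

    # upgrade-extra-vars.yml -- illustrative values only
    openshift_upgrade_nodes_serial: "20%"            # drain/upgrade 20% of the current nodes per batch
    openshift_upgrade_nodes_max_fail_percentage: 10  # tolerate up to 10% of a batch failing
    openshift_upgrade_nodes_drain_timeout: 600       # passed to `oc adm drain --timeout=...s`; with a
                                                     # non-zero value a timed-out drain is not treated as fatal
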
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index 8787c87e1..8087f6ffc 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -46,14 +46,9 @@
- name: Evaluate groups - Fail if no etcd hosts group is defined
fail:
msg: >
- Running etcd as an embedded service is no longer supported. If this is a
- new install please define an 'etcd' group with either one or three
- hosts. These hosts may be the same hosts as your masters. If this is an
- upgrade you may set openshift_master_unsupported_embedded_etcd=true
- until a migration playbook becomes available.
+ Running etcd as an embedded service is no longer supported.
when:
- g_etcd_hosts | default([]) | length not in [3,1]
- - not openshift_master_unsupported_embedded_etcd | default(False)
- not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
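The check above fails unless the inventory defines an `etcd` group with exactly one or three hosts (the guidance dropped from the message; the hosts may be the same machines as the masters). A minimal illustrative inventory fragment, with placeholder hostnames:

    [etcd]
    master1.example.com
    master2.example.com
    master3.example.com
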
diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml
index 9ddb4afe2..313ed8bec 100644
--- a/playbooks/openshift-etcd/private/migrate.yml
+++ b/playbooks/openshift-etcd/private/migrate.yml
@@ -19,7 +19,6 @@
name: etcd
tasks_from: migrate.pre_check.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ ansible_default_ipv4.address }}"
# TODO: This will be different for release-3.6 branch
@@ -49,7 +48,6 @@
tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: pre-migration
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
@@ -84,7 +82,6 @@
name: etcd
tasks_from: migrate.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ openshift.common.ip }}"
etcd_url_scheme: "https"
etcd_peer_url_scheme: "https"
@@ -97,7 +94,6 @@
name: etcd
tasks_from: clean_data.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ openshift.common.ip }}"
etcd_url_scheme: "https"
etcd_peer_url_scheme: "https"
diff --git a/playbooks/openshift-etcd/private/upgrade_backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml
index 22ed7e610..7dfea07f1 100644
--- a/playbooks/openshift-etcd/private/upgrade_backup.yml
+++ b/playbooks/openshift-etcd/private/upgrade_backup.yml
@@ -9,7 +9,6 @@
tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: "{{ etcd_backup_tag }}"
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
diff --git a/playbooks/openshift-node/private/additional_config.yml b/playbooks/openshift-node/private/additional_config.yml
index 261e2048f..b86cb3cc2 100644
--- a/playbooks/openshift-node/private/additional_config.yml
+++ b/playbooks/openshift-node/private/additional_config.yml
@@ -33,7 +33,6 @@
roles:
- role: flannel
etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
when: openshift_use_flannel | default(false) | bool
- name: Additional node config
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index 403e0e1a7..2c9b70b5f 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -159,11 +159,22 @@ So the provisioned cluster nodes will start using those natively as
default nameservers. Technically, this allows to deploy OpenShift clusters
without dnsmasq proxies.
-The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain` will form the cluster's DNS domain all
-your servers will be under. With the default values, this will be
-`openshift.example.com`. For workloads, the default subdomain is 'apps'.
-That sudomain can be set as well by the `openshift_openstack_app_subdomain` variable in
-the inventory.
+The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain`
+will form the cluster's public DNS domain all your servers will be under. With
+the default values, this will be `openshift.example.com`. For workloads, the
+default subdomain is 'apps'. That subdomain can also be set via the
+`openshift_openstack_app_subdomain` variable in the inventory.
+
+If you want to use two sets of hostnames for public and private/prefixed DNS
+records for your externally managed public DNS server, you can specify
+`openshift_openstack_public_hostname_suffix` and/or
+`openshift_openstack_private_hostname_suffix`. The suffixes will be added
+to the nsupdate records sent to the external DNS server. Those are empty by default.
+
+**Note** that the real hostnames (the Nova servers' and Ansible hostnames) and
+inventory variables will not be updated. The deployment may be done on
+arbitrarily named hosts, with the hostnames managed by cloud-init. Inventory
+hostnames will ignore the suffixes.
The `openstack_<role name>_hostname` is a set of variables used for customising
public names of Nova servers provisioned with a given role. When such a variable stays commented,
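
To tie together the DNS and hostname-suffix paragraphs above, here is a hedged sketch of the variables they describe (values and comments are illustrative; only the variable names come from the text):

    # Illustrative values only. With these, the cluster's public DNS domain is
    # "openshift.example.com" and application routes live under the 'apps'
    # subdomain, i.e. "apps.openshift.example.com". The optional suffixes,
    # empty by default, are appended to the hostnames in the nsupdate records
    # sent to the externally managed DNS server.
    openshift_openstack_clusterid: openshift
    openshift_openstack_public_dns_domain: example.com
    openshift_openstack_app_subdomain: apps
    openshift_openstack_public_hostname_suffix: ""
    openshift_openstack_private_hostname_suffix: ""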