Diffstat (limited to 'playbooks')
79 files changed, 730 insertions, 393 deletions
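Two mechanical changes recur throughout this diff: package tasks gain a `register`/`until` retry so transient repository failures no longer abort a run, and `include_role` becomes `import_role` where the role only needs to be brought into scope statically. A minimal sketch of the retry idiom as it appears in the hunks below — the explicit `retries`/`delay` values and the package name are illustrative assumptions; the hunks themselves omit them and rely on Ansible's defaults (3 retries, 5-second delay):

```yaml
# Retry idiom added to package tasks throughout this changeset.
# retries/delay are shown explicitly for illustration only; the
# actual hunks rely on Ansible's defaults (3 retries, 5s delay).
- name: Install a package, retrying transient repo failures
  package:
    name: docker        # example package; the diff applies this to many
    state: present
  register: result
  until: result | success
  retries: 3
  delay: 5
```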
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml index 44a2ef534..69b2541bb 100644 --- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml +++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml @@ -8,7 +8,7 @@ hosts: masters:!masters[0] pre_tasks: - set_fact: - openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" + openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}" tasks: - include_role: name: openshift_logging diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 5ed55a817..0c2a2c7e8 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -126,10 +126,14 @@ - origin-sdn-ovs - tuned-profiles-openshift-node - tuned-profiles-origin-node + register: result + until: result | success - name: Remove flannel package package: name=flannel state=absent when: openshift_use_flannel | default(false) | bool + register: result + until: result | success when: not is_atomic | bool - shell: systemctl reset-failed @@ -382,6 +386,8 @@ - origin-master - pacemaker - pcs + register: result + until: result | success - shell: systemctl reset-failed changed_when: False @@ -497,6 +503,8 @@ with_items: - etcd - etcd3 + register: result + until: result | success - shell: systemctl reset-failed changed_when: False @@ -554,6 +562,8 @@ when: not is_atomic | bool and openshift_remove_all | default(True) | bool with_items: - haproxy + register: result + until: result | success - shell: systemctl reset-failed changed_when: False diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml index c2c8bea50..cab2f1e40 100755 --- a/playbooks/aws/openshift-cluster/accept.yml +++ b/playbooks/aws/openshift-cluster/accept.yml @@ -14,7 +14,7 @@ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" - name: bring lib_openshift into scope - include_role: + import_role: name: lib_openshift - name: fetch masters diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml deleted file mode 100644 index c26f11772..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml - vars: - g_check_expiry_hosts: 'oo_etcd_to_config' - -- include: ../../common/openshift-cluster/redeploy-certificates/etcd-backup.yml - -- include: ../../openshift-etcd/private/certificates.yml - vars: - etcd_certificates_redeploy: true - -- include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml - -- include: ../../openshift-master/private/certificates.yml - vars: - openshift_certificates_redeploy: true - -- include: ../../common/openshift-cluster/redeploy-certificates/nodes-backup.yml - -- include: ../../openshift-node/private/certificates.yml - vars: - openshift_certificates_redeploy: true - -- include: ../../openshift-etcd/private/restart.yml - vars: - g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}" - -- include: ../../openshift-master/private/restart.yml - -- include: 
../../openshift-node/private/restart.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/router.yml - when: openshift_hosted_manage_router | default(true) | bool - -- include: ../../common/openshift-cluster/redeploy-certificates/registry.yml - when: openshift_hosted_manage_registry | default(true) | bool - -- include: ../../openshift-master/private/revert-client-ca.yml - -- include: ../../openshift-master/private/restart.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml deleted file mode 100644 index 77dd121b3..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml deleted file mode 100644 index 94e50cc28..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml - vars: - g_check_expiry_hosts: 'oo_etcd_to_config' - -- include: ../../common/openshift-cluster/redeploy-certificates/etcd-backup.yml - -- include: ../../openshift-etcd/private/certificates.yml - vars: - etcd_certificates_redeploy: true - -- include: ../../openshift-etcd/private/restart.yml - vars: - g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}" - -- include: ../../openshift-master/private/restart.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml deleted file mode 100644 index 88e52f809..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml - -- include: ../../openshift-master/private/certificates.yml - vars: - openshift_certificates_redeploy: true - -- include: ../../openshift-master/private/restart.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml deleted file mode 100644 index 2abbe78f1..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/nodes-backup.yml - -- include: ../../openshift-node/private/certificates.yml - vars: - openshift_certificates_redeploy: true - -- include: ../../openshift-node/private/restart.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml deleted file mode 100644 index 9cebeb1ee..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml deleted file mode 100644 index 36b6250a7..000000000 --- 
a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/registry.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml deleted file mode 100644 index 181e03381..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/router.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml new file mode 100644 index 000000000..14b0f85d4 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml @@ -0,0 +1,7 @@ +--- +# +# Node Scale Group Upgrade Playbook +# +# Upgrades scale group nodes only. +# +- include: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md index d9be6ae3b..815e49c28 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md @@ -1,4 +1,4 @@ -# v3.6 Major and Minor Upgrade Playbook +# v3.8 Major and Minor Upgrade Playbook ## Overview This playbook currently performs the following steps. diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md new file mode 100644 index 000000000..0ab3d3a52 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md @@ -0,0 +1,20 @@ +# v3.9 Major and Minor Upgrade Playbook + +## Overview +This playbook currently performs the following steps. + + * Upgrade and restart master services + * Unschedule node + * Upgrade and restart docker + * Upgrade and restart node services + * Modifies the subset of the configuration necessary + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +## Usage + +``` +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml +``` diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml new file mode 100644 index 000000000..e8f9d94e2 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml @@ -0,0 +1,5 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml new file mode 100644 index 000000000..acb4195e3 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml @@ -0,0 +1,14 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
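The header comment above describes the split upgrade: control plane first, then nodes. As a hedged sketch — not a file in this changeset — the two phases compose exactly like the all-in-one `upgrade.yml` shown later in this diff:

```yaml
# Hypothetical wrapper illustrating the two-phase flow the comment
# describes; running these in order is equivalent to upgrade.yml.
- include: upgrade_control_plane.yml

- include: upgrade_nodes.yml
```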
+# +- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml new file mode 100644 index 000000000..df19097e1 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml @@ -0,0 +1,7 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins deleted file mode 120000 index b1213dedb..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/library b/playbooks/common/openshift-cluster/redeploy-certificates/library deleted file mode 120000 index 9a53f009d..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/library +++ /dev/null @@ -1 +0,0 @@ -../../../../library
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins deleted file mode 120000 index aff753026..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/roles b/playbooks/common/openshift-cluster/redeploy-certificates/roles deleted file mode 120000 index 4bdbcbad3..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml index 83f16ac0d..3b779becb 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml @@ -6,10 +6,6 @@ retries: 3 delay: 30 -- name: Update docker facts - openshift_facts: - role: docker - - name: Restart containerized services service: name={{ item }} state=started with_items: diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml index 808cc562c..83be290e6 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml @@ -41,6 +41,8 @@ - name: Upgrade Docker package: name=docker{{ '-' + docker_version }} state=present + register: result + until: result | success - include: restart.yml when: not skip_docker_restart | default(False) | bool diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index 3aa9e0460..c458184c9 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -14,8 +14,9 @@ pre_tasks: - name: Load lib_openshift modules - include_role: + import_role: name: lib_openshift + - name: Collect all routers oc_obj: state: list diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 446f315d6..84b740227 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -6,7 +6,7 @@ - name: Update oreg_auth docker login credentials if necessary include_role: - name: docker + name: container_runtime tasks_from: registry_auth.yml when: oreg_auth_user is defined diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index d08c6e940..503d75ba0 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -143,10 +143,6 @@ roles: - { role: openshift_cli } vars: - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. 
- skip_docker_role: True __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml" tasks: - name: Reconcile Cluster Roles @@ -291,7 +287,7 @@ pre_tasks: - name: Load lib_openshift modules - include_role: + import_role: name: lib_openshift # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 5dc8193a7..75ffd3fe9 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -8,7 +8,7 @@ pre_tasks: - name: Load lib_openshift modules - include_role: + import_role: name: lib_openshift # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml new file mode 100644 index 000000000..d9ce3a7e3 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml @@ -0,0 +1,59 @@ +--- +- name: create new scale group + hosts: localhost + tasks: + - name: build upgrade scale groups + include_role: + name: openshift_aws + tasks_from: upgrade_node_group.yml + + - fail: + msg: "Ensure that new scale groups were provisioned before proceeding to update." + when: + - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0" + +- name: initialize upgrade bits + include: init.yml + +- name: Drain and upgrade nodes + hosts: oo_sg_current_nodes + # This var must be set with -e on invocation, as it is not a per-host inventory var + # and is evaluated early. Values such as "20%" can also be used. + serial: "{{ openshift_upgrade_nodes_serial | default(1) }}" + max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}" + + pre_tasks: + - name: Load lib_openshift modules + include_role: + name: ../roles/lib_openshift + + # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node + # or docker actually needs an upgrade before proceeding. Perhaps best to save this until + # we merge upgrade functionality into the base roles and a normal config.yml playbook run. + - name: Mark node unschedulable + oc_adm_manage_node: + node: "{{ openshift.node.nodename | lower }}" + schedulable: False + delegate_to: "{{ groups.oo_first_master.0 }}" + retries: 10 + delay: 5 + register: node_unschedulable + until: node_unschedulable|succeeded + + - name: Drain Node for Kubelet upgrade + command: > + {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets + delegate_to: "{{ groups.oo_first_master.0 }}" + register: l_upgrade_nodes_drain_result + until: not l_upgrade_nodes_drain_result | failed + retries: 60 + delay: 60 + +# Alright, let's clean up! 
+- name: clean up the old scale group + hosts: localhost + tasks: + - name: clean up scale group + include_role: + name: openshift_aws + tasks_from: remove_scale_group.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 6cb6a665f..5f9c56867 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -73,12 +73,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 8f48bedcc..1aac3d014 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -77,12 +77,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index f25cfe0d0..306b76422 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -66,12 +66,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index 2b99568c7..6d4949542 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -77,12 +77,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index d3d2046e6..0a592896b 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -81,12 +81,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml index c0546bd2d..b381d606a 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -66,12 +66,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml index b602cdd0e..e7d7756d1 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -77,12 +77,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index da81e6dea..be362e3ff 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -81,12 +81,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index abd56e762..6e68116b0 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -66,12 +66,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins new file mode 120000 index 000000000..7de3c1dd7 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml new file mode 100644 index 000000000..1d4d1919c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml @@ -0,0 +1,20 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.election.lockName' + yaml_value: 'openshift-master-controllers' + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' + yaml_value: service-signer.crt + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' + yaml_value: service-signer.key + +- modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: ca.crt diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/roles b/playbooks/common/openshift-cluster/upgrades/v3_9/roles new file mode 120000 index 000000000..415645be6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/roles @@ -0,0 +1 @@ +../../../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml new file mode 100644 index 000000000..94c16cae0 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml @@ -0,0 +1,142 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.9' + openshift_upgrade_min: '3.7' + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../../../init/version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../../../../openshift-master/private/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml new file mode 100644 index 000000000..2045f6379 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml @@ -0,0 +1,144 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.9' + openshift_upgrade_min: '3.7' + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../../../init/version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which 
trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../../../../openshift-master/private/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml new file mode 100644 index 000000000..6134f8653 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml @@ -0,0 +1,115 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
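Node upgrades run serialized. Per the comment in `upgrade_scale_group.yml` above, the batch-size variable must be supplied with `-e` because it is evaluated before per-host inventory vars. An illustrative play header using the same pattern:

```yaml
# Illustrative play header: drain and upgrade nodes in batches.
# An integer or a percentage such as "20%" is accepted; pass it with
# -e, e.g. ansible-playbook ... -e openshift_upgrade_nodes_serial="20%".
- name: Drain and upgrade nodes
  hosts: oo_nodes_to_upgrade
  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
```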
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.9' + openshift_upgrade_min: '3.7' + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../../../init/version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml new file mode 100644 index 000000000..4bd2d87b1 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml @@ -0,0 +1,7 @@ +--- +- name: Verify 3.9 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + tasks: + - debug: msg="noop" diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml index 05142f9b6..d41f365dc 100644 --- a/playbooks/init/facts.yml +++ b/playbooks/init/facts.yml @@ -84,6 +84,8 @@ - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}" - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}" - yum-utils + register: result + until: result | success - name: Ensure various deps for running system containers are installed package: @@ -100,6 +102,8 @@ or (openshift_use_openvswitch_system_container | default(False)) | bool or (openshift_use_node_system_container | default(False)) | bool or (openshift_use_master_system_container | default(False)) | bool + register: result + until: result | success - name: Gather Cluster facts and set is_containerized if needed openshift_facts: @@ -131,11 +135,13 @@ - openshift_http_proxy is defined or openshift_https_proxy is defined - openshift_generate_no_proxy_hosts | default(True) | bool + - name: Initialize openshift.node.sdn_mtu + openshift_facts: + role: node + local_facts: + sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" + - name: initialize_facts set_fact repoquery command set_fact: repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}" - - - name: initialize_facts set_fact on openshift_docker_hosted_registry_network - set_fact: - openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml index 1d4f41ffc..5a7483b72 100644 --- a/playbooks/init/main.yml +++ b/playbooks/init/main.yml @@ -24,6 +24,7 @@ - import_playbook: repos.yml - import_playbook: version.yml + when: not (skip_version | default(False)) - name: Initialization Checkpoint End hosts: all diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml b/playbooks/openshift-etcd/private/certificates-backup.yml index d738c8207..d738c8207 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml +++ b/playbooks/openshift-etcd/private/certificates-backup.yml diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml index 438f704bc..cc5d57031 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml +++ b/playbooks/openshift-etcd/private/redeploy-ca.yml @@ -21,7 +21,7 @@ name: etcd tasks_from: remove_ca_certificates -- include: ../../../openshift-etcd/private/ca.yml +- include: ca.yml - name: Create temp directory for syncing certs hosts: localhost @@ -44,7 +44,7 @@ etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}" etcd_ca_host: 
"{{ groups.oo_etcd_to_config.0 }}" -- include: ../../../openshift-etcd/private/restart.yml +- include: restart.yml # Do not restart etcd when etcd certificates were previously expired. when: ('expired' not in (hostvars | oo_select_keys(groups['etcd']) @@ -82,7 +82,7 @@ state: absent changed_when: false -- include: ../../../openshift-master/private/restart.yml +- include: ../../openshift-master/private/restart.yml # Do not restart masters when master or etcd certificates were previously expired. when: # masters diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml b/playbooks/openshift-etcd/private/redeploy-certificates.yml index 4a9fbf7eb..cc1e6adf5 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml +++ b/playbooks/openshift-etcd/private/redeploy-certificates.yml @@ -1,6 +1,6 @@ --- - name: Check cert expirys - hosts: "{{ g_check_expiry_hosts }}" + hosts: oo_etcd_to_config vars: openshift_certificate_expiry_show_all: yes roles: @@ -10,3 +10,9 @@ # this playbook. Service restarts will be skipped if any # certificates were previously expired. - role: openshift_certificate_expiry + +- include: certificates-backup.yml + +- include: certificates.yml + vars: + etcd_certificates_redeploy: true diff --git a/playbooks/openshift-etcd/redeploy-ca.yml b/playbooks/openshift-etcd/redeploy-ca.yml new file mode 100644 index 000000000..b1d23675d --- /dev/null +++ b/playbooks/openshift-etcd/redeploy-ca.yml @@ -0,0 +1,4 @@ +--- +- include: ../init/main.yml + +- include: private/redeploy-ca.yml diff --git a/playbooks/openshift-etcd/redeploy-certificates.yml b/playbooks/openshift-etcd/redeploy-certificates.yml new file mode 100644 index 000000000..1bd302c03 --- /dev/null +++ b/playbooks/openshift-etcd/redeploy-certificates.yml @@ -0,0 +1,10 @@ +--- +- include: ../init/main.yml + +- include: private/redeploy-certificates.yml + +- include: private/restart.yml + vars: + g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}" + +- include: ../openshift-master/private/restart.yml diff --git a/playbooks/openshift-glusterfs/private/registry.yml b/playbooks/openshift-glusterfs/private/registry.yml index 75c1f0300..917b729f9 100644 --- a/playbooks/openshift-glusterfs/private/registry.yml +++ b/playbooks/openshift-glusterfs/private/registry.yml @@ -1,40 +1,11 @@ --- - import_playbook: config.yml -- name: Initialize GlusterFS registry PV and PVC vars - hosts: oo_first_master - tags: hosted - tasks: - - set_fact: - glusterfs_pv: [] - glusterfs_pvc: [] - - - set_fact: - glusterfs_pv: - - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume" - capacity: "{{ openshift.hosted.registry.storage.volume.size }}" - access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" - storage: - glusterfs: - endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}" - path: "{{ openshift.hosted.registry.storage.glusterfs.path }}" - readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}" - glusterfs_pvc: - - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" - capacity: "{{ openshift.hosted.registry.storage.volume.size }}" - access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" - when: openshift.hosted.registry.storage.glusterfs.swap - - name: Create persistent volumes hosts: oo_first_master - tags: - - hosted - vars: - persistent_volumes: "{{ 
hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}" roles: - role: openshift_persistent_volumes - when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0 + when: openshift_hosted_registry_storage_glusterfs_swap | default(False) - name: Create Hosted Resources hosts: oo_first_master diff --git a/playbooks/openshift-hosted/private/cockpit-ui.yml b/playbooks/openshift-hosted/private/cockpit-ui.yml index 359132dd0..d6529425b 100644 --- a/playbooks/openshift-hosted/private/cockpit-ui.yml +++ b/playbooks/openshift-hosted/private/cockpit-ui.yml @@ -5,4 +5,4 @@ - role: cockpit-ui when: - openshift_hosted_manage_registry | default(true) | bool - - not openshift.docker.hosted_registry_insecure | default(false) | bool + - not (openshift_docker_hosted_registry_insecure | default(false)) | bool diff --git a/playbooks/openshift-hosted/private/create_persistent_volumes.yml b/playbooks/openshift-hosted/private/create_persistent_volumes.yml index 8a60a30b8..41ae2eb69 100644 --- a/playbooks/openshift-hosted/private/create_persistent_volumes.yml +++ b/playbooks/openshift-hosted/private/create_persistent_volumes.yml @@ -1,9 +1,5 @@ --- - name: Create Hosted Resources - persistent volumes hosts: oo_first_master - vars: - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" roles: - role: openshift_persistent_volumes - when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml index 7e9363c5f..7e9363c5f 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml +++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml index 2116c745c..2116c745c 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml +++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml diff --git a/playbooks/openshift-hosted/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/redeploy-registry-certificates.yml new file mode 100644 index 000000000..65fb0abda --- /dev/null +++ b/playbooks/openshift-hosted/redeploy-registry-certificates.yml @@ -0,0 +1,4 @@ +--- +- include: ../init/main.yml + +- include: private/redeploy-registry-certificates.yml diff --git a/playbooks/openshift-hosted/redeploy-router-certificates.yml b/playbooks/openshift-hosted/redeploy-router-certificates.yml new file mode 100644 index 000000000..8dc052751 --- /dev/null +++ b/playbooks/openshift-hosted/redeploy-router-certificates.yml @@ -0,0 +1,4 @@ +--- +- include: ../init/main.yml + +- include: private/redeploy-router-certificates.yml diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml index d737b836b..78fe663db 100644 --- a/playbooks/openshift-loadbalancer/private/config.yml +++ b/playbooks/openshift-loadbalancer/private/config.yml @@ -11,14 +11,12 @@ status: "In Progress" start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" -- name: 
Configure firewall and docker for load balancers +- name: Configure firewall for load balancers hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config vars: openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" roles: - role: os_firewall - - role: openshift_docker - when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool - name: Configure load balancers hosts: oo_lb_to_config diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml b/playbooks/openshift-master/private/certificates-backup.yml index 4dbc041b0..4dbc041b0 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml +++ b/playbooks/openshift-master/private/certificates-backup.yml diff --git a/playbooks/openshift-master/private/redeploy-certificates.yml b/playbooks/openshift-master/private/redeploy-certificates.yml new file mode 100644 index 000000000..3bd38a61d --- /dev/null +++ b/playbooks/openshift-master/private/redeploy-certificates.yml @@ -0,0 +1,6 @@ +--- +- include: certificates-backup.yml + +- include: certificates.yml + vars: + openshift_certificates_redeploy: true diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml index 5a837d80d..59657574a 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml +++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml @@ -207,7 +207,7 @@ group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}" with_items: "{{ client_users }}" -- include: ../../../openshift-master/private/restart.yml +- include: restart.yml # Do not restart masters when master or etcd certificates were previously expired. when: # masters @@ -272,7 +272,7 @@ state: absent changed_when: false -- include: ../../../openshift-node/private/restart.yml +- include: ../../openshift-node/private/restart.yml # Do not restart nodes when node, master or etcd certificates were previously expired. 
when: # nodes diff --git a/playbooks/openshift-master/redeploy-certificates.yml b/playbooks/openshift-master/redeploy-certificates.yml new file mode 100644 index 000000000..df727247b --- /dev/null +++ b/playbooks/openshift-master/redeploy-certificates.yml @@ -0,0 +1,6 @@ +--- +- include: ../init/main.yml + +- include: private/redeploy-certificates.yml + +- include: private/restart.yml diff --git a/playbooks/openshift-master/redeploy-openshift-ca.yml b/playbooks/openshift-master/redeploy-openshift-ca.yml new file mode 100644 index 000000000..3ae74c7a0 --- /dev/null +++ b/playbooks/openshift-master/redeploy-openshift-ca.yml @@ -0,0 +1,4 @@ +--- +- include: ../init/main.yml + +- include: private/redeploy-openshift-ca.yml diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml b/playbooks/openshift-node/private/certificates-backup.yml index 2ad84b3b9..2ad84b3b9 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml +++ b/playbooks/openshift-node/private/certificates-backup.yml diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml index 06f3df9fa..dc5d7a57e 100644 --- a/playbooks/openshift-node/private/configure_nodes.yml +++ b/playbooks/openshift-node/private/configure_nodes.yml @@ -4,7 +4,6 @@ vars: openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config'] | default([]))) diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml index 3c3ac3646..5afa83be7 100644 --- a/playbooks/openshift-node/private/containerized_nodes.yml +++ b/playbooks/openshift-node/private/containerized_nodes.yml @@ -5,7 +5,6 @@ vars: openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config'] | default([]))) diff --git a/playbooks/openshift-node/private/network_manager.yml b/playbooks/openshift-node/private/network_manager.yml index fffed4dfb..7211787be 100644 --- a/playbooks/openshift-node/private/network_manager.yml +++ b/playbooks/openshift-node/private/network_manager.yml @@ -9,6 +9,8 @@ package: name: 'NetworkManager' state: present + register: result + until: result | success - name: configure NetworkManager lineinfile: diff --git a/playbooks/openshift-node/private/redeploy-certificates.yml b/playbooks/openshift-node/private/redeploy-certificates.yml new file mode 100644 index 000000000..3bd38a61d --- /dev/null +++ b/playbooks/openshift-node/private/redeploy-certificates.yml @@ -0,0 +1,6 @@ +--- +- include: certificates-backup.yml + +- include: certificates.yml + vars: + openshift_certificates_redeploy: true diff --git a/playbooks/openshift-node/private/restart.yml 
b/playbooks/openshift-node/private/restart.yml index c3beb59b7..41eb00f99 100644 --- a/playbooks/openshift-node/private/restart.yml +++ b/playbooks/openshift-node/private/restart.yml @@ -16,10 +16,6 @@ retries: 3 delay: 30 - - name: Update docker facts - openshift_facts: - role: docker - - name: Restart containerized services service: name: "{{ item }}" diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml new file mode 100644 index 000000000..df727247b --- /dev/null +++ b/playbooks/openshift-node/redeploy-certificates.yml @@ -0,0 +1,6 @@ +--- +- include: ../init/main.yml + +- include: private/redeploy-certificates.yml + +- include: private/restart.yml diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md index f3fe13530..f567242cd 100644 --- a/playbooks/openstack/README.md +++ b/playbooks/openstack/README.md @@ -6,7 +6,7 @@ etc.). The result is an environment ready for OpenShift installation via [openshift-ansible]. We provide everything necessary to be able to install OpenShift on -OpenStack (including the DNS and load balancer servers when +OpenStack (including the load balancer servers when necessary). In addition, we work on providing integration with the OpenStack-native services (storage, lbaas, baremetal as a service, dns, etc.). @@ -24,7 +24,7 @@ The OpenStack release must be Newton (for Red Hat OpenStack this is version 10) or newer. It must also satisfy these requirements: * Heat (Orchestration) must be available -* The deployment image (CentOS 7 or RHEL 7) must be loaded +* The deployment image (CentOS 7.4 or RHEL 7) must be loaded * The deployment flavor must be available to your user - `m1.medium` / 4GB RAM + 40GB disk should be enough for testing - look at @@ -38,18 +38,6 @@ Optional: * External Neutron network with a floating IP address pool -## DNS Requirements - -OpenShift requires DNS to operate properly. OpenStack supports DNS-as-a-service -in the form of the Designate project, but the playbooks here don't support it -yet. Until we do, you will need to provide a DNS solution yourself (or in case -you are not running Designate when we do). - -If your server supports nsupdate, we will use it to add the necessary records. - -TODO(shadower): describe how to build a sample DNS server and how to configure -our playbooks for nsupdate. - ## Installation @@ -57,14 +45,13 @@ There are four main parts to the installation: 1. [Preparing Ansible and dependencies](#1-preparing-ansible-and-dependencies) 2. [Configuring the desired OpenStack environment and OpenShift cluster](#2-configuring-the-openstack-environment-and-openshift-cluster) -3. [Creating the OpenStack resources (VMs, networking, etc.)](#3-creating-the-openstack-resources-vms-networking-etc) -4. [Installing OpenShift](#4-installing-openshift) +3. [Creating the OpenStack Resources and Installing OpenShift](#3-creating-the-openstack-resources-and-installing-openshift) This guide is going to install [OpenShift Origin][origin] with [CentOS 7][centos7] images with minimal customisation. -We will create the VMs for running OpenShift, in a new Neutron -network, assign Floating IP addresses and configure DNS. +We will create the VMs for running OpenShift, in a new Neutron network and +assign Floating IP addresses. The OpenShift cluster will have a single Master node that will run `etcd`, a single Infra node and two App nodes. @@ -156,14 +143,6 @@ $ vi inventory/group_vars/all.yml 4. 
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index f3fe13530..f567242cd 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -6,7 +6,7 @@ etc.). The result is an environment ready for OpenShift installation
 via [openshift-ansible].
 
 We provide everything necessary to be able to install OpenShift on
-OpenStack (including the DNS and load balancer servers when
+OpenStack (including the load balancer servers when
 necessary). In addition, we work on providing integration with the
 OpenStack-native services (storage, lbaas, baremetal as a service,
 dns, etc.).
@@ -24,7 +24,7 @@ The OpenStack release must be Newton (for Red Hat OpenStack this is
 version 10) or newer. It must also satisfy these requirements:
 
 * Heat (Orchestration) must be available
-* The deployment image (CentOS 7 or RHEL 7) must be loaded
+* The deployment image (CentOS 7.4 or RHEL 7) must be loaded
 * The deployment flavor must be available to your user
   - `m1.medium` / 4GB RAM + 40GB disk should be enough for testing
   - look at
@@ -38,18 +38,6 @@ Optional:
 * External Neutron network with a floating IP address pool
 
-## DNS Requirements
-
-OpenShift requires DNS to operate properly. OpenStack supports DNS-as-a-service
-in the form of the Designate project, but the playbooks here don't support it
-yet. Until we do, you will need to provide a DNS solution yourself (or in case
-you are not running Designate when we do).
-
-If your server supports nsupdate, we will use it to add the necessary records.
-
-TODO(shadower): describe how to build a sample DNS server and how to configure
-our playbooks for nsupdate.
-
 
 ## Installation
 
@@ -57,14 +45,13 @@ There are four main parts to the installation:
 
 1. [Preparing Ansible and dependencies](#1-preparing-ansible-and-dependencies)
 2. [Configuring the desired OpenStack environment and OpenShift cluster](#2-configuring-the-openstack-environment-and-openshift-cluster)
-3. [Creating the OpenStack resources (VMs, networking, etc.)](#3-creating-the-openstack-resources-vms-networking-etc)
-4. [Installing OpenShift](#4-installing-openshift)
+3. [Creating the OpenStack Resources and Installing OpenShift](#3-creating-the-openstack-resources-and-installing-openshift)
 
 This guide is going to install [OpenShift Origin][origin]
 with [CentOS 7][centos7] images with minimal customisation.
 
-We will create the VMs for running OpenShift, in a new Neutron
-network, assign Floating IP addresses and configure DNS.
+We will create the VMs for running OpenShift in a new Neutron network and
+assign Floating IP addresses.
 
 The OpenShift cluster will have a single Master node that will run
 `etcd`, a single Infra node and two App nodes.
@@ -156,14 +143,6 @@ $ vi inventory/group_vars/all.yml
 4. Set the `openshift_openstack_default_flavor` to the flavor you want your
    OpenShift VMs to use.
    - See `openstack flavor list` for the list of available flavors.
-5. Set the `openshift_openstack_dns_nameservers` to the list of the IP addresses
-   of the DNS servers used for the **private** address resolution.
-
-**NOTE ON DNS**: at minimum, the OpenShift nodes need to be able to access each
-other by their hostname. OpenStack doesn't provide this by default, so you
-need to provide a DNS server. Put the address of that DNS server in
-`openshift_openstack_dns_nameservers` variable.
-
 
@@ -191,7 +170,7 @@ the [Sample OpenShift Inventory][sample-openshift-inventory] and
 the [advanced configuration][advanced-configuration].
 
 
-### 3. Creating the OpenStack resources (VMs, networking, etc.)
+### 3. Creating the OpenStack Resources and Installing OpenShift
 
 We provide an `ansible.cfg` file which has some useful defaults -- you should
 copy it to the directory you're going to run `ansible-playbook` from.
@@ -200,13 +179,18 @@ copy it to the directory you're going to run `ansible-playbook` from.
 ```bash
 $ cp openshift-ansible/ansible.cfg ansible.cfg
 ```
 
-Then run the provisioning playbook -- this will create the OpenStack
+Then run the provision + install playbook -- this will create the OpenStack
 resources:
 
 ```bash
-$ ansible-playbook --user openshift -i inventory openshift-ansible/playbooks/openstack/openshift-cluster/provision.yaml
+$ ansible-playbook --user openshift -i inventory \
+  openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml \
+  -e openshift_repos_enable_testing=true
 ```
 
+Note: the testing repo is intended for development purposes only;
+normally, `openshift_repos_enable_testing` should not be specified.
+
 If you're using multiple inventories, make sure you pass the path
 to the right one to `-i`.
 
@@ -214,15 +198,6 @@ If your SSH private key is not in `~/.ssh/id_rsa` use the `--private-key`
 option to specify the correct path.
 
 
-### 4. Installing OpenShift
-
-Run the `byo/config.yml` playbook on top of the OpenStack nodes we have
-prepared.
-
-```bash
-$ ansible-playbook -i inventory openshift-ansible/playbooks/byo/config.yml
-```
-
 
 ### Next Steps
 
@@ -240,7 +215,6 @@ advanced configuration:
 * [External Dns][external-dns]
 * Multiple Clusters (TODO)
 * [Cinder Registry][cinder-registry]
-* [Bastion Node][bastion]
 
 
 [ansible]: https://www.ansible.com/
@@ -259,4 +233,3 @@ advanced configuration:
 [loadbalancer]: ./advanced-configuration.md#multi-master-configuration
 [external-dns]: ./advanced-configuration.md#dns-configuration-variables
 [cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry
-[bastion]: ./advanced-configuration.md#configure-static-inventory-and-access-via-a-bastion-node
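The testing-repo toggle shown above can also live in the inventory rather than on the command line; the commented-out entry appears in the sample `group_vars/OSEv3.yml` later in this diff. A minimal sketch:

```yaml
# Equivalent to passing -e openshift_repos_enable_testing=true on the CLI;
# leave this unset (or commented out) for normal deployments.
openshift_repos_enable_testing: true
```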
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index 90cc20b98..cb8af4a9e 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -47,44 +47,42 @@ Otherwise, even if there are differences between the two versions, installation
 
 ## Accessing the OpenShift Cluster
 
-### Use the Cluster DNS
+### Configure DNS
 
-In addition to the OpenShift nodes, we created a DNS server with all
-the necessary entries. We will configure your *Ansible host* to use
-this new DNS and talk to the deployed OpenShift.
+OpenShift requires two DNS records to function fully. The first one points to
+the master/load balancer and provides the UI/API access. The other one is a
+wildcard domain that resolves app route requests to the infra node.
 
-First, get the DNS IP address:
+If you followed the default installation from the README section, there is no
+DNS configured. You should add two entries to the `/etc/hosts` file on the
+Ansible host (where you run Ansible from) to do a quick validation. A real
+deployment will however require a DNS server with the following entries set.
 
-```bash
-$ openstack server show dns-0.openshift.example.com --format value --column addresses
-openshift-ansible-openshift.example.com-net=192.168.99.11, 10.40.128.129
-```
-
-Note the floating IP address (it's `10.40.128.129` in this case) -- if
-you're not sure, try pinging them both -- it's the one that responds
-to pings.
+First, run the `openstack server list` command and note the floating IP
+addresses of the *master* and *infra* nodes (we will use `10.40.128.130` for
+master and `10.40.128.134` for infra here).
 
-Next, edit your `/etc/resolv.conf` as root and put `nameserver DNS_IP` as your
-**first entry**.
-
-If your `/etc/resolv.conf` currently looks like this:
+Then add the following entries to your `/etc/hosts`:
 
 ```
-; generated by /usr/sbin/dhclient-script
-search openstacklocal
-nameserver 192.168.0.3
-nameserver 192.168.0.2
+10.40.128.130 console.openshift.example.com
+10.40.128.134 cakephp-mysql-example-test.apps.openshift.example.com
 ```
 
-Change it to this:
+This points the cluster domain (as defined in the
+`openshift_master_cluster_public_hostname` Ansible variable in `OSEv3`) to the
+master node and any routes for deployed apps to the infra node.
+
+If you deploy another app, it will end up with a different URL (e.g.
+`myapp-test.apps.openshift.example.com`) and you will need to add that entry
+too. This is why a real deployment should always run a DNS server where the
+second entry is a wildcard `*.apps.openshift.example.com`.
+
+This will be sufficient to validate the cluster here.
+
+Take a look at the [External DNS](#dns-configuration-variables) section for
+configuring a DNS service.
 
-```
-; generated by /usr/sbin/dhclient-script
-search openstacklocal
-nameserver 10.40.128.129
-nameserver 192.168.0.3
-nameserver 192.168.0.2
-```
 
 ### Get the `oc` Client
 
@@ -330,14 +328,6 @@ The `openshift_openstack_required_packages` variable also provides a list
 of the prerequisite packages to be installed before to deploy an OpenShift
 cluster. Those are ignored though, if the `manage_packages: False`.
 
-The `openstack_inventory` controls either a static inventory will be created after the
-cluster nodes provisioned on OpenStack cloud. Note, the fully dynamic inventory
-is yet to be supported, so the static inventory will be created anyway.
-
-The `openstack_inventory_path` points the directory to host the generated static inventory.
-It should point to the copied example inventory directory, otherwise ti creates
-a new one for you.
-
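For the "real DNS server" case the text calls for, these playbooks support pushing records via nsupdate. A hedged sketch of the relevant `group_vars/all.yml` stanza, mirroring the commented sample later in this diff; the key material and server address are placeholders:

```yaml
# Placeholder values only -- substitute your own TSIG key and DNS server.
openshift_openstack_external_nsupdate_keys:
  public:
    key_secret: 'CHANGE_ME_TSIG_KEY_BASE64=='
    key_algorithm: 'hmac-md5'
    server: '192.168.1.2'
```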
 ## Multi-master configuration
 
 Please refer to the official documentation for the
@@ -540,43 +530,6 @@ You can also run the registry setup playbook directly:
 
 
-## Configure static inventory and access via a bastion node
-
-Example inventory variables:
-
-    openshift_openstack_use_bastion: true
-    openshift_openstack_bastion_ingress_cidr: "{{openshift_openstack_subnet_prefix}}.0/24"
-    openstack_private_ssh_key: ~/.ssh/id_rsa
-    openstack_inventory: static
-    openstack_inventory_path: ../../../../inventory
-    openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com
-
-The `openshift_openstack_subnet_prefix` is the openstack private network for your cluster.
-And the `openshift_openstack_bastion_ingress_cidr` defines accepted range for SSH connections to nodes
-additionally to the `openshift_openstack_ssh_ingress_cidr`` (see the security notes above).
-
-The SSH config will be stored on the ansible control node by the
-gitven path. Ansible uses it automatically. To access the cluster nodes with
-that ssh config, use the `-F` prefix, f.e.:
-
-    ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK
-
-Note, relative paths will not work for the `openstack_ssh_config_path`, but it
-works for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this
-guide, the latter points to the current directory, where you run ansible commands
-from.
-
-To verify nodes connectivity, use the command:
-
-    ansible -v -i inventory/hosts -m ping all
-
-If something is broken, double-check the inventory variables, paths and the
-generated `<openstack_inventory_path>/hosts` and `openstack_ssh_config_path` files.
-
-The `inventory: dynamic` can be used instead to access cluster nodes directly via
-floating IPs. In this mode you can not use a bastion node and should specify
-the dynamic inventory file in your ansible commands , like `-i openstack.py`.
-
 ## Using Docker on the Ansible host
 
 If you don't want to worry about the dependencies, you can use the
@@ -606,28 +559,6 @@ the playbooks:
 
     ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
 
-### Run the playbook
-
-Assuming your OpenStack (Keystone) credentials are in the `keystonerc`
-this is how you stat the provisioning process from your ansible control node:
-
-    . keystonerc
-    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
-
-Note, here you start with an empty inventory. The static inventory will be populated
-with data so you can omit providing additional arguments for future ansible commands.
-
-If bastion enabled, the generates SSH config must be applied for ansible.
-Otherwise, it is auto included by the previous step. In order to execute it
-as a separate playbook, use the following command:
-
-    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml
-
-The first infra node then becomes a bastion node as well and proxies access
-for future ansible commands. The post-provision step also configures Satellite,
-if requested, and DNS server, and ensures other OpenShift requirements to be met.
-
 
 ## Running Custom Post-Provision Actions
 
 A custom playbook can be run like this:
@@ -735,21 +666,6 @@ Once it succeeds, you can install openshift by running:
 
 OpenShift UI may be accessed via the 1st master node FQDN, port 8443.
 
-When using a bastion, you may want to make an SSH tunnel from your control node
-to access UI on the `https://localhost:8443`, with this inventory variable:
-
-    openshift_openstack_ui_ssh_tunnel: True
-
-Note, this requires sudo rights on the ansible control node and an absolute path
-for the `openstack_private_ssh_key`. You should also update the control node's
-`/etc/hosts`:
-
-    127.0.0.1 master-0.openshift.example.com
-
-In order to access UI, the ssh-tunnel service will be created and started on the
-control node. Make sure to remove these changes and the service manually, when not
-needed anymore.
-
 ## Scale Deployment up/down
 
 ### Scaling up
@@ -768,5 +684,3 @@ Usage:
 
 ```
 ansible-playbook -i <path to inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` [-e increment_by=<number>] [-e openshift_ansible_dir=<path to openshift-ansible>]
 ```
-
-Note: This playbook works only without a bastion node (`openshift_openstack_use_bastion: False`).
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index 1e55adb9e..68d898d9a 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -1,12 +1,13 @@
 ---
+## OpenShift product versions and repos to install from
 openshift_deployment_type: origin
+#openshift_repos_enable_testing: true
 #openshift_deployment_type: openshift-enterprise
 #openshift_release: v3.5
 openshift_master_default_subdomain: "apps.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
 
 openshift_master_cluster_method: native
-openshift_master_cluster_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
-openshift_master_cluster_public_hostname: "{{ openshift_master_cluster_hostname }}"
+openshift_master_cluster_public_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
 
 osm_default_node_selector: 'region=primary'
diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml
index 921edb867..ae1528123 100644
--- a/playbooks/openstack/sample-inventory/group_vars/all.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/all.yml
@@ -10,7 +10,6 @@ openshift_openstack_dns_nameservers: []
 #openshift_openstack_node_hostname: "app-node"
 #openshift_openstack_lb_hostname: "lb"
 #openshift_openstack_etcd_hostname: "etcd"
-#openshift_openstack_dns_hostname: "dns"
 
 openshift_openstack_keypair_name: "openshift"
 openshift_openstack_external_network_name: "public"
@@ -34,7 +33,6 @@ openshift_openstack_external_network_name: "public"
 #openshift_openstack_node_image_name: "centos7"
 #openshift_openstack_lb_image_name: "centos7"
 #openshift_openstack_etcd_image_name: "centos7"
-#openshift_openstack_dns_image_name: "centos7"
 openshift_openstack_default_image_name: "centos7"
 
 openshift_openstack_num_masters: 1
@@ -49,7 +47,6 @@ openshift_openstack_num_nodes: 2
 #openshift_openstack_node_flavor: "m1.medium"
 #openshift_openstack_lb_flavor: "m1.medium"
 #openshift_openstack_etcd_flavor: "m1.medium"
-#openshift_openstack_dns_flavor: "m1.medium"
 openshift_openstack_default_flavor: "m1.medium"
 
 # # Numerical index of nodes to remove
@@ -62,7 +59,6 @@
 #openshift_openstack_docker_infra_volume_size: "15"
 #openshift_openstack_docker_node_volume_size: "15"
 #openshift_openstack_docker_etcd_volume_size: "2"
-#openshift_openstack_docker_dns_volume_size: "1"
 #openshift_openstack_docker_lb_volume_size: "5"
 openshift_openstack_docker_volume_size: "15"
 
@@ -110,7 +106,6 @@ openshift_openstack_subnet_prefix: "192.168.99"
 
 # # Roll-your-own DNS
-#openshift_openstack_num_dns: 0
 #openshift_openstack_external_nsupdate_keys:
 #  public:
 #    key_secret: 'SKqKNdpfk7llKxZ57bbxUnUDobaaJp9t8CjXLJPl+fRI5mPcSBuxTAyvJPa6Y9R7vUg9DwCy/6WTpgLNqnV4Hg=='
@@ -121,10 +116,6 @@
 #    key_algorithm: 'hmac-md5'
 #    server: '192.168.1.2'
 
-# # Customize DNS server security options
-#named_public_recursion: 'no'
-#named_private_recursion: 'yes'
-
 # NOTE(shadower): Do not change this value. The Ansible user is currently
 # hardcoded to `openshift`.
diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/sample-inventory/inventory.py
index 47c56d94d..ad3fd936b 100755
--- a/playbooks/openstack/sample-inventory/inventory.py
+++ b/playbooks/openstack/sample-inventory/inventory.py
@@ -79,10 +79,19 @@ def build_inventory():
         public_v4 = server.public_v4 or server.private_v4
         if public_v4:
-            hostvars['public_v4'] = public_v4
+            hostvars['public_v4'] = server.public_v4
+            hostvars['openshift_public_ip'] = server.public_v4
         # TODO(shadower): what about multiple networks?
         if server.private_v4:
             hostvars['private_v4'] = server.private_v4
+            # NOTE(shadower): Yes, we set both hostname and IP to the private
+            # IP address for each node. OpenStack doesn't resolve nodes by
+            # name at all, so using a hostname here would require an internal
+            # DNS which would complicate the setup and potentially introduce
+            # performance issues.
+            hostvars['openshift_ip'] = server.private_v4
+            hostvars['openshift_hostname'] = server.private_v4
+        hostvars['openshift_public_hostname'] = server.name
 
         node_labels = server.metadata.get('node_labels')
         if node_labels:
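The net effect of the inventory.py change is that each node resolves itself by IP rather than by name. A rough sketch of the per-host variables the script now emits for a single master; all values are illustrative and depend on your cloud:

```yaml
# Hypothetical hostvars for one node, as produced by build_inventory():
openshift_ip: 192.168.99.10          # private IP, used for cluster-internal traffic
openshift_hostname: 192.168.99.10    # same IP, avoiding the need for internal DNS
openshift_public_ip: 10.40.128.130   # floating IP, when one is attached
openshift_public_hostname: master-0.openshift.example.com  # the Nova server name
```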
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 582dfe794..7dd59c5d8 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -1,7 +1,12 @@
 ---
-- name: Place holder for prerequisites
-  hosts: localhost
-  gather_facts: false
+- include: init/main.yml
+  vars:
+    skip_verison: True
+
+- hosts: "{{ l_containerized_host_groups }}"
+  vars:
+    l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}"
+    l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
   tasks:
-  - name: Debug placeholder
-    debug: msg="Prerequisites ran."
+  - include_role:
+      name: container_runtime
diff --git a/playbooks/redeploy-certificates.yml b/playbooks/redeploy-certificates.yml
new file mode 100644
index 000000000..45135c10e
--- /dev/null
+++ b/playbooks/redeploy-certificates.yml
@@ -0,0 +1,26 @@
+---
+- include: init/main.yml
+
+- include: openshift-etcd/private/redeploy-certificates.yml
+
+- include: openshift-master/private/redeploy-certificates.yml
+
+- include: openshift-node/private/redeploy-certificates.yml
+
+- include: openshift-etcd/private/restart.yml
+  vars:
+    g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
+
+- include: openshift-master/private/restart.yml
+
+- include: openshift-node/private/restart.yml
+
+- include: openshift-hosted/private/redeploy-router-certificates.yml
+  when: openshift_hosted_manage_router | default(true) | bool
+
+- include: openshift-hosted/private/redeploy-registry-certificates.yml
+  when: openshift_hosted_manage_registry | default(true) | bool
+
+- include: openshift-master/private/revert-client-ca.yml
+
+- include: openshift-master/private/restart.yml
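The `l_containerized_host_groups` indirection in the new prerequisites play above simply builds an Ansible host pattern. A hedged sketch of the expansion, assuming a user adds one extra group in their inventory:

```yaml
# Hypothetical inventory value:
openshift_containerized_host_groups:
  - oo_etcd_to_config
# The play's union + join over this list then yields the host pattern:
#   "oo_nodes_to_config:oo_etcd_to_config"
```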
diff --git a/playbooks/roles b/playbooks/roles
new file mode 120000
index 000000000..d8c4472ca
--- /dev/null
+++ b/playbooks/roles
@@ -0,0 +1 @@
+../roles
\ No newline at end of file
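The new `playbooks/roles` entry is a symlink (mode 120000) whose blob content is the literal target `../roles`; it lets the relocated playbooks resolve the repository's roles without extra `ansible.cfg` tweaks. A sketch of the equivalent operation expressed as an Ansible task, assuming it runs from the repository root:

```yaml
- name: Symlink playbooks/roles to the top-level roles directory
  file:
    src: ../roles          # relative target, exactly as stored in the blob
    dest: playbooks/roles
    state: link
```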