Diffstat (limited to 'playbooks')
38 files changed, 47 insertions, 229 deletions
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
index 44a2ef534..69b2541bb 100644
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -8,7 +8,7 @@
   hosts: masters:!masters[0]
   pre_tasks:
   - set_fact:
-      openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+      openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
   tasks:
   - include_role:
       name: openshift_logging
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 0c2a2c7e8..ed7a7bd1a 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -62,7 +62,6 @@
     - origin-master
     - origin-master-api
     - origin-master-controllers
-    - pcsd
     failed_when: false
 
 - hosts: etcd
@@ -384,8 +383,6 @@
     - origin-excluder
     - origin-docker-excluder
     - origin-master
-    - pacemaker
-    - pcs
     register: result
     until: result | success
@@ -456,8 +453,6 @@
     - /etc/sysconfig/origin-master-api
     - /etc/sysconfig/origin-master-controllers
     - /usr/share/openshift/examples
-    - /var/lib/pacemaker
-    - /var/lib/pcsd
     - /usr/lib/systemd/system/atomic-openshift-master-api.service
     - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
     - /usr/lib/systemd/system/origin-master-api.service
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
index da7ec9d21..1dabae357 100644
--- a/playbooks/aws/openshift-cluster/hosted.yml
+++ b/playbooks/aws/openshift-cluster/hosted.yml
@@ -4,7 +4,7 @@
 - include: ../../openshift-metrics/private/config.yml
   when: openshift_metrics_install_metrics | default(false) | bool
 
-- include: ../../common/openshift-cluster/openshift_logging.yml
+- include: ../../openshift-logging/private/config.yml
   when: openshift_logging_install_logging | default(false) | bool
 
 - include: ../../openshift-prometheus/private/config.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml
index 74e186f33..76bd47c4f 100644
--- a/playbooks/byo/openshift-cluster/openshift-logging.yml
+++ b/playbooks/byo/openshift-cluster/openshift-logging.yml
@@ -1,9 +1,3 @@
 ---
-#
-# This playbook is a preview of upcoming changes for installing
-# Hosted logging on.  See inventory/byo/hosts.*.example for the
-# currently supported method.
-#
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-cluster/openshift_logging.yml
+# TODO (rteague): Temporarily leaving this playbook to allow CI tests to operate until CI jobs are updated.
+- include: ../../openshift-logging/config.yml
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 2eeb81b86..a8ca5e686 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -23,7 +23,7 @@
 - include: ../../openshift-metrics/private/config.yml
   when: openshift_metrics_install_metrics | default(false) | bool
 
-- include: openshift_logging.yml
+- include: ../../openshift-logging/private/config.yml
   when: openshift_logging_install_logging | default(false) | bool
 
 - include: ../../openshift-prometheus/private/config.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 83f16ac0d..3b779becb 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -6,10 +6,6 @@
   retries: 3
   delay: 30
 
-- name: Update docker facts
-  openshift_facts:
-    role: docker
-
 - name: Restart containerized services
   service: name={{ item }} state=started
   with_items:
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 446f315d6..84b740227 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -6,7 +6,7 @@
 - name: Update oreg_auth docker login credentials if necessary
   include_role:
-    name: docker
+    name: container_runtime
     tasks_from: registry_auth.yml
   when: oreg_auth_user is defined
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index d7a52707c..503d75ba0 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -143,10 +143,6 @@
   roles:
   - { role: openshift_cli }
   vars:
-    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
-    # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
-    # restart.
-    skip_docker_role: True
     __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
   tasks:
   - name: Reconcile Cluster Roles
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 6cb6a665f..5f9c56867 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -73,12 +73,6 @@
     openshift_release: "{{ openshift_upgrade_target }}"
     openshift_protect_installed_version: False
 
-    # We skip the docker role at this point in upgrade to prevent
-    # unintended package, container, or config upgrades which trigger
-    # docker restarts. At this early stage of upgrade we can assume
-    # docker is configured and running.
-    skip_docker_role: True
-
 - include: ../../../../openshift-master/private/validate_restart.yml
   tags:
   - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 8f48bedcc..1aac3d014 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -77,12 +77,6 @@
     openshift_release: "{{ openshift_upgrade_target }}"
     openshift_protect_installed_version: False
 
-    # We skip the docker role at this point in upgrade to prevent
-    # unintended package, container, or config upgrades which trigger
-    # docker restarts. At this early stage of upgrade we can assume
-    # docker is configured and running.
-    skip_docker_role: True
-
 - include: ../../../../openshift-master/private/validate_restart.yml
   tags:
   - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index f25cfe0d0..306b76422 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -66,12 +66,6 @@
     openshift_release: "{{ openshift_upgrade_target }}"
     openshift_protect_installed_version: False
 
-    # We skip the docker role at this point in upgrade to prevent
-    # unintended package, container, or config upgrades which trigger
-    # docker restarts. At this early stage of upgrade we can assume
-    # docker is configured and running.
-    skip_docker_role: True
-
 - name: Verify masters are already upgraded
   hosts: oo_masters_to_config
   tags:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 2b99568c7..6d4949542 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -77,12 +77,6 @@
     openshift_release: "{{ openshift_upgrade_target }}"
     openshift_protect_installed_version: False
 
-    # We skip the docker role at this point in upgrade to prevent
-    # unintended package, container, or config upgrades which trigger
-    # docker restarts. At this early stage of upgrade we can assume
-    # docker is configured and running.
-    skip_docker_role: True
-
 - include: ../../../../openshift-master/private/validate_restart.yml
   tags:
   - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index d3d2046e6..0a592896b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -81,12 +81,6 @@
     openshift_release: "{{ openshift_upgrade_target }}"
     openshift_protect_installed_version: False
 
-    # We skip the docker role at this point in upgrade to prevent
-    # unintended package, container, or config upgrades which trigger
-    # docker restarts. At this early stage of upgrade we can assume
-    # docker is configured and running.
-    skip_docker_role: True
-
 - include: ../../../../openshift-master/private/validate_restart.yml
   tags:
   - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index c0546bd2d..b381d606a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -66,12 +66,6 @@
     openshift_release: "{{ openshift_upgrade_target }}"
     openshift_protect_installed_version: False
 
-    # We skip the docker role at this point in upgrade to prevent
-    # unintended package, container, or config upgrades which trigger
-    # docker restarts. At this early stage of upgrade we can assume
-    # docker is configured and running.
-    skip_docker_role: True
-
 - name: Verify masters are already upgraded
   hosts: oo_masters_to_config
   tags:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index b602cdd0e..e7d7756d1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -77,12 +77,6 @@
     openshift_release: "{{ openshift_upgrade_target }}"
     openshift_protect_installed_version: False
 
-    # We skip the docker role at this point in upgrade to prevent
-    # unintended package, container, or config upgrades which trigger
-    # docker restarts. At this early stage of upgrade we can assume
-    # docker is configured and running.
-    skip_docker_role: True
-
 - include: ../../../../openshift-master/private/validate_restart.yml
   tags:
   - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index da81e6dea..be362e3ff 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -81,12 +81,6 @@
     openshift_release: "{{ openshift_upgrade_target }}"
     openshift_protect_installed_version: False
 
-    # We skip the docker role at this point in upgrade to prevent
-    # unintended package, container, or config upgrades which trigger
-    # docker restarts. At this early stage of upgrade we can assume
-    # docker is configured and running.
-    skip_docker_role: True
-
 - include: ../../../../openshift-master/private/validate_restart.yml
   tags:
   - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index abd56e762..6e68116b0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -66,12 +66,6 @@
     openshift_release: "{{ openshift_upgrade_target }}"
     openshift_protect_installed_version: False
 
-    # We skip the docker role at this point in upgrade to prevent
-    # unintended package, container, or config upgrades which trigger
-    # docker restarts. At this early stage of upgrade we can assume
-    # docker is configured and running.
-    skip_docker_role: True
-
 - name: Verify masters are already upgraded
   hosts: oo_masters_to_config
   tags:
diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml
index 1166ac538..d41f365dc 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/facts.yml
@@ -135,11 +135,13 @@
     - openshift_http_proxy is defined or openshift_https_proxy is defined
     - openshift_generate_no_proxy_hosts | default(True) | bool
 
+  - name: Initialize openshift.node.sdn_mtu
+    openshift_facts:
+      role: node
+      local_facts:
+        sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
+
   - name: initialize_facts set_fact repoquery command
     set_fact:
       repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
       repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}"
-
-  - name: initialize_facts set_fact on openshift_docker_hosted_registry_network
-    set_fact:
-      openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index 1d4f41ffc..5a7483b72 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -24,6 +24,7 @@
 - import_playbook: repos.yml
 
 - import_playbook: version.yml
+  when: not (skip_verison | default(False))
 
 - name: Initialization Checkpoint End
   hosts: all
diff --git a/playbooks/openshift-glusterfs/private/registry.yml b/playbooks/openshift-glusterfs/private/registry.yml
index 75c1f0300..917b729f9 100644
--- a/playbooks/openshift-glusterfs/private/registry.yml
+++ b/playbooks/openshift-glusterfs/private/registry.yml
@@ -1,40 +1,11 @@
 ---
 - import_playbook: config.yml
 
-- name: Initialize GlusterFS registry PV and PVC vars
-  hosts: oo_first_master
-  tags: hosted
-  tasks:
-  - set_fact:
-      glusterfs_pv: []
-      glusterfs_pvc: []
-
-  - set_fact:
-      glusterfs_pv:
-      - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume"
-        capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
-        access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
-        storage:
-          glusterfs:
-            endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}"
-            path: "{{ openshift.hosted.registry.storage.glusterfs.path }}"
-            readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}"
-      glusterfs_pvc:
-      - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
-        capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
-        access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
-    when: openshift.hosted.registry.storage.glusterfs.swap
-
 - name: Create persistent volumes
   hosts: oo_first_master
-  tags:
-  - hosted
-  vars:
-    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}"
-    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}"
   roles:
   - role: openshift_persistent_volumes
-    when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0
+    when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
 
 - name: Create Hosted Resources
   hosts: oo_first_master
diff --git a/playbooks/openshift-hosted/private/cockpit-ui.yml b/playbooks/openshift-hosted/private/cockpit-ui.yml
index 359132dd0..d6529425b 100644
--- a/playbooks/openshift-hosted/private/cockpit-ui.yml
+++ b/playbooks/openshift-hosted/private/cockpit-ui.yml
@@ -5,4 +5,4 @@
   - role: cockpit-ui
     when:
     - openshift_hosted_manage_registry | default(true) | bool
-    - not openshift.docker.hosted_registry_insecure | default(false) | bool
+    - not (openshift_docker_hosted_registry_insecure | default(false)) | bool
diff --git a/playbooks/openshift-hosted/private/create_persistent_volumes.yml b/playbooks/openshift-hosted/private/create_persistent_volumes.yml
index 8a60a30b8..41ae2eb69 100644
--- a/playbooks/openshift-hosted/private/create_persistent_volumes.yml
+++ b/playbooks/openshift-hosted/private/create_persistent_volumes.yml
@@ -1,9 +1,5 @@
 ---
 - name: Create Hosted Resources - persistent volumes
   hosts: oo_first_master
-  vars:
-    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
-    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
   roles:
   - role: openshift_persistent_volumes
-    when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml
index d737b836b..78fe663db 100644
--- a/playbooks/openshift-loadbalancer/private/config.yml
+++ b/playbooks/openshift-loadbalancer/private/config.yml
@@ -11,14 +11,12 @@
           status: "In Progress"
           start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
-- name: Configure firewall and docker for load balancers
+- name: Configure firewall load balancers
   hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
   vars:
     openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
   roles:
   - role: os_firewall
-  - role: openshift_docker
-    when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
 
 - name: Configure load balancers
   hosts: oo_lb_to_config
diff --git a/playbooks/openshift-logging/config.yml b/playbooks/openshift-logging/config.yml
new file mode 100644
index 000000000..8837a2d32
--- /dev/null
+++ b/playbooks/openshift-logging/config.yml
@@ -0,0 +1,9 @@
+---
+#
+# This playbook is a preview of upcoming changes for installing
+# Hosted logging on.  See inventory/byo/hosts.*.example for the
+# currently supported method.
+#
+- include: ../init/main.yml
+
+- include: private/config.yml
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/openshift-logging/private/config.yml
index bc59bd95a..bc59bd95a 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/openshift-logging/private/config.yml
diff --git a/playbooks/openshift-logging/private/library b/playbooks/openshift-logging/private/library
new file mode 120000
index 000000000..ba40d2f56
--- /dev/null
+++ b/playbooks/openshift-logging/private/library
@@ -0,0 +1 @@
+../../../library
\ No newline at end of file
diff --git a/playbooks/openshift-logging/private/roles b/playbooks/openshift-logging/private/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/openshift-logging/private/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index b7cfbe4e4..a90cd6b22 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -19,8 +19,6 @@
     openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
     omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
   roles:
-  - role: openshift_master_cluster
-    when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
   - role: openshift_project_request_template
     when: openshift_project_request_template_manage
   - role: openshift_examples
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index 97acc5d5d..ecf8f15d9 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -183,7 +183,6 @@
   systemd: name={{ openshift.common.service_type }}-master-api state=restarted
   when:
   - yedit_output.changed
-  - openshift.master.cluster_method == 'native'
 
 # We retry the controllers because the API may not be 100% initialized yet.
 - name: restart master controllers
@@ -194,7 +193,6 @@
   until: result.rc == 0
   when:
   - yedit_output.changed
-  - openshift.master.cluster_method == 'native'
 
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml
index 5dbb21502..1077d0b9c 100644
--- a/playbooks/openshift-master/private/validate_restart.yml
+++ b/playbooks/openshift-master/private/validate_restart.yml
@@ -14,9 +14,6 @@
     - role: common
       local_facts:
         rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
-    - role: master
-      local_facts:
-        cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
 
 # Creating a temp file on localhost, we then check each system that will
 # be rebooted to see if that file exists, if so we know we're running
diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml
index 06f3df9fa..dc5d7a57e 100644
--- a/playbooks/openshift-node/private/configure_nodes.yml
+++ b/playbooks/openshift-node/private/configure_nodes.yml
@@ -4,7 +4,6 @@
   vars:
     openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
     openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
-    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
     openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
                                                     | union(groups['oo_masters_to_config'])
                                                     | union(groups['oo_etcd_to_config'] | default([])))
diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml
index 3c3ac3646..5afa83be7 100644
--- a/playbooks/openshift-node/private/containerized_nodes.yml
+++ b/playbooks/openshift-node/private/containerized_nodes.yml
@@ -5,7 +5,6 @@
   vars:
     openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
     openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
-    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
     openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
                                                     | union(groups['oo_masters_to_config'])
                                                     | union(groups['oo_etcd_to_config'] | default([])))
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index c3beb59b7..41eb00f99 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -16,10 +16,6 @@
     retries: 3
     delay: 30
 
-  - name: Update docker facts
-    openshift_facts:
-      role: docker
-
   - name: Restart containerized services
     service:
       name: "{{ item }}"
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index c762169eb..f567242cd 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -24,7 +24,7 @@ The OpenStack release must be Newton (for Red Hat OpenStack this is
 version 10) or newer. It must also satisfy these requirements:
 
 * Heat (Orchestration) must be available
-* The deployment image (CentOS 7 or RHEL 7) must be loaded
+* The deployment image (CentOS 7.4 or RHEL 7) must be loaded
 * The deployment flavor must be available to your user
   - `m1.medium` / 4GB RAM + 40GB disk should be enough for testing
   - look at
@@ -183,9 +183,14 @@ Then run the provision + install playbook -- this will create the OpenStack
 resources:
 
 ```bash
-$ ansible-playbook --user openshift -i inventory openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml
+$ ansible-playbook --user openshift -i inventory \
+  openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml \
+  -e openshift_repos_enable_testing=true
 ```
 
+Note, you may want to use the testing repo for development purposes only.
+Normally, `openshift_repos_enable_testing` should not be specified.
+
 If you're using multiple inventories, make sure you pass the path to
 the right one to `-i`.
 
@@ -210,7 +215,6 @@ advanced configuration:
 * [External Dns][external-dns]
 * Multiple Clusters (TODO)
 * [Cinder Registry][cinder-registry]
-* [Bastion Node][bastion]
 
 
 [ansible]: https://www.ansible.com/
@@ -229,4 +233,3 @@ advanced configuration:
 [loadbalancer]: ./advanced-configuration.md#multi-master-configuration
 [external-dns]: ./advanced-configuration.md#dns-configuration-variables
 [cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry
-[bastion]: ./advanced-configuration.md#configure-static-inventory-and-access-via-a-bastion-node
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index c0bdf5020..f22243fbd 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -328,14 +328,6 @@ The `openshift_openstack_required_packages` variable also provides a list of the
 prerequisite packages to be installed before to deploy an OpenShift cluster.
 Those are ignored though, if the `manage_packages: False`.
 
-The `openstack_inventory` controls either a static inventory will be created after the
-cluster nodes provisioned on OpenStack cloud. Note, the fully dynamic inventory
-is yet to be supported, so the static inventory will be created anyway.
-
-The `openstack_inventory_path` points the directory to host the generated static inventory.
-It should point to the copied example inventory directory, otherwise ti creates
-a new one for you.
-
 ## Multi-master configuration
 
 Please refer to the official documentation for the
@@ -345,7 +337,6 @@ variables](https://docs.openshift.com/container-platform/3.6/install_config/inst
 in `inventory/group_vars/OSEv3.yml`. For example, given a load balancer node
 under the ansible group named `ext_lb`:
 
-    openshift_master_cluster_method: native
     openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}"
     openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}"
 
@@ -538,43 +529,6 @@ You can also run the registry setup playbook directly:
-
-## Configure static inventory and access via a bastion node
-
-Example inventory variables:
-
-    openshift_openstack_use_bastion: true
-    openshift_openstack_bastion_ingress_cidr: "{{openshift_openstack_subnet_prefix}}.0/24"
-    openstack_private_ssh_key: ~/.ssh/id_rsa
-    openstack_inventory: static
-    openstack_inventory_path: ../../../../inventory
-    openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com
-
-The `openshift_openstack_subnet_prefix` is the openstack private network for your cluster.
-And the `openshift_openstack_bastion_ingress_cidr` defines accepted range for SSH connections to nodes
-additionally to the `openshift_openstack_ssh_ingress_cidr`` (see the security notes above).
-
-The SSH config will be stored on the ansible control node by the
-gitven path. Ansible uses it automatically. To access the cluster nodes with
-that ssh config, use the `-F` prefix, f.e.:
-
-    ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK
-
-Note, relative paths will not work for the `openstack_ssh_config_path`, but it
-works for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this
-guide, the latter points to the current directory, where you run ansible commands
-from.
-
-To verify nodes connectivity, use the command:
-
-    ansible -v -i inventory/hosts -m ping all
-
-If something is broken, double-check the inventory variables, paths and the
-generated `<openstack_inventory_path>/hosts` and `openstack_ssh_config_path` files.
-
-The `inventory: dynamic` can be used instead to access cluster nodes directly via
-floating IPs. In this mode you can not use a bastion node and should specify
-the dynamic inventory file in your ansible commands , like `-i openstack.py`.
-
 ## Using Docker on the Ansible host
 
 If you don't want to worry about the dependencies, you can use the
@@ -604,28 +558,6 @@ the playbooks:
 
     ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
 
-### Run the playbook
-
-Assuming your OpenStack (Keystone) credentials are in the `keystonerc`
-this is how you stat the provisioning process from your ansible control node:
-
-    . keystonerc
-    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
-
-Note, here you start with an empty inventory. The static inventory will be populated
-with data so you can omit providing additional arguments for future ansible commands.
-
-If bastion enabled, the generates SSH config must be applied for ansible.
-Otherwise, it is auto included by the previous step. In order to execute it
-as a separate playbook, use the following command:
-
-    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml
-
-The first infra node then becomes a bastion node as well and proxies access
-for future ansible commands. The post-provision step also configures Satellite,
-if requested, and DNS server, and ensures other OpenShift requirements to be met.
-
-
 ## Running Custom Post-Provision Actions
 
 A custom playbook can be run like this:
@@ -733,21 +665,6 @@ Once it succeeds, you can install openshift by running:
 
 OpenShift UI may be accessed via the 1st master node FQDN, port 8443.
 
-When using a bastion, you may want to make an SSH tunnel from your control node
-to access UI on the `https://localhost:8443`, with this inventory variable:
-
-   openshift_openstack_ui_ssh_tunnel: True
-
-Note, this requires sudo rights on the ansible control node and an absolute path
-for the `openstack_private_ssh_key`. You should also update the control node's
-`/etc/hosts`:
-
-    127.0.0.1 master-0.openshift.example.com
-
-In order to access UI, the ssh-tunnel service will be created and started on the
-control node. Make sure to remove these changes and the service manually, when not
-needed anymore.
-
 ## Scale Deployment up/down
 
 ### Scaling up
@@ -766,5 +683,3 @@ Usage:
 ```
 ansible-playbook -i <path to inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` [-e increment_by=<number>] [-e openshift_ansible_dir=<path to openshift-ansible>]
 ```
-
-Note: This playbook works only without a bastion node (`openshift_openstack_use_bastion: False`).
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index 90608bbc0..933117127 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -1,10 +1,11 @@
 ---
+## Openshift product versions and repos to install from
 openshift_deployment_type: origin
+#openshift_repos_enable_testing: true
 #openshift_deployment_type: openshift-enterprise
 #openshift_release: v3.5
 openshift_master_default_subdomain: "apps.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
 
-openshift_master_cluster_method: native
 openshift_master_cluster_public_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
 
 osm_default_node_selector: 'region=primary'
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 582dfe794..7dd59c5d8 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -1,7 +1,12 @@
 ---
-- name: Place holder for prerequisites
-  hosts: localhost
-  gather_facts: false
+- include: init/main.yml
+  vars:
+    skip_verison: True
+
+- hosts: "{{ l_containerized_host_groups }}"
+  vars:
+    l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}"
+    l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
   tasks:
-  - name: Debug placeholder
-    debug: msg="Prerequisites ran."
+    - include_role:
+        name: container_runtime
diff --git a/playbooks/roles b/playbooks/roles
new file mode 120000
index 000000000..d8c4472ca
--- /dev/null
+++ b/playbooks/roles
@@ -0,0 +1 @@
+../roles
\ No newline at end of file
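
A quick usage sketch for the entry points touched above. The playbook paths are taken directly from the hunks in this diff; the inventory path is a placeholder, and the invocations are ordinary `ansible-playbook` usage rather than anything this change adds:

```bash
# Hypothetical invocations, assuming a checkout of openshift-ansible and an
# inventory file at /path/to/inventory.

# New prerequisites entry point: runs init (with version checks skipped via
# the skip_verison flag) and applies the container_runtime role to node hosts.
ansible-playbook -i /path/to/inventory playbooks/prerequisites.yml

# Consolidated logging entry point introduced by this diff.
ansible-playbook -i /path/to/inventory playbooks/openshift-logging/config.yml
```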
