Diffstat (limited to 'playbooks')
24 files changed, 127 insertions, 111 deletions
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
deleted file mode 100644
index faeb332ad..000000000
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: masters[0]
-  roles:
-  - role: openshift_logging
-    openshift_hosted_logging_cleanup: no
-
-- name: Update master-config for publicLoggingURL
-  hosts: masters:!masters[0]
-  pre_tasks:
-  - set_fact:
-      openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
-  tasks:
-  - import_role:
-      name: openshift_logging
-      tasks_from: update_master_config
-    when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index bdc98d1e0..cf811ca84 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -201,9 +201,7 @@ There are more enhancements that are arriving for provisioning. These will incl
 
 ## Uninstall / Deprovisioning
 
-At this time, only deprovisioning of the output of the prerequisites step is provided. You can/must manually remove things like ELBs and scale groups before attempting to undo the work by the preprovisiong step.
-
-To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You should use the same inventory file and provisioning_vars.yml file that was used during provisioning.
+To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You will have needed to remove any of the other objects (ie ELBs, instances, etc) before attempting. You should use the same inventory file and provisioning_vars.yml file that was used during provisioning.
 
 ```
 ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml
@@ -211,4 +209,10 @@ ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars fi
 
 This should result in removal of the security groups and VPC that were created.
 
+Cleaning up the S3 bucket contents can be accomplished with:
+
+```
+ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_s3.yml
+```
+
 NOTE: If you want to also remove the ssh keys that were uploaded (**these ssh keys would be shared if you are running multiple clusters in the same AWS account** so we don't remove these by default) then you should add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file.
diff --git a/playbooks/aws/openshift-cluster/uninstall_s3.yml b/playbooks/aws/openshift-cluster/uninstall_s3.yml
new file mode 100644
index 000000000..448b47aee
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_s3.yml
@@ -0,0 +1,10 @@
+---
+- name: Empty/delete s3 bucket
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: empty/delete s3 bucket
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_s3.yml
+    when: openshift_aws_create_s3 | default(true) | bool
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml
index 23a3fcbb5..23a3fcbb5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index de612da21..f44ab3580 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -113,6 +113,22 @@
     registry_url: "{{ openshift.master.registry_url }}"
     openshift_hosted_templates_import_command: replace
 
+  post_tasks:
+  # we need to migrate customers to the new pattern of pushing to the registry via dns
+  # Step 1: verify the certificates have the docker registry service name
+  - shell: >
+      echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000 | openssl x509 -text | grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)'
+    register: cert_output
+
+  # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs
+  - name: set a fact to include the registry certs playbook if needed
+    set_fact:
+      openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc == 0 }}"
+
+# Run the redeploy certs based upon the certificates
+- when: hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry
+  import_playbook: ../../../openshift-hosted/redeploy-registry-certificates.yml
+
 # Check for warnings to be printed at the end of the upgrade:
 - name: Clean up and display warnings
   hosts: oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index 2b27f8dd0..44af37b2d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -5,8 +5,6 @@
 # Pre-upgrade
 - import_playbook: ../initialize_nodes_to_upgrade.yml
 
-- import_playbook: verify_cluster.yml
-
 - name: Update repos on upgrade hosts
   hosts: "{{ l_upgrade_repo_hosts }}"
   roles:
@@ -53,6 +51,8 @@
 # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
 # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
 
+- import_playbook: verify_cluster.yml
+
 # If we're only upgrading nodes, we need to ensure masters are already upgraded
 - name: Verify masters are already upgraded
   hosts: oo_masters_to_config
@@ -60,7 +60,7 @@
   - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
     when:
     - l_upgrade_nodes_only | default(False) | bool
-    - openshift.common.version != openshift_version
+    - not openshift.common.version | match(openshift_version)
 
 # If we're only upgrading nodes, skip this.
 - import_playbook: ../../../../openshift-master/private/validate_restart.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 5ee8a9d78..463a05688 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -17,6 +17,7 @@
         valid version for a {{ openshift_upgrade_target }} upgrade
     when:
     - openshift_pkg_version is defined
+    - openshift_pkg_version != ""
     - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
 
   - fail:
@@ -25,6 +26,7 @@
         valid version for a {{ openshift_upgrade_target }} upgrade
     when:
     - openshift_image_tag is defined
+    - openshift_image_tag != ""
     - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
 
   - set_fact:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index a10fd4bee..9b5ba3482 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -56,7 +56,6 @@
     register: l_pb_upgrade_control_plane_pre_upgrade_storage
     when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
     failed_when:
-    - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
     - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
     - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
 
@@ -72,8 +71,6 @@ # support for optional hooks to be defined.
 
 - name: Upgrade master
   hosts: oo_masters_to_config
-  vars:
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
   roles:
   - openshift_facts
@@ -96,6 +93,12 @@
     - include_tasks: "{{ openshift_master_upgrade_hook }}"
      when: openshift_master_upgrade_hook is defined
 
+    - name: Disable master controller
+      service:
+        name: "{{ openshift_service_type }}-master-controllers"
+        enabled: false
+      when: openshift.common.rolling_restart_mode == 'system'
+
     - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
       when: openshift.common.rolling_restart_mode == 'system'
 
@@ -118,7 +121,6 @@
       - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
       - openshift_version is version_compare('3.7','<')
       failed_when:
-      - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
      - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
      - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
      run_once: true
@@ -254,7 +256,6 @@
     register: l_pb_upgrade_control_plane_post_upgrade_storage
     when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     failed_when:
-    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
     - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
 
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index bf6e8605e..ec1da6d39 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,54 +2,6 @@
 #
 # Full Control Plane + Nodes Upgrade
 #
-- import_playbook: ../init.yml
+- import_playbook: upgrade_control_plane.yml
 
-- name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tasks:
-  - set_fact:
-      openshift_upgrade_target: '3.9'
-      openshift_upgrade_min: '3.7'
-      openshift_release: '3.9'
-
-- import_playbook: ../pre/config.yml
-  vars:
-    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
-    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
-    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
-    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-    openshift_protect_installed_version: False
-
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_control_plane.yml
-
-# All controllers must be stopped at the same time then restarted
-- name: Cycle all controller services to force new leader election mode
-  hosts: oo_masters_to_config
-  gather_facts: no
-  roles:
-  - role: openshift_facts
-  tasks:
-  - name: Stop {{ openshift_service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift_service_type }}-master-controllers"
-      state: stopped
-  - name: Start {{ openshift_service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift_service_type }}-master-controllers"
-      state: started
-
-- import_playbook: ../upgrade_nodes.yml
-
-- import_playbook: ../post_control_plane.yml
+- import_playbook: upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index fe1fdefff..8792295c6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -26,6 +26,7 @@
       openshift_upgrade_min: '3.7'
       openshift_release: '3.8'
       _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+      openshift_pkg_version: ''
       _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
       l_double_upgrade_cp: True
     when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
@@ -61,9 +62,8 @@
 
 # Pre-upgrade completed
 
-- import_playbook: ../upgrade_control_plane.yml
-  vars:
-    openshift_release: '3.8'
+- name: Intermediate 3.8 Upgrade
+  import_playbook: ../upgrade_control_plane.yml
   when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
 
 ## 3.8 upgrade complete we should now be able to upgrade to 3.9
@@ -76,7 +76,7 @@
       openshift_upgrade_target: '3.9'
       openshift_upgrade_min: '3.8'
       openshift_release: '3.9'
-      openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}"
+      openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}"
   # Set the user's specified image_tag for 3.9 upgrade if it was provided.
   - set_fact:
       openshift_image_tag: "{{ _requested_image_tag }}"
@@ -105,6 +105,7 @@
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
     openshift_protect_installed_version: False
+    openshift_version_reinit: True
 
 - name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_etcd_to_config
@@ -113,8 +114,6 @@
       pre_upgrade_complete: True
 - import_playbook: ../upgrade_control_plane.yml
-  vars:
-    openshift_release: '3.9'
 
 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
   hosts: oo_masters_to_config
@@ -123,14 +122,16 @@
   roles:
   - role: openshift_facts
   tasks:
-  - name: Stop {{ openshift_service_type }}-master-controllers
-    systemd:
+  - name: Restart master controllers to force new leader election mode
+    service:
       name: "{{ openshift_service_type }}-master-controllers"
-      state: stopped
-  - name: Start {{ openshift_service_type }}-master-controllers
-    systemd:
+      state: restart
+    when: openshift.common.rolling_restart_mode == 'service'
+  - name: Re-enable master controllers to force new leader election mode
+    service:
       name: "{{ openshift_service_type }}-master-controllers"
-      state: started
+      enabled: true
+    when: openshift.common.rolling_restart_mode == 'system'
 
 - import_playbook: ../post_control_plane.yml
 
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index 0a730a88a..81f4dd183 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -16,8 +16,9 @@
     - iproute
     - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
     - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
-    - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else omit }}"
+    - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}"
     - yum-utils
+    when: item != ''
     register: result
     until: result is succeeded
 
diff --git a/playbooks/init/basic_facts.yml b/playbooks/init/basic_facts.yml
index 06a4e7291..a9bf06693 100644
--- a/playbooks/init/basic_facts.yml
+++ b/playbooks/init/basic_facts.yml
@@ -67,3 +67,11 @@
       first_master_client_binary: "{{ openshift_client_binary }}"
       #Some roles may require this to be set for first master
       openshift_client_binary: "{{ openshift_client_binary }}"
+
+- name: Disable web console if required
+  hosts: oo_masters_to_config
+  gather_facts: no
+  tasks:
+  - set_fact:
+      openshift_web_console_install: False
+    when: openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features )
diff --git a/playbooks/openshift-hosted/deploy_registry.yml b/playbooks/openshift-hosted/deploy_registry.yml
new file mode 100644
index 000000000..2453329dd
--- /dev/null
+++ b/playbooks/openshift-hosted/deploy_registry.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/openshift_hosted_registry.yml
diff --git a/playbooks/openshift-hosted/deploy_router.yml b/playbooks/openshift-hosted/deploy_router.yml
new file mode 100644
index 000000000..e832eeeea
--- /dev/null
+++ b/playbooks/openshift-hosted/deploy_router.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/openshift_hosted_router.yml
diff --git a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
index 62fe0dd60..c59ebcead 100644
--- a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
+++ b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
@@ -3,4 +3,6 @@
   hosts: oo_first_master
   roles:
   - role: openshift_default_storage_class
-    when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
+    when:
+    - openshift_cloudprovider_kind is defined
+    - openshift_cloudprovider_kind in ['aws','gce','openstack','vsphere']
diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml
index d6b26647c..07aa8bfde 100644
--- a/playbooks/openshift-logging/private/config.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -24,6 +24,7 @@
   - import_role:
       name: openshift_logging
       tasks_from: update_master_config
+    when: not openshift.common.version_gte_3_9
 
 - name: Logging Install Checkpoint End
   hosts: all
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index 85be0e600..ca514ed26 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -16,7 +16,6 @@
   vars:
     cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
     etcd_urls: "{{ openshift.master.etcd_urls }}"
-    openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
     omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
   roles:
   - role: openshift_project_request_template
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index 153ea9993..d2fc2eed8 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -78,7 +78,6 @@
         console_url: "{{ openshift_master_console_url | default(None) }}"
         console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
         public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
-        ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
         master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
 
 - name: Inspect state of first master config settings
@@ -166,7 +165,6 @@
   hosts: oo_masters_to_config
   any_errors_fatal: true
   vars:
-    openshift_master_ha: "{{ openshift.master.ha }}"
     openshift_master_count: "{{ openshift.master.master_count }}"
     openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
     openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
@@ -186,6 +184,7 @@
   - role: openshift_buildoverrides
   - role: nickhammond.logrotate
   - role: openshift_master
+    openshift_master_ha: "{{ (groups.oo_masters | length > 1) | bool }}"
     openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
     r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
     r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
diff --git a/playbooks/openshift-master/private/restart.yml b/playbooks/openshift-master/private/restart.yml
index 5cb284935..17d90533c 100644
--- a/playbooks/openshift-master/private/restart.yml
+++ b/playbooks/openshift-master/private/restart.yml
@@ -3,16 +3,13 @@
 
 - name: Restart masters
   hosts: oo_masters_to_config
-  vars:
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
-  handlers:
-  - import_tasks: ../../../roles/openshift_master/handlers/main.yml
   roles:
   - openshift_facts
   post_tasks:
   - include_tasks: tasks/restart_hosts.yml
     when: openshift_rolling_restart_mode | default('services') == 'system'
-
-  - include_tasks: tasks/restart_services.yml
+  - import_role:
+      name: openshift_master
+      tasks_from: restart.yml
     when: openshift_rolling_restart_mode | default('services') == 'services'
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 007b23ea3..20ebf70d3 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -8,7 +8,6 @@
   - openshift_facts:
       role: master
       local_facts:
-        ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
         master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
   - name: Update master count
     modify_yaml:
diff --git a/playbooks/openshift-metrics/private/config.yml b/playbooks/openshift-metrics/private/config.yml
index 1e237e3f0..889ea77b1 100644
--- a/playbooks/openshift-metrics/private/config.yml
+++ b/playbooks/openshift-metrics/private/config.yml
@@ -25,6 +25,7 @@
     import_role:
       name: openshift_metrics
      tasks_from: update_master_config.yaml
+    when: not openshift.common.version_gte_3_9
 
 - name: Metrics Install Checkpoint End
   hosts: all
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 7249ced70..7371bd7ac 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -16,6 +16,7 @@
     until: not (l_docker_restart_docker_in_node_result is failed)
     retries: 3
     delay: 30
+    when: openshift_node_restart_docker_required | default(True)
 
   - name: Restart containerized services
     service:
diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml
index 8b7272485..cdf816fbf 100644
--- a/playbooks/openshift-node/redeploy-certificates.yml
+++ b/playbooks/openshift-node/redeploy-certificates.yml
@@ -4,3 +4,5 @@
 - import_playbook: private/redeploy-certificates.yml
 
 - import_playbook: private/restart.yml
+  vars:
+    openshift_node_restart_docker_required: False
diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py
index 76e658eb7..d5a8c3e24 100755
--- a/playbooks/openstack/inventory.py
+++ b/playbooks/openstack/inventory.py
@@ -15,18 +15,10 @@
 import json
 
 import shade
 
-def build_inventory():
-    '''Build the dynamic inventory.'''
-    cloud = shade.openstack_cloud()
-
+def base_openshift_inventory(cluster_hosts):
+    '''Set the base openshift inventory.'''
     inventory = {}
 
-    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
-    # environment variable.
-    cluster_hosts = [
-        server for server in cloud.list_servers()
-        if 'metadata' in server and 'clusterid' in server.metadata]
-
     masters = [server.name for server in cluster_hosts
                if server.metadata['host-type'] == 'master']
 
@@ -67,6 +59,34 @@ def build_inventory():
     inventory['dns'] = {'hosts': dns}
     inventory['lb'] = {'hosts': load_balancers}
 
+    return inventory
+
+
+def get_docker_storage_mountpoints(volumes):
+    '''Check volumes to see if they're being used for docker storage'''
+    docker_storage_mountpoints = {}
+    for volume in volumes:
+        if volume.metadata.get('purpose') == "openshift_docker_storage":
+            for attachment in volume.attachments:
+                if attachment.server_id in docker_storage_mountpoints:
+                    docker_storage_mountpoints[attachment.server_id].append(attachment.device)
+                else:
+                    docker_storage_mountpoints[attachment.server_id] = [attachment.device]
+    return docker_storage_mountpoints
+
+
+def build_inventory():
+    '''Build the dynamic inventory.'''
+    cloud = shade.openstack_cloud()
+
+    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
+    # environment variable.
+    cluster_hosts = [
+        server for server in cloud.list_servers()
+        if 'metadata' in server and 'clusterid' in server.metadata]
+
+    inventory = base_openshift_inventory(cluster_hosts)
+
     for server in cluster_hosts:
         if 'group' in server.metadata:
             group = server.metadata.group
@@ -76,6 +96,9 @@
 
     inventory['_meta'] = {'hostvars': {}}
 
+    # cinder volumes used for docker storage
+    docker_storage_mountpoints = get_docker_storage_mountpoints(cloud.list_volumes())
+
     for server in cluster_hosts:
         ssh_ip_address = server.public_v4 or server.private_v4
         hostvars = {
@@ -111,6 +134,11 @@
         if node_labels:
             hostvars['openshift_node_labels'] = node_labels
 
+        # check for attached docker storage volumes
+        if 'os-extended-volumes:volumes_attached' in server:
+            if server.id in docker_storage_mountpoints:
+                hostvars['docker_storage_mountpoints'] = ' '.join(docker_storage_mountpoints[server.id])
+
         inventory['_meta']['hostvars'][server.name] = hostvars
 
     return inventory