Diffstat (limited to 'playbooks')
15 files changed, 270 insertions(+), 65 deletions(-)
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index c9ae923bb..b6a2d2f26 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -27,9 +27,8 @@
   gather_facts: no
 
   vars:
-    cli_volume_type: io1
+    cli_volume_type: gp2
     cli_volume_size: 30
-    cli_volume_iops: "{{ 30 * cli_volume_size }}"
 
   pre_tasks:
   - fail:
@@ -104,7 +103,6 @@
       volume_size: "{{ cli_volume_size | default(30, True)}}"
       volume_type: "{{ cli_volume_type }}"
       device_name: /dev/xvdb
-      iops: "{{ 30 * cli_volume_size }}"
     register: vol
 
   - debug: var=vol
@@ -142,10 +140,3 @@
 
   - debug: var=dockerstart
 
-  - name: Wait for docker to stabilize
-    pause:
-      seconds: 30
-
-  # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support
-  - name: update zabbix docker items
-    command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py
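The change above drops the provisioned-IOPS settings because AWS only accepts an iops argument for io1 volumes; a gp2 volume gets its baseline IOPS from its size automatically. As a minimal sketch (not part of this commit; variable names follow the playbook, and instance_id is a stand-in), a task that must support both volume types can pass iops conditionally:

    - name: Attach docker storage volume
      ec2_vol:
        instance: "{{ instance_id }}"
        volume_size: "{{ cli_volume_size | default(30, True) }}"
        volume_type: "{{ cli_volume_type }}"
        device_name: /dev/xvdb
        # 'omit' drops the argument entirely unless the type is io1,
        # since AWS rejects iops on gp2 volumes.
        iops: "{{ 30 * cli_volume_size if cli_volume_type == 'io1' else omit }}"
      register: vol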
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
new file mode 100755
index 000000000..614b2537a
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -0,0 +1,104 @@
+#!/usr/bin/ansible-playbook
+---
+# This playbook converts docker from loopback to direct-lvm (the Red Hat recommended way to run docker).
+#
+# It requires the block device to be already provisioned and attached to the host. This is a generic playbook,
+# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory.
+#
+# To run:
+#  ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device>
+#
+# Example:
+#  ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb
+#
+# Notes:
+# * This will remove /var/lib/docker!
+# * You may need to re-deploy docker images after this is run (like monitoring)
+
+- name: Fix docker to have a provisioned iops drive
+  hosts: "{{ cli_host }}"
+  user: root
+  connection: ssh
+  gather_facts: no
+
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_docker_device
+
+  - name: start docker
+    service:
+      name: docker
+      state: started
+
+  - name: Determine if loopback
+    shell: docker info | grep 'Data file:.*loop'
+    register: loop_device_check
+    ignore_errors: yes
+
+  - debug:
+      var: loop_device_check
+
+  - name: fail if we don't detect loopback
+    fail:
+      msg: loopback not detected! Please investigate manually.
+    when: loop_device_check.rc == 1
+
+  - name: stop zagg client monitoring container
+    service:
+      name: oso-rhel7-zagg-client
+      state: stopped
+    ignore_errors: yes
+
+  - name: stop pcp client monitoring container
+    service:
+      name: oso-f22-host-monitoring
+      state: stopped
+    ignore_errors: yes
+
+  - name: "check to see if {{ cli_docker_device }} exists"
+    command: "test -e {{ cli_docker_device }}"
+    register: docker_dev_check
+    ignore_errors: yes
+
+  - debug: var=docker_dev_check
+
+  - name: "fail if {{ cli_docker_device }} doesn't exist"
+    fail:
+      msg: "{{ cli_docker_device }} doesn't exist. Please investigate"
+    when: docker_dev_check.rc != 0
+
+  - name: stop docker
+    service:
+      name: docker
+      state: stopped
+
+  - name: delete /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: remove /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: copy the docker-storage-setup config file
+    copy:
+      content: >
+        DEVS={{ cli_docker_device }}
+        VG=docker_vg
+      dest: /etc/sysconfig/docker-storage-setup
+      owner: root
+      group: root
+      mode: 0664
+
+  - name: docker storage setup
+    command: docker-storage-setup
+    register: setup_output
+
+  - debug: var=setup_output
+
+  - name: start docker
+    command: systemctl start docker.service
+    register: dockerstart
+
+  - debug: var=dockerstart
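A note on the copy task in this new playbook: the folded block scalar (>) joins its lines with single spaces, so the rendered /etc/sysconfig/docker-storage-setup ends up on one line. Because docker-storage-setup sources the file as shell, both assignments still take effect. Assuming cli_docker_device=/dev/xvdb, the file on disk would look like:

    # /etc/sysconfig/docker-storage-setup (rendered; the device path is an example)
    DEVS=/dev/xvdb VG=docker_vg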
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index ef9b45abd..63d473146 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -172,7 +172,7 @@
 
   - name: pvmove onto new volume
     command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1"
-    async: 3600
+    async: 43200
     poll: 10
 
   - name: Remove the old docker drive from the volume group
diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
index 026b24456..acfa89515 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.j2
+++ b/playbooks/adhoc/s3_registry/s3_registry.j2
@@ -7,8 +7,8 @@ storage:
   cache:
     layerinfo: inmemory
   s3:
-    accesskey: {{ accesskey }}
-    secretkey: {{ secretkey }}
+    accesskey: {{ aws_access_key }}
+    secretkey: {{ aws_secret_key }}
     region: us-east-1
     bucket: {{ clusterid }}-docker
     encrypt: true
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index 30b873db3..5dc1abf17 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -1,7 +1,7 @@
 ---
 # This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
 # Usage:
-# ansible-playbook s3_registry.yml -e accesskey="S3 aws access key" -e secretkey="S3 aws secret key" -e clusterid="mycluster"
+# ansible-playbook s3_registry.yml -e clusterid="mycluster"
 #
 # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
 # The 'clusterid' is the short name of your cluster.
@@ -10,11 +10,22 @@
   remote_user: root
   gather_facts: False
 
+  vars:
+    aws_access_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}"
+    aws_secret_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}"
   tasks:
 
+  - name: Check for AWS creds
+    fail:
+      msg: "Couldn't find {{ item }} creds in ENV"
+    when: "{{ item }} == ''"
+    with_items:
+    - aws_access_key
+    - aws_secret_key
+
   - name: Create S3 bucket
     local_action:
-      module: s3 bucket="{{ clusterid }}-docker" mode=create aws_access_key={{ accesskey|quote }} aws_secret_key={{ secretkey|quote }}
+      module: s3 bucket="{{ clusterid }}-docker" mode=create
 
   - name: Generate docker registry config
     template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
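The reworked s3_registry playbook reads the credentials from the environment instead of -e extra vars, failing fast when either lookup('env', ...) comes back as the empty string. Dropping the explicit aws_access_key/aws_secret_key module arguments works because the boto-based s3 module itself falls back to AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY. A sketch of making that hand-off explicit per task; the playbook itself simply relies on the inherited shell environment, so treat this as an assumption-laden variant, not the commit's code:

    - name: Create S3 bucket
      local_action:
        module: s3 bucket="{{ clusterid }}-docker" mode=create
      # boto reads these variables on its own, which is why the explicit
      # module arguments could be removed from the task above.
      environment:
        AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
        AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"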
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 64cf7a65b..14ec82e85 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -221,6 +221,15 @@
     - role: openshift_cluster_metrics
       when: openshift.common.use_cluster_metrics | bool
 
+- name: Enable cockpit
+  hosts: oo_first_master
+  vars:
+    cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
+  roles:
+  - role: cockpit
+    when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
+          (osm_use_cockpit | bool or osm_use_cockpit is undefined )
+
 # Additional instance config for online deployments
 - name: Additional instance config
   hosts: oo_masters_deployment_type_online
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index fd5dfcc72..6ca4f7395 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -10,6 +10,8 @@
   - set_fact:
       g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
       g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
+      use_sdn: "{{ do_we_use_openshift_sdn }}"
+      sdn_plugin: "{{ sdn_network_plugin }}"
 
 - include: ../../common/openshift-cluster/config.yml
   vars:
@@ -18,7 +20,10 @@
     g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+    g_nodeonmaster: true
     openshift_cluster_id: "{{ cluster_id }}"
     openshift_debug_level: 2
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ gce_private_ip }}"
+    openshift_use_openshift_sdn: "{{ hostvars.localhost.use_sdn }}"
+    os_sdn_network_plugin_name: "{{ hostvars.localhost.sdn_plugin }}"
diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml
new file mode 100644
index 000000000..0dfa3e9d7
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/join_node.yml
@@ -0,0 +1,49 @@
+---
+- name: Populate oo_hosts_to_update group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_hosts_to_update
+    add_host:
+      name: "{{ node_ip }}"
+      groups: oo_hosts_to_update
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- name: Populate oo_masters_to_config host group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ node_ip }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_nodes_to_config
+
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_first_master
+    when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+#- include: config.yml
+- include: ../../common/openshift-node/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ ansible_default_ipv4.address }}"
+    openshift_use_openshift_sdn: true
+    openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} "
+    os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
+    osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
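The new cockpit play is opt-out: it runs on the enterprise deployment types whenever osm_use_cockpit is true or simply unset. A hypothetical inventory snippet for steering it (variable names come from the play above; the extra plugin is an example):

    # Disable the cockpit console entirely,
    osm_use_cockpit: false

    # or keep it and override the default plugin list:
    # osm_use_cockpit: true
    # osm_cockpit_plugins: ['cockpit-kubernetes', 'cockpit-docker']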
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index 7a3b80da0..c22b897d5 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -34,27 +34,28 @@
         count: "{{ num_infra }}"
   - include: tasks/launch_instances.yml
     vars:
-      instances: "{{ infra_names }}"
+      instances: "{{ node_names }}"
       cluster: "{{ cluster_id }}"
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
 
-  - set_fact:
-      a_infra: "{{ infra_names[0] }}"
-  - add_host: name={{ a_infra }} groups=service_master
+  - add_host:
+      name: "{{ master_names.0 }}"
+      groups: service_master
+    when: master_names is defined and master_names.0 is defined
 
 - include: update.yml
-
-- name: Deploy OpenShift Services
-  hosts: service_master
-  connection: ssh
-  gather_facts: yes
-  roles:
-  - openshift_registry
-  - openshift_router
-
-- include: ../../common/openshift-cluster/create_services.yml
-  vars:
-    g_svc_master: "{{ service_master }}"
+#
+#- name: Deploy OpenShift Services
+#  hosts: service_master
+#  connection: ssh
+#  gather_facts: yes
+#  roles:
+#  - openshift_registry
+#  - openshift_router
+#
+#- include: ../../common/openshift-cluster/create_services.yml
+#  vars:
+#    g_svc_master: "{{ service_master }}"
 
 - include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 5ba0f5a48..53b2b9a5e 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -14,11 +14,11 @@
       groups: oo_list_hosts
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+    with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
 
 - name: List instance(s)
   hosts: oo_list_hosts
   gather_facts: no
   tasks:
   - debug:
-      msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
+      msg: "private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 6307ecc27..c428cb465 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -10,14 +10,33 @@
     service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
     pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
     project_id: "{{ lookup('env', 'gce_project_id') }}"
+    zone: "{{ lookup('env', 'zone') }}"
+    network: "{{ lookup('env', 'network') }}"
+# unsupported in 1.9.+
+    #service_account_permissions: "datastore,logging-write"
     tags:
       - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
       - env-{{ cluster }}
      - host-type-{{ type }}
-      - sub-host-type-{{ sub_host_type }}
+      - sub-host-type-{{ g_sub_host_type }}
       - env-host-type-{{ cluster }}-openshift-{{ type }}
+  when: instances |length > 0
   register: gce
 
+- set_fact:
+    node_label:
+      # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+      region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+      type: "{{ g_sub_host_type }}"
+  when: instances |length > 0 and type == "node"
+
+- set_fact:
+    node_label:
+      # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+      region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+      type: "{{ type }}"
+  when: instances |length > 0 and type != "node"
+
 - name: Add new instances to groups and set variables needed
   add_host:
     hostname: "{{ item.name }}"
@@ -27,16 +46,17 @@
     groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
     gce_public_ip: "{{ item.public_ip }}"
     gce_private_ip: "{{ item.private_ip }}"
-  with_items: gce.instance_data
+    openshift_node_labels: "{{ node_label }}"
+  with_items: gce.instance_data | default([], true)
 
 - name: Wait for ssh
   wait_for: port=22 host={{ item.public_ip }}
-  with_items: gce.instance_data
+  with_items: gce.instance_data | default([], true)
 
 - name: Wait for user setup
   command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
   register: result
   until: result.rc == 0
-  retries: 20
-  delay: 10
-  with_items: gce.instance_data
+  retries: 30
+  delay: 5
+  with_items: gce.instance_data | default([], true)
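The node_label facts above derive the region by trimming the final suffix off the zone, since the gce module only reports a zone. A standalone sketch of that parse (the zone value is an example):

    - set_fact:
        # '^(.*)-.*$' greedily captures everything before the last '-', so
        # us-central1-a becomes us-central1. The doubled backslashes survive
        # YAML and Jinja quoting so the regex engine receives \1.
        region: "{{ 'us-central1-a' | regex_replace('^(.*)-.*$', '\\\\1') }}"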
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index 098b0df73..e20e0a8bc 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -1,25 +1,18 @@
 ---
 - name: Terminate instance(s)
   hosts: localhost
+  connection: local
   gather_facts: no
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+  - set_fact: scratch_group=tag_env-{{ cluster_id }}
   - add_host:
       name: "{{ item }}"
-      groups: oo_hosts_to_terminate, oo_nodes_to_terminate
+      groups: oo_hosts_to_terminate
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
-
-  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
-  - add_host:
-      name: "{{ item }}"
-      groups: oo_hosts_to_terminate, oo_masters_to_terminate
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+    with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
 
 - name: Unsubscribe VMs
   hosts: oo_hosts_to_terminate
@@ -32,14 +25,34 @@
       lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
       default('no', True) | lower in ['no', 'false']
 
-- include: ../openshift-node/terminate.yml
-  vars:
-    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+- name: Terminate instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+
+  - name: Terminate instances that were previously launched
+    local_action:
+      module: gce
+      state: 'absent'
+      name: "{{ item }}"
+      service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+      pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+      project_id: "{{ lookup('env', 'gce_project_id') }}"
+      zone: "{{ lookup('env', 'zone') }}"
+    with_items: groups['oo_hosts_to_terminate'] | default([], true)
+    when: item is defined
 
-- include: ../openshift-master/terminate.yml
-  vars:
-    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#- include: ../openshift-node/terminate.yml
+#  vars:
+#    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+#    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+#    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#
+#- include: ../openshift-master/terminate.yml
+#  vars:
+#    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+#    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+#    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index ae33083b9..6de007807 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -1,8 +1,11 @@
 ---
+do_we_use_openshift_sdn: true
+sdn_network_plugin: redhat/openshift-ovs-subnet
+# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation
 deployment_vars:
   origin:
-    image: centos-7
-    ssh_user:
+    image: preinstalled-slave-50g-v5
+    ssh_user: root
     sudo: yes
   online:
     image: libra-rhel7
@@ -12,4 +15,3 @@
     image: rhel-7
     ssh_user:
     sudo: yes
-
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 2a0c90b46..4b91c6da8 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -64,7 +64,7 @@
   register: nb_allocated_ips
   until: nb_allocated_ips.stdout == '{{ instances | length }}'
   retries: 60
-  delay: 1
+  delay: 3
   when: instances | length != 0
 
 - name: Collect IP addresses of the VMs
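Several loops in this commit switch from default([]) to default([], true). The difference, sketched below: with the second argument set to true the fallback also applies to values that are defined but empty, so a cluster with no terminated instances no longer breaks the difference() chain:

    - debug:
        # default([], true) treats empty and undefined values alike,
        # so the filter chain always operates on a real list.
        msg: "{{ groups['status_terminated'] | default([], true) | difference(['localhost']) }}"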
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
index 77b788109..eacae7c7e 100644
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -19,5 +19,5 @@ system_info:
 ssh_authorized_keys:
   - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
 
-bootcmd:
+runcmd:
   - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
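For context on this last change: in cloud-init, bootcmd runs early in the boot sequence and on every boot, while runcmd runs once per instance at the end of first boot, by which point system services are up and 'service network restart' can actually succeed. A minimal cloud-config sketch of the split (the commands are illustrative, not from the template):

    #cloud-config
    bootcmd:
    # runs early on every boot; keep it to steps that need no services
    - echo "booted $(date)" >> /root/boot.log
    runcmd:
    # runs once, late in first boot, after services are available
    - service network restart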