Diffstat (limited to 'playbooks')
27 files changed, 366 insertions, 904 deletions
diff --git a/playbooks/adhoc/metrics_setup/README.md b/playbooks/adhoc/metrics_setup/README.md deleted file mode 100644 index 71aa1e109..000000000 --- a/playbooks/adhoc/metrics_setup/README.md +++ /dev/null @@ -1,25 +0,0 @@ -## Playbook for adding [Metrics](https://github.com/openshift/origin-metrics) to Openshift - -See OSE Ansible [readme](https://github.com/openshift/openshift-ansible/blob/master/README_OSE.md) for general install instructions. Playbook has been tested on OSE 3.1/RHEL7.2 cluster - - -Add the following vars to `[OSEv3:vars]` section of your inventory file -``` -[OSEv3:vars] -# Enable cluster metrics -use_cluster_metrics=true -metrics_external_service=< external service name for metrics > -metrics_image_prefix=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ -metrics_image_version=3.1.0 -``` - -Run playbook -``` -ansible-playbook -i $INVENTORY_FILE playbooks/install.yml -``` - -## Contact -Email: hawkular-dev@lists.jboss.org - -## Credits -Playbook adapted from install shell scripts by Matt Mahoney diff --git a/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml b/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml deleted file mode 100644 index f70e0b18b..000000000 --- a/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "List" -metadata: - name: metrics-deployer-setup - annotations: - description: "Required dependencies for the metrics deployer pod." - tags: "infrastructure" -labels: - metrics-infra: deployer - provider: openshift - component: deployer -items: -- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: metrics-deployer - secrets: - - name: metrics-deployer diff --git a/playbooks/adhoc/metrics_setup/files/metrics.yaml b/playbooks/adhoc/metrics_setup/files/metrics.yaml deleted file mode 100644 index d823b2587..000000000 --- a/playbooks/adhoc/metrics_setup/files/metrics.yaml +++ /dev/null @@ -1,116 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: - name: metrics-deployer-template - annotations: - description: "Template for deploying the required Metrics integration. 
Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." - tags: "infrastructure" -labels: - metrics-infra: deployer - provider: openshift - component: deployer -objects: -- - apiVersion: v1 - kind: Pod - metadata: - generateName: metrics-deployer- - spec: - containers: - - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} - name: deployer - volumeMounts: - - name: secret - mountPath: /secret - readOnly: true - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: PUBLIC_MASTER_URL - value: ${PUBLIC_MASTER_URL} - - name: MASTER_URL - value: ${MASTER_URL} - - name: REDEPLOY - value: ${REDEPLOY} - - name: USE_PERSISTENT_STORAGE - value: ${USE_PERSISTENT_STORAGE} - - name: HAWKULAR_METRICS_HOSTNAME - value: ${HAWKULAR_METRICS_HOSTNAME} - - name: CASSANDRA_NODES - value: ${CASSANDRA_NODES} - - name: CASSANDRA_PV_SIZE - value: ${CASSANDRA_PV_SIZE} - - name: METRIC_DURATION - value: ${METRIC_DURATION} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: metrics-deployer - volumes: - - name: empty - emptyDir: {} - - name: secret - secret: - secretName: metrics-deployer -parameters: -- - description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "hawkular/" -- - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"' - name: IMAGE_VERSION - value: "0.7.0-SNAPSHOT" -- - description: "Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc:443" -- - description: "External hostname where clients will reach Hawkular Metrics" - name: HAWKULAR_METRICS_HOSTNAME - required: true -- - description: "If set to true the deployer will try and delete all the existing components before trying to redeploy." - name: REDEPLOY - value: "false" -- - description: "Set to true for persistent storage, set to false to use non persistent storage" - name: USE_PERSISTENT_STORAGE - value: "true" -- - description: "The number of Cassandra Nodes to deploy for the initial cluster" - name: CASSANDRA_NODES - value: "1" -- - description: "The persistent volume size for each of the Cassandra nodes" - name: CASSANDRA_PV_SIZE - value: "1Gi" -- - description: "How many days metrics should be stored for." 
- name: METRIC_DURATION - value: "7" diff --git a/playbooks/adhoc/metrics_setup/playbooks/install.yml b/playbooks/adhoc/metrics_setup/playbooks/install.yml deleted file mode 100644 index a9ec3c1ef..000000000 --- a/playbooks/adhoc/metrics_setup/playbooks/install.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -- include: master_config_facts.yml -- name: "Install metrics" - hosts: masters - vars: - metrics_public_url: "https://{{ metrics_external_service }}/hawkular/metrics" - tasks: - - name: "Add metrics url to master config" - lineinfile: "state=present dest=/etc/origin/master/master-config.yaml regexp='^\ \ metricsPublicURL' insertbefore='^\ \ publicURL' line='\ \ metricsPublicURL: {{ metrics_public_url }}'" - - - name: "Restart master service" - service: name=atomic-openshift-master state=restarted - - - name: "Copy metrics-deployer yaml to remote" - copy: "src=../files/metrics-deployer-setup.yaml dest=/tmp/metrics-deployer-setup.yaml force=yes" - - - name: "Add metrics-deployer" - command: "{{item}}" - run_once: true - register: output - failed_when: ('already exists' not in output.stderr) and (output.rc != 0) - with_items: - - oc project openshift-infra - - oc create -f /tmp/metrics-deployer-setup.yaml - - - name: "Give metrics-deployer SA permissions" - command: "oadm policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer" - run_once: true - - - name: "Give heapster SA permissions" - command: "oadm policy add-cluster-role-to-user cluster-reader system:serviceaccount:openshift-infra:heapster" - run_once: true - - - name: "Create metrics-deployer secret" - command: "oc secrets new metrics-deployer nothing=/dev/null" - register: output - failed_when: ('already exists' not in output.stderr) and (output.rc != 0) - run_once: true - - - name: "Copy metrics.yaml to remote" - copy: "src=../files/metrics.yaml dest=/tmp/metrics.yaml force=yes" - - - name: "Process yml template" - shell: "oc process -f /tmp/metrics.yaml -v MASTER_URL={{ masterPublicURL }},REDEPLOY=true,HAWKULAR_METRICS_HOSTNAME={{ metrics_external_service }},IMAGE_PREFIX={{ metrics_image_prefix }},IMAGE_VERSION={{ metrics_image_version }},USE_PERSISTENT_STORAGE=false | oc create -f -" - run_once: true
\ No newline at end of file diff --git a/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml b/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml deleted file mode 100644 index 65de11bc4..000000000 --- a/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: "Load master config" - hosts: masters - vars: - master_config_file: "/tmp/ansible-metrics-{{ ansible_hostname }}" - tasks: - - name: "Fetch master config from remote" - fetch: "src=/etc/origin/master/master-config.yaml dest={{ master_config_file }} flat=yes" - - name: "Load config" - include_vars: "{{ master_config_file }}" diff --git a/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml b/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml deleted file mode 100644 index 06c4586ee..000000000 --- a/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- name: "Uninstall metrics" - hosts: masters - tasks: - - name: "Remove metrics url from master config" - lineinfile: "state=absent dest=/etc/origin/master/master-config.yaml regexp='^\ \ metricsPublicURL'" - - - name: "Delete metrics objects" - command: "{{item}}" - with_items: - - oc delete all --selector=metrics-infra - # - oc delete secrets --selector=metrics-infra - # - oc delete sa --selector=metrics-infra - - oc delete templates --selector=metrics-infra - - oc delete sa metrics-deployer - - oc delete secret metrics-deployer diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 789f66b14..4ea639cbe 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -338,6 +338,7 @@ failed_when: False with_items: - etcd + - etcd3 - firewalld - name: Stop additional atomic services @@ -352,6 +353,7 @@ when: not is_atomic | bool with_items: - etcd + - etcd3 - shell: systemctl reset-failed changed_when: False @@ -365,6 +367,7 @@ - /etc/ansible/facts.d/openshift.fact - /etc/etcd - /etc/systemd/system/etcd_container.service + - /etc/profile.d/etcdctl.sh # Intenationally using rm command over file module because if someone had mounted a filesystem # at /var/lib/etcd then the contents was not removed correctly diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml index 4934ae6d0..ed8aac398 100644 --- a/playbooks/aws/openshift-cluster/list.yml +++ b/playbooks/aws/openshift-cluster/list.yml @@ -16,11 +16,8 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" + oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}" + oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}" with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - -- name: List Hosts - hosts: oo_list_hosts - gather_facts: no - tasks: - debug: - msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}" + msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml new file mode 100644 index 000000000..c25f96212 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml @@ -0,0 +1,26 @@ +--- +- include: ../../../common/openshift-cluster/verify_ansible_version.yml + +- name: Create initial host groups for localhost + hosts: localhost 
+ connection: local + become: no + gather_facts: no + tags: + - always + tasks: + - include_vars: ../cluster_hosts.yml + - add_host: + name: "{{ item }}" + groups: l_oo_all_hosts + with_items: "{{ g_all_hosts | default([]) }}" + +- name: Create initial host groups for all hosts + hosts: l_oo_all_hosts + gather_facts: no + tags: + - always + tasks: + - include_vars: ../cluster_hosts.yml + +- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 7112a6084..2f384ddea 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,5 +1,21 @@ --- # NOTE: requires openshift_facts be run +- hosts: l_oo_all_hosts + gather_facts: no + tasks: + # See: + # https://bugzilla.redhat.com/show_bug.cgi?id=1395047 + # https://bugzilla.redhat.com/show_bug.cgi?id=1282961 + # https://github.com/openshift/openshift-ansible/issues/1138 + - name: Check for bad combinations of yum and subscription-manager + command: > + {{ repoquery_cmd }} --installed --qf '%{version}' "yum" + register: yum_ver_test + changed_when: false + - fail: + msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils. + when: "'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout" + - name: Determine openshift_version to configure on first master hosts: oo_first_master roles: diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml new file mode 100644 index 000000000..57b156b1c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -0,0 +1,73 @@ +- name: Backup etcd + hosts: etcd_hosts_to_backup + vars: + embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" + timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + roles: + - openshift_facts + tasks: + # Ensure we persist the etcd role for this host in openshift_facts + - openshift_facts: + role: etcd + local_facts: {} + when: "'etcd' not in openshift" + + - stat: path=/var/lib/openshift + register: var_lib_openshift + + - stat: path=/var/lib/origin + register: var_lib_origin + + - name: Create origin symlink if necessary + file: src=/var/lib/openshift/ dest=/var/lib/origin state=link + when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False + + # TODO: replace shell module with command and update later checks + # We assume to be using the data dir for all backups. + - name: Check available disk space for etcd backup + shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1 + register: avail_disk + + # TODO: replace shell module with command and update later checks + - name: Check current embedded etcd disk usage + shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1 + register: etcd_disk_usage + when: embedded_etcd | bool + + - name: Abort if insufficient disk space for etcd backup + fail: + msg: > + {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, + {{ avail_disk.stdout }} Kb available. 
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) + + - name: Install etcd (for etcdctl) + action: "{{ ansible_pkg_mgr }} name=etcd state=present" + when: not openshift.common.is_atomic | bool + + - name: Generate etcd backup + command: > + etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }} + --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }} + + - set_fact: + etcd_backup_complete: True + + - name: Display location of etcd backup + debug: + msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}" + +- name: Gate on etcd backup + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + etcd_backup_completed: "{{ hostvars + | oo_select_keys(groups.etcd_hosts_to_backup) + | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}" + - set_fact: + etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" + when: etcd_backup_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml new file mode 100644 index 000000000..35f391f8c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml @@ -0,0 +1,47 @@ +--- +- name: Verify cluster is healthy pre-upgrade + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + +- name: Get current image + shell: grep 'ExecStart=' /etc/systemd/system/etcd_container.service | awk '{print $NF}' + register: current_image + +- name: Set new_etcd_image + set_fact: + new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd3:' ~ upgrade_version ) if upgrade_version | version_compare('3.0','>=') + else current_image.stdout.split(':')[0] ~ ':' ~ upgrade_version }}" + +- name: Pull new etcd image + command: "docker pull {{ new_etcd_image }}" + +- name: Update to latest etcd image + replace: + dest: /etc/systemd/system/etcd_container.service + regexp: "{{ current_image.stdout }}$" + replace: "{{ new_etcd_image }}" + +- name: Restart etcd_container + systemd: + name: etcd_container + daemon_reload: yes + state: restarted + +## TODO: probably should just move this into the backup playbooks, also this +## will fail on atomic host. We need to revisit how to do etcd backups there as +## the container may be newer than etcdctl on the host. 
Assumes etcd3 obsoletes etcd (7.3.1) +- name: Upgrade etcd for etcdctl when not atomic + action: "{{ ansible_pkg_mgr }} name=etcd ensure=latest" + when: not openshift.common.is_atomic | bool + +- name: Verify cluster is healthy + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + register: etcdctl + until: etcdctl.rc == 0 + retries: 3 + delay: 10 + +- name: Store new etcd_image + openshift_facts: + role: etcd + local_facts: + etcd_image: "{{ new_etcd_image }}" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml new file mode 100644 index 000000000..30232110e --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml @@ -0,0 +1,23 @@ +--- +# F23 GA'd with etcd 2.0, currently has 2.2 in updates +# F24 GA'd with etcd-2.2, currently has 2.2 in updates +# F25 Beta currently has etcd 3.0 +- name: Verify cluster is healthy pre-upgrade + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + +- name: Update etcd + package: + name: "etcd" + state: "latest" + +- name: Restart etcd + service: + name: etcd + state: restarted + +- name: Verify cluster is healthy + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + register: etcdctl + until: etcdctl.rc == 0 + retries: 3 + delay: 10 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh new file mode 120000 index 000000000..641e04e44 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh @@ -0,0 +1 @@ +../roles/etcd/files/etcdctl.sh
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml new file mode 100644 index 000000000..cce844403 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml @@ -0,0 +1,122 @@ +--- +# For 1.4/3.4 we want to upgrade everyone to etcd-3.0. etcd docs say to +# upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a tedius +# task for RHEL and CENTOS it's simply not possible in Fedora unless you've +# mirrored packages on your own because only the GA and latest versions are +# available in the repos. So for Fedora we'll simply skip this, sorry. + +- include: ../../evaluate_groups.yml + tags: + - always + +- name: Evaluate additional groups for upgrade + hosts: localhost + connection: local + become: no + tasks: + - name: Evaluate etcd_hosts_to_upgrade + add_host: + name: "{{ item }}" + groups: etcd_hosts_to_upgrade, etcd_hosts_to_backup + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" + +- name: Backup etcd before upgrading anything + include: backup.yml + vars: + backup_tag: "pre-upgrade-" + +- name: Drop etcdctl profiles + hosts: etcd_hosts_to_upgrade + tasks: + - include: roles/etcd/tasks/etcdctl.yml + +- name: Determine etcd version + hosts: etcd_hosts_to_upgrade + tasks: + - name: Record RPM based etcd version + command: rpm -qa --qf '%{version}' etcd\* + register: etcd_installed_version + failed_when: false + when: not openshift.common.is_containerized | bool + - name: Record containerized etcd version + command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* + register: etcd_installed_version + failed_when: false + when: openshift.common.is_containerized | bool + +# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop +# through hosts, then loop through tasks only when appropriate +- name: Upgrade to 2.1 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: '2.1' + tasks: + - include: rhel_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + +- name: Upgrade RPM hosts to 2.2 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: '2.2' + tasks: + - include: rhel_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + +- name: Upgrade containerized hosts to 2.2.5 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: 2.2.5 + tasks: + - include: containerized_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool + +- name: Upgrade RPM hosts to 2.3 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: '2.3' + tasks: + - include: rhel_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + +- name: Upgrade containerized hosts to 2.3.7 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: 2.3.7 + tasks: + - include: containerized_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool + +- name: 
Upgrade RPM hosts to 3.0 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: '3.0' + tasks: + - include: rhel_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + +- name: Upgrade containerized hosts to etcd3 image + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: 3.0.3 + tasks: + - include: containerized_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool + +- name: Upgrade fedora to latest + hosts: etcd_hosts_to_upgrade + serial: 1 + tasks: + - include: fedora_tasks.yml + when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool + +- name: Backup etcd + include: backup.yml + vars: + backup_tag: "post-3.0-" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml new file mode 100644 index 000000000..8e7dc9d9b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml @@ -0,0 +1,23 @@ +--- +- name: Verify cluster is healthy pre-upgrade + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + +- name: Update etcd package but exclude etcd3 + command: "{{ ansible_pkg_mgr }} install -y etcd-{{ upgrade_version }}\\* --exclude etcd3" + when: upgrade_version | version_compare('3.0','<') + +- name: Update etcd package not excluding etcd3 + command: "{{ ansible_pkg_mgr }} install -y etcd3-{{ upgrade_version }}\\*" + when: not upgrade_version | version_compare('3.0','<') + +- name: Restart etcd + service: + name: etcd + state: restarted + +- name: Verify cluster is healthy + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + register: etcdctl + until: etcdctl.rc == 0 + retries: 3 + delay: 10 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf b/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf
new file mode 120000
index 000000000..514526fe2
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf
@@ -0,0 +1 @@
+../../../../roles/openshift_node/templates/openvswitch-avoid-oom.conf
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 927d9b4ca..57c25aa41 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -13,80 +13,22 @@ groups: etcd_hosts_to_backup with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" -- name: Backup etcd - hosts: etcd_hosts_to_backup - vars: - embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" - timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" +# If facts cache were for some reason deleted, this fact may not be set, and if not set +# it will always default to true. This causes problems for the etcd data dir fact detection +# so we must first make sure this is set correctly before attempting the backup. +- name: Set master embedded_etcd fact + hosts: oo_masters_to_config roles: - openshift_facts tasks: - # Ensure we persist the etcd role for this host in openshift_facts - openshift_facts: - role: etcd - local_facts: {} - when: "'etcd' not in openshift" - - - stat: path=/var/lib/openshift - register: var_lib_openshift - - - stat: path=/var/lib/origin - register: var_lib_origin - - - name: Create origin symlink if necessary - file: src=/var/lib/openshift/ dest=/var/lib/origin state=link - when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False - - # TODO: replace shell module with command and update later checks - # We assume to be using the data dir for all backups. - - name: Check available disk space for etcd backup - shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1 - register: avail_disk - - # TODO: replace shell module with command and update later checks - - name: Check current embedded etcd disk usage - shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1 - register: etcd_disk_usage - when: embedded_etcd | bool - - - name: Abort if insufficient disk space for etcd backup - fail: - msg: > - {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, - {{ avail_disk.stdout }} Kb available. 
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) - - - name: Install etcd (for etcdctl) - action: "{{ ansible_pkg_mgr }} name=etcd state=latest" - when: not openshift.common.is_atomic | bool - - - name: Generate etcd backup - command: > - etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }} - --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }} - - - set_fact: - etcd_backup_complete: True - - - name: Display location of etcd backup - debug: - msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}" - + role: master + local_facts: + embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}" -- name: Gate on etcd backup - hosts: localhost - connection: local - become: no - tasks: - - set_fact: - etcd_backup_completed: "{{ hostvars - | oo_select_keys(groups.etcd_hosts_to_backup) - | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}" - - set_fact: - etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" - - fail: - msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" - when: etcd_backup_failed | length > 0 +- name: Backup etcd + include: ./etcd/backup.yml - name: Upgrade master packages hosts: oo_masters_to_config @@ -215,6 +157,12 @@ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool run_once: true + - name: Reconcile Jenkins Pipeline Role Bindings + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm + run_once: true + when: openshift.common.version_gte_3_4_or_1_4 | bool + - name: Reconcile Security Context Constraints command: > {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index a53c55c14..5fcb850a2 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -53,7 +53,7 @@ when: openshift_hosted_metrics_deployer_prefix is not defined - set_fact: openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}" - when: openshift_hosted_metrics_deployer_prefix is not defined + when: openshift_hosted_metrics_deployer_version is not defined roles: - openshift_facts post_tasks: diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py deleted file mode 100644 index fcaa3b850..000000000 --- a/playbooks/gce/openshift-cluster/library/gce.py +++ /dev/null @@ -1,543 +0,0 @@ -#!/usr/bin/python -# Copyright 2013 Google Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' ---- -module: gce -version_added: "1.4" -short_description: create or terminate GCE instances -description: - - Creates or terminates Google Compute Engine (GCE) instances. See - U(https://cloud.google.com/products/compute-engine) for an overview. - Full install/configuration instructions for the gce* modules can - be found in the comments of ansible/test/gce_tests.py. -options: - image: - description: - - image string to use for the instance - required: false - default: "debian-7" - instance_names: - description: - - a comma-separated list of instance names to create or destroy - required: false - default: null - machine_type: - description: - - machine type to use for the instance, use 'n1-standard-1' by default - required: false - default: "n1-standard-1" - metadata: - description: - - a hash/dictionary of custom data for the instance; - '{"key":"value", ...}' - required: false - default: null - service_account_email: - version_added: "1.5.1" - description: - - service account email - required: false - default: null - service_account_permissions: - version_added: "2.0" - description: - - service account permissions (see - U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), - --scopes section for detailed information) - required: false - default: null - choices: [ - "bigquery", "cloud-platform", "compute-ro", "compute-rw", - "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", - "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", - "storage-rw", "taskqueue", "userinfo-email" - ] - pem_file: - version_added: "1.5.1" - description: - - path to the pem file associated with the service account email - required: false - default: null - project_id: - version_added: "1.5.1" - description: - - your GCE project ID - required: false - default: null - name: - description: - - identifier when working with a single instance - required: false - network: - description: - - name of the network, 'default' will be used if not specified - required: false - default: "default" - persistent_boot_disk: - description: - - if set, create the instance with a persistent boot disk - required: false - default: "false" - disks: - description: - - a list of persistent disks to attach to the instance; a string value - gives the name of the disk; alternatively, a dictionary value can - define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry - will be the boot disk (which must be READ_WRITE). 
- required: false - default: null - version_added: "1.7" - state: - description: - - desired state of the resource - required: false - default: "present" - choices: ["active", "present", "absent", "deleted"] - tags: - description: - - a comma-separated list of tags to associate with the instance - required: false - default: null - zone: - description: - - the GCE zone to use - required: true - default: "us-central1-a" - ip_forward: - version_added: "1.9" - description: - - set to true if the instance can forward ip packets (useful for - gateways) - required: false - default: "false" - external_ip: - version_added: "1.9" - description: - - type of external ip, ephemeral by default - required: false - default: "ephemeral" - disk_auto_delete: - version_added: "1.9" - description: - - if set boot disk will be removed after instance destruction - required: false - default: "true" - -requirements: - - "python >= 2.6" - - "apache-libcloud >= 0.13.3" -notes: - - Either I(name) or I(instance_names) is required. -author: "Eric Johnson (@erjohnso) <erjohnso@google.com>" -''' - -EXAMPLES = ''' -# Basic provisioning example. Create a single Debian 7 instance in the -# us-central1-a Zone of n1-standard-1 machine type. -- local_action: - module: gce - name: test-instance - zone: us-central1-a - machine_type: n1-standard-1 - image: debian-7 - -# Example using defaults and with metadata to create a single 'foo' instance -- local_action: - module: gce - name: foo - metadata: '{"db":"postgres", "group":"qa", "id":500}' - - -# Launch instances from a control node, runs some tasks on the new instances, -# and then terminate them -- name: Create a sandbox instance - hosts: localhost - vars: - names: foo,bar - machine_type: n1-standard-1 - image: debian-6 - zone: us-central1-a - service_account_email: unique-email@developer.gserviceaccount.com - pem_file: /path/to/pem_file - project_id: project-id - tasks: - - name: Launch instances - local_action: gce instance_names={{names}} machine_type={{machine_type}} - image={{image}} zone={{zone}} - service_account_email={{ service_account_email }} - pem_file={{ pem_file }} project_id={{ project_id }} - register: gce - - name: Wait for SSH to come up - local_action: wait_for host={{item.public_ip}} port=22 delay=10 - timeout=60 state=started - with_items: {{gce.instance_data}} - -- name: Configure instance(s) - hosts: launched - sudo: True - roles: - - my_awesome_role - - my_awesome_tasks - -- name: Terminate instances - hosts: localhost - connection: local - tasks: - - name: Terminate instances that were previously launched - local_action: - module: gce - state: 'absent' - instance_names: {{gce.instance_names}} - -''' - -try: - import libcloud - from libcloud.compute.types import Provider - from libcloud.compute.providers import get_driver - from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ - ResourceExistsError, ResourceInUseError, ResourceNotFoundError - _ = Provider.GCE - HAS_LIBCLOUD = True -except ImportError: - HAS_LIBCLOUD = False - -try: - from ast import literal_eval - HAS_PYTHON26 = True -except ImportError: - HAS_PYTHON26 = False - - -def get_instance_info(inst): - """Retrieves instance information from an instance object and returns it - as a dictionary. 
- - """ - metadata = {} - if 'metadata' in inst.extra and 'items' in inst.extra['metadata']: - for md in inst.extra['metadata']['items']: - metadata[md['key']] = md['value'] - - try: - netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] - except: - netname = None - if 'disks' in inst.extra: - disk_names = [disk_info['source'].split('/')[-1] - for disk_info - in sorted(inst.extra['disks'], - key=lambda disk_info: disk_info['index'])] - else: - disk_names = [] - - if len(inst.public_ips) == 0: - public_ip = None - else: - public_ip = inst.public_ips[0] - - return({ - 'image': inst.image is not None and inst.image.split('/')[-1] or None, - 'disks': disk_names, - 'machine_type': inst.size, - 'metadata': metadata, - 'name': inst.name, - 'network': netname, - 'private_ip': inst.private_ips[0], - 'public_ip': public_ip, - 'status': ('status' in inst.extra) and inst.extra['status'] or None, - 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [], - 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None, - }) - - -def create_instances(module, gce, instance_names): - """Creates new instances. Attributes other than instance_names are picked - up from 'module' - - module : AnsibleModule object - gce: authenticated GCE libcloud driver - instance_names: python list of instance names to create - - Returns: - A list of dictionaries with instance information - about the instances that were launched. - - """ - image = module.params.get('image') - machine_type = module.params.get('machine_type') - metadata = module.params.get('metadata') - network = module.params.get('network') - persistent_boot_disk = module.params.get('persistent_boot_disk') - disks = module.params.get('disks') - state = module.params.get('state') - tags = module.params.get('tags') - zone = module.params.get('zone') - ip_forward = module.params.get('ip_forward') - external_ip = module.params.get('external_ip') - disk_auto_delete = module.params.get('disk_auto_delete') - service_account_permissions = module.params.get('service_account_permissions') - service_account_email = module.params.get('service_account_email') - - if external_ip == "none": - external_ip = None - - new_instances = [] - changed = False - - lc_image = gce.ex_get_image(image) - lc_disks = [] - disk_modes = [] - for i, disk in enumerate(disks or []): - if isinstance(disk, dict): - lc_disks.append(gce.ex_get_volume(disk['name'])) - disk_modes.append(disk['mode']) - else: - lc_disks.append(gce.ex_get_volume(disk)) - # boot disk is implicitly READ_WRITE - disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE') - lc_network = gce.ex_get_network(network) - lc_machine_type = gce.ex_get_size(machine_type) - lc_zone = gce.ex_get_zone(zone) - - # Try to convert the user's metadata value into the format expected - # by GCE. First try to ensure user has proper quoting of a - # dictionary-like syntax using 'literal_eval', then convert the python - # dict into a python list of 'key' / 'value' dicts. Should end up - # with: - # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] 
- if metadata: - if isinstance(metadata, dict): - md = metadata - else: - try: - md = literal_eval(str(metadata)) - if not isinstance(md, dict): - raise ValueError('metadata must be a dict') - except ValueError as e: - module.fail_json(msg='bad metadata: %s' % str(e)) - except SyntaxError as e: - module.fail_json(msg='bad metadata syntax') - - if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15': - items = [] - for k, v in md.items(): - items.append({"key": k, "value": v}) - metadata = {'items': items} - else: - metadata = md - - ex_sa_perms = [] - bad_perms = [] - if service_account_permissions: - for perm in service_account_permissions: - if perm not in gce.SA_SCOPES_MAP.keys(): - bad_perms.append(perm) - if len(bad_perms) > 0: - module.fail_json(msg='bad permissions: %s' % str(bad_perms)) - if service_account_email: - ex_sa_perms.append({'email': service_account_email}) - else: - ex_sa_perms.append({'email': "default"}) - ex_sa_perms[0]['scopes'] = service_account_permissions - - # These variables all have default values but check just in case - if not lc_image or not lc_network or not lc_machine_type or not lc_zone: - module.fail_json(msg='Missing required create instance variable', - changed=False) - - for name in instance_names: - pd = None - if lc_disks: - pd = lc_disks[0] - elif persistent_boot_disk: - try: - pd = gce.create_volume(None, "%s" % name, image=lc_image) - except ResourceExistsError: - pd = gce.ex_get_volume("%s" % name, lc_zone) - inst = None - try: - inst = gce.create_node( - name, lc_machine_type, lc_image, location=lc_zone, - ex_network=network, ex_tags=tags, ex_metadata=metadata, - ex_boot_disk=pd, ex_can_ip_forward=ip_forward, - external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, - ex_service_accounts=ex_sa_perms - ) - changed = True - except ResourceExistsError: - inst = gce.ex_get_node(name, lc_zone) - except GoogleBaseError as e: - module.fail_json(msg='Unexpected error attempting to create ' + - 'instance %s, error: %s' % (name, e.value)) - - for i, lc_disk in enumerate(lc_disks): - # Check whether the disk is already attached - if (len(inst.extra['disks']) > i): - attached_disk = inst.extra['disks'][i] - if attached_disk['source'] != lc_disk.extra['selfLink']: - module.fail_json( - msg=("Disk at index %d does not match: requested=%s found=%s" % ( - i, lc_disk.extra['selfLink'], attached_disk['source']))) - elif attached_disk['mode'] != disk_modes[i]: - module.fail_json( - msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % ( - i, disk_modes[i], attached_disk['mode']))) - else: - continue - gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i]) - # Work around libcloud bug: attached volumes don't get added - # to the instance metadata. get_instance_info() only cares about - # source and index. - if len(inst.extra['disks']) != i+1: - inst.extra['disks'].append( - {'source': lc_disk.extra['selfLink'], 'index': i}) - - if inst: - new_instances.append(inst) - - instance_names = [] - instance_json_data = [] - for inst in new_instances: - d = get_instance_info(inst) - instance_names.append(d['name']) - instance_json_data.append(d) - - return (changed, instance_json_data, instance_names) - - -def terminate_instances(module, gce, instance_names, zone_name): - """Terminates a list of instances. 
- - module: Ansible module object - gce: authenticated GCE connection object - instance_names: a list of instance names to terminate - zone_name: the zone where the instances reside prior to termination - - Returns a dictionary of instance names that were terminated. - - """ - changed = False - terminated_instance_names = [] - for name in instance_names: - inst = None - try: - inst = gce.ex_get_node(name, zone_name) - except ResourceNotFoundError: - pass - except Exception as e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - if inst: - gce.destroy_node(inst) - terminated_instance_names.append(inst.name) - changed = True - - return (changed, terminated_instance_names) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - image=dict(default='debian-7'), - instance_names=dict(), - machine_type=dict(default='n1-standard-1'), - metadata=dict(), - name=dict(), - network=dict(default='default'), - persistent_boot_disk=dict(type='bool', default=False), - disks=dict(type='list'), - state=dict(choices=['active', 'present', 'absent', 'deleted'], - default='present'), - tags=dict(type='list'), - zone=dict(default='us-central1-a'), - service_account_email=dict(), - service_account_permissions=dict(type='list'), - pem_file=dict(), - project_id=dict(), - ip_forward=dict(type='bool', default=False), - external_ip=dict(choices=['ephemeral', 'none'], - default='ephemeral'), - disk_auto_delete=dict(type='bool', default=True), - ) - ) - - if not HAS_PYTHON26: - module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+") - if not HAS_LIBCLOUD: - module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module') - - gce = gce_connect(module) - - image = module.params.get('image') - instance_names = module.params.get('instance_names') - machine_type = module.params.get('machine_type') - metadata = module.params.get('metadata') - name = module.params.get('name') - network = module.params.get('network') - persistent_boot_disk = module.params.get('persistent_boot_disk') - state = module.params.get('state') - tags = module.params.get('tags') - zone = module.params.get('zone') - ip_forward = module.params.get('ip_forward') - changed = False - - inames = [] - if isinstance(instance_names, list): - inames = instance_names - elif isinstance(instance_names, str): - inames = instance_names.split(',') - if name: - inames.append(name) - if not inames: - module.fail_json(msg='Must specify a "name" or "instance_names"', - changed=False) - if not zone: - module.fail_json(msg='Must specify a "zone"', changed=False) - - json_output = {'zone': zone} - if state in ['absent', 'deleted']: - json_output['state'] = 'absent' - (changed, terminated_instance_names) = terminate_instances( - module, gce, inames, zone) - - # based on what user specified, return the same variable, although - # value could be different if an instance could not be destroyed - if instance_names: - json_output['instance_names'] = terminated_instance_names - elif name: - json_output['name'] = name - - elif state in ['active', 'present']: - json_output['state'] = 'present' - (changed, instance_data, instance_name_list) = create_instances( - module, gce, inames) - json_output['instance_data'] = instance_data - if instance_names: - json_output['instance_names'] = instance_name_list - elif name: - json_output['name'] = name - - json_output['changed'] = changed - module.exit_json(**json_output) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.gce import 
* -if __name__ == '__main__': - main() diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml index 34dcd2496..34ab09533 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -16,18 +16,8 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" + oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}" + oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}" with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}" - -- name: List Hosts - hosts: oo_list_hosts - -- name: List Hosts - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - debug: msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index 7c8189224..b7604580c 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -9,6 +9,7 @@ project_id: "{{ lookup('env', 'gce_project_id') }}" zone: "{{ lookup('env', 'zone') }}" network: "{{ lookup('env', 'network') }}" + subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}" # unsupported in 1.9.+ #service_account_permissions: "datastore,logging-write" tags: diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml index 86d5d0aad..579cd7ac6 100644 --- a/playbooks/libvirt/openshift-cluster/list.yml +++ b/playbooks/libvirt/openshift-cluster/list.yml @@ -16,18 +16,8 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" + oo_public_ipv4: "" + oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}" with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - -- name: List Hosts - hosts: oo_list_hosts - -- name: List Hosts - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - debug: msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml index de68f5207..6c6f671be 100644 --- a/playbooks/openstack/openshift-cluster/list.yml +++ b/playbooks/openstack/openshift-cluster/list.yml @@ -17,18 +17,8 @@ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" + oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}" + oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}" with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - -- name: List Hosts - hosts: oo_list_hosts - -- name: List Hosts - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - debug: msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}" |
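Usage note: this change adds playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml as the BYO entry point for the etcd upgrade and backup plays above. A minimal invocation might look like the sketch below; the inventory path is only an example and must point at your own BYO inventory.
```
# Sketch: run the new etcd upgrade entry point added by this change.
# /etc/ansible/hosts is an example inventory path, not defined by this change.
ansible-playbook -i /etc/ansible/hosts \
    playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
```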