From 6a4b7a5eb6c4b5e747bab795e2428d7c3992f559 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus
Date: Wed, 1 Apr 2015 15:09:19 -0400
Subject: Configuration updates for latest builds and major refactor

Configuration updates for latest builds
- Switch to using create-node-config
- Switch sdn services to use etcd over SSL
- This re-uses the client certificate deployed on each node
- Additional node registration changes
- Do not assume that metadata service is available in openshift_facts module
- Call systemctl daemon-reload after installing openshift-master,
  openshift-sdn-master, openshift-node, openshift-sdn-node
- Fix bug overriding openshift_hostname and openshift_public_hostname in byo
  playbooks
- Start moving generated configs to /etc/openshift
- Some custom module cleanup
- Add known issue with ansible-1.9 to README_OSE.md
- Update to genericize the kubernetes_register_node module
- Default to use kubectl for commands
- Allow for overriding kubectl_cmd
- In openshift_register_node role, override kubectl_cmd to openshift_kube
- Set default openshift_registry_url for enterprise when deployment_type is
  enterprise
- Fix openshift_register_node for client config change
- Ensure that master certs directory is created
- Add roles and filter_plugin symlinks to playbooks/common/openshift-master
  and node
- Allow non-root user with sudo nopasswd access
- Updates for README_OSE.md
- Update byo inventory for adding additional comments
- Updates for node cert/config sync to work with non-root user using sudo
- Move node config/certs to /etc/openshift/node
- Don't use path for mktemp. Addresses:
  https://github.com/openshift/openshift-ansible/issues/154

Create common playbooks
- create common/openshift-master/config.yml
- create common/openshift-node/config.yml
- update playbooks to use new common playbooks
- update launch playbooks to call update playbooks
- fix openshift_registry and openshift_node_ip usage

Set default deployment type to origin
- openshift_repo updates for enabling origin deployments
- also separate repo and gpgkey file structure
- remove kubernetes repo since it isn't currently needed
- full deployment type support for bin/cluster
- honor OS_DEPLOYMENT_TYPE env variable
- add --deployment-type option, which will override OS_DEPLOYMENT_TYPE if set
- if neither OS_DEPLOYMENT_TYPE nor --deployment-type is set, defaults to
  origin installs

Additional changes:
- Add separate config action to bin/cluster that runs ansible config but does
  not update packages
- Some more duplication reduction in cluster playbooks.
- Rename task files in playbooks dirs to have tasks in their name for clarity.
- update aws/gce scripts to use a directory for inventory (otherwise when
  there are no hosts returned from dynamic inventory there is an error)

libvirt refactor and update
- add libvirt dynamic inventory
- updates to use dynamic inventory for libvirt
---
 README_OSE.md | 191 +++- README_libvirt.md | 78 +- bin/cluster | 91 ++- filter_plugins/oo_filters.py | 16 +- inventory/aws/ec2.ini | 62 -- inventory/aws/ec2.py | 798 --------------------- inventory/aws/group_vars/all | 2 - inventory/aws/hosts/ec2.ini | 62 ++ inventory/aws/hosts/ec2.py | 798 +++++++++++++++++++++ inventory/aws/hosts/hosts | 1 + inventory/byo/group_vars/all | 28 - inventory/byo/hosts | 26 +- inventory/gce/gce.py | 287 -------- inventory/gce/group_vars/all | 2 - inventory/gce/hosts/gce.py | 287 ++++++++ inventory/gce/hosts/hosts | 1 + inventory/libvirt/group_vars/all | 2 - inventory/libvirt/hosts | 2 - inventory/libvirt/hosts/hosts | 1 + inventory/libvirt/hosts/libvirt.ini | 20 + inventory/libvirt/hosts/libvirt_generic.py | 179 +++++ playbooks/aws/openshift-cluster/config.yml | 36 + playbooks/aws/openshift-cluster/launch.yml | 73 +- .../aws/openshift-cluster/launch_instances.yml | 63 -- playbooks/aws/openshift-cluster/list.yml | 15 +- .../openshift-cluster/tasks/launch_instances.yml | 69 ++ playbooks/aws/openshift-cluster/terminate.yml | 24 +- playbooks/aws/openshift-cluster/update.yml | 25 +- playbooks/aws/openshift-cluster/vars.yml | 19 + playbooks/aws/openshift-master/config.yml | 27 +- playbooks/aws/openshift-master/launch.yml | 8 +- playbooks/aws/openshift-master/terminate.yml | 17 +- playbooks/aws/openshift-master/vars.yml | 3 - playbooks/aws/openshift-node/config.yml | 110 +-- playbooks/aws/openshift-node/launch.yml | 10 +- playbooks/aws/openshift-node/terminate.yml | 17 +- playbooks/aws/openshift-node/vars.yml | 3 - playbooks/byo/openshift-master/config.yml | 20 +- playbooks/byo/openshift-node/config.yml | 90 +-- playbooks/byo/openshift_facts.yml | 10 + playbooks/common/openshift-cluster/config.yml | 4 + playbooks/common/openshift-cluster/filter_plugins | 1 + playbooks/common/openshift-cluster/roles | 1 + .../set_master_launch_facts_tasks.yml | 11 + .../set_node_launch_facts_tasks.yml | 11 + .../update_repos_and_packages.yml | 7 + playbooks/common/openshift-master/config.yml | 19 + playbooks/common/openshift-master/filter_plugins | 1 + playbooks/common/openshift-master/roles | 1 + playbooks/common/openshift-node/config.yml | 121 ++++ playbooks/common/openshift-node/filter_plugins | 1 + playbooks/common/openshift-node/roles | 1 + playbooks/gce/openshift-cluster/config.yml | 37 + playbooks/gce/openshift-cluster/launch.yml | 72 +- .../gce/openshift-cluster/launch_instances.yml | 44 -- playbooks/gce/openshift-cluster/list.yml | 15 +- .../openshift-cluster/tasks/launch_instances.yml | 42 ++ playbooks/gce/openshift-cluster/terminate.yml | 22 +- playbooks/gce/openshift-cluster/update.yml | 25 +- playbooks/gce/openshift-cluster/vars.yml | 14 + playbooks/gce/openshift-master/config.yml | 24 +- playbooks/gce/openshift-master/launch.yml | 6 +- playbooks/gce/openshift-master/terminate.yml | 11 +- playbooks/gce/openshift-master/vars.yml | 3 - playbooks/gce/openshift-node/config.yml | 106 +-- playbooks/gce/openshift-node/launch.yml | 6 +- playbooks/gce/openshift-node/terminate.yml | 11 +- playbooks/gce/openshift-node/vars.yml | 3 - playbooks/libvirt/openshift-cluster/config.yml | 38 + playbooks/libvirt/openshift-cluster/launch.yml | 81 +-- .../libvirt/openshift-cluster/launch_instances.yml | 102 ---
playbooks/libvirt/openshift-cluster/list.yml | 50 +- .../openshift-cluster/tasks/configure_libvirt.yml | 6 + .../tasks/configure_libvirt_network.yml | 27 + .../tasks/configure_libvirt_storage_pool.yml | 27 + .../openshift-cluster/tasks/launch_instances.yml | 104 +++ .../libvirt/openshift-cluster/templates/domain.xml | 67 ++ .../libvirt/openshift-cluster/templates/meta-data | 3 + .../openshift-cluster/templates/network.xml | 23 + .../libvirt/openshift-cluster/templates/user-data | 23 + playbooks/libvirt/openshift-cluster/terminate.yml | 69 +- playbooks/libvirt/openshift-cluster/update.yml | 18 + playbooks/libvirt/openshift-cluster/vars.yml | 38 +- playbooks/libvirt/openshift-master/config.yml | 21 - playbooks/libvirt/openshift-master/filter_plugins | 1 - playbooks/libvirt/openshift-master/roles | 1 - playbooks/libvirt/openshift-master/vars.yml | 1 - playbooks/libvirt/openshift-node/config.yml | 102 --- playbooks/libvirt/openshift-node/filter_plugins | 1 - playbooks/libvirt/openshift-node/roles | 1 - playbooks/libvirt/openshift-node/vars.yml | 1 - playbooks/libvirt/templates/domain.xml | 62 -- playbooks/libvirt/templates/meta-data | 2 - playbooks/libvirt/templates/user-data | 10 - roles/openshift_common/tasks/main.yml | 4 +- roles/openshift_common/vars/main.yml | 4 + roles/openshift_facts/library/openshift_facts.py | 92 ++- roles/openshift_master/tasks/main.yml | 64 +- roles/openshift_master/vars/main.yml | 5 + roles/openshift_node/tasks/main.yml | 32 +- roles/openshift_node/vars/main.yml | 2 + roles/openshift_register_nodes/defaults/main.yml | 3 - .../library/kubernetes_register_node.py | 63 +- roles/openshift_register_nodes/tasks/main.yml | 64 +- roles/openshift_register_nodes/vars/main.yml | 7 + roles/openshift_repos/README.md | 2 +- roles/openshift_repos/defaults/main.yaml | 5 - .../files/online/RPM-GPG-KEY-redhat-beta | 61 -- .../files/online/RPM-GPG-KEY-redhat-release | 63 -- .../files/online/epel7-kubernetes.repo | 6 - .../files/online/epel7-openshift.repo | 6 - .../files/online/gpg_keys/RPM-GPG-KEY-redhat-beta | 61 ++ .../online/gpg_keys/RPM-GPG-KEY-redhat-release | 63 ++ .../files/online/oso-rhui-rhel-7-extras.repo | 23 - .../files/online/oso-rhui-rhel-7-server.repo | 21 - .../files/online/repos/epel7-openshift.repo | 6 + .../files/online/repos/oso-rhui-rhel-7-extras.repo | 23 + .../files/online/repos/oso-rhui-rhel-7-server.repo | 21 + .../files/online/repos/rhel-7-libra-candidate.repo | 11 + .../files/online/rhel-7-libra-candidate.repo | 11 - .../repos/maxamillion-origin-next-epel-7.repo | 7 + roles/openshift_repos/tasks/main.yaml | 14 +- roles/openshift_repos/templates/yum_repo.j2 | 1 - roles/openshift_sdn_master/tasks/main.yml | 11 +- roles/openshift_sdn_node/tasks/main.yml | 11 +- .../library/os_firewall_manage_iptables.py | 3 +- 126 files changed, 3165 insertions(+), 2677 deletions(-) delete mode 100644 inventory/aws/ec2.ini delete mode 100755 inventory/aws/ec2.py delete mode 100644 inventory/aws/group_vars/all create mode 100644 inventory/aws/hosts/ec2.ini create mode 100755 inventory/aws/hosts/ec2.py create mode 100644 inventory/aws/hosts/hosts delete mode 100644 inventory/byo/group_vars/all delete mode 100755 inventory/gce/gce.py delete mode 100644 inventory/gce/group_vars/all create mode 100755 inventory/gce/hosts/gce.py create mode 100644 inventory/gce/hosts/hosts delete mode 100644 inventory/libvirt/group_vars/all delete mode 100644 inventory/libvirt/hosts create mode 100644 inventory/libvirt/hosts/hosts create mode 100644 inventory/libvirt/hosts/libvirt.ini create mode 
100755 inventory/libvirt/hosts/libvirt_generic.py create mode 100644 playbooks/aws/openshift-cluster/config.yml delete mode 100644 playbooks/aws/openshift-cluster/launch_instances.yml create mode 100644 playbooks/aws/openshift-cluster/tasks/launch_instances.yml delete mode 100644 playbooks/aws/openshift-master/vars.yml delete mode 100644 playbooks/aws/openshift-node/vars.yml create mode 100644 playbooks/byo/openshift_facts.yml create mode 100644 playbooks/common/openshift-cluster/config.yml create mode 120000 playbooks/common/openshift-cluster/filter_plugins create mode 120000 playbooks/common/openshift-cluster/roles create mode 100644 playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml create mode 100644 playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml create mode 100644 playbooks/common/openshift-cluster/update_repos_and_packages.yml create mode 100644 playbooks/common/openshift-master/config.yml create mode 120000 playbooks/common/openshift-master/filter_plugins create mode 120000 playbooks/common/openshift-master/roles create mode 100644 playbooks/common/openshift-node/config.yml create mode 120000 playbooks/common/openshift-node/filter_plugins create mode 120000 playbooks/common/openshift-node/roles create mode 100644 playbooks/gce/openshift-cluster/config.yml delete mode 100644 playbooks/gce/openshift-cluster/launch_instances.yml create mode 100644 playbooks/gce/openshift-cluster/tasks/launch_instances.yml delete mode 100644 playbooks/gce/openshift-master/vars.yml delete mode 100644 playbooks/gce/openshift-node/vars.yml create mode 100644 playbooks/libvirt/openshift-cluster/config.yml delete mode 100644 playbooks/libvirt/openshift-cluster/launch_instances.yml create mode 100644 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml create mode 100644 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml create mode 100644 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml create mode 100644 playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml create mode 100644 playbooks/libvirt/openshift-cluster/templates/domain.xml create mode 100644 playbooks/libvirt/openshift-cluster/templates/meta-data create mode 100644 playbooks/libvirt/openshift-cluster/templates/network.xml create mode 100644 playbooks/libvirt/openshift-cluster/templates/user-data create mode 100644 playbooks/libvirt/openshift-cluster/update.yml delete mode 100644 playbooks/libvirt/openshift-master/config.yml delete mode 120000 playbooks/libvirt/openshift-master/filter_plugins delete mode 120000 playbooks/libvirt/openshift-master/roles delete mode 100644 playbooks/libvirt/openshift-master/vars.yml delete mode 100644 playbooks/libvirt/openshift-node/config.yml delete mode 120000 playbooks/libvirt/openshift-node/filter_plugins delete mode 120000 playbooks/libvirt/openshift-node/roles delete mode 100644 playbooks/libvirt/openshift-node/vars.yml delete mode 100644 playbooks/libvirt/templates/domain.xml delete mode 100644 playbooks/libvirt/templates/meta-data delete mode 100644 playbooks/libvirt/templates/user-data create mode 100644 roles/openshift_master/vars/main.yml create mode 100644 roles/openshift_node/vars/main.yml create mode 100644 roles/openshift_register_nodes/vars/main.yml delete mode 100644 roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta delete mode 100644 roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release delete mode 100644 roles/openshift_repos/files/online/epel7-kubernetes.repo delete mode 
100644 roles/openshift_repos/files/online/epel7-openshift.repo create mode 100644 roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-beta create mode 100644 roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-release delete mode 100644 roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo delete mode 100644 roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo create mode 100644 roles/openshift_repos/files/online/repos/epel7-openshift.repo create mode 100644 roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-extras.repo create mode 100644 roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-server.repo create mode 100644 roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo delete mode 100644 roles/openshift_repos/files/online/rhel-7-libra-candidate.repo create mode 100644 roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo diff --git a/README_OSE.md b/README_OSE.md index 6ebdb7f99..6d4a9ba92 100644 --- a/README_OSE.md +++ b/README_OSE.md @@ -7,15 +7,17 @@ * [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups) * [Running the ansible playbooks](#running-the-ansible-playbooks) * [Post-ansible steps](#post-ansible-steps) +* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames) ## Requirements * ansible - * Tested using ansible-1.8.2-1.fc20.noarch, but should work with version 1.8+ + * Tested using ansible-1.8.4-1.fc20.noarch, but should work with version 1.8+ + * There is currently a known issue with ansible-1.9.0; you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842 * Available in Fedora channels * Available for EL with EPEL and Optional channel * One or more RHEL 7.1 VMs -* ssh key based auth for the root user needs to be pre-configured from the host - running ansible to the remote hosts +* Either ssh key based auth for the root user or ssh key based auth for a user + with sudo access (no password) * A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/ ```sh @@ -48,9 +50,6 @@ subscription-manager repos \ ``` * Configuration of router is not automated yet * Configuration of docker-registry is not automated yet -* End-to-end testing has not been completed yet using this module -* root user is used for all ansible actions; eventually we will support using - a non-root user with sudo. ## Configuring the host inventory [Ansible docs](http://docs.ansible.com/intro_inventory.html) @@ -64,6 +63,38 @@ option to ansible-playbook.
```ini # This is an example of a bring your own (byo) host inventory +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] +# SSH user; this user should allow ssh based auth without requiring a password +ansible_ssh_user=root + +# If ansible_ssh_user is not root, ansible_sudo must be set to true +#ansible_sudo=true + +# To deploy origin, change deployment_type to origin +deployment_type=enterprise + +# Pre-release registry URL +openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version} + +# Pre-release additional repo +openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', +'baseurl': +'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', +'enabled': 1, 'gpgcheck': 0}] + +# Origin copr repo +#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': +'OpenShift Origin COPR', 'baseurl': +'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', +'enabled': 1, 'gpgcheck': 1, 'gpgkey': +'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] + # host group for masters [masters] ose3-master.example.com @@ -76,51 +107,13 @@ ose3-node[1:2].example.com The hostnames above should resolve both from the hosts themselves and the host where ansible is running (if different). -## Creating the default variables for the hosts and host groups -[Ansible docs](http://docs.ansible.com/intro_inventory.html#id9) - -#### Group vars for all hosts -/etc/ansible/group_vars/all: -```yaml ---- -# Assume that we want to use the root as the ssh user for all hosts -ansible_ssh_user: root - -# Default debug level for all OpenShift hosts -openshift_debug_level: 4 - -# Set the OpenShift deployment type for all hosts -openshift_deployment_type: enterprise - -# Override the default registry for development -openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version} - -# To use the latest OpenShift Enterprise Errata puddle: -#openshift_additional_repos: -#- id: ose-devel -# name: ose-devel -# baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os -# enabled: 1 -# gpgcheck: 0 -# To use the latest OpenShift Enterprise Whitelist puddle: -openshift_additional_repos: -- id: ose-devel - name: ose-devel - baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os - enabled: 1 - gpgcheck: 0 - -``` - ## Running the ansible playbooks From the openshift-ansible checkout run: ```sh ansible-playbook playbooks/byo/config.yml ``` -**Note:** this assumes that the host inventory is /etc/ansible/hosts and the -group_vars are defined in /etc/ansible/group_vars, if using a different -inventory file (and a group_vars directory that is in the same directory as -the directory as the inventory) use the -i option for ansible-playbook. +**Note:** this assumes that the host inventory is /etc/ansible/hosts; if using a different +inventory file, use the -i option for ansible-playbook.
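An aside on the inventory example above: each openshift_additional_repos entry is a dict whose keys correspond one-to-one to the fields of a yum `.repo` stanza. The sketch below is not part of the patch; it is an illustrative Python rendering (the `render_yum_repo` helper is hypothetical) of roughly what the openshift_repos role produces from such an entry via its yum_repo.j2 template:

```python
def render_yum_repo(repo):
    """Render one openshift_additional_repos entry as a yum .repo stanza."""
    lines = ["[{0}]".format(repo["id"])]
    for key in ("name", "baseurl", "enabled", "gpgcheck", "gpgkey"):
        if key in repo:
            lines.append("{0}={1}".format(key, repo[key]))
    return "\n".join(lines) + "\n"

# The ose-devel entry from the sample inventory above:
repo = {
    "id": "ose-devel",
    "name": "ose-devel",
    "baseurl": "http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os",
    "enabled": 1,
    "gpgcheck": 0,
}
print(render_yum_repo(repo))
```

Running it prints an `[ose-devel]` stanza with name, baseurl, enabled, and gpgcheck keys, the shape yum expects for a file under /etc/yum.repos.d/.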
## Post-ansible steps #### Create the default router @@ -140,3 +133,109 @@ openshift ex registry --create=true \ --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}' \ --mount-host=/var/lib/openshift/docker-registry ``` + +## Overriding detected ip addresses and hostnames +Some deployments will require that the user override the detected hostnames +and ip addresses for the hosts. To see what the default values will be, you can +run the openshift_facts playbook: +```sh +ansible-playbook playbooks/byo/openshift_facts.yml +``` +The output will be similar to: +``` +ok: [10.3.9.45] => { + "result": { + "ansible_facts": { + "openshift": { + "common": { + "hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com", + "ip": "172.16.4.79", + "public_hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com", + "public_ip": "10.3.9.45", + "use_openshift_sdn": true + }, + "provider": { + ... ... + } + } + }, + "changed": false, + "invocation": { + "module_args": "", + "module_name": "openshift_facts" + } + } } ok: [10.3.9.42] => { + "result": { + "ansible_facts": { + "openshift": { + "common": { + "hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com", + "ip": "172.16.4.75", + "public_hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com", + "public_ip": "10.3.9.42", + "use_openshift_sdn": true + }, + "provider": { + ...... + } + } + }, + "changed": false, + "invocation": { + "module_args": "", + "module_name": "openshift_facts" + } + } } ok: [10.3.9.36] => { + "result": { + "ansible_facts": { + "openshift": { + "common": { + "hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com", + "ip": "172.16.4.73", + "public_hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com", + "public_ip": "10.3.9.36", + "use_openshift_sdn": true + }, + "provider": { + ...... + } + } + }, + "changed": false, + "invocation": { + "module_args": "", + "module_name": "openshift_facts" + } + } } ``` +Now we want to check the detected common settings to verify that they are +what we expect them to be (if not, we can override them). + +* hostname + * Should resolve to the internal ip from the instances themselves. + * openshift_hostname will override. +* ip + * Should be the internal ip of the instance. + * openshift_ip will override. +* public_hostname + * Should resolve to the external ip from hosts outside of the cloud provider. + * openshift_public_hostname will override. +* public_ip + * Should be the externally accessible ip associated with the instance. + * openshift_public_ip will override. +* use_openshift_sdn + * Should be true unless the cloud is GCE. + * openshift_use_openshift_sdn will override. + +To override the defaults, you can set the variables in your inventory: +``` +...snip... +[masters] +ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com +...snip... +``` diff --git a/README_libvirt.md b/README_libvirt.md index fd2eb57f6..bcbaf4bd5 100644 --- a/README_libvirt.md +++ b/README_libvirt.md @@ -1,4 +1,3 @@ - LIBVIRT Setup instructions ========================== @@ -9,19 +8,21 @@ This makes `libvirt` useful to develop, test and debug Openshift and openshift-ansible Install dependencies -------------------- -1.
Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) -2. Install [ebtables](http://ebtables.netfilter.org/) -3. Install [qemu](http://wiki.qemu.org/Main_Page) -4. Install [libvirt](http://libvirt.org/) -5. Enable and start the libvirt daemon, e.g: - * ``systemctl enable libvirtd`` - * ``systemctl start libvirtd`` -6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html) -7. Check that your `$HOME` is accessible to the qemu user² +1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) +2. Install [ebtables](http://ebtables.netfilter.org/) +3. Install [qemu](http://wiki.qemu.org/Main_Page) +4. Install [libvirt](http://libvirt.org/) +5. Enable and start the libvirt daemon, e.g.: + - `systemctl enable libvirtd` + - `systemctl start libvirtd` +6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html) +7. Check that your `$HOME` is accessible to the qemu user² +8. Configure dns resolution on the host³ #### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access. You can test it with the following command: + ``` virsh -c qemu:///system pool-list ``` @@ -67,12 +68,7 @@ If your `$HOME` is world readable, everything is fine. If your `$HOME` is private error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied ``` -In order to fix that issue, you have several possibilities: -* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory: - * backed by a filesystem with a lot of free disk space - * writable by your user; - * accessible by the qemu user. -* Grant the qemu user access to the storage pool. +In order to fix that issue, you have several possibilities: either set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory that is backed by a filesystem with a lot of free disk space, writable by your user, and accessible by the qemu user; or grant the qemu user access to the storage pool. On Arch: @@ -80,13 +76,55 @@ On Arch: setfacl -m g:kvm:--x ~ ``` -Test the setup +#### ³ Enabling DNS resolution to your guest VMs with NetworkManager + +- Verify NetworkManager is configured to use dnsmasq: + +```sh +$ sudo vi /etc/NetworkManager/NetworkManager.conf +[main] +dns=dnsmasq +``` + +- Configure dnsmasq to use the Virtual Network router for example.com: + +```sh +sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf +server=/example.com/192.168.55.1 +``` + +Test The Setup -------------- +1. cd openshift-ansible/ +2. Try to list all instances (passing an empty string as the cluster_id argument lists all libvirt instances) + +``` + bin/cluster list libvirt '' ``` -cd openshift-ansible -bin/cluster create -m 1 -n 3 libvirt lenaic +Creating a cluster +------------------ + +1. To create a cluster with one master and two nodes -bin/cluster terminate libvirt lenaic +``` + bin/cluster create libvirt lenaic +``` + +Updating a cluster +------------------ + +1. To update the cluster + +``` + bin/cluster update libvirt lenaic +``` + +Terminating a cluster +--------------------- + +1.
To terminate the cluster + +``` + bin/cluster terminate libvirt lenaic ``` diff --git a/bin/cluster b/bin/cluster index ca227721e..79f1f988f 100755 --- a/bin/cluster +++ b/bin/cluster @@ -22,13 +22,28 @@ class Cluster(object): '-o ControlPersist=600s ' ) + def get_deployment_type(self, args): + """ + Get the deployment_type based on the environment variables and the + command line arguments + :param args: command line arguments provided by the user + :return: string representing the deployment type + """ + deployment_type = 'origin' + if args.deployment_type: + deployment_type = args.deployment_type + elif 'OS_DEPLOYMENT_TYPE' in os.environ: + deployment_type = os.environ['OS_DEPLOYMENT_TYPE'] + return deployment_type + def create(self, args): """ Create an OpenShift cluster for given provider :param args: command line arguments provided by user :return: exit status from run command """ - env = {'cluster_id': args.cluster_id} + env = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args)} playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider) inventory = self.setup_provider(args.provider) @@ -43,7 +58,8 @@ class Cluster(object): :param args: command line arguments provided by user :return: exit status from run command """ - env = {'cluster_id': args.cluster_id} + env = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args)} playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider) inventory = self.setup_provider(args.provider) @@ -55,19 +71,34 @@ class Cluster(object): :param args: command line arguments provided by user :return: exit status from run command """ - env = {'cluster_id': args.cluster_id} + env = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args)} playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider) inventory = self.setup_provider(args.provider) return self.action(args, inventory, env, playbook) + def config(self, args): + """ + Configure or reconfigure OpenShift across clustered VMs + :param args: command line arguments provided by user + :return: exit status from run command + """ + env = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args)} + playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider) + inventory = self.setup_provider(args.provider) + + return self.action(args, inventory, env, playbook) + def update(self, args): """ Update to latest OpenShift across clustered VMs :param args: command line arguments provided by user :return: exit status from run command """ - env = {'cluster_id': args.cluster_id} + env = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args)} playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider) inventory = self.setup_provider(args.provider) @@ -81,19 +112,19 @@ class Cluster(object): """ config = ConfigParser.ConfigParser() if 'gce' == provider: - config.readfp(open('inventory/gce/gce.ini')) + config.readfp(open('inventory/gce/hosts/gce.ini')) for key in config.options('gce'): os.environ[key] = config.get('gce', key) - inventory = '-i inventory/gce/gce.py' + inventory = '-i inventory/gce/hosts' elif 'aws' == provider: - config.readfp(open('inventory/aws/ec2.ini')) + config.readfp(open('inventory/aws/hosts/ec2.ini')) for key in config.options('ec2'): os.environ[key] = config.get('ec2', key) - inventory = '-i inventory/aws/ec2.py' + inventory = '-i inventory/aws/hosts' elif 'libvirt' == 
provider: inventory = '-i inventory/libvirt/hosts' else: @@ -145,29 +176,49 @@ if __name__ == '__main__': parser = argparse.ArgumentParser( description='Python wrapper to ensure proper environment for OpenShift ansible playbooks', ) - parser.add_argument('-v', '--verbose', action='count', help='Multiple -v options increase the verbosity') + parser.add_argument('-v', '--verbose', action='count', + help='Multiple -v options increase the verbosity') parser.add_argument('--version', action='version', version='%(prog)s 0.2') meta_parser = argparse.ArgumentParser(add_help=False) meta_parser.add_argument('provider', choices=providers, help='provider') meta_parser.add_argument('cluster_id', help='prefix for cluster VM names') - - action_parser = parser.add_subparsers(dest='action', title='actions', description='Choose from valid actions') - - create_parser = action_parser.add_parser('create', help='Create a cluster', parents=[meta_parser]) - create_parser.add_argument('-m', '--masters', default=1, type=int, help='number of masters to create in cluster') - create_parser.add_argument('-n', '--nodes', default=2, type=int, help='number of nodes to create in cluster') + meta_parser.add_argument('-t', '--deployment-type', + choices=['origin', 'online', 'enterprise'], + help='Deployment type. (default: origin)') + + action_parser = parser.add_subparsers(dest='action', title='actions', + description='Choose from valid actions') + + create_parser = action_parser.add_parser('create', help='Create a cluster', + parents=[meta_parser]) + create_parser.add_argument('-m', '--masters', default=1, type=int, + help='number of masters to create in cluster') + create_parser.add_argument('-n', '--nodes', default=2, type=int, + help='number of nodes to create in cluster') create_parser.set_defaults(func=cluster.create) - terminate_parser = action_parser.add_parser('terminate', help='Destroy a cluster', parents=[meta_parser]) - terminate_parser.add_argument('-f', '--force', action='store_true', help='Destroy cluster without confirmation') + config_parser = action_parser.add_parser('config', + help='Configure or reconfigure a cluster', + parents=[meta_parser]) + config_parser.set_defaults(func=cluster.config) + + terminate_parser = action_parser.add_parser('terminate', + help='Destroy a cluster', + parents=[meta_parser]) + terminate_parser.add_argument('-f', '--force', action='store_true', + help='Destroy cluster without confirmation') terminate_parser.set_defaults(func=cluster.terminate) - update_parser = action_parser.add_parser('update', help='Update OpenShift across cluster', parents=[meta_parser]) - update_parser.add_argument('-f', '--force', action='store_true', help='Update cluster without confirmation') + update_parser = action_parser.add_parser('update', + help='Update OpenShift across cluster', + parents=[meta_parser]) + update_parser.add_argument('-f', '--force', action='store_true', + help='Update cluster without confirmation') update_parser.set_defaults(func=cluster.update) - list_parser = action_parser.add_parser('list', help='List VMs in cluster', parents=[meta_parser]) + list_parser = action_parser.add_parser('list', help='List VMs in cluster', + parents=[meta_parser]) list_parser.set_defaults(func=cluster.list) args = parser.parse_args() diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 1cf02218c..cf30cde9a 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -5,6 +5,7 @@ from ansible import errors, runner import json import pdb +import re def 
oo_pdb(arg): ''' This pops you into a pdb instance where arg is the data passed in from the filter. @@ -101,6 +102,18 @@ def oo_prepend_strings_in_list(data, prepend): retval = [prepend + s for s in data] return retval +def oo_get_deployment_type_from_groups(data): + ''' This takes a list of groups and returns the associated + deployment-type + ''' + if not issubclass(type(data), list): + raise errors.AnsibleFilterError("|failed expects first param is a list") + regexp = re.compile('^tag_deployment-type[-_]') + matches = filter(regexp.match, data) + if len(matches) > 0: + return regexp.sub('', matches[0]) + return "Unknown" + class FilterModule (object): def filters(self): return { @@ -109,5 +122,6 @@ class FilterModule (object): "oo_flatten": oo_flatten, "oo_len": oo_len, "oo_pdb": oo_pdb, - "oo_prepend_strings_in_list": oo_prepend_strings_in_list + "oo_prepend_strings_in_list": oo_prepend_strings_in_list, + "oo_get_deployment_type_from_groups": oo_get_deployment_type_from_groups } diff --git a/inventory/aws/ec2.ini b/inventory/aws/ec2.ini deleted file mode 100644 index eaab0a410..000000000 --- a/inventory/aws/ec2.ini +++ /dev/null @@ -1,62 +0,0 @@ -# Ansible EC2 external inventory script settings -# - -[ec2] - -# to talk to a private eucalyptus instance uncomment these lines -# and edit edit eucalyptus_host to be the host name of your cloud controller -#eucalyptus = True -#eucalyptus_host = clc.cloud.domain.org - -# AWS regions to make calls to. Set this to 'all' to make request to all regions -# in AWS and merge the results together. Alternatively, set this to a comma -# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' -regions = all -regions_exclude = us-gov-west-1,cn-north-1 - -# When generating inventory, Ansible needs to know how to address a server. -# Each EC2 instance has a lot of variables associated with it. Here is the list: -# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance -# Below are 2 variables that are used as the address of a server: -# - destination_variable -# - vpc_destination_variable - -# This is the normal destination variable to use. If you are running Ansible -# from outside EC2, then 'public_dns_name' makes the most sense. If you are -# running Ansible from within EC2, then perhaps you want to use the internal -# address, and should set this to 'private_dns_name'. -destination_variable = public_dns_name - -# For server inside a VPC, using DNS names may not make sense. When an instance -# has 'subnet_id' set, this variable is used. If the subnet is public, setting -# this to 'ip_address' will return the public IP address. For instances in a -# private subnet, this should be set to 'private_ip_address', and Ansible must -# be run from with EC2. -vpc_destination_variable = ip_address - -# To tag instances on EC2 with the resource records that point to them from -# Route53, uncomment and set 'route53' to True. -route53 = False - -# Additionally, you can specify the list of zones to exclude looking up in -# 'route53_excluded_zones' as a comma-separated list. -# route53_excluded_zones = samplezone1.com, samplezone2.com - -# API calls to EC2 are slow. For this reason, we cache the results of an API -# call. Set this to the path you want cache files to be written to. Two files -# will be written to this directory: -# - ansible-ec2.cache -# - ansible-ec2.index -cache_path = ~/.ansible/tmp - -# The number of seconds a cache file is considered valid. 
After this many -# seconds, a new API call will be made, and the cache file will be updated. -# To disable the cache, set this value to 0 -cache_max_age = 300 - -# These two settings allow flexible ansible host naming based on a format -# string and a comma-separated list of ec2 tags. The tags used must be -# present for all instances, or the code will fail. This overrides both -# destination_variable and vpc_destination_variable. -# destination_format = {0}.{1}.rhcloud.com -# destination_format_tags = Name,environment diff --git a/inventory/aws/ec2.py b/inventory/aws/ec2.py deleted file mode 100755 index f231ff4c2..000000000 --- a/inventory/aws/ec2.py +++ /dev/null @@ -1,798 +0,0 @@ -#!/usr/bin/env python2 - -''' -EC2 external inventory script -================================= - -Generates inventory that Ansible can understand by making API request to -AWS EC2 using the Boto library. - -NOTE: This script assumes Ansible is being executed where the environment -variables needed for Boto have already been set: - export AWS_ACCESS_KEY_ID='AK123' - export AWS_SECRET_ACCESS_KEY='abc123' - -This script also assumes there is an ec2.ini file alongside it. To specify a -different path to ec2.ini, define the EC2_INI_PATH environment variable: - - export EC2_INI_PATH=/path/to/my_ec2.ini - -If you're using eucalyptus you need to set the above variables and -you need to define: - - export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus - -For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html - -When run against a specific host, this script returns the following variables: - - ec2_ami_launch_index - - ec2_architecture - - ec2_association - - ec2_attachTime - - ec2_attachment - - ec2_attachmentId - - ec2_client_token - - ec2_deleteOnTermination - - ec2_description - - ec2_deviceIndex - - ec2_dns_name - - ec2_eventsSet - - ec2_group_name - - ec2_hypervisor - - ec2_id - - ec2_image_id - - ec2_instanceState - - ec2_instance_type - - ec2_ipOwnerId - - ec2_ip_address - - ec2_item - - ec2_kernel - - ec2_key_name - - ec2_launch_time - - ec2_monitored - - ec2_monitoring - - ec2_networkInterfaceId - - ec2_ownerId - - ec2_persistent - - ec2_placement - - ec2_platform - - ec2_previous_state - - ec2_private_dns_name - - ec2_private_ip_address - - ec2_publicIp - - ec2_public_dns_name - - ec2_ramdisk - - ec2_reason - - ec2_region - - ec2_requester_id - - ec2_root_device_name - - ec2_root_device_type - - ec2_security_group_ids - - ec2_security_group_names - - ec2_shutdown_state - - ec2_sourceDestCheck - - ec2_spot_instance_request_id - - ec2_state - - ec2_state_code - - ec2_state_reason - - ec2_status - - ec2_subnet_id - - ec2_tenancy - - ec2_virtualization_type - - ec2_vpc_id - -These variables are pulled out of a boto.ec2.instance object. There is a lack of -consistency with variable spellings (camelCase and underscores) since this -just loops through all variables the object exposes. It is preferred to use the -ones with underscores when multiple exist. - -In addition, if an instance has AWS Tags associated with it, each tag is a new -variable named: - - ec2_tag_[Key] = [Value] - -Security groups are comma-separated in 'ec2_security_group_ids' and -'ec2_security_group_names'. 
-''' - -# (c) 2012, Peter Sankauskas -# -# This file is part of Ansible, -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -###################################################################### - -import sys -import os -import argparse -import re -from time import time -import boto -from boto import ec2 -from boto import rds -from boto import route53 -import ConfigParser -from collections import defaultdict - -try: - import json -except ImportError: - import simplejson as json - - -class Ec2Inventory(object): - def _empty_inventory(self): - return {"_meta" : {"hostvars" : {}}} - - def __init__(self): - ''' Main execution path ''' - - # Inventory grouped by instance IDs, tags, security groups, regions, - # and availability zones - self.inventory = self._empty_inventory() - - # Index of hostname (address) to instance ID - self.index = {} - - # Read settings and parse CLI arguments - self.read_settings() - self.parse_cli_args() - - # Cache - if self.args.refresh_cache: - self.do_api_calls_update_cache() - elif not self.is_cache_valid(): - self.do_api_calls_update_cache() - - # Data to print - if self.args.host: - data_to_print = self.get_host_info() - - elif self.args.list: - # Display list of instances for inventory - if self.inventory == self._empty_inventory(): - data_to_print = self.get_inventory_from_cache() - else: - data_to_print = self.json_format_dict(self.inventory, True) - - print data_to_print - - - def is_cache_valid(self): - ''' Determines if the cache files have expired, or if it is still valid ''' - - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_index): - return True - - return False - - - def read_settings(self): - ''' Reads the settings from the ec2.ini file ''' - - config = ConfigParser.SafeConfigParser() - ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') - ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path) - config.read(ec2_ini_path) - - # is eucalyptus? 
- self.eucalyptus_host = None - self.eucalyptus = False - if config.has_option('ec2', 'eucalyptus'): - self.eucalyptus = config.getboolean('ec2', 'eucalyptus') - if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): - self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') - - # Regions - self.regions = [] - configRegions = config.get('ec2', 'regions') - configRegions_exclude = config.get('ec2', 'regions_exclude') - if (configRegions == 'all'): - if self.eucalyptus_host: - self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) - else: - for regionInfo in ec2.regions(): - if regionInfo.name not in configRegions_exclude: - self.regions.append(regionInfo.name) - else: - self.regions = configRegions.split(",") - - # Destination addresses - self.destination_variable = config.get('ec2', 'destination_variable') - self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') - - if config.has_option('ec2', 'destination_format') and \ - config.has_option('ec2', 'destination_format_tags'): - self.destination_format = config.get('ec2', 'destination_format') - self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',') - else: - self.destination_format = None - self.destination_format_tags = None - - # Route53 - self.route53_enabled = config.getboolean('ec2', 'route53') - self.route53_excluded_zones = [] - if config.has_option('ec2', 'route53_excluded_zones'): - self.route53_excluded_zones.extend( - config.get('ec2', 'route53_excluded_zones', '').split(',')) - - # Include RDS instances? - self.rds_enabled = True - if config.has_option('ec2', 'rds'): - self.rds_enabled = config.getboolean('ec2', 'rds') - - # Return all EC2 and RDS instances (if RDS is enabled) - if config.has_option('ec2', 'all_instances'): - self.all_instances = config.getboolean('ec2', 'all_instances') - else: - self.all_instances = False - if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: - self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') - else: - self.all_rds_instances = False - - # Cache related - cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) - if not os.path.exists(cache_dir): - os.makedirs(cache_dir) - - self.cache_path_cache = cache_dir + "/ansible-ec2.cache" - self.cache_path_index = cache_dir + "/ansible-ec2.index" - self.cache_max_age = config.getint('ec2', 'cache_max_age') - - # Configure nested groups instead of flat namespace. - if config.has_option('ec2', 'nested_groups'): - self.nested_groups = config.getboolean('ec2', 'nested_groups') - else: - self.nested_groups = False - - # Configure which groups should be created. - group_by_options = [ - 'group_by_instance_id', - 'group_by_region', - 'group_by_availability_zone', - 'group_by_ami_id', - 'group_by_instance_type', - 'group_by_key_pair', - 'group_by_vpc_id', - 'group_by_security_group', - 'group_by_tag_keys', - 'group_by_tag_none', - 'group_by_route53_names', - 'group_by_rds_engine', - 'group_by_rds_parameter_group', - ] - for option in group_by_options: - if config.has_option('ec2', option): - setattr(self, option, config.getboolean('ec2', option)) - else: - setattr(self, option, True) - - # Do we need to just include hosts that match a pattern? 
- try: - pattern_include = config.get('ec2', 'pattern_include') - if pattern_include and len(pattern_include) > 0: - self.pattern_include = re.compile(pattern_include) - else: - self.pattern_include = None - except ConfigParser.NoOptionError, e: - self.pattern_include = None - - # Do we need to exclude hosts that match a pattern? - try: - pattern_exclude = config.get('ec2', 'pattern_exclude'); - if pattern_exclude and len(pattern_exclude) > 0: - self.pattern_exclude = re.compile(pattern_exclude) - else: - self.pattern_exclude = None - except ConfigParser.NoOptionError, e: - self.pattern_exclude = None - - # Instance filters (see boto and EC2 API docs). Ignore invalid filters. - self.ec2_instance_filters = defaultdict(list) - if config.has_option('ec2', 'instance_filters'): - for instance_filter in config.get('ec2', 'instance_filters', '').split(','): - instance_filter = instance_filter.strip() - if not instance_filter or '=' not in instance_filter: - continue - filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] - if not filter_key: - continue - self.ec2_instance_filters[filter_key].append(filter_value) - - def parse_cli_args(self): - ''' Command line argument processing ''' - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') - parser.add_argument('--list', action='store_true', default=True, - help='List instances (default: True)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') - self.args = parser.parse_args() - - - def do_api_calls_update_cache(self): - ''' Do API calls to each region, and save data in cache files ''' - - if self.route53_enabled: - self.get_route53_records() - - for region in self.regions: - self.get_instances_by_region(region) - if self.rds_enabled: - self.get_rds_instances_by_region(region) - - self.write_to_cache(self.inventory, self.cache_path_cache) - self.write_to_cache(self.index, self.cache_path_index) - - - def get_instances_by_region(self, region): - ''' Makes an AWS EC2 API call to the list of instances in a particular - region ''' - - try: - if self.eucalyptus: - conn = boto.connect_euca(host=self.eucalyptus_host) - conn.APIVersion = '2010-08-31' - else: - conn = ec2.connect_to_region(region) - - # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported - if conn is None: - print("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) - sys.exit(1) - - reservations = [] - if self.ec2_instance_filters: - for filter_key, filter_values in self.ec2_instance_filters.iteritems(): - reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) - else: - reservations = conn.get_all_instances() - - for reservation in reservations: - for instance in reservation.instances: - self.add_instance(instance, region) - - except boto.exception.BotoServerError, e: - if not self.eucalyptus: - print "Looks like AWS is down again:" - print e - sys.exit(1) - - def get_rds_instances_by_region(self, region): - ''' Makes an AWS API call to the list of RDS instances in a particular - region ''' - - try: - conn = rds.connect_to_region(region) - if conn: - instances = conn.get_all_dbinstances() - for instance in instances: - self.add_rds_instance(instance, region) - except boto.exception.BotoServerError, e: - if not e.reason == "Forbidden": - print "Looks like AWS RDS is down: " - print e - sys.exit(1) - - def get_instance(self, region, instance_id): - ''' Gets details about a specific instance ''' - if self.eucalyptus: - conn = boto.connect_euca(self.eucalyptus_host) - conn.APIVersion = '2010-08-31' - else: - conn = ec2.connect_to_region(region) - - # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported - if conn is None: - print("region name: %s likely not supported, or AWS is down. connection to region failed." % region) - sys.exit(1) - - reservations = conn.get_all_instances([instance_id]) - for reservation in reservations: - for instance in reservation.instances: - return instance - - def add_instance(self, instance, region): - ''' Adds an instance to the inventory and index, as long as it is - addressable ''' - - # Only want running instances unless all_instances is True - if not self.all_instances and instance.state != 'running': - return - - # Select the best destination address - if self.destination_format and self.destination_format_tags: - dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, 'nil') for tag in self.destination_format_tags ]) - elif instance.subnet_id: - dest = getattr(instance, self.vpc_destination_variable, None) - if dest is None: - dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) - else: - dest = getattr(instance, self.destination_variable, None) - if dest is None: - dest = getattr(instance, 'tags').get(self.destination_variable, None) - - if not dest: - # Skip instances we cannot address (e.g. 
private VPC subnet) - return - - # if we only want to include hosts that match a pattern, skip those that don't - if self.pattern_include and not self.pattern_include.match(dest): - return - - # if we need to exclude hosts that match a pattern, skip those - if self.pattern_exclude and self.pattern_exclude.match(dest): - return - - # Add to index - self.index[dest] = [region, instance.id] - - # Inventory: Group by instance ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[instance.id] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', instance.id) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, dest) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone: - self.push(self.inventory, instance.placement, dest) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, instance.placement) - self.push_group(self.inventory, 'zones', instance.placement) - - # Inventory: Group by Amazon Machine Image (AMI) ID - if self.group_by_ami_id: - ami_id = self.to_safe(instance.image_id) - self.push(self.inventory, ami_id, dest) - if self.nested_groups: - self.push_group(self.inventory, 'images', ami_id) - - # Inventory: Group by instance type - if self.group_by_instance_type: - type_name = self.to_safe('type_' + instance.instance_type) - self.push(self.inventory, type_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by key pair - if self.group_by_key_pair and instance.key_name: - key_name = self.to_safe('key_' + instance.key_name) - self.push(self.inventory, key_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'keys', key_name) - - # Inventory: Group by VPC - if self.group_by_vpc_id and instance.vpc_id: - vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) - self.push(self.inventory, vpc_id_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'vpcs', vpc_id_name) - - # Inventory: Group by security group - if self.group_by_security_group: - try: - for group in instance.groups: - key = self.to_safe("security_group_" + group.name) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' 
- sys.exit(1) - - # Inventory: Group by tag keys - if self.group_by_tag_keys: - for k, v in instance.tags.iteritems(): - key = self.to_safe("tag_" + k + "=" + v) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) - self.push_group(self.inventory, self.to_safe("tag_" + k), key) - - # Inventory: Group by Route53 domain names if enabled - if self.route53_enabled and self.group_by_route53_names: - route53_names = self.get_instance_route53_names(instance) - for name in route53_names: - self.push(self.inventory, name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'route53', name) - - # Global Tag: instances without tags - if self.group_by_tag_none and len(instance.tags) == 0: - self.push(self.inventory, 'tag_none', dest) - if self.nested_groups: - self.push_group(self.inventory, 'tags', 'tag_none') - - # Global Tag: tag all EC2 instances - self.push(self.inventory, 'ec2', dest) - - self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) - - - def add_rds_instance(self, instance, region): - ''' Adds an RDS instance to the inventory and index, as long as it is - addressable ''' - - # Only want available instances unless all_rds_instances is True - if not self.all_rds_instances and instance.status != 'available': - return - - # Select the best destination address - dest = instance.endpoint[0] - - if not dest: - # Skip instances we cannot address (e.g. private VPC subnet) - return - - # Add to index - self.index[dest] = [region, instance.id] - - # Inventory: Group by instance ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[instance.id] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', instance.id) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, dest) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone: - self.push(self.inventory, instance.availability_zone, dest) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, instance.availability_zone) - self.push_group(self.inventory, 'zones', instance.availability_zone) - - # Inventory: Group by instance type - if self.group_by_instance_type: - type_name = self.to_safe('type_' + instance.instance_class) - self.push(self.inventory, type_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by VPC - if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: - vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) - self.push(self.inventory, vpc_id_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'vpcs', vpc_id_name) - - # Inventory: Group by security group - if self.group_by_security_group: - try: - if instance.security_group: - key = self.to_safe("security_group_" + instance.security_group.name) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - - except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' 
- sys.exit(1) - - # Inventory: Group by engine - if self.group_by_rds_engine: - self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) - if self.nested_groups: - self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) - - # Inventory: Group by parameter group - if self.group_by_rds_parameter_group: - self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) - if self.nested_groups: - self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) - - # Global Tag: all RDS instances - self.push(self.inventory, 'rds', dest) - - self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) - - - def get_route53_records(self): - ''' Get and store the map of resource records to domain names that - point to them. ''' - - r53_conn = route53.Route53Connection() - all_zones = r53_conn.get_zones() - - route53_zones = [ zone for zone in all_zones if zone.name[:-1] - not in self.route53_excluded_zones ] - - self.route53_records = {} - - for zone in route53_zones: - rrsets = r53_conn.get_all_rrsets(zone.id) - - for record_set in rrsets: - record_name = record_set.name - - if record_name.endswith('.'): - record_name = record_name[:-1] - - for resource in record_set.resource_records: - self.route53_records.setdefault(resource, set()) - self.route53_records[resource].add(record_name) - - - def get_instance_route53_names(self, instance): - ''' Check if an instance is referenced in the records we have from - Route53. If it is, return the list of domain names pointing to said - instance. If nothing points to it, return an empty list. ''' - - instance_attributes = [ 'public_dns_name', 'private_dns_name', - 'ip_address', 'private_ip_address' ] - - name_list = set() - - for attrib in instance_attributes: - try: - value = getattr(instance, attrib) - except AttributeError: - continue - - if value in self.route53_records: - name_list.update(self.route53_records[value]) - - return list(name_list) - - - def get_host_info_dict_from_instance(self, instance): - instance_vars = {} - for key in vars(instance): - value = getattr(instance, key) - key = self.to_safe('ec2_' + key) - - # Handle complex types - # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 - if key == 'ec2__state': - instance_vars['ec2_state'] = instance.state or '' - instance_vars['ec2_state_code'] = instance.state_code - elif key == 'ec2__previous_state': - instance_vars['ec2_previous_state'] = instance.previous_state or '' - instance_vars['ec2_previous_state_code'] = instance.previous_state_code - elif type(value) in [int, bool]: - instance_vars[key] = value - elif type(value) in [str, unicode]: - instance_vars[key] = value.strip() - elif type(value) == type(None): - instance_vars[key] = '' - elif key == 'ec2_region': - instance_vars[key] = value.name - elif key == 'ec2__placement': - instance_vars['ec2_placement'] = value.zone - elif key == 'ec2_tags': - for k, v in value.iteritems(): - key = self.to_safe('ec2_tag_' + k) - instance_vars[key] = v - elif key == 'ec2_groups': - group_ids = [] - group_names = [] - for group in value: - group_ids.append(group.id) - group_names.append(group.name) - instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) - instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) - else: - pass - # TODO Product 
codes if someone finds them useful - #print key - #print type(value) - #print value - - return instance_vars - - def get_host_info(self): - ''' Get variables about a specific host ''' - - if len(self.index) == 0: - # Need to load index from cache - self.load_index_from_cache() - - if not self.args.host in self.index: - # try updating the cache - self.do_api_calls_update_cache() - if not self.args.host in self.index: - # host might not exist anymore - return self.json_format_dict({}, True) - - (region, instance_id) = self.index[self.args.host] - - instance = self.get_instance(region, instance_id) - return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) - - def push(self, my_dict, key, element): - ''' Push an element onto an array that may not have been defined in - the dict ''' - group_info = my_dict.setdefault(key, []) - if isinstance(group_info, dict): - host_list = group_info.setdefault('hosts', []) - host_list.append(element) - else: - group_info.append(element) - - def push_group(self, my_dict, key, element): - ''' Push a group as a child of another group. ''' - parent_group = my_dict.setdefault(key, {}) - if not isinstance(parent_group, dict): - parent_group = my_dict[key] = {'hosts': parent_group} - child_groups = parent_group.setdefault('children', []) - if element not in child_groups: - child_groups.append(element) - - def get_inventory_from_cache(self): - ''' Reads the inventory from the cache file and returns it as a JSON - object ''' - - cache = open(self.cache_path_cache, 'r') - json_inventory = cache.read() - return json_inventory - - - def load_index_from_cache(self): - ''' Reads the index from the cache file sets self.index ''' - - cache = open(self.cache_path_index, 'r') - json_index = cache.read() - self.index = json.loads(json_index) - - - def write_to_cache(self, data, filename): - ''' Writes data in JSON format to a file ''' - - json_data = self.json_format_dict(data, True) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be - used as Ansible groups ''' - - return re.sub("[^A-Za-z0-9\-]", "_", word) - - - def json_format_dict(self, data, pretty=False): - ''' Converts a dict to a JSON object and dumps it as a formatted - string ''' - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -# Run the script -Ec2Inventory() - diff --git a/inventory/aws/group_vars/all b/inventory/aws/group_vars/all deleted file mode 100644 index b22da00de..000000000 --- a/inventory/aws/group_vars/all +++ /dev/null @@ -1,2 +0,0 @@ ---- -ansible_ssh_user: root diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini new file mode 100644 index 000000000..eaab0a410 --- /dev/null +++ b/inventory/aws/hosts/ec2.ini @@ -0,0 +1,62 @@ +# Ansible EC2 external inventory script settings +# + +[ec2] + +# to talk to a private eucalyptus instance uncomment these lines +# and edit eucalyptus_host to be the host name of your cloud controller +#eucalyptus = True +#eucalyptus_host = clc.cloud.domain.org + +# AWS regions to make calls to. Set this to 'all' to make requests to all regions +# in AWS and merge the results together. Alternatively, set this to a comma +# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' +regions = all +regions_exclude = us-gov-west-1,cn-north-1 + +# When generating inventory, Ansible needs to know how to address a server. 
+# Each EC2 instance has a lot of variables associated with it. Here is the list: +# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance +# Below are 2 variables that are used as the address of a server: +# - destination_variable +# - vpc_destination_variable + +# This is the normal destination variable to use. If you are running Ansible +# from outside EC2, then 'public_dns_name' makes the most sense. If you are +# running Ansible from within EC2, then perhaps you want to use the internal +# address, and should set this to 'private_dns_name'. +destination_variable = public_dns_name + +# For servers inside a VPC, using DNS names may not make sense. When an instance +# has 'subnet_id' set, this variable is used. If the subnet is public, setting +# this to 'ip_address' will return the public IP address. For instances in a +# private subnet, this should be set to 'private_ip_address', and Ansible must +# be run from within EC2. +vpc_destination_variable = ip_address + +# To tag instances on EC2 with the resource records that point to them from +# Route53, uncomment and set 'route53' to True. +route53 = False + +# Additionally, you can specify a list of zones to exclude from lookups in +# 'route53_excluded_zones' as a comma-separated list. +# route53_excluded_zones = samplezone1.com, samplezone2.com + +# API calls to EC2 are slow. For this reason, we cache the results of an API +# call. Set this to the path you want cache files to be written to. Two files +# will be written to this directory: +# - ansible-ec2.cache +# - ansible-ec2.index +cache_path = ~/.ansible/tmp + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +# To disable the cache, set this value to 0 +cache_max_age = 300 + +# These two settings allow flexible Ansible host naming based on a format +# string and a comma-separated list of EC2 tags. The tags used must be +# present for all instances, or the code will fail. This overrides both +# destination_variable and vpc_destination_variable. +# destination_format = {0}.{1}.rhcloud.com +# destination_format_tags = Name,environment diff --git a/inventory/aws/hosts/ec2.py b/inventory/aws/hosts/ec2.py new file mode 100755 index 000000000..f231ff4c2 --- /dev/null +++ b/inventory/aws/hosts/ec2.py @@ -0,0 +1,798 @@ +#!/usr/bin/env python2 + +''' +EC2 external inventory script +================================= + +Generates inventory that Ansible can understand by making API requests to +AWS EC2 using the Boto library. + +NOTE: This script assumes Ansible is being executed where the environment +variables needed for Boto have already been set: + export AWS_ACCESS_KEY_ID='AK123' + export AWS_SECRET_ACCESS_KEY='abc123' + +This script also assumes there is an ec2.ini file alongside it. 
To specify a +different path to ec2.ini, define the EC2_INI_PATH environment variable: + + export EC2_INI_PATH=/path/to/my_ec2.ini + +If you're using eucalyptus you need to set the above variables and +you need to define: + + export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus + +For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html + +When run against a specific host, this script returns the following variables: + - ec2_ami_launch_index + - ec2_architecture + - ec2_association + - ec2_attachTime + - ec2_attachment + - ec2_attachmentId + - ec2_client_token + - ec2_deleteOnTermination + - ec2_description + - ec2_deviceIndex + - ec2_dns_name + - ec2_eventsSet + - ec2_group_name + - ec2_hypervisor + - ec2_id + - ec2_image_id + - ec2_instanceState + - ec2_instance_type + - ec2_ipOwnerId + - ec2_ip_address + - ec2_item + - ec2_kernel + - ec2_key_name + - ec2_launch_time + - ec2_monitored + - ec2_monitoring + - ec2_networkInterfaceId + - ec2_ownerId + - ec2_persistent + - ec2_placement + - ec2_platform + - ec2_previous_state + - ec2_private_dns_name + - ec2_private_ip_address + - ec2_publicIp + - ec2_public_dns_name + - ec2_ramdisk + - ec2_reason + - ec2_region + - ec2_requester_id + - ec2_root_device_name + - ec2_root_device_type + - ec2_security_group_ids + - ec2_security_group_names + - ec2_shutdown_state + - ec2_sourceDestCheck + - ec2_spot_instance_request_id + - ec2_state + - ec2_state_code + - ec2_state_reason + - ec2_status + - ec2_subnet_id + - ec2_tenancy + - ec2_virtualization_type + - ec2_vpc_id + +These variables are pulled out of a boto.ec2.instance object. There is a lack of +consistency with variable spellings (camelCase and underscores) since this +just loops through all variables the object exposes. It is preferred to use the +ones with underscores when multiple exist. + +In addition, if an instance has AWS Tags associated with it, each tag is a new +variable named: + - ec2_tag_[Key] = [Value] + +Security groups are comma-separated in 'ec2_security_group_ids' and +'ec2_security_group_names'. +''' + +# (c) 2012, Peter Sankauskas +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
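+# NOTE: this script is Python 2 only (print statements, ConfigParser, dict.iteritems); the python2 shebang above reflects that.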
+ +###################################################################### + +import sys +import os +import argparse +import re +from time import time +import boto +from boto import ec2 +from boto import rds +from boto import route53 +import ConfigParser +from collections import defaultdict + +try: + import json +except ImportError: + import simplejson as json + + +class Ec2Inventory(object): + def _empty_inventory(self): + return {"_meta" : {"hostvars" : {}}} + + def __init__(self): + ''' Main execution path ''' + + # Inventory grouped by instance IDs, tags, security groups, regions, + # and availability zones + self.inventory = self._empty_inventory() + + # Index of hostname (address) to instance ID + self.index = {} + + # Read settings and parse CLI arguments + self.read_settings() + self.parse_cli_args() + + # Cache + if self.args.refresh_cache: + self.do_api_calls_update_cache() + elif not self.is_cache_valid(): + self.do_api_calls_update_cache() + + # Data to print + if self.args.host: + data_to_print = self.get_host_info() + + elif self.args.list: + # Display list of instances for inventory + if self.inventory == self._empty_inventory(): + data_to_print = self.get_inventory_from_cache() + else: + data_to_print = self.json_format_dict(self.inventory, True) + + print data_to_print + + + def is_cache_valid(self): + ''' Determines if the cache files have expired, or if it is still valid ''' + + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + if os.path.isfile(self.cache_path_index): + return True + + return False + + + def read_settings(self): + ''' Reads the settings from the ec2.ini file ''' + + config = ConfigParser.SafeConfigParser() + ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') + ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path) + config.read(ec2_ini_path) + + # is eucalyptus? 
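+ # (Eucalyptus exposes an EC2-compatible API; when enabled here, connections are made with boto.connect_euca instead of ec2.connect_to_region.)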
+ self.eucalyptus_host = None + self.eucalyptus = False + if config.has_option('ec2', 'eucalyptus'): + self.eucalyptus = config.getboolean('ec2', 'eucalyptus') + if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): + self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') + + # Regions + self.regions = [] + configRegions = config.get('ec2', 'regions') + configRegions_exclude = config.get('ec2', 'regions_exclude') + if (configRegions == 'all'): + if self.eucalyptus_host: + self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) + else: + for regionInfo in ec2.regions(): + if regionInfo.name not in configRegions_exclude: + self.regions.append(regionInfo.name) + else: + self.regions = configRegions.split(",") + + # Destination addresses + self.destination_variable = config.get('ec2', 'destination_variable') + self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') + + if config.has_option('ec2', 'destination_format') and \ + config.has_option('ec2', 'destination_format_tags'): + self.destination_format = config.get('ec2', 'destination_format') + self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',') + else: + self.destination_format = None + self.destination_format_tags = None + + # Route53 + self.route53_enabled = config.getboolean('ec2', 'route53') + self.route53_excluded_zones = [] + if config.has_option('ec2', 'route53_excluded_zones'): + self.route53_excluded_zones.extend( + config.get('ec2', 'route53_excluded_zones', '').split(',')) + + # Include RDS instances? + self.rds_enabled = True + if config.has_option('ec2', 'rds'): + self.rds_enabled = config.getboolean('ec2', 'rds') + + # Return all EC2 and RDS instances (if RDS is enabled) + if config.has_option('ec2', 'all_instances'): + self.all_instances = config.getboolean('ec2', 'all_instances') + else: + self.all_instances = False + if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: + self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') + else: + self.all_rds_instances = False + + # Cache related + cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + + self.cache_path_cache = cache_dir + "/ansible-ec2.cache" + self.cache_path_index = cache_dir + "/ansible-ec2.index" + self.cache_max_age = config.getint('ec2', 'cache_max_age') + + # Configure nested groups instead of flat namespace. + if config.has_option('ec2', 'nested_groups'): + self.nested_groups = config.getboolean('ec2', 'nested_groups') + else: + self.nested_groups = False + + # Configure which groups should be created. + group_by_options = [ + 'group_by_instance_id', + 'group_by_region', + 'group_by_availability_zone', + 'group_by_ami_id', + 'group_by_instance_type', + 'group_by_key_pair', + 'group_by_vpc_id', + 'group_by_security_group', + 'group_by_tag_keys', + 'group_by_tag_none', + 'group_by_route53_names', + 'group_by_rds_engine', + 'group_by_rds_parameter_group', + ] + for option in group_by_options: + if config.has_option('ec2', option): + setattr(self, option, config.getboolean('ec2', option)) + else: + setattr(self, option, True) + + # Do we need to just include hosts that match a pattern? 
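+ # (pattern_include and pattern_exclude are compiled here, then matched against each instance's destination address in add_instance.)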
+ try: + pattern_include = config.get('ec2', 'pattern_include') + if pattern_include and len(pattern_include) > 0: + self.pattern_include = re.compile(pattern_include) + else: + self.pattern_include = None + except ConfigParser.NoOptionError, e: + self.pattern_include = None + + # Do we need to exclude hosts that match a pattern? + try: + pattern_exclude = config.get('ec2', 'pattern_exclude'); + if pattern_exclude and len(pattern_exclude) > 0: + self.pattern_exclude = re.compile(pattern_exclude) + else: + self.pattern_exclude = None + except ConfigParser.NoOptionError, e: + self.pattern_exclude = None + + # Instance filters (see boto and EC2 API docs). Ignore invalid filters. + self.ec2_instance_filters = defaultdict(list) + if config.has_option('ec2', 'instance_filters'): + for instance_filter in config.get('ec2', 'instance_filters', '').split(','): + instance_filter = instance_filter.strip() + if not instance_filter or '=' not in instance_filter: + continue + filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] + if not filter_key: + continue + self.ec2_instance_filters[filter_key].append(filter_value) + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all the variables about a specific instance') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') + self.args = parser.parse_args() + + + def do_api_calls_update_cache(self): + ''' Do API calls to each region, and save data in cache files ''' + + if self.route53_enabled: + self.get_route53_records() + + for region in self.regions: + self.get_instances_by_region(region) + if self.rds_enabled: + self.get_rds_instances_by_region(region) + + self.write_to_cache(self.inventory, self.cache_path_cache) + self.write_to_cache(self.index, self.cache_path_index) + + + def get_instances_by_region(self, region): + ''' Makes an AWS EC2 API call to the list of instances in a particular + region ''' + + try: + if self.eucalyptus: + conn = boto.connect_euca(host=self.eucalyptus_host) + conn.APIVersion = '2010-08-31' + else: + conn = ec2.connect_to_region(region) + + # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported + if conn is None: + print("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) + sys.exit(1) + + reservations = [] + if self.ec2_instance_filters: + for filter_key, filter_values in self.ec2_instance_filters.iteritems(): + reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) + else: + reservations = conn.get_all_instances() + + for reservation in reservations: + for instance in reservation.instances: + self.add_instance(instance, region) + + except boto.exception.BotoServerError, e: + if not self.eucalyptus: + print "Looks like AWS is down again:" + print e + sys.exit(1) + + def get_rds_instances_by_region(self, region): + ''' Makes an AWS API call to the list of RDS instances in a particular + region ''' + + try: + conn = rds.connect_to_region(region) + if conn: + instances = conn.get_all_dbinstances() + for instance in instances: + self.add_rds_instance(instance, region) + except boto.exception.BotoServerError, e: + if not e.reason == "Forbidden": + print "Looks like AWS RDS is down: " + print e + sys.exit(1) + + def get_instance(self, region, instance_id): + ''' Gets details about a specific instance ''' + if self.eucalyptus: + conn = boto.connect_euca(self.eucalyptus_host) + conn.APIVersion = '2010-08-31' + else: + conn = ec2.connect_to_region(region) + + # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported + if conn is None: + print("region name: %s likely not supported, or AWS is down. connection to region failed." % region) + sys.exit(1) + + reservations = conn.get_all_instances([instance_id]) + for reservation in reservations: + for instance in reservation.instances: + return instance + + def add_instance(self, instance, region): + ''' Adds an instance to the inventory and index, as long as it is + addressable ''' + + # Only want running instances unless all_instances is True + if not self.all_instances and instance.state != 'running': + return + + # Select the best destination address + if self.destination_format and self.destination_format_tags: + dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, 'nil') for tag in self.destination_format_tags ]) + elif instance.subnet_id: + dest = getattr(instance, self.vpc_destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) + else: + dest = getattr(instance, self.destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.destination_variable, None) + + if not dest: + # Skip instances we cannot address (e.g. 
private VPC subnet) + return + + # if we only want to include hosts that match a pattern, skip those that don't + if self.pattern_include and not self.pattern_include.match(dest): + return + + # if we need to exclude hosts that match a pattern, skip those + if self.pattern_exclude and self.pattern_exclude.match(dest): + return + + # Add to index + self.index[dest] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[instance.id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, instance.placement, dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.placement) + self.push_group(self.inventory, 'zones', instance.placement) + + # Inventory: Group by Amazon Machine Image (AMI) ID + if self.group_by_ami_id: + ami_id = self.to_safe(instance.image_id) + self.push(self.inventory, ami_id, dest) + if self.nested_groups: + self.push_group(self.inventory, 'images', ami_id) + + # Inventory: Group by instance type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_type) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by key pair + if self.group_by_key_pair and instance.key_name: + key_name = self.to_safe('key_' + instance.key_name) + self.push(self.inventory, key_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'keys', key_name) + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.vpc_id: + vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) + self.push(self.inventory, vpc_id_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + try: + for group in instance.groups: + key = self.to_safe("security_group_" + group.name) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + except AttributeError: + print 'Package boto seems a bit older.' + print 'Please upgrade boto >= 2.3.0.' 
+ sys.exit(1) + + # Inventory: Group by tag keys + if self.group_by_tag_keys: + for k, v in instance.tags.iteritems(): + key = self.to_safe("tag_" + k + "=" + v) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) + self.push_group(self.inventory, self.to_safe("tag_" + k), key) + + # Inventory: Group by Route53 domain names if enabled + if self.route53_enabled and self.group_by_route53_names: + route53_names = self.get_instance_route53_names(instance) + for name in route53_names: + self.push(self.inventory, name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'route53', name) + + # Global Tag: instances without tags + if self.group_by_tag_none and len(instance.tags) == 0: + self.push(self.inventory, 'tag_none', dest) + if self.nested_groups: + self.push_group(self.inventory, 'tags', 'tag_none') + + # Global Tag: tag all EC2 instances + self.push(self.inventory, 'ec2', dest) + + self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + + + def add_rds_instance(self, instance, region): + ''' Adds an RDS instance to the inventory and index, as long as it is + addressable ''' + + # Only want available instances unless all_rds_instances is True + if not self.all_rds_instances and instance.status != 'available': + return + + # Select the best destination address + dest = instance.endpoint[0] + + if not dest: + # Skip instances we cannot address (e.g. private VPC subnet) + return + + # Add to index + self.index[dest] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[instance.id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, instance.availability_zone, dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.availability_zone) + self.push_group(self.inventory, 'zones', instance.availability_zone) + + # Inventory: Group by instance type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_class) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: + vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) + self.push(self.inventory, vpc_id_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + try: + if instance.security_group: + key = self.to_safe("security_group_" + instance.security_group.name) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + except AttributeError: + print 'Package boto seems a bit older.' + print 'Please upgrade boto >= 2.3.0.' 
+ sys.exit(1) + + # Inventory: Group by engine + if self.group_by_rds_engine: + self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) + if self.nested_groups: + self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) + + # Inventory: Group by parameter group + if self.group_by_rds_parameter_group: + self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) + if self.nested_groups: + self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) + + # Global Tag: all RDS instances + self.push(self.inventory, 'rds', dest) + + self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + + + def get_route53_records(self): + ''' Get and store the map of resource records to domain names that + point to them. ''' + + r53_conn = route53.Route53Connection() + all_zones = r53_conn.get_zones() + + route53_zones = [ zone for zone in all_zones if zone.name[:-1] + not in self.route53_excluded_zones ] + + self.route53_records = {} + + for zone in route53_zones: + rrsets = r53_conn.get_all_rrsets(zone.id) + + for record_set in rrsets: + record_name = record_set.name + + if record_name.endswith('.'): + record_name = record_name[:-1] + + for resource in record_set.resource_records: + self.route53_records.setdefault(resource, set()) + self.route53_records[resource].add(record_name) + + + def get_instance_route53_names(self, instance): + ''' Check if an instance is referenced in the records we have from + Route53. If it is, return the list of domain names pointing to said + instance. If nothing points to it, return an empty list. ''' + + instance_attributes = [ 'public_dns_name', 'private_dns_name', + 'ip_address', 'private_ip_address' ] + + name_list = set() + + for attrib in instance_attributes: + try: + value = getattr(instance, attrib) + except AttributeError: + continue + + if value in self.route53_records: + name_list.update(self.route53_records[value]) + + return list(name_list) + + + def get_host_info_dict_from_instance(self, instance): + instance_vars = {} + for key in vars(instance): + value = getattr(instance, key) + key = self.to_safe('ec2_' + key) + + # Handle complex types + # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 + if key == 'ec2__state': + instance_vars['ec2_state'] = instance.state or '' + instance_vars['ec2_state_code'] = instance.state_code + elif key == 'ec2__previous_state': + instance_vars['ec2_previous_state'] = instance.previous_state or '' + instance_vars['ec2_previous_state_code'] = instance.previous_state_code + elif type(value) in [int, bool]: + instance_vars[key] = value + elif type(value) in [str, unicode]: + instance_vars[key] = value.strip() + elif type(value) == type(None): + instance_vars[key] = '' + elif key == 'ec2_region': + instance_vars[key] = value.name + elif key == 'ec2__placement': + instance_vars['ec2_placement'] = value.zone + elif key == 'ec2_tags': + for k, v in value.iteritems(): + key = self.to_safe('ec2_tag_' + k) + instance_vars[key] = v + elif key == 'ec2_groups': + group_ids = [] + group_names = [] + for group in value: + group_ids.append(group.id) + group_names.append(group.name) + instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) + instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) + else: + pass + # TODO Product 
codes if someone finds them useful + #print key + #print type(value) + #print value + + return instance_vars + + def get_host_info(self): + ''' Get variables about a specific host ''' + + if len(self.index) == 0: + # Need to load index from cache + self.load_index_from_cache() + + if not self.args.host in self.index: + # try updating the cache + self.do_api_calls_update_cache() + if not self.args.host in self.index: + # host might not exist anymore + return self.json_format_dict({}, True) + + (region, instance_id) = self.index[self.args.host] + + instance = self.get_instance(region, instance_id) + return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) + + def push(self, my_dict, key, element): + ''' Push an element onto an array that may not have been defined in + the dict ''' + group_info = my_dict.setdefault(key, []) + if isinstance(group_info, dict): + host_list = group_info.setdefault('hosts', []) + host_list.append(element) + else: + group_info.append(element) + + def push_group(self, my_dict, key, element): + ''' Push a group as a child of another group. ''' + parent_group = my_dict.setdefault(key, {}) + if not isinstance(parent_group, dict): + parent_group = my_dict[key] = {'hosts': parent_group} + child_groups = parent_group.setdefault('children', []) + if element not in child_groups: + child_groups.append(element) + + def get_inventory_from_cache(self): + ''' Reads the inventory from the cache file and returns it as a JSON + object ''' + + cache = open(self.cache_path_cache, 'r') + json_inventory = cache.read() + return json_inventory + + + def load_index_from_cache(self): + ''' Reads the index from the cache file sets self.index ''' + + cache = open(self.cache_path_index, 'r') + json_index = cache.read() + self.index = json.loads(json_index) + + + def write_to_cache(self, data, filename): + ''' Writes data in JSON format to a file ''' + + json_data = self.json_format_dict(data, True) + cache = open(filename, 'w') + cache.write(json_data) + cache.close() + + + def to_safe(self, word): + ''' Converts 'bad' characters in a string to underscores so they can be + used as Ansible groups ''' + + return re.sub("[^A-Za-z0-9\-]", "_", word) + + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +# Run the script +Ec2Inventory() + diff --git a/inventory/aws/hosts/hosts b/inventory/aws/hosts/hosts new file mode 100644 index 000000000..6c590ac93 --- /dev/null +++ b/inventory/aws/hosts/hosts @@ -0,0 +1 @@ +localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 diff --git a/inventory/byo/group_vars/all b/inventory/byo/group_vars/all deleted file mode 100644 index d63e96668..000000000 --- a/inventory/byo/group_vars/all +++ /dev/null @@ -1,28 +0,0 @@ ---- -# lets assume that we want to use the root as the ssh user for all hosts -ansible_ssh_user: root - -# default debug level for all OpenShift hosts -openshift_debug_level: 4 - -# set the OpenShift deployment type for all hosts -openshift_deployment_type: enterprise - -# Override the default registry for development -openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version} - -# Use latest Errata puddle as an additional repo: -#openshift_additional_repos: -#- id: ose-devel -# name: ose-devel -# baseurl: 
http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os -# enabled: 1 -# gpgcheck: 0 - -# Use latest Whitelist puddle as an additional repo: -openshift_additional_repos: -- id: ose-devel - name: ose-devel - baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os - enabled: 1 - gpgcheck: 0 diff --git a/inventory/byo/hosts b/inventory/byo/hosts index 2dd854778..e9af5e571 100644 --- a/inventory/byo/hosts +++ b/inventory/byo/hosts @@ -1,5 +1,30 @@ # This is an example of a bring your own (byo) host inventory +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] +# SSH user; this user should allow ssh-based auth without requiring a password +ansible_ssh_user=root + +# If ansible_ssh_user is not root, ansible_sudo must be set to true +#ansible_sudo=true + +# To deploy origin, change deployment_type to origin +deployment_type=enterprise + +# Pre-release registry URL +openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version} + +# Pre-release additional repo +openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] + +# Origin copr repo +#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] + # host group for masters [masters] ose3-master-ansible.test.example.com @@ -7,4 +32,3 @@ ose3-master-ansible.test.example.com # host group for nodes [nodes] ose3-node[1:2]-ansible.test.example.com - diff --git a/inventory/gce/gce.py b/inventory/gce/gce.py deleted file mode 100755 index 3403f735e..000000000 --- a/inventory/gce/gce.py +++ /dev/null @@ -1,287 +0,0 @@ -#!/usr/bin/env python2 -# Copyright 2013 Google Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -''' -GCE external inventory script -================================= - -Generates inventory that Ansible can understand by making API requests
- -When run against a specific host, this script returns the following variables -based on the data obtained from the libcloud Node object: - - gce_uuid - - gce_id - - gce_image - - gce_machine_type - - gce_private_ip - - gce_public_ip - - gce_name - - gce_description - - gce_status - - gce_zone - - gce_tags - - gce_metadata - - gce_network - -When run in --list mode, instances are grouped by the following categories: - - zone: - zone group name examples are us-central1-b, europe-west1-a, etc. - - instance tags: - An entry is created for each tag. For example, if you have two instances - with a common tag called 'foo', they will both be grouped together under - the 'tag_foo' name. - - network name: - the name of the network is appended to 'network_' (e.g. the 'default' - network will result in a group named 'network_default') - - machine type - types follow a pattern like n1-standard-4, g1-small, etc. - - running status: - group name prefixed with 'status_' (e.g. status_running, status_stopped,..) - - image: - when using an ephemeral/scratch disk, this will be set to the image name - used when creating the instance (e.g. debian-7-wheezy-v20130816). when - your instance was created with a root persistent disk it will be set to - 'persistent_disk' since there is no current way to determine the image. - -Examples: - Execute uname on all instances in the us-central1-a zone - $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" - - Use the GCE inventory script to print out instance specific information - $ plugins/inventory/gce.py --host my_instance - -Author: Eric Johnson -Version: 0.0.1 -''' - -USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin" -USER_AGENT_VERSION="v1" - -import sys -import os -import argparse -import ConfigParser - -try: - import json -except ImportError: - import simplejson as json - -try: - from libcloud.compute.types import Provider - from libcloud.compute.providers import get_driver - _ = Provider.GCE -except: - print("GCE inventory script requires libcloud >= 0.13") - sys.exit(1) - - -class GceInventory(object): - def __init__(self): - # Read settings and parse CLI arguments - self.parse_cli_args() - self.driver = self.get_gce_driver() - - # Just display data for specific host - if self.args.host: - print self.json_format_dict(self.node_to_dict( - self.get_instance(self.args.host)), - pretty=self.args.pretty) - sys.exit(0) - - # Otherwise, assume user wants all instances grouped - print(self.json_format_dict(self.group_instances(), - pretty=self.args.pretty)) - sys.exit(0) - - def get_gce_driver(self): - """Determine the GCE authorization settings and return a - libcloud driver. - """ - gce_ini_default_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "gce.ini") - gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) - - # Create a ConfigParser. - # This provides empty defaults to each key, so that environment - # variable configuration (as opposed to INI configuration) is able - # to work. - config = ConfigParser.SafeConfigParser(defaults={ - 'gce_service_account_email_address': '', - 'gce_service_account_pem_file_path': '', - 'gce_project_id': '', - 'libcloud_secrets': '', - }) - if 'gce' not in config.sections(): - config.add_section('gce') - config.read(gce_ini_path) - - # Attempt to get GCE params from a configuration file, if one - # exists. 
- secrets_path = config.get('gce', 'libcloud_secrets') - secrets_found = False - try: - import secrets - args = list(getattr(secrets, 'GCE_PARAMS', [])) - kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) - secrets_found = True - except: - pass - - if not secrets_found and secrets_path: - if not secrets_path.endswith('secrets.py'): - err = "Must specify libcloud secrets file as " - err += "/absolute/path/to/secrets.py" - print(err) - sys.exit(1) - sys.path.append(os.path.dirname(secrets_path)) - try: - import secrets - args = list(getattr(secrets, 'GCE_PARAMS', [])) - kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) - secrets_found = True - except: - pass - if not secrets_found: - args = [ - config.get('gce','gce_service_account_email_address'), - config.get('gce','gce_service_account_pem_file_path') - ] - kwargs = {'project': config.get('gce', 'gce_project_id')} - - # If the appropriate environment variables are set, they override - # other configuration; process those into our args and kwargs. - args[0] = os.environ.get('GCE_EMAIL', args[0]) - args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) - kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) - - # Retrieve and return the GCE driver. - gce = get_driver(Provider.GCE)(*args, **kwargs) - gce.connection.user_agent_append( - '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION), - ) - return gce - - def parse_cli_args(self): - ''' Command line argument processing ''' - - parser = argparse.ArgumentParser( - description='Produce an Ansible Inventory file based on GCE') - parser.add_argument('--list', action='store_true', default=True, - help='List instances (default: True)') - parser.add_argument('--host', action='store', - help='Get all information about an instance') - parser.add_argument('--pretty', action='store_true', default=False, - help='Pretty format (default: False)') - self.args = parser.parse_args() - - - def node_to_dict(self, inst): - md = {} - - if inst is None: - return {} - - if inst.extra['metadata'].has_key('items'): - for entry in inst.extra['metadata']['items']: - md[entry['key']] = entry['value'] - - net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] - return { - 'gce_uuid': inst.uuid, - 'gce_id': inst.id, - 'gce_image': inst.image, - 'gce_machine_type': inst.size, - 'gce_private_ip': inst.private_ips[0], - 'gce_public_ip': inst.public_ips[0], - 'gce_name': inst.name, - 'gce_description': inst.extra['description'], - 'gce_status': inst.extra['status'], - 'gce_zone': inst.extra['zone'].name, - 'gce_tags': inst.extra['tags'], - 'gce_metadata': md, - 'gce_network': net, - # Hosts don't have a public name, so we add an IP - 'ansible_ssh_host': inst.public_ips[0] - } - - def get_instance(self, instance_name): - '''Gets details about a specific instance ''' - try: - return self.driver.ex_get_node(instance_name) - except Exception, e: - return None - - def group_instances(self): - '''Group all instances''' - groups = {} - meta = {} - meta["hostvars"] = {} - - for node in self.driver.list_nodes(): - name = node.name - - meta["hostvars"][name] = self.node_to_dict(node) - - zone = node.extra['zone'].name - if groups.has_key(zone): groups[zone].append(name) - else: groups[zone] = [name] - - tags = node.extra['tags'] - for t in tags: - tag = 'tag_%s' % t - if groups.has_key(tag): groups[tag].append(name) - else: groups[tag] = [name] - - net = node.extra['networkInterfaces'][0]['network'].split('/')[-1] - net = 'network_%s' % net - if groups.has_key(net): groups[net].append(name) - else: 
groups[net] = [name] - - machine_type = node.size - if groups.has_key(machine_type): groups[machine_type].append(name) - else: groups[machine_type] = [name] - - image = node.image and node.image or 'persistent_disk' - if groups.has_key(image): groups[image].append(name) - else: groups[image] = [name] - - status = node.extra['status'] - stat = 'status_%s' % status.lower() - if groups.has_key(stat): groups[stat].append(name) - else: groups[stat] = [name] - - groups["_meta"] = meta - - return groups - - def json_format_dict(self, data, pretty=False): - ''' Converts a dict to a JSON object and dumps it as a formatted - string ''' - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -# Run the script -GceInventory() diff --git a/inventory/gce/group_vars/all b/inventory/gce/group_vars/all deleted file mode 100644 index b22da00de..000000000 --- a/inventory/gce/group_vars/all +++ /dev/null @@ -1,2 +0,0 @@ ---- -ansible_ssh_user: root diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py new file mode 100755 index 000000000..3403f735e --- /dev/null +++ b/inventory/gce/hosts/gce.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python2 +# Copyright 2013 Google Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +''' +GCE external inventory script +================================= + +Generates inventory that Ansible can understand by making API requests +Google Compute Engine via the libcloud library. Full install/configuration +instructions for the gce* modules can be found in the comments of +ansible/test/gce_tests.py. + +When run against a specific host, this script returns the following variables +based on the data obtained from the libcloud Node object: + - gce_uuid + - gce_id + - gce_image + - gce_machine_type + - gce_private_ip + - gce_public_ip + - gce_name + - gce_description + - gce_status + - gce_zone + - gce_tags + - gce_metadata + - gce_network + +When run in --list mode, instances are grouped by the following categories: + - zone: + zone group name examples are us-central1-b, europe-west1-a, etc. + - instance tags: + An entry is created for each tag. For example, if you have two instances + with a common tag called 'foo', they will both be grouped together under + the 'tag_foo' name. + - network name: + the name of the network is appended to 'network_' (e.g. the 'default' + network will result in a group named 'network_default') + - machine type + types follow a pattern like n1-standard-4, g1-small, etc. + - running status: + group name prefixed with 'status_' (e.g. status_running, status_stopped,..) + - image: + when using an ephemeral/scratch disk, this will be set to the image name + used when creating the instance (e.g. debian-7-wheezy-v20130816). when + your instance was created with a root persistent disk it will be set to + 'persistent_disk' since there is no current way to determine the image. 
+ +Examples: + Execute uname on all instances in the us-central1-a zone + $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" + + Use the GCE inventory script to print out instance specific information + $ plugins/inventory/gce.py --host my_instance + +Author: Eric Johnson +Version: 0.0.1 +''' + +USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin" +USER_AGENT_VERSION="v1" + +import sys +import os +import argparse +import ConfigParser + +try: + import json +except ImportError: + import simplejson as json + +try: + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + _ = Provider.GCE +except: + print("GCE inventory script requires libcloud >= 0.13") + sys.exit(1) + + +class GceInventory(object): + def __init__(self): + # Read settings and parse CLI arguments + self.parse_cli_args() + self.driver = self.get_gce_driver() + + # Just display data for specific host + if self.args.host: + print self.json_format_dict(self.node_to_dict( + self.get_instance(self.args.host)), + pretty=self.args.pretty) + sys.exit(0) + + # Otherwise, assume user wants all instances grouped + print(self.json_format_dict(self.group_instances(), + pretty=self.args.pretty)) + sys.exit(0) + + def get_gce_driver(self): + """Determine the GCE authorization settings and return a + libcloud driver. + """ + gce_ini_default_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "gce.ini") + gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) + + # Create a ConfigParser. + # This provides empty defaults to each key, so that environment + # variable configuration (as opposed to INI configuration) is able + # to work. + config = ConfigParser.SafeConfigParser(defaults={ + 'gce_service_account_email_address': '', + 'gce_service_account_pem_file_path': '', + 'gce_project_id': '', + 'libcloud_secrets': '', + }) + if 'gce' not in config.sections(): + config.add_section('gce') + config.read(gce_ini_path) + + # Attempt to get GCE params from a configuration file, if one + # exists. + secrets_path = config.get('gce', 'libcloud_secrets') + secrets_found = False + try: + import secrets + args = list(getattr(secrets, 'GCE_PARAMS', [])) + kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) + secrets_found = True + except: + pass + + if not secrets_found and secrets_path: + if not secrets_path.endswith('secrets.py'): + err = "Must specify libcloud secrets file as " + err += "/absolute/path/to/secrets.py" + print(err) + sys.exit(1) + sys.path.append(os.path.dirname(secrets_path)) + try: + import secrets + args = list(getattr(secrets, 'GCE_PARAMS', [])) + kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) + secrets_found = True + except: + pass + if not secrets_found: + args = [ + config.get('gce','gce_service_account_email_address'), + config.get('gce','gce_service_account_pem_file_path') + ] + kwargs = {'project': config.get('gce', 'gce_project_id')} + + # If the appropriate environment variables are set, they override + # other configuration; process those into our args and kwargs. + args[0] = os.environ.get('GCE_EMAIL', args[0]) + args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) + kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) + + # Retrieve and return the GCE driver. 
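+ # (get_driver returns the libcloud GCE driver class; instantiating it with the credentials gathered above yields a usable driver.)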
+ gce = get_driver(Provider.GCE)(*args, **kwargs) + gce.connection.user_agent_append( + '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION), + ) + return gce + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser( + description='Produce an Ansible Inventory file based on GCE') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all information about an instance') + parser.add_argument('--pretty', action='store_true', default=False, + help='Pretty format (default: False)') + self.args = parser.parse_args() + + + def node_to_dict(self, inst): + md = {} + + if inst is None: + return {} + + if inst.extra['metadata'].has_key('items'): + for entry in inst.extra['metadata']['items']: + md[entry['key']] = entry['value'] + + net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] + return { + 'gce_uuid': inst.uuid, + 'gce_id': inst.id, + 'gce_image': inst.image, + 'gce_machine_type': inst.size, + 'gce_private_ip': inst.private_ips[0], + 'gce_public_ip': inst.public_ips[0], + 'gce_name': inst.name, + 'gce_description': inst.extra['description'], + 'gce_status': inst.extra['status'], + 'gce_zone': inst.extra['zone'].name, + 'gce_tags': inst.extra['tags'], + 'gce_metadata': md, + 'gce_network': net, + # Hosts don't have a public name, so we add an IP + 'ansible_ssh_host': inst.public_ips[0] + } + + def get_instance(self, instance_name): + '''Gets details about a specific instance ''' + try: + return self.driver.ex_get_node(instance_name) + except Exception, e: + return None + + def group_instances(self): + '''Group all instances''' + groups = {} + meta = {} + meta["hostvars"] = {} + + for node in self.driver.list_nodes(): + name = node.name + + meta["hostvars"][name] = self.node_to_dict(node) + + zone = node.extra['zone'].name + if groups.has_key(zone): groups[zone].append(name) + else: groups[zone] = [name] + + tags = node.extra['tags'] + for t in tags: + tag = 'tag_%s' % t + if groups.has_key(tag): groups[tag].append(name) + else: groups[tag] = [name] + + net = node.extra['networkInterfaces'][0]['network'].split('/')[-1] + net = 'network_%s' % net + if groups.has_key(net): groups[net].append(name) + else: groups[net] = [name] + + machine_type = node.size + if groups.has_key(machine_type): groups[machine_type].append(name) + else: groups[machine_type] = [name] + + image = node.image and node.image or 'persistent_disk' + if groups.has_key(image): groups[image].append(name) + else: groups[image] = [name] + + status = node.extra['status'] + stat = 'status_%s' % status.lower() + if groups.has_key(stat): groups[stat].append(name) + else: groups[stat] = [name] + + groups["_meta"] = meta + + return groups + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +# Run the script +GceInventory() diff --git a/inventory/gce/hosts/hosts b/inventory/gce/hosts/hosts new file mode 100644 index 000000000..6c590ac93 --- /dev/null +++ b/inventory/gce/hosts/hosts @@ -0,0 +1 @@ +localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 diff --git a/inventory/libvirt/group_vars/all b/inventory/libvirt/group_vars/all deleted file mode 100644 index b22da00de..000000000 --- a/inventory/libvirt/group_vars/all +++ /dev/null @@ -1,2 +0,0 @@ ---- 
-ansible_ssh_user: root diff --git a/inventory/libvirt/hosts b/inventory/libvirt/hosts deleted file mode 100644 index 6a818f268..000000000 --- a/inventory/libvirt/hosts +++ /dev/null @@ -1,2 +0,0 @@ -# Eventually we'll add the GCE, AWS, etc dynamic inventories, but for now... -localhost ansible_python_interpreter=/usr/bin/python2 diff --git a/inventory/libvirt/hosts/hosts b/inventory/libvirt/hosts/hosts new file mode 100644 index 000000000..9cdc31449 --- /dev/null +++ b/inventory/libvirt/hosts/hosts @@ -0,0 +1 @@ +localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 connection=local diff --git a/inventory/libvirt/hosts/libvirt.ini b/inventory/libvirt/hosts/libvirt.ini new file mode 100644 index 000000000..62ff204dd --- /dev/null +++ b/inventory/libvirt/hosts/libvirt.ini @@ -0,0 +1,20 @@ +# Ansible libvirt external inventory script settings +# + +[libvirt] + +uri = qemu:///system + +# API calls to libvirt can be slow. For this reason, we cache the results of an API +# call. Set this to the path you want cache files to be written to. Two files +# will be written to this directory: +# - ansible-libvirt.cache +# - ansible-libvirt.index +cache_path = /tmp + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +cache_max_age = 900 + + + diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py new file mode 100755 index 000000000..0a98e2af3 --- /dev/null +++ b/inventory/libvirt/hosts/libvirt_generic.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python + +""" +libvirt external inventory script +================================= + +Ansible has a feature where instead of reading from /etc/ansible/hosts +as a text file, it can query external programs to obtain the list +of hosts, groups the hosts are in, and even variables to assign to each host. + +To use this, copy this file over /etc/ansible/hosts and chmod +x the file. +This, more or less, allows you to keep one central database containing +info about all of your managed instances. + +""" + +# (c) 2015, Jason DeTiberus +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
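+# NOTE: guest IP addresses are discovered by matching each domain's interface MAC against the host ARP cache (/proc/net/arp), so only guests the host has recently exchanged traffic with will have ansible_ssh_host set.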
+ +###################################################################### + +import argparse +import ConfigParser +import os +import re +import sys +from time import time +import libvirt +import xml.etree.ElementTree as ET + +try: + import json +except ImportError: + import simplejson as json + + +class LibvirtInventory(object): + + def __init__(self): + self.inventory = dict() # A dict of groups and the hosts in each group + self.cache = dict() # Details about hosts in the inventory + + # Read settings and parse CLI arguments + self.read_settings() + self.parse_cli_args() + + if self.args.host: + print self.json_format_dict(self.get_host_info(), self.args.pretty) + elif self.args.list: + print self.json_format_dict(self.get_inventory(), self.args.pretty) + else: # default action with no options + print self.json_format_dict(self.get_inventory(), self.args.pretty) + + def read_settings(self): + config = ConfigParser.SafeConfigParser() + config.read( + os.path.dirname(os.path.realpath(__file__)) + '/libvirt.ini' + ) + self.libvirt_uri = config.get('libvirt', 'uri') + + def parse_cli_args(self): + parser = argparse.ArgumentParser( + description='Produce an Ansible Inventory file based on libvirt' + ) + parser.add_argument( + '--list', + action='store_true', + default=True, + help='List instances (default: True)' + ) + parser.add_argument( + '--host', + action='store', + help='Get all the variables about a specific instance' + ) + parser.add_argument( + '--pretty', + action='store_true', + default=False, + help='Pretty format (default: False)' + ) + self.args = parser.parse_args() + + def get_host_info(self): + inventory = self.get_inventory() + if self.args.host in inventory['_meta']['hostvars']: + return inventory['_meta']['hostvars'][self.args.host] + + def get_inventory(self): + inventory = dict(_meta=dict(hostvars=dict())) + + conn = libvirt.openReadOnly(self.libvirt_uri) + if conn is None: + print "Failed to open connection to %s" % self.libvirt_uri + sys.exit(1) + + domains = conn.listAllDomains() + if domains is None: + print "Failed to list domains for connection %s" % self.libvirt_uri + sys.exit(1) + + arp_entries = self.parse_arp_entries() + + for domain in domains: + hostvars = dict(libvirt_name=domain.name(), + libvirt_id=domain.ID(), + libvirt_uuid=domain.UUIDString()) + domain_name = domain.name() + + # TODO: add support for guests that are not in a running state + state, _ = domain.state() + # 1 (libvirt.VIR_DOMAIN_RUNNING) is the state for a running guest + if state != 1: + continue + + hostvars['libvirt_status'] = 'running' + + root = ET.fromstring(domain.XMLDesc()) + ns = {'ansible': 'https://github.com/ansible/ansible'} + for tag_elem in root.findall('./metadata/ansible:tag', ns): + tag = tag_elem.text + self.push(inventory, "tag_%s" % tag, domain_name) + self.push(hostvars, 'libvirt_tags', tag) + + # TODO: support more than one network interface, also support + # interface types other than 'network' + interface = root.find("./devices/interface[@type='network']") + if interface is not None: + mac_elem = interface.find('mac') + if mac_elem is not None: + mac = mac_elem.get('address') + if mac in arp_entries: + ip_address = arp_entries[mac]['ip_address'] + hostvars['ansible_ssh_host'] = ip_address + hostvars['libvirt_ip_address'] = ip_address + + inventory['_meta']['hostvars'][domain_name] = hostvars + + return inventory + + def parse_arp_entries(self): + arp_entries = dict() + with open('/proc/net/arp', 'r') as f: + # throw away the header + f.readline() + + for line in f: + ip_address, _, _, mac, _, device = 
line.strip().split() + arp_entries[mac] = dict(ip_address=ip_address, device=device) + + return arp_entries + + def push(self, my_dict, key, element): + if key in my_dict: + my_dict[key].append(element) + else: + my_dict[key] = [element] + + def json_format_dict(self, data, pretty=False): + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + +LibvirtInventory() diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml new file mode 100644 index 000000000..b8961704e --- /dev/null +++ b/playbooks/aws/openshift-cluster/config.yml @@ -0,0 +1,36 @@ +--- +- name: Populate oo_masters_to_config host group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + groups: oo_masters_to_config + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]) + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]) + - name: Evaluate oo_first_master + add_host: + name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}" + groups: oo_first_master + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups" + +- include: ../../common/openshift-cluster/config.yml + vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" + openshift_hostname: "{{ ec2_private_ip_address }}" + openshift_public_hostname: "{{ ec2_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml index 3561c1803..e7125ea0c 100644 --- a/playbooks/aws/openshift-cluster/launch.yml +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -4,59 +4,26 @@ connection: local gather_facts: no vars_files: - - vars.yml + - vars.yml tasks: - - set_fact: k8s_type="master" - - - name: Generate master instance names(s) - set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} - register: master_names_output - with_sequence: start=1 end={{ num_masters }} - - # These set_fact's cannot be combined - - set_fact: - master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" - - - set_fact: - master_names: "{{ master_names_string.strip().split(' ') }}" - - - include: launch_instances.yml - vars: - instances: "{{ master_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - - - set_fact: k8s_type="node" - - - name: Generate node instance names(s) - set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} - register: node_names_output - with_sequence: start=1 end={{ num_nodes }} - - # These set_fact's cannot be combined - - set_fact: - node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" - - - set_fact: - node_names: "{{ node_names_string.strip().split(' ') }}" - - - include: launch_instances.yml - 
vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - -- hosts: "tag_env_{{ cluster_id }}" - roles: - - openshift_repos - - os_update_latest - -- include: ../openshift-master/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]" - -- include: ../openshift-node/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]" + - fail: + msg: Deployment type not supported for aws provider yet + when: deployment_type == 'enterprise' + + - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ master_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + + - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ node_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + +- include: update.yml - include: list.yml diff --git a/playbooks/aws/openshift-cluster/launch_instances.yml b/playbooks/aws/openshift-cluster/launch_instances.yml deleted file mode 100644 index 9d645fbe5..000000000 --- a/playbooks/aws/openshift-cluster/launch_instances.yml +++ /dev/null @@ -1,63 +0,0 @@ ---- -- set_fact: - machine_type: "{{ lookup('env', 'ec2_instance_type')|default('m3.large', true) }}" - machine_image: "{{ lookup('env', 'ec2_ami')|default('ami-307b3658', true) }}" - machine_region: "{{ lookup('env', 'ec2_region')|default('us-east-1', true) }}" - machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}" - created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}" - security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}" - env: "{{ cluster }}" - host_type: "{{ type }}" - env_host_type: "{{ cluster }}-openshift-{{ type }}" - -- name: Launch instance(s) - ec2: - state: present - region: "{{ machine_region }}" - keypair: "{{ machine_keypair }}" - group: "{{ security_group }}" - instance_type: "{{ machine_type }}" - image: "{{ machine_image }}" - count: "{{ instances | oo_len }}" - wait: yes - instance_tags: - created-by: "{{ created_by }}" - env: "{{ env }}" - host-type: "{{ host_type }}" - env-host-type: "{{ env_host_type }}" - register: ec2 - -- name: Add Name tag to instances - ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present - with_together: - - instances - - ec2.instances - args: - tags: - Name: "{{ item.0 }}" - -- set_fact: - instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }} - -- name: Add new instances groups and variables - add_host: - hostname: "{{ item.0 }}" - ansible_ssh_host: "{{ item.1.dns_name }}" - groups: "{{ instance_groups }}" - ec2_private_ip_address: "{{ item.1.private_ip }}" - ec2_ip_address: "{{ item.1.public_ip }}" - with_together: - - instances - - ec2.instances - -- name: Wait for ssh - wait_for: "port=22 host={{ item.dns_name }}" - with_items: ec2.instances - -- name: Wait for root user setup - command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup" - register: result - until: result.rc == 0 - retries: 20 - delay: 10 - with_items: ec2.instances diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml index 08e9e2df4..5c04bc320 100644 ---
a/playbooks/aws/openshift-cluster/list.yml +++ b/playbooks/aws/openshift-cluster/list.yml @@ -2,16 +2,23 @@ - name: Generate oo_list_hosts group hosts: localhost gather_facts: no + vars_files: + - vars.yml tasks: - set_fact: scratch_group=tag_env_{{ cluster_id }} when: cluster_id != '' - set_fact: scratch_group=all - when: scratch_group is not defined - - add_host: name={{ item }} groups=oo_list_hosts - with_items: groups[scratch_group] | difference(['localhost']) + when: cluster_id == '' + - add_host: + name: "{{ item }}" + groups: oo_list_hosts + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) - name: List Hosts hosts: oo_list_hosts gather_facts: no tasks: - - debug: msg="public:{{hostvars[inventory_hostname].ec2_ip_address}} private:{{hostvars[inventory_hostname].ec2_private_ip_address}}" + - debug: + msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}" diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml new file mode 100644 index 000000000..58b4082df --- /dev/null +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -0,0 +1,69 @@ +--- +# TODO: modify machine_image based on deployment_type +- set_fact: + machine_type: "{{ lookup('env', 'ec2_instance_type') | default('m3.large', true) }}" + machine_image: "{{ lookup('env', 'ec2_ami') | default(deployment_vars[deployment_type].image, true) }}" + machine_region: "{{ lookup('env', 'ec2_region') | default(deployment_vars[deployment_type].region, true) }}" + machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}" + created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}" + security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}" + env: "{{ cluster }}" + host_type: "{{ type }}" + env_host_type: "{{ cluster }}-openshift-{{ type }}" + +- name: Launch instance(s) + ec2: + state: present + region: "{{ machine_region }}" + keypair: "{{ machine_keypair }}" + group: "{{ security_group }}" + instance_type: "{{ machine_type }}" + image: "{{ machine_image }}" + count: "{{ instances | oo_len }}" + wait: yes + instance_tags: + created-by: "{{ created_by }}" + env: "{{ env }}" + host-type: "{{ host_type }}" + env-host-type: "{{ env_host_type }}" + deployment-type: "{{ deployment_type }}" + register: ec2 + +- name: Add Name tag to instances + ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present + with_together: + - instances + - ec2.instances + args: + tags: + Name: "{{ item.0 }}" + +- set_fact: + instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}, tag_deployment-type_{{ deployment_type }} + +- name: Add new instances groups and variables + add_host: + hostname: "{{ item.0 }}" + ansible_ssh_host: "{{ item.1.dns_name }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: "{{ instance_groups }}" + ec2_private_ip_address: "{{ item.1.private_ip }}" + ec2_ip_address: "{{ item.1.public_ip }}" + with_together: + - instances + - ec2.instances + +- name: Wait for ssh + wait_for: 
"port=22 host={{ item.dns_name }}" + with_items: ec2.instances + +- name: Wait for user setup + command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup" + register: result + until: result.rc == 0 + retries: 20 + delay: 10 + with_together: + - instances + - ec2.instances diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index 39607633a..1d2b60594 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -1,14 +1,26 @@ --- - name: Terminate instance(s) hosts: localhost - + gather_facts: no vars_files: - - vars.yml + - vars.yml + tasks: + - set_fact: scratch_group=tag_env-host-type_{{ cluster_id }}-openshift-node + - add_host: + name: "{{ item }}" + groups: oo_nodes_to_terminate + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) + + - set_fact: scratch_group=tag_env-host-type_{{ cluster_id }}-openshift-master + - add_host: + name: "{{ item }}" + groups: oo_masters_to_terminate + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) - include: ../openshift-node/terminate.yml - vars: - oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]' - include: ../openshift-master/terminate.yml - vars: - oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-master"]' diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml index 90ecdc6ab..5e7ab4e58 100644 --- a/playbooks/aws/openshift-cluster/update.yml +++ b/playbooks/aws/openshift-cluster/update.yml @@ -1,13 +1,18 @@ --- -- hosts: "tag_env_{{ cluster_id }}" - roles: - - openshift_repos - - os_update_latest +- name: Populate oo_hosts_to_update group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_hosts_to_update + add_host: + name: "{{ item }}" + groups: oo_hosts_to_update + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([]) -- include: ../openshift-master/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]" +- include: ../../common/openshift-cluster/update_repos_and_packages.yml -- include: ../openshift-node/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]" +- include: config.yml diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml index ed97d539c..f0df3d6f5 100644 --- a/playbooks/aws/openshift-cluster/vars.yml +++ b/playbooks/aws/openshift-cluster/vars.yml @@ -1 +1,20 @@ --- +deployment_vars: + origin: + # fedora, since centos requires marketplace + image: ami-acd999c4 + region: us-east-1 + ssh_user: fedora + sudo: yes + online: + # private ami + image: ami-307b3658 + region: us-east-1 + ssh_user: root + sudo: no + enterprise: + # 
rhel-7.1, requires cloud access subscription + image: ami-10663b78 + region: us-east-1 + ssh_user: ec2-user + sudo: yes diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml index 1c4060eee..37ab4fbe6 100644 --- a/playbooks/aws/openshift-master/config.yml +++ b/playbooks/aws/openshift-master/config.yml @@ -1,24 +1,19 @@ --- -- name: Populate oo_masters_to_config host group if needed +- name: Populate oo_masters_to_config host group hosts: localhost gather_facts: no tasks: - - name: "Evaluate oo_host_group_exp if it's set" - add_host: "name={{ item }} groups=oo_masters_to_config" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + groups: oo_masters_to_config + ansible_ssh_user: root + with_items: oo_host_group_exp | default([]) -- name: Configure instances - hosts: oo_masters_to_config +- include: ../../common/openshift-master/config.yml vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ ec2_private_ip_address }}" openshift_public_hostname: "{{ ec2_ip_address }}" - # TODO: this should be removed once openshift-sdn packages are available - openshift_use_openshift_sdn: False - vars_files: - - vars.yml - roles: - - openshift_master - #- openshift_sdn_master - - pods - - os_env_extras diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml index 3d87879a0..6b3751682 100644 --- a/playbooks/aws/openshift-master/launch.yml +++ b/playbooks/aws/openshift-master/launch.yml @@ -4,14 +4,12 @@ connection: local gather_facts: no +# TODO: modify atomic_ami based on deployment_type vars: inst_region: us-east-1 atomic_ami: ami-86781fee user_data_file: user_data.txt - vars_files: - - vars.yml - tasks: - name: Launch instances ec2: @@ -40,7 +38,7 @@ Name: "{{ item.0 }}" - name: Add other tags to instances - ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present" + ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present with_items: ec2.instances args: tags: "{{ oo_new_inst_tags }}" @@ -57,7 +55,7 @@ - ec2.instances - name: Wait for ssh - wait_for: "port=22 host={{ item.dns_name }}" + wait_for: port=22 host={{ item.dns_name }} with_items: ec2.instances - name: Wait for root user setup diff --git a/playbooks/aws/openshift-master/terminate.yml b/playbooks/aws/openshift-master/terminate.yml index fd15cf00f..a790336b1 100644 --- a/playbooks/aws/openshift-master/terminate.yml +++ b/playbooks/aws/openshift-master/terminate.yml @@ -1,15 +1,15 @@ --- -- name: Populate oo_masters_to_terminate host group if needed +- name: Populate oo_masters_to_terminate host group hosts: localhost gather_facts: no tasks: - - name: Evaluate oo_host_group_exp if it's set - add_host: "name={{ item }} groups=oo_masters_to_terminate" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined + - name: Evaluate oo_masters_to_terminate + add_host: name={{ item }} groups=oo_masters_to_terminate + with_items: oo_host_group_exp | default([]) -- name: Gather facts for instances to terminate +- name: Gather dynamic inventory variables for hosts to terminate hosts: oo_masters_to_terminate + gather_facts: no - name: Terminate instances hosts: localhost @@ -27,11 +27,12 @@ ignore_errors: yes register: ec2_term with_items: host_vars + when: "'oo_masters_to_terminate' in groups" # Fail if 
any of the instances failed to terminate with an error other # than 403 Forbidden - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }} - when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" + when: "'oo_masters_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" with_items: ec2_term.results - name: Stop instance if termination failed @@ -42,6 +43,7 @@ register: ec2_stop - when: item.failed with_items: ec2_term.results + when: "item.failed and 'oo_masters_to_terminate' in groups" - name: Rename stopped instances ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present @@ -49,4 +51,5 @@ tags: Name: "{{ item.item.item.ec2_tag_Name }}-terminate" with_items: ec2_stop.results + when: "'oo_masters_to_terminate' in groups" diff --git a/playbooks/aws/openshift-master/vars.yml b/playbooks/aws/openshift-master/vars.yml deleted file mode 100644 index c196b2fca..000000000 --- a/playbooks/aws/openshift-master/vars.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -openshift_debug_level: 4 -openshift_cluster_id: "{{ cluster_id }}" diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml index b08ed7571..fc9b397b4 100644 --- a/playbooks/aws/openshift-node/config.yml +++ b/playbooks/aws/openshift-node/config.yml @@ -1,107 +1,25 @@ --- -- name: Populate oo_nodes_to_config host group if needed +- name: Populate oo_nodes_to_config and oo_first_master host groups hosts: localhost gather_facts: no tasks: - - name: Evaluate oo_host_group_exp - add_host: "name={{ item }} groups=oo_nodes_to_config" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined - - add_host: + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: root + with_items: oo_host_group_exp | default([]) + - name: Evaluate oo_first_master + add_host: name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}" groups: oo_first_master - when: oo_host_group_exp is defined + ansible_ssh_user: root -- name: Gather and set facts for hosts to configure - hosts: oo_nodes_to_config - roles: - - openshift_facts - tasks: - # Since the master is registering the nodes before they are configured, we - # need to make sure to set the node properties beforehand if we do not want - # the defaults - - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: common - local_facts: - hostname: "{{ ec2_private_ip_address }}" - public_hostname: "{{ ec2_ip_address }}" - # TODO: this should be removed once openshift-sdn packages are available - use_openshift_sdn: False - - role: node - local_facts: - external_id: "{{ openshift_node_external_id | default(None) }}" - resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}" - resources_memory: "{{ openshfit_node_resources_memory | default(None) }}" - pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}" - labels: "{{ openshfit_node_labels | default(None) }}" - annotations: "{{ openshfit_node_annotations | default(None) }}" - - -- name: Register nodes - hosts: oo_first_master - vars: - openshift_nodes: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) }}" - roles: - - openshift_register_nodes - tasks: - - name: Create local temp directory for syncing certs - local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX - register: mktemp - - 
- name: Sync master certs to localhost - synchronize: - mode: pull - checksum: yes - src: /var/lib/openshift/openshift.local.certificates - dest: "{{ mktemp.stdout }}" - - -- name: Configure instances - hosts: oo_nodes_to_config - vars_files: - - vars.yml +- include: ../../common/openshift-node/config.yml vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ ec2_private_ip_address }}" openshift_public_hostname: "{{ ec2_ip_address }}" - sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}" - cert_parent_rel_path: openshift.local.certificates - cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}" - cert_base_path: /var/lib/openshift - cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}" - cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}" - pre_tasks: - - name: Ensure certificate directories exists - file: - path: "{{ item }}" - state: directory - with_items: - - "{{ cert_path }}" - - "{{ cert_parent_path }}/ca" - - # TODO: notify restart openshift-node and/or restart openshift-sdn-node, - # possibly test service started time against certificate/config file - # timestamps in openshift-node or openshift-sdn-node to trigger notify - - name: Sync certs to nodes - synchronize: - checksum: yes - src: "{{ item.src }}" - dest: "{{ item.dest }}" - owner: no - group: no - with_items: - - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}" - dest: "{{ cert_parent_path }}" - - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt" - dest: "{{ cert_parent_path }}/ca/cert.crt" - - local_action: file name={{ sync_tmpdir }} state=absent - run_once: true - roles: - - openshift_node - #- openshift_sdn_node - - os_env_extras - - os_env_extras_node diff --git a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml index b7ef593e7..36aee14ff 100644 --- a/playbooks/aws/openshift-node/launch.yml +++ b/playbooks/aws/openshift-node/launch.yml @@ -4,14 +4,12 @@ connection: local gather_facts: no +# TODO: modify atomic_ami based on deployment_type vars: inst_region: us-east-1 atomic_ami: ami-86781fee user_data_file: user_data.txt - vars_files: - - vars.yml - tasks: - name: Launch instances ec2: @@ -33,7 +31,7 @@ with_items: ec2.instances - name: Add Name and environment tags to instances - ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present" + ec2_tag: resource={{ item.1.id }} region={{ inst_region }} state=present with_together: - oo_new_inst_names - ec2.instances @@ -42,7 +40,7 @@ Name: "{{ item.0 }}" - name: Add other tags to instances - ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present" + ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present with_items: ec2.instances args: tags: "{{ oo_new_inst_tags }}" @@ -59,7 +57,7 @@ - ec2.instances - name: Wait for ssh - wait_for: "port=22 host={{ item.dns_name }}" + wait_for: port=22 host={{ item.dns_name }} with_items: ec2.instances - name: Wait for root user setup diff --git a/playbooks/aws/openshift-node/terminate.yml b/playbooks/aws/openshift-node/terminate.yml index 1c0c77eb7..40ae56f99 100644 --- a/playbooks/aws/openshift-node/terminate.yml +++ b/playbooks/aws/openshift-node/terminate.yml @@ -1,15 +1,15 @@ --- -- name: Populate oo_nodes_to_terminate host group if needed +- name: Populate oo_nodes_to_terminate host group hosts: localhost gather_facts: no tasks: - - name: Evaluate oo_host_group_exp if it's set - 
add_host: "name={{ item }} groups=oo_nodes_to_terminate" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined + - name: Evaluate oo_nodes_to_terminate + add_host: name={{ item }} groups=oo_nodes_to_terminate + with_items: oo_host_group_exp | default([]) -- name: Gather facts for instances to terminate +- name: Gather dynamic inventory variables for hosts to terminate hosts: oo_nodes_to_terminate + gather_facts: no - name: Terminate instances hosts: localhost @@ -27,11 +27,12 @@ ignore_errors: yes register: ec2_term with_items: host_vars + when: "'oo_nodes_to_terminate' in groups" # Fail if any of the instances failed to terminate with an error other # than 403 Forbidden - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }} - when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" + when: "'oo_nodes_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" with_items: ec2_term.results - name: Stop instance if termination failed @@ -42,6 +43,7 @@ register: ec2_stop when: item.failed with_items: ec2_term.results + when: "'oo_nodes_to_terminate' in groups" - name: Rename stopped instances ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present @@ -49,4 +51,5 @@ tags: Name: "{{ item.item.item.ec2_tag_Name }}-terminate" with_items: ec2_stop.results + when: "'oo_nodes_to_terminate' in groups" diff --git a/playbooks/aws/openshift-node/vars.yml b/playbooks/aws/openshift-node/vars.yml deleted file mode 100644 index c196b2fca..000000000 --- a/playbooks/aws/openshift-node/vars.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -openshift_debug_level: 4 -openshift_cluster_id: "{{ cluster_id }}" diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml index 706f9285c..f61d277c6 100644 --- a/playbooks/byo/openshift-master/config.yml +++ b/playbooks/byo/openshift-master/config.yml @@ -1,9 +1,15 @@ --- -- name: Gather facts for node hosts - hosts: nodes +- name: Populate oo_masters_to_config host group + hosts: localhost + gather_facts: no + tasks: + - add_host: + name: "{{ item }}" + groups: oo_masters_to_config + with_items: groups['masters'] -- name: Configure master instances - hosts: masters - roles: - - openshift_master - - openshift_sdn_master +- include: ../../common/openshift-master/config.yml + vars: + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml index 69ad7a840..d569827b4 100644 --- a/playbooks/byo/openshift-node/config.yml +++ b/playbooks/byo/openshift-node/config.yml @@ -1,79 +1,21 @@ --- -- name: Gather facts for node hosts - hosts: nodes - roles: - - openshift_facts +- name: Populate oo_nodes_to_config and oo_first_master host groups + hosts: localhost + gather_facts: no tasks: - # Since the master is registering the nodes before they are configured, we - # need to make sure to set the node properties beforehand if we do not want - # the defaults - - openshift_facts: - role: 'node' - local_facts: - hostname: "{{ openshift_hostname | default(None) }}" - external_id: "{{ openshift_node_external_id | default(None) }}" - resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}" - resources_memory: "{{ openshfit_node_resources_memory | default(None) }}" - pod_cidr: 
"{{ openshfit_node_pod_cidr | default(None) }}" - labels: "{{ openshfit_node_labels | default(None) }}" - annotations: "{{ openshfit_node_annotations | default(None) }}" + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + with_items: groups.nodes + - name: Evaluate oo_first_master + add_host: + name: "{{ groups.masters[0] }}" + groups: oo_first_master -- name: Register nodes - hosts: masters[0] +- include: ../../common/openshift-node/config.yml vars: - openshift_nodes: "{{ hostvars | oo_select_keys(groups['nodes']) }}" - roles: - - openshift_register_nodes - tasks: - - name: Create local temp directory for syncing certs - local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX - register: mktemp - - - name: Sync master certs to localhost - synchronize: - mode: pull - checksum: yes - src: /var/lib/openshift/openshift.local.certificates - dest: "{{ mktemp.stdout }}" - - -- name: Configure node instances - hosts: nodes - vars: - sync_tmpdir: "{{ hostvars[groups['masters'][0]].mktemp.stdout }}" - cert_parent_rel_path: openshift.local.certificates - cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}" - cert_base_path: /var/lib/openshift - cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}" - cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}" - openshift_sdn_master_url: http://{{ hostvars[groups['masters'][0]].openshift.common.hostname }}:4001 - pre_tasks: - - name: Ensure certificate directories exists - file: - path: "{{ item }}" - state: directory - with_items: - - "{{ cert_path }}" - - "{{ cert_parent_path }}/ca" - - # TODO: notify restart openshift-node and/or restart openshift-sdn-node, - # possibly test service started time against certificate/config file - # timestamps in openshift-node or openshift-sdn-node to trigger notify - - name: Sync certs to nodes - synchronize: - checksum: yes - src: "{{ item.src }}" - dest: "{{ item.dest }}" - owner: no - group: no - with_items: - - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}" - dest: "{{ cert_parent_path }}" - - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt" - dest: "{{ cert_parent_path }}/ca/cert.crt" - - local_action: file name={{ sync_tmpdir }} state=absent - run_once: true - roles: - - openshift_node - - openshift_sdn_node + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml new file mode 100644 index 000000000..cd282270f --- /dev/null +++ b/playbooks/byo/openshift_facts.yml @@ -0,0 +1,10 @@ +--- +- name: Gather OpenShift facts + hosts: all + gather_facts: no + roles: + - openshift_facts + tasks: + - openshift_facts: + register: result + - debug: var=result diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml new file mode 100644 index 000000000..14ffa928f --- /dev/null +++ b/playbooks/common/openshift-cluster/config.yml @@ -0,0 +1,4 @@ +--- +- include: ../openshift-master/config.yml + +- include: ../openshift-node/config.yml diff --git a/playbooks/common/openshift-cluster/filter_plugins b/playbooks/common/openshift-cluster/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-cluster/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/roles 
b/playbooks/common/openshift-cluster/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/common/openshift-cluster/roles @@ -0,0 +1 @@ +../../../roles \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml new file mode 100644 index 000000000..118727273 --- /dev/null +++ b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml @@ -0,0 +1,11 @@ +--- +- set_fact: k8s_type="master" + +- name: Generate master instance name(s) + set_fact: + scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + register: master_names_output + with_sequence: start=1 end={{ num_masters }} + +- set_fact: + master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml new file mode 100644 index 000000000..162315d46 --- /dev/null +++ b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml @@ -0,0 +1,11 @@ +--- +- set_fact: k8s_type="node" + +- name: Generate node instance name(s) + set_fact: + scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + register: node_names_output + with_sequence: start=1 end={{ num_nodes }} + +- set_fact: + node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml new file mode 100644 index 000000000..e92c6f1ee --- /dev/null +++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml @@ -0,0 +1,7 @@ +--- +- hosts: oo_hosts_to_update + vars: + openshift_deployment_type: "{{ deployment_type }}" + roles: + - openshift_repos + - os_update_latest diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml new file mode 100644 index 000000000..05822d118 --- /dev/null +++ b/playbooks/common/openshift-master/config.yml @@ -0,0 +1,19 @@ +--- +- name: Configure master instances + hosts: oo_masters_to_config + vars: + openshift_sdn_master_url: https://{{ openshift.common.hostname }}:4001 + roles: + - openshift_master + - { role: openshift_sdn_master, when: openshift.common.use_openshift_sdn | bool } + tasks: + - name: Create group for deployment type + group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} + changed_when: False + +# Additional instance config for online deployments +- name: Additional instance config + hosts: oo_masters_deployment_type_online + roles: + - pods + - os_env_extras diff --git a/playbooks/common/openshift-master/filter_plugins b/playbooks/common/openshift-master/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-master/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-master/roles b/playbooks/common/openshift-master/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/common/openshift-master/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml new file mode 100644 index 
000000000..c82d69c28 --- /dev/null +++ b/playbooks/common/openshift-node/config.yml @@ -0,0 +1,121 @@ +--- +- name: Gather and set facts for node hosts + hosts: oo_nodes_to_config + roles: + - openshift_facts + tasks: + # Since the master is registering the nodes before they are configured, we + # need to make sure to set the node properties beforehand if we do not want + # the defaults + - openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: common + local_facts: + hostname: "{{ openshift_hostname | default(None) }}" + public_hostname: "{{ openshift_public_hostname | default(None) }}" + - role: node + local_facts: + external_id: "{{ openshift_node_external_id | default(None) }}" + resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}" + resources_memory: "{{ openshift_node_resources_memory | default(None) }}" + pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}" + labels: "{{ openshift_node_labels | default(None) }}" + annotations: "{{ openshift_node_annotations | default(None) }}" + deployment_type: "{{ openshift_deployment_type }}" + + +- name: Create temp directory for syncing certs + hosts: localhost + gather_facts: no + tasks: + - name: Create local temp directory for syncing certs + local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX + register: mktemp + changed_when: False + + +- name: Register nodes + hosts: oo_first_master + vars: + openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}" + sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" + roles: + - openshift_register_nodes + tasks: + - name: Create the temp directory on the master + file: + path: "{{ sync_tmpdir }}" + owner: "{{ ansible_ssh_user }}" + mode: 0700 + state: directory + changed_when: False + + - name: Create a tarball of the node config directories + command: tar -czvf {{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz ./ + args: + chdir: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}" + with_items: openshift_nodes + changed_when: False + + - name: Retrieve the node config tarballs from the master + fetch: + src: "{{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz" + dest: "{{ sync_tmpdir }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + with_items: openshift_nodes + changed_when: False + + - name: Remove the temp directory on the master + file: + path: "{{ sync_tmpdir }}" + state: absent + changed_when: False + + +- name: Configure node instances + hosts: oo_nodes_to_config + gather_facts: no + vars: + sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" + openshift_sdn_master_url: "https://{{ hostvars[groups['oo_first_master'][0]].openshift.common.hostname }}:4001" + pre_tasks: + - name: Ensure certificate directory exists + file: + path: "{{ openshift_node_cert_dir }}" + state: directory + + # TODO: notify restart openshift-node and/or restart openshift-sdn-node, + # possibly test service started time against certificate/config file + # timestamps in openshift-node or openshift-sdn-node to trigger notify + - name: Unarchive the tarball on the node + unarchive: + src: "{{ sync_tmpdir }}/{{ openshift.common.hostname }}.tgz" + dest: "{{ openshift_node_cert_dir }}" + roles: + - openshift_node + - { role: openshift_sdn_node, when: openshift.common.use_openshift_sdn | bool } + tasks: + - name: Create group for deployment type + group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }} + changed_when: False + + +- name: Delete 
temporary directory + hosts: localhost + gather_facts: no + tasks: + - file: name={{ mktemp.stdout }} state=absent + changed_when: False + + +# Additional config for online type deployments +- name: Additional instance config + hosts: oo_nodes_deployment_type_online + gather_facts: no + roles: + - os_env_extras + - os_env_extras_node diff --git a/playbooks/common/openshift-node/filter_plugins b/playbooks/common/openshift-node/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-node/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-node/roles b/playbooks/common/openshift-node/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/common/openshift-node/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml new file mode 100644 index 000000000..8b8490246 --- /dev/null +++ b/playbooks/gce/openshift-cluster/config.yml @@ -0,0 +1,37 @@ +--- +# TODO: fix firewall related bug with GCE and origin, since GCE is overriding +# /etc/sysconfig/iptables +- name: Populate oo_masters_to_config host group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + groups: oo_masters_to_config + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]) + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]) + - name: Evaluate oo_first_master + add_host: + name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" + groups: oo_first_master + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups" + +- include: ../../common/openshift-cluster/config.yml + vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" + openshift_hostname: "{{ gce_private_ip }}" diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index 14cdd2537..34a5a0b94 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -4,59 +4,25 @@ connection: local gather_facts: no vars_files: - - vars.yml + - vars.yml tasks: - - set_fact: k8s_type="master" - - - name: Generate master instance names(s) - set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} - register: master_names_output - with_sequence: start=1 end={{ num_masters }} - - # These set_fact's cannot be combined - - set_fact: - master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" - - - set_fact: - master_names: "{{ master_names_string.strip().split(' ') }}" - - - include: launch_instances.yml 
- vars: - instances: "{{ master_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - - - set_fact: k8s_type="node" - - - name: Generate node instance names(s) - set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} - register: node_names_output - with_sequence: start=1 end={{ num_nodes }} - - # These set_fact's cannot be combined - - set_fact: - node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" - - - set_fact: - node_names: "{{ node_names_string.strip().split(' ') }}" - - - include: launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - -- hosts: "tag_env-{{ cluster_id }}" - roles: - - openshift_repos - - os_update_latest - -- include: ../openshift-master/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]" - -- include: ../openshift-node/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]" + - fail: msg="Deployment type not supported for gce provider yet" + when: deployment_type == 'enterprise' + + - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ master_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + + - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ node_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + +- include: update.yml - include: list.yml diff --git a/playbooks/gce/openshift-cluster/launch_instances.yml b/playbooks/gce/openshift-cluster/launch_instances.yml deleted file mode 100644 index b4f33bd87..000000000 --- a/playbooks/gce/openshift-cluster/launch_instances.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -# TODO: when we are ready to go to ansible 1.9+ support only, we can update to -# the gce task to use the disk_auto_delete parameter to avoid having to delete -# the disk as a separate step on termination - -- set_fact: - machine_type: "{{ lookup('env', 'gce_machine_type') |default('n1-standard-1', true) }}" - machine_image: "{{ lookup('env', 'gce_machine_image') |default('libra-rhel7', true) }}" - -- name: Launch instance(s) - gce: - instance_names: "{{ instances }}" - machine_type: "{{ machine_type }}" - image: "{{ machine_image }}" - service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" - pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" - project_id: "{{ lookup('env', 'gce_project_id') }}" - tags: - - "created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}" - - "env-{{ cluster }}" - - "host-type-{{ type }}" - - "env-host-type-{{ cluster }}-openshift-{{ type }}" - register: gce - -- name: Add new instances to groups and set variables needed - add_host: - hostname: "{{ item.name }}" - ansible_ssh_host: "{{ item.public_ip }}" - groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" - gce_public_ip: "{{ item.public_ip }}" - gce_private_ip: "{{ item.private_ip }}" - with_items: gce.instance_data - -- name: Wait for ssh - wait_for: "port=22 host={{ item.public_ip }}" - with_items: gce.instance_data - -- name: Wait for root user setup - command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup" - register: 
result - until: result.rc == 0 - retries: 20 - delay: 10 - with_items: gce.instance_data diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml index 1124b0ea3..bab2fb9f8 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -2,16 +2,23 @@ - name: Generate oo_list_hosts group hosts: localhost gather_facts: no + vars_files: + - vars.yml tasks: - set_fact: scratch_group=tag_env-{{ cluster_id }} when: cluster_id != '' - set_fact: scratch_group=all - when: scratch_group is not defined - - add_host: name={{ item }} groups=oo_list_hosts - with_items: groups[scratch_group] | difference(['localhost']) | difference(groups.status_terminated) + when: cluster_id == '' + - add_host: + name: "{{ item }}" + groups: oo_list_hosts + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) - name: List Hosts hosts: oo_list_hosts gather_facts: no tasks: - - debug: msg="public:{{hostvars[inventory_hostname].gce_public_ip}} private:{{hostvars[inventory_hostname].gce_private_ip}}" + - debug: + msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}" diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml new file mode 100644 index 000000000..a68edefae --- /dev/null +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -0,0 +1,42 @@ +--- +# TODO: when we are ready to go to ansible 1.9+ support only, we can update to +# the gce task to use the disk_auto_delete parameter to avoid having to delete +# the disk as a separate step on termination +- name: Launch instance(s) + gce: + instance_names: "{{ instances }}" + machine_type: "{{ lookup('env', 'gce_machine_type') | default('n1-standard-1', true) }}" + image: "{{ lookup('env', 'gce_machine_image') | default(deployment_vars[deployment_type].image, true) }}" + service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" + pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" + project_id: "{{ lookup('env', 'gce_project_id') }}" + tags: + - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }} + - env-{{ cluster }} + - host-type-{{ type }} + - env-host-type-{{ cluster }}-openshift-{{ type }} + - deployment-type-{{ deployment_type }} + register: gce + +- name: Add new instances to groups and set variables needed + add_host: + hostname: "{{ item.name }}" + ansible_ssh_host: "{{ item.public_ip }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" + gce_public_ip: "{{ item.public_ip }}" + gce_private_ip: "{{ item.private_ip }}" + with_items: gce.instance_data + +- name: Wait for ssh + wait_for: port=22 host={{ item.public_ip }} + with_items: gce.instance_data + +- name: Wait for user setup + command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ 
item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup" + register: result + until: result.rc == 0 + retries: 20 + delay: 10 + with_items: gce.instance_data diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index 0281ae953..abe6a4c95 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -1,20 +1,34 @@ --- - name: Terminate instance(s) hosts: localhost - + gather_facts: no vars_files: - - vars.yml + - vars.yml + tasks: + - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node + - add_host: + name: "{{ item }}" + groups: oo_nodes_to_terminate + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) + + - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master + - add_host: + name: "{{ item }}" + groups: oo_masters_to_terminate + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) - include: ../openshift-node/terminate.yml vars: - oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]' gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" gce_project_id: "{{ lookup('env', 'gce_project_id') }}" - include: ../openshift-master/terminate.yml vars: - oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]' gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" gce_project_id: "{{ lookup('env', 'gce_project_id') }}" diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml index 973e4c3ef..9ebf39a13 100644 --- a/playbooks/gce/openshift-cluster/update.yml +++ b/playbooks/gce/openshift-cluster/update.yml @@ -1,13 +1,18 @@ --- -- hosts: "tag_env-{{ cluster_id }}" - roles: - - openshift_repos - - os_update_latest +- name: Populate oo_hosts_to_update group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_hosts_to_update + add_host: + name: "{{ item }}" + groups: oo_hosts_to_update + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([]) -- include: ../openshift-master/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]" +- include: ../../common/openshift-cluster/update_repos_and_packages.yml -- include: ../openshift-node/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]" +- include: config.yml diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml index ed97d539c..ae33083b9 100644 --- a/playbooks/gce/openshift-cluster/vars.yml 
+++ b/playbooks/gce/openshift-cluster/vars.yml @@ -1 +1,15 @@ --- +deployment_vars: + origin: + image: centos-7 + ssh_user: + sudo: yes + online: + image: libra-rhel7 + ssh_user: root + sudo: no + enterprise: + image: rhel-7 + ssh_user: + sudo: yes + diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml index 857da0763..af6000bc8 100644 --- a/playbooks/gce/openshift-master/config.yml +++ b/playbooks/gce/openshift-master/config.yml @@ -1,20 +1,18 @@ --- -- name: master/config.yml, populate oo_masters_to_config host group if needed +- name: Populate oo_masters_to_config host group hosts: localhost gather_facts: no tasks: - - name: "Evaluate oo_host_group_exp if it's set" - add_host: "name={{ item }} groups=oo_masters_to_config" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + groups: oo_masters_to_config + ansible_ssh_user: root + with_items: oo_host_group_exp | default([]) -- name: "Configure instances" - hosts: oo_masters_to_config +- include: ../../common/openshift-master/config.yml vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ gce_private_ip }}" - vars_files: - - vars.yml - roles: - - openshift_master - - pods - - os_env_extras diff --git a/playbooks/gce/openshift-master/launch.yml b/playbooks/gce/openshift-master/launch.yml index 287596002..ef10b6cf0 100644 --- a/playbooks/gce/openshift-master/launch.yml +++ b/playbooks/gce/openshift-master/launch.yml @@ -8,14 +8,12 @@ connection: local gather_facts: no +# TODO: modify image based on deployment_type vars: inst_names: "{{ oo_new_inst_names }}" machine_type: n1-standard-1 image: libra-rhel7 - vars_files: - - vars.yml - tasks: - name: Launch instances gce: @@ -37,7 +35,7 @@ with_items: gce.instance_data - name: Wait for ssh - wait_for: "port=22 host={{ item.public_ip }}" + wait_for: port=22 host={{ item.public_ip }} with_items: gce.instance_data - name: Wait for root user setup diff --git a/playbooks/gce/openshift-master/terminate.yml b/playbooks/gce/openshift-master/terminate.yml index 8319774f8..452ac5199 100644 --- a/playbooks/gce/openshift-master/terminate.yml +++ b/playbooks/gce/openshift-master/terminate.yml @@ -3,10 +3,9 @@ hosts: localhost gather_facts: no tasks: - - name: Evaluate oo_host_group_exp if it's set - add_host: "name={{ item }} groups=oo_masters_to_terminate" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined + - name: Evaluate oo_masters_to_terminate + add_host: name={{ item }} groups=oo_masters_to_terminate + with_items: oo_host_group_exp | default([]) - name: Terminate master instances hosts: localhost @@ -22,6 +21,7 @@ instance_names: "{{ groups['oo_masters_to_terminate'] }}" disks: "{{ groups['oo_masters_to_terminate'] }}" register: gce + when: "'oo_masters_to_terminate' in groups" - name: Remove disks of instances gce_pd: @@ -32,5 +32,4 @@ zone: "{{ gce.zone }}" state: absent with_items: gce.instance_names - - + when: "'oo_masters_to_terminate' in groups" diff --git a/playbooks/gce/openshift-master/vars.yml b/playbooks/gce/openshift-master/vars.yml deleted file mode 100644 index c196b2fca..000000000 --- a/playbooks/gce/openshift-master/vars.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -openshift_debug_level: 4 -openshift_cluster_id: "{{ cluster_id }}" diff --git a/playbooks/gce/openshift-node/config.yml 
b/playbooks/gce/openshift-node/config.yml index 771cc3a94..5b1601176 100644 --- a/playbooks/gce/openshift-node/config.yml +++ b/playbooks/gce/openshift-node/config.yml @@ -1,100 +1,24 @@ --- -- name: node/config.yml, populate oo_nodes_to_config host group if needed +- name: Populate oo_nodes_to_config and oo_first_master host groups hosts: localhost gather_facts: no tasks: - - name: Evaluate oo_host_group_exp - add_host: "name={{ item }} groups=oo_nodes_to_config" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined - - add_host: + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: root + with_items: oo_host_group_exp | default([]) + - name: Evaluate oo_first_master + add_host: name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" groups: oo_first_master - when: oo_host_group_exp is defined + ansible_ssh_user: root -- name: Gather and set facts for hosts to configure - hosts: oo_nodes_to_config - roles: - - openshift_facts - tasks: - # Since the master is registering the nodes before they are configured, we - # need to make sure to set the node properties beforehand if we do not want - # the defaults - - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: common - local_facts: - hostname: "{{ gce_private_ip }}" - - role: node - local_facts: - external_id: "{{ openshift_node_external_id | default(None) }}" - resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}" - resources_memory: "{{ openshfit_node_resources_memory | default(None) }}" - pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}" - labels: "{{ openshfit_node_labels | default(None) }}" - annotations: "{{ openshfit_node_annotations | default(None) }}" - - -- name: Register nodes - hosts: oo_first_master - vars: - openshift_nodes: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) }}" - roles: - - openshift_register_nodes - tasks: - - name: Create local temp directory for syncing certs - local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX - register: mktemp - - - name: Sync master certs to localhost - synchronize: - mode: pull - checksum: yes - src: /var/lib/openshift/openshift.local.certificates - dest: "{{ mktemp.stdout }}" - -- name: Configure instances - hosts: oo_nodes_to_config - vars_files: - - vars.yml +- include: ../../common/openshift-node/config.yml vars: - sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}" - cert_parent_rel_path: openshift.local.certificates - cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}" - cert_base_path: /var/lib/openshift - cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}" - cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}" - pre_tasks: - - name: Ensure certificate directories exists - file: - path: "{{ item }}" - state: directory - with_items: - - "{{ cert_path }}" - - "{{ cert_parent_path }}/ca" - - # TODO: notify restart openshift-node and/or restart openshift-sdn-node, - # possibly test service started time against certificate/config file - # timestamps in openshift-node or openshift-sdn-node to trigger notify - - name: Sync certs to nodes - synchronize: - checksum: yes - src: "{{ item.src }}" - dest: "{{ item.dest }}" - owner: no - group: no - with_items: - - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}" - dest: "{{ cert_parent_path }}" - - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path 
}}/ca/cert.crt" - dest: "{{ cert_parent_path }}/ca/cert.crt" - - local_action: file name={{ sync_tmpdir }} state=absent - run_once: true - roles: - - openshift_node - - os_env_extras - - os_env_extras_node + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" + openshift_hostname: "{{ gce_private_ip }}" diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml index 73d0478ab..086ba58bc 100644 --- a/playbooks/gce/openshift-node/launch.yml +++ b/playbooks/gce/openshift-node/launch.yml @@ -8,14 +8,12 @@ connection: local gather_facts: no +# TODO: modify image based on deployment_type vars: inst_names: "{{ oo_new_inst_names }}" machine_type: n1-standard-1 image: libra-rhel7 - vars_files: - - vars.yml - tasks: - name: Launch instances gce: @@ -37,7 +35,7 @@ with_items: gce.instance_data - name: Wait for ssh - wait_for: "port=22 host={{ item.public_ip }}" + wait_for: port=22 host={{ item.public_ip }} with_items: gce.instance_data - name: Wait for root user setup diff --git a/playbooks/gce/openshift-node/terminate.yml b/playbooks/gce/openshift-node/terminate.yml index 7d71dfcab..357e0c295 100644 --- a/playbooks/gce/openshift-node/terminate.yml +++ b/playbooks/gce/openshift-node/terminate.yml @@ -3,10 +3,9 @@ hosts: localhost gather_facts: no tasks: - - name: Evaluate oo_host_group_exp if it's set - add_host: "name={{ item }} groups=oo_nodes_to_terminate" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined + - name: Evaluate oo_nodes_to_terminate + add_host: name={{ item }} groups=oo_nodes_to_terminate + with_items: oo_host_group_exp | default([]) - name: Terminate node instances hosts: localhost @@ -22,6 +21,7 @@ instance_names: "{{ groups['oo_nodes_to_terminate'] }}" disks: "{{ groups['oo_nodes_to_terminate'] }}" register: gce + when: "'oo_nodes_to_terminate' in groups" - name: Remove disks of instances gce_pd: @@ -32,5 +32,4 @@ zone: "{{ gce.zone }}" state: absent with_items: gce.instance_names - - + when: "'oo_nodes_to_terminate' in groups" diff --git a/playbooks/gce/openshift-node/vars.yml b/playbooks/gce/openshift-node/vars.yml deleted file mode 100644 index c196b2fca..000000000 --- a/playbooks/gce/openshift-node/vars.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -openshift_debug_level: 4 -openshift_cluster_id: "{{ cluster_id }}" diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml new file mode 100644 index 000000000..faf278b10 --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/config.yml @@ -0,0 +1,38 @@ +--- +# TODO: need to figure out a plan for setting hostname, currently the default +# is localhost, so no hostname value (or public_hostname) value is getting +# assigned + +- name: Populate oo_masters_to_config host group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: oo_masters_to_config + with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]) + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: oo_nodes_to_config + with_items: groups["tag_env-host-type-{{ 
cluster_id }}-openshift-node"] | default([]) + - name: Evaluate oo_first_master + add_host: + name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: oo_first_master + when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups" + +- include: ../../common/openshift-cluster/config.yml + vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml index 6f2df33af..a7ddc1e7e 100644 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ b/playbooks/libvirt/openshift-cluster/launch.yml @@ -1,65 +1,36 @@ +--- - name: Launch instance(s) hosts: localhost - connection: local gather_facts: no - - vars: - libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift" - libvirt_storage_pool: 'openshift' - libvirt_uri: 'qemu:///system' - vars_files: - - vars.yml - + - vars.yml + vars: + os_libvirt_storage_pool: "{{ libvirt_storage_pool | default('images') }}" + os_libvirt_storage_pool_path: "{{ libvirt_storage_pool_path | default('/var/lib/libvirt/images') }}" + os_libvirt_network: "{{ libvirt_network | default('default') }}" + image_url: "{{ deployment_vars[deployment_type].image.url }}" + image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}" + image_name: "{{ deployment_vars[deployment_type].image.name }}" tasks: - - set_fact: - k8s_type: master - - - name: Generate master instance name(s) - set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}" - register: master_names_output - with_sequence: start=1 end='{{ num_masters }}' + - fail: msg="Deployment type not supported for libvirt provider yet" + when: deployment_type in ['online', 'enterprise'] - - set_fact: - master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}" + - include: tasks/configure_libvirt.yml - - include: launch_instances.yml - vars: - instances: '{{ master_names }}' - cluster: '{{ cluster_id }}' - type: '{{ k8s_type }}' - group_name: 'tag_env-host-type-{{ cluster_id }}-openshift-master' + - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ master_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" - - set_fact: - k8s_type: node + - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ node_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" - - name: Generate node instance name(s) - set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}" - register: node_names_output - with_sequence: start=1 end='{{ num_nodes }}' +- include: update.yml - - set_fact: - node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}" - - - include: launch_instances.yml - vars: - instances: '{{ node_names }}' - cluster: '{{ cluster_id }}' - type: '{{ k8s_type }}' - -- hosts: 'tag_env-{{ cluster_id }}' - roles: - - openshift_repos - - os_update_latest - -- include: ../openshift-master/config.yml - vars: - oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]' - 
oo_env: '{{ cluster_id }}' - -- include: ../openshift-node/config.yml - vars: - oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]' - oo_env: '{{ cluster_id }}' +- include: list.yml diff --git a/playbooks/libvirt/openshift-cluster/launch_instances.yml b/playbooks/libvirt/openshift-cluster/launch_instances.yml deleted file mode 100644 index 3bbcae981..000000000 --- a/playbooks/libvirt/openshift-cluster/launch_instances.yml +++ /dev/null @@ -1,102 +0,0 @@ -- name: Create the libvirt storage directory for openshift - file: - dest: '{{ libvirt_storage_pool_path }}' - state: directory - -- name: Download Base Cloud image - get_url: - url: '{{ base_image_url }}' - sha256sum: '{{ base_image_sha256 }}' - dest: '{{ libvirt_storage_pool_path }}/{{ base_image_name }}' - -- name: Create the cloud-init config drive path - file: - dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest' - state: directory - with_items: '{{ instances }}' - -- name: Create the cloud-init config drive files - template: - src: '{{ item[1] }}' - dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/openstack/latest/{{ item[1] }}' - with_nested: - - '{{ instances }}' - - [ user-data, meta-data ] - -- name: Create the cloud-init config drive - command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data' - args: - chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest' - creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' - with_items: '{{ instances }}' - -- name: Create the libvirt storage pool for openshift - command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}' - ignore_errors: yes - -- name: Refresh the libvirt storage pool for openshift - command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}' - -- name: Create VMs drives - command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ base_image_name }} --backing-vol-format qcow2' - with_items: '{{ instances }}' - -- name: Create VMs - virt: - name: '{{ item }}' - command: define - xml: "{{ lookup('template', '../templates/domain.xml') }}" - uri: '{{ libvirt_uri }}' - with_items: '{{ instances }}' - -- name: Start VMs - virt: - name: '{{ item }}' - state: running - uri: '{{ libvirt_uri }}' - with_items: '{{ instances }}' - -- name: Collect MAC addresses of the VMs - shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -' - register: scratch_mac - with_items: '{{ instances }}' - -- name: Wait for the VMs to get an IP - command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp" - ignore_errors: yes - register: nb_allocated_ips - until: nb_allocated_ips.stdout == '{{ instances | length }}' - retries: 30 - delay: 1 - -- name: Collect IP addresses of the VMs - shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp" - register: scratch_ip - with_items: '{{ scratch_mac.results }}' - -- set_fact: - ips: "{{ scratch_ip.results | oo_collect('stdout') }}" - -- name: Add new instances - add_host: - hostname: '{{ item.0 }}' - ansible_ssh_host: '{{ item.1 }}' - ansible_ssh_user: root - groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}' - with_together: - - instances - - ips - -- 
name: Wait for ssh - wait_for: - host: '{{ item }}' - port: 22 - with_items: ips - -- name: Wait for root user setup - command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item }} echo root user is setup' - register: result - until: result.rc == 0 - retries: 30 - delay: 1 - with_items: ips diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml index 6bf07e3c6..25a25f791 100644 --- a/playbooks/libvirt/openshift-cluster/list.yml +++ b/playbooks/libvirt/openshift-cluster/list.yml @@ -1,43 +1,23 @@ +--- - name: Generate oo_list_hosts group hosts: localhost - connection: local gather_facts: no - - vars: - libvirt_uri: 'qemu:///system' - + vars_files: + - vars.yml tasks: - - name: List VMs - virt: - command: list_vms - register: list_vms - - - name: Collect MAC addresses of the VMs - shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -' - register: scratch_mac - with_items: '{{ list_vms.list_vms }}' - when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...' - - - name: Collect IP addresses of the VMs - shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp" - register: scratch_ip - with_items: '{{ scratch_mac.results }}' - when: item.skipped is not defined - - - name: Add hosts - add_host: - hostname: '{{ item[0] }}' - ansible_ssh_host: '{{ item[1].stdout }}' - ansible_ssh_user: root - groups: oo_list_hosts - with_together: - - '{{ list_vms.list_vms }}' - - '{{ scratch_ip.results }}' - when: item[1].skipped is not defined + - set_fact: scratch_group=tag_env-{{ cluster_id }} + when: cluster_id != '' + - set_fact: scratch_group=all + when: cluster_id == '' + - add_host: + name: "{{ item }}" + groups: oo_list_hosts + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) - name: List Hosts hosts: oo_list_hosts - tasks: - - debug: - msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}' + - debug: + msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}' diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml new file mode 100644 index 000000000..f237c1a60 --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml @@ -0,0 +1,6 @@ +--- +- include: configure_libvirt_storage_pool.yml + when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined + +- include: configure_libvirt_network.yml + when: libvirt_network is defined diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml new file mode 100644 index 000000000..1cd83f7be --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml @@ -0,0 +1,27 @@ +--- +- name: Test if libvirt network for openshift already exists + command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}" + register: net_info_result + changed_when: False + failed_when: "net_info_result.rc != 0 and 'error: Network not found:' not in net_info_result.stderr" + +- name: Create a temp 
directory for the template xml file + command: "/usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX" + register: mktemp + when: net_info_result.rc == 1 + +- name: Create network xml file + template: + src: templates/network.xml + dest: "{{ mktemp.stdout }}/network.xml" + when: net_info_result.rc == 1 + +- name: Create libvirt network for openshift + command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml" + when: net_info_result.rc == 1 + +- name: Remove the temp directory + file: + path: "{{ mktemp.stdout }}" + state: absent + when: net_info_result.rc == 1 diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml new file mode 100644 index 000000000..817acb250 --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml @@ -0,0 +1,27 @@ +--- +- name: Create libvirt storage directory for openshift + file: + dest: "{{ libvirt_storage_pool_path }}" + state: directory + +- acl: + default: yes + entity: kvm + etype: group + name: "{{ libvirt_storage_pool_path }}" + permissions: rwx + state: present + +- name: Test if libvirt storage pool for openshift already exists + command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}" + register: pool_info_result + changed_when: False + failed_when: "pool_info_result.rc != 0 and 'error: Storage pool not found:' not in pool_info_result.stderr" + +- name: Create the libvirt storage pool for openshift + command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}' + when: pool_info_result.rc == 1 + +- name: Refresh the libvirt storage pool for openshift + command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}' + when: pool_info_result.rc == 1 diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml new file mode 100644 index 000000000..96d440096 --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -0,0 +1,104 @@ +--- +# TODO: Add support for choosing base image based on deployment_type and os +# wanted (os wanted needs support added in bin/cluster with sane defaults: +# fedora/centos for origin, rhel for online/enterprise) + +# TODO: create a role to encapsulate some of this complexity, possibly also +# create a module to manage the storage tasks, network tasks, and possibly +# even handle the libvirt tasks to set metadata in the domain xml and be able +# to create/query data about vms without having to use xml. The python libvirt +# bindings look like a good candidate for this + +- name: Download Base Cloud image + get_url: + url: '{{ image_url }}' + sha256sum: '{{ image_sha256 }}' + dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}' + +- name: Create the cloud-init config drive path + file: + dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/' + state: directory + with_items: instances + +- name: Create the cloud-init config drive files + template: + src: '{{ item[1] }}' + dest: '{{ os_libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}' + with_nested: + - instances + - [ user-data, meta-data ] + +- name: Create the cloud-init config drive + command: 'genisoimage -output {{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data' + args: + chdir: '{{ os_libvirt_storage_pool_path }}/{{
item }}_configdrive/' + creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' + with_items: instances + +- name: Create VMs drives + command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2' + with_items: instances + +- name: Create VMs + virt: + name: '{{ item }}' + command: define + xml: "{{ lookup('template', '../templates/domain.xml') }}" + uri: '{{ libvirt_uri }}' + with_items: instances + +- name: Start VMs + virt: + name: '{{ item }}' + state: running + uri: '{{ libvirt_uri }}' + with_items: instances + +- name: Collect MAC addresses of the VMs + shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -' + register: scratch_mac + with_items: instances + +- name: Wait for the VMs to get an IP + command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp" + ignore_errors: yes + register: nb_allocated_ips + until: nb_allocated_ips.stdout == '{{ instances | length }}' + retries: 30 + delay: 1 + +- name: Collect IP addresses of the VMs + shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp" + register: scratch_ip + with_items: scratch_mac.results + +- set_fact: + ips: "{{ scratch_ip.results | oo_collect('stdout') }}" + +- name: Add new instances + add_host: + hostname: '{{ item.0 }}' + ansible_ssh_host: '{{ item.1 }}' + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}' + with_together: + - instances + - ips + +- name: Wait for ssh + wait_for: + host: '{{ item }}' + port: 22 + with_items: ips + +- name: Wait for openshift user setup + command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup' + register: result + until: result.rc == 0 + retries: 30 + delay: 1 + with_together: + - instances + - ips diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml new file mode 100644 index 000000000..8cb017367 --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml @@ -0,0 +1,67 @@ + + {{ item }} + 1 + + deployment-type-{{ deployment_type }} + env-{{ cluster }} + env-host-type-{{ cluster }}-openshift-{{ type }} + host-type-{{ type }} + + 1 + 2 + + hvm + + + + + + + + + + + + + destroy + restart + restart + + /usr/bin/qemu-system-x86_64 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/playbooks/libvirt/openshift-cluster/templates/meta-data b/playbooks/libvirt/openshift-cluster/templates/meta-data new file mode 100644 index 000000000..6b421770d --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/templates/meta-data @@ -0,0 +1,3 @@ +instance-id: {{ item[0] }} +hostname: {{ item[0] }} +local-hostname: {{ item[0] }}.example.com diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml new file mode 100644 index 000000000..86dcd62bb --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/templates/network.xml @@ -0,0 +1,23 @@ + + openshift-ansible + + + + + + + + + + + + + + + + + + + + + diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data 
b/playbooks/libvirt/openshift-cluster/templates/user-data new file mode 100644 index 000000000..77b788109 --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/templates/user-data @@ -0,0 +1,23 @@ +#cloud-config +disable_root: true + +hostname: {{ item[0] }} +fqdn: {{ item[0] }}.example.com +manage_etc_hosts: true + +users: + - default + - name: root + ssh_authorized_keys: + - {{ lookup('file', '~/.ssh/id_rsa.pub') }} + +system_info: + default_user: + name: openshift + sudo: ["ALL=(ALL) NOPASSWD: ALL"] + +ssh_authorized_keys: + - {{ lookup('file', '~/.ssh/id_rsa.pub') }} + +bootcmd: + - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml index c609169d3..b173a09dd 100644 --- a/playbooks/libvirt/openshift-cluster/terminate.yml +++ b/playbooks/libvirt/openshift-cluster/terminate.yml @@ -1,41 +1,44 @@ +--- +# TODO: does not handle a non-existent cluster gracefully + - name: Terminate instance(s) hosts: localhost - connection: local gather_facts: no + vars_files: + - vars.yml + tasks: + - set_fact: cluster_group=tag_env-{{ cluster_id }} + - add_host: + name: "{{ item }}" + groups: oo_hosts_to_terminate + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[cluster_group] | default([]) - vars: - libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift" - libvirt_storage_pool: 'openshift' - libvirt_uri: 'qemu:///system' + - name: Destroy VMs + virt: + name: '{{ item[0] }}' + command: '{{ item[1] }}' + uri: '{{ libvirt_uri }}' + with_nested: + - groups['oo_hosts_to_terminate'] + - [ destroy, undefine ] - tasks: - - name: List VMs - virt: - command: list_vms - register: list_vms + - name: Delete VMs drives + command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2' + args: + removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2' + with_items: groups['oo_hosts_to_terminate'] - - name: Destroy VMs - virt: - name: '{{ item[0] }}' - command: '{{ item[1] }}' - uri: '{{ libvirt_uri }}' - with_nested: - - '{{ list_vms.list_vms }}' - - [ destroy, undefine ] - when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...' + - name: Delete the VM cloud-init image + file: + path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' + state: absent + with_items: groups['oo_hosts_to_terminate'] - - name: Delete VMs config drive - file: - path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack' - state: absent - with_items: '{{ list_vms.list_vms }}' - when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...' + - name: Remove the cloud-init config directory + file: + path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/' + state: absent + with_items: groups['oo_hosts_to_terminate'] - - name: Delete VMs drives - command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item[0] }}{{ item[1] }}' - args: - removes: '{{ libvirt_storage_pool_path }}/{{ item[0] }}{{ item[1] }}' - with_nested: - - '{{ list_vms.list_vms }}' - - [ '_configdrive', '_cloud-init.iso', '.qcow2' ] - when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
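The terminate play above shows the group-evaluation idiom this refactor standardizes on across providers: a localhost play resolves a tag-based inventory group into an oo_* host group with add_host, attaching the per-deployment-type ssh_user and sudo settings from deployment_vars, and later plays then target the generated group. A minimal, self-contained sketch of the idiom follows (illustrative only; the cluster_id value and the trimmed deployment_vars are hypothetical, not part of this patch):

---
# Illustrative sketch: resolve a tag-based scratch group into an oo_* host
# group, carrying per-deployment-type connection settings, as the plays
# above do. The cluster_id and deployment_vars values are hypothetical.
- name: Evaluate oo_hosts_to_terminate
  hosts: localhost
  gather_facts: no
  vars:
    cluster_id: demo            # hypothetical cluster id
    deployment_type: origin
    deployment_vars:
      origin:
        ssh_user: openshift
        sudo: yes
  tasks:
    - set_fact: cluster_group=tag_env-{{ cluster_id }}
    - add_host:
        name: "{{ item }}"
        groups: oo_hosts_to_terminate
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
      with_items: groups[cluster_group] | default([])

Any host added this way is visible to subsequent plays in the same run, which is how these playbooks chain a localhost evaluation play with plays against the generated oo_* groups.
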
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml new file mode 100644 index 000000000..57e36db9e --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/update.yml @@ -0,0 +1,18 @@ +--- +- name: Populate oo_hosts_to_update group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_hosts_to_update + add_host: + name: "{{ item }}" + groups: oo_hosts_to_update + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([]) + +- include: ../../common/openshift-cluster/update_repos_and_packages.yml + +- include: config.yml diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml index 4e4eecd46..65d954fee 100644 --- a/playbooks/libvirt/openshift-cluster/vars.yml +++ b/playbooks/libvirt/openshift-cluster/vars.yml @@ -1,7 +1,33 @@ -# base_image_url: http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2 -# base_image_name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2 -# base_image_sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86 +--- +libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible" +libvirt_storage_pool: 'openshift-ansible' +libvirt_network: openshift-ansible +libvirt_uri: 'qemu:///system' -base_image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2 -base_image_name: CentOS-7-x86_64-GenericCloud.qcow2 -base_image_sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab +deployment_vars: + origin: + image: + url: "http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2" + name: CentOS-7-x86_64-GenericCloud.qcow2 + sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab + ssh_user: openshift + sudo: yes + online: + image: + url: + name: + sha256: + ssh_user: root + sudo: no + enterprise: + image: + url: + name: + sha256: + ssh_user: openshift + sudo: yes +# origin: +# fedora: +# url: "http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2" +# name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2 +# sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86 diff --git a/playbooks/libvirt/openshift-master/config.yml b/playbooks/libvirt/openshift-master/config.yml deleted file mode 100644 index dd95fd57f..000000000 --- a/playbooks/libvirt/openshift-master/config.yml +++ /dev/null @@ -1,21 +0,0 @@ -- name: master/config.yml, populate oo_masters_to_config host group if needed - hosts: localhost - gather_facts: no - tasks: - - name: "Evaluate oo_host_group_exp if it's set" - add_host: - name: '{{ item }}' - groups: oo_masters_to_config - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined - -- name: Configure instances - hosts: oo_masters_to_config - vars: - openshift_hostname: '{{ ansible_default_ipv4.address }}' - vars_files: - - vars.yml - roles: - - openshift_master - - pods - - os_env_extras diff --git a/playbooks/libvirt/openshift-master/filter_plugins b/playbooks/libvirt/openshift-master/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- 
a/playbooks/libvirt/openshift-master/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/libvirt/openshift-master/roles b/playbooks/libvirt/openshift-master/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/libvirt/openshift-master/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles \ No newline at end of file diff --git a/playbooks/libvirt/openshift-master/vars.yml b/playbooks/libvirt/openshift-master/vars.yml deleted file mode 100644 index ad0c0fbe2..000000000 --- a/playbooks/libvirt/openshift-master/vars.yml +++ /dev/null @@ -1 +0,0 @@ -openshift_debug_level: 4 diff --git a/playbooks/libvirt/openshift-node/config.yml b/playbooks/libvirt/openshift-node/config.yml deleted file mode 100644 index 3244a8046..000000000 --- a/playbooks/libvirt/openshift-node/config.yml +++ /dev/null @@ -1,102 +0,0 @@ -- name: node/config.yml, populate oo_nodes_to_config host group if needed - hosts: localhost - gather_facts: no - tasks: - - name: "Evaluate oo_host_group_exp if it's set" - add_host: - name: '{{ item }}' - groups: oo_nodes_to_config - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined - - - add_host: - name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" - groups: oo_first_master - when: oo_host_group_exp is defined - - -- name: Gather and set facts for hosts to configure - hosts: oo_nodes_to_config - roles: - - openshift_facts - tasks: - # Since the master is registering the nodes before they are configured, we - # need to make sure to set the node properties beforehand if we do not want - # the defaults - - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: common - local_facts: - hostname: "{{ ansible_default_ipv4.address }}" - - role: node - local_facts: - external_id: "{{ openshift_node_external_id | default(None) }}" - resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}" - resources_memory: "{{ openshfit_node_resources_memory | default(None) }}" - pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}" - labels: "{{ openshfit_node_labels | default(None) }}" - annotations: "{{ openshfit_node_annotations | default(None) }}" - - -- name: Register nodes - hosts: oo_first_master - vars: - openshift_nodes: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) }}" - roles: - - openshift_register_nodes - tasks: - - name: Create local temp directory for syncing certs - local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX - register: mktemp - - - name: Sync master certs to localhost - synchronize: - mode: pull - checksum: yes - src: /var/lib/openshift/openshift.local.certificates - dest: "{{ mktemp.stdout }}" - -- name: Configure instances - hosts: oo_nodes_to_config - vars_files: - - vars.yml - vars: - sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}" - cert_parent_rel_path: openshift.local.certificates - cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}" - cert_base_path: /var/lib/openshift - cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}" - cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}" - pre_tasks: - - name: Ensure certificate directories exists - file: - path: "{{ item }}" - state: directory - with_items: - - "{{ cert_path }}" - - "{{ cert_parent_path }}/ca" - - # TODO: notify restart openshift-node and/or restart openshift-sdn-node, - # possibly test service 
started time against certificate/config file - # timestamps in openshift-node or openshift-sdn-node to trigger notify - - name: Sync certs to nodes - synchronize: - checksum: yes - src: "{{ item.src }}" - dest: "{{ item.dest }}" - owner: no - group: no - with_items: - - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}" - dest: "{{ cert_parent_path }}" - - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt" - dest: "{{ cert_parent_path }}/ca/cert.crt" - - local_action: file name={{ sync_tmpdir }} state=absent - run_once: true - roles: - - openshift_node - - os_env_extras - - os_env_extras_node diff --git a/playbooks/libvirt/openshift-node/filter_plugins b/playbooks/libvirt/openshift-node/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/libvirt/openshift-node/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/libvirt/openshift-node/roles b/playbooks/libvirt/openshift-node/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/libvirt/openshift-node/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles \ No newline at end of file diff --git a/playbooks/libvirt/openshift-node/vars.yml b/playbooks/libvirt/openshift-node/vars.yml deleted file mode 100644 index ad0c0fbe2..000000000 --- a/playbooks/libvirt/openshift-node/vars.yml +++ /dev/null @@ -1 +0,0 @@ -openshift_debug_level: 4 diff --git a/playbooks/libvirt/templates/domain.xml b/playbooks/libvirt/templates/domain.xml deleted file mode 100644 index da037d138..000000000 --- a/playbooks/libvirt/templates/domain.xml +++ /dev/null @@ -1,62 +0,0 @@ - - {{ item }} - 1 - 1 - 2 - - hvm - - - - - - - - - - - - - destroy - restart - restart - - /usr/bin/qemu-system-x86_64 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/playbooks/libvirt/templates/meta-data b/playbooks/libvirt/templates/meta-data deleted file mode 100644 index 5d779519f..000000000 --- a/playbooks/libvirt/templates/meta-data +++ /dev/null @@ -1,2 +0,0 @@ -instance-id: {{ item[0] }} -local-hostname: {{ item[0] }} diff --git a/playbooks/libvirt/templates/user-data b/playbooks/libvirt/templates/user-data deleted file mode 100644 index 985badc8e..000000000 --- a/playbooks/libvirt/templates/user-data +++ /dev/null @@ -1,10 +0,0 @@ -#cloud-config - -disable_root: 0 - -system_info: - default_user: - name: root - -ssh_authorized_keys: - - {{ lookup('file', '~/.ssh/id_rsa.pub') }} diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 941190534..c55677c3f 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: Set common OpenShift facts openshift_facts: - role: 'common' + role: common local_facts: cluster_id: "{{ openshift_cluster_id | default('default') }}" debug_level: "{{ openshift_debug_level | default(0) }}" @@ -10,7 +10,7 @@ public_hostname: "{{ openshift_public_hostname | default(None) }}" public_ip: "{{ openshift_public_ip | default(None) }}" use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}" - + deployment_type: "{{ openshift_deployment_type }}" - name: Set hostname hostname: name={{ openshift.common.hostname }} diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml index 50816d319..9f657a2c7 100644 --- a/roles/openshift_common/vars/main.yml +++ b/roles/openshift_common/vars/main.yml @@ -5,3 +5,7 @@ # chains with the public zone (or the zone associated with the correct 
# interfaces) os_firewall_use_firewalld: False + +openshift_cert_parent_dir: /var/lib/openshift +openshift_cert_relative_dir: openshift.local.certificates +openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 0dd343443..1e0d5c605 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -21,8 +21,11 @@ class OpenShiftFactsUnsupportedRoleError(Exception): class OpenShiftFactsFileWriteError(Exception): pass +class OpenShiftFactsMetadataUnavailableError(Exception): + pass + class OpenShiftFacts(): - known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn'] + known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns'] def __init__(self, role, filename, local_facts): self.changed = False @@ -169,20 +172,18 @@ class OpenShiftFacts(): return hostname def get_defaults(self, roles): - hardware_facts = self.get_hardware_facts() - net_facts = self.get_net_facts() - base_facts = self.get_base_facts() + ansible_facts = self.get_ansible_facts() defaults = dict() common = dict(use_openshift_sdn=True) - ip = net_facts['default_ipv4']['address'] + ip = ansible_facts['default_ipv4']['address'] common['ip'] = ip common['public_ip'] = ip rc, output, error = module.run_command(['hostname', '-f']) hostname_f = output.strip() if rc == 0 else '' - hostname_values = [hostname_f, base_facts['nodename'], base_facts['fqdn']] + hostname_values = [hostname_f, ansible_facts['nodename'], ansible_facts['fqdn']] hostname = self.choose_hostname(hostname_values) common['hostname'] = hostname @@ -196,14 +197,14 @@ class OpenShiftFacts(): master = dict(api_use_ssl=True, api_port='8443', console_use_ssl=True, console_path='/console', console_port='8443', etcd_use_ssl=False, - etcd_port='4001') + etcd_port='4001', portal_net='172.30.17.0/24') defaults['master'] = master if 'node' in roles: node = dict(external_id=common['hostname'], pod_cidr='', labels={}, annotations={}) - node['resources_cpu'] = hardware_facts['processor_cores'] - node['resources_memory'] = int(int(hardware_facts['memtotal_mb']) * 1024 * 1024 * 0.75) + node['resources_cpu'] = ansible_facts['processor_cores'] + node['resources_memory'] = int(int(ansible_facts['memtotal_mb']) * 1024 * 1024 * 0.75) defaults['node'] = node return defaults @@ -226,8 +227,7 @@ class OpenShiftFacts(): def query_metadata(self, metadata_url, headers=None, expect_json=False): r, info = fetch_url(module, metadata_url, headers=headers) if info['status'] != 200: - module.fail_json(msg='Failed to query metadata', result=r, - info=info) + raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable") if expect_json: return module.from_json(r.read()) else: @@ -252,40 +252,27 @@ class OpenShiftFacts(): def get_provider_metadata(self, metadata_url, supports_recursive=False, headers=None, expect_json=False): - if supports_recursive: - metadata = self.query_metadata(metadata_url, headers, expect_json) - else: - metadata = self.walk_metadata(metadata_url, headers, expect_json) + try: + if supports_recursive: + metadata = self.query_metadata(metadata_url, headers, expect_json) + else: + metadata = self.walk_metadata(metadata_url, headers, expect_json) + except OpenShiftFactsMetadataUnavailableError as e: + metadata = None return metadata - def get_hardware_facts(self): - if not hasattr(self, 'hardware_facts'): - self.hardware_facts = 
Hardware().populate() - return self.hardware_facts - - def get_base_facts(self): - if not hasattr(self, 'base_facts'): - self.base_facts = Facts().populate() - return self.base_facts - - def get_virt_facts(self): - if not hasattr(self, 'virt_facts'): - self.virt_facts = Virtual().populate() - return self.virt_facts - - def get_net_facts(self): - if not hasattr(self, 'net_facts'): - self.net_facts = Network(module).populate() - return self.net_facts + def get_ansible_facts(self): + if not hasattr(self, 'ansible_facts'): + self.ansible_facts = ansible_facts(module) + return self.ansible_facts def guess_host_provider(self): # TODO: cloud provider facts should probably be submitted upstream - virt_facts = self.get_virt_facts() - hardware_facts = self.get_hardware_facts() - product_name = hardware_facts['product_name'] - product_version = hardware_facts['product_version'] - virt_type = virt_facts['virtualization_type'] - virt_role = virt_facts['virtualization_role'] + ansible_facts = self.get_ansible_facts() + product_name = ansible_facts['product_name'] + product_version = ansible_facts['product_version'] + virt_type = ansible_facts['virtualization_type'] + virt_role = ansible_facts['virtualization_role'] provider = None metadata = None @@ -300,8 +287,9 @@ class OpenShiftFacts(): True) # Filter sshKeys and serviceAccounts from gce metadata - metadata['project']['attributes'].pop('sshKeys', None) - metadata['instance'].pop('serviceAccounts', None) + if metadata: + metadata['project']['attributes'].pop('sshKeys', None) + metadata['instance'].pop('serviceAccounts', None) elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version): provider = 'ec2' metadata_url = 'http://169.254.169.254/latest/meta-data/' @@ -310,12 +298,18 @@ class OpenShiftFacts(): provider = 'openstack' metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json' metadata = self.get_provider_metadata(metadata_url, True, None, True) - ec2_compat_url = 'http://169.254.169.254/latest/meta-data/' - metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url) - # Filter public_keys and random_seed from openstack metadata - metadata.pop('public_keys', None) - metadata.pop('random_seed', None) + if metadata: + ec2_compat_url = 'http://169.254.169.254/latest/meta-data/' + metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url) + + # Filter public_keys and random_seed from openstack metadata + metadata.pop('public_keys', None) + metadata.pop('random_seed', None) + + if not metadata['ec2_compat']: + metadata = None + return dict(name=provider, metadata=metadata) def normalize_provider_facts(self, provider, metadata): @@ -479,4 +473,6 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.facts import * from ansible.module_utils.urls import * -main() + +if __name__ == '__main__': + main() diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index aa615df39..1b1210007 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -11,33 +11,67 @@ api_url: "{{ openshift_master_api_url | default(None) }}" api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" public_api_url: "{{ openshift_master_public_api_url | default(None) }}" + console_path: "{{ openshift_master_console_path | default(None) }}" console_port: "{{ openshift_master_console_port | default(None) }}" console_url: "{{ openshift_master_console_url | default(None) }}" console_use_ssl: "{{ 
openshift_master_console_use_ssl | default(None) }}" public_console_url: "{{ openshift_master_public_console_url | default(None) }}" + etcd_port: "{{ openshift_master_etcd_port | default(None) }}" etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}" + portal_net: "{{ openshift_master_portal_net | default(None) }}" + +# TODO: These values need to be configurable +- name: Set dns OpenShift facts + openshift_facts: + role: 'dns' + local_facts: + ip: "{{ openshift.common.ip }}" + domain: local - name: Install OpenShift Master package yum: pkg=openshift-master state=installed + register: install_result + +- name: Reload systemd units + command: systemctl daemon-reload + when: install_result | changed + +- name: Create certificate parent directory if it doesn't exist + file: + path: "{{ openshift_cert_parent_dir }}" + state: directory + +- name: Create config parent directory if it doesn't exist + file: + path: "{{ openshift_master_config | dirname }}" + state: directory + +# TODO: should probably use a template lookup for this +# TODO: should allow for setting --etcd, --kubernetes options +# TODO: recreate config if values change +- name: Use enterprise default for openshift_registry_url if not set + set_fact: + openshift_registry_url: "openshift3_beta/ose-${component}:${version}" + when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined +- name: Create master config + command: > + /usr/bin/openshift start master --write-config + --config={{ openshift_master_config }} + --portal-net={{ openshift.master.portal_net }} + --master={{ openshift.master.api_url }} + --public-master={{ openshift.master.public_api_url }} + --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://0.0.0.0:{{ openshift.master.api_port }} + {{ ('--images=' ~ openshift_registry_url) if openshift_registry_url is defined else '' }} + {{ ('--nodes=' ~ openshift_node_ips | join(',')) if openshift_node_ips is defined else '' }} + args: + chdir: "{{ openshift_cert_parent_dir }}" + creates: "{{ openshift_master_config }}" -# TODO: We should pre-generate the master config and point to the generated -# config rather than setting command line flags here - name: Configure OpenShift settings lineinfile: dest: /etc/sysconfig/openshift-master regexp: '^OPTIONS=' - line: "OPTIONS=\"--master={{ openshift.common.hostname }} --public-master={{ openshift.common.public_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift.master.debug_level }}\"" - notify: - - restart openshift-master - -# TODO: should this be populated by a fact based on the deployment type -# (origin, online, enterprise)? 
-- name: Set default registry url - lineinfile: - dest: /etc/sysconfig/openshift-master - regexp: '^IMAGES=' - line: "IMAGES={{ openshift_registry_url }}" - when: openshift_registry_url is defined + line: "OPTIONS=\"--config={{ openshift_master_config }} --loglevel={{ openshift.master.debug_level }}\"" notify: - restart openshift-master @@ -53,6 +87,6 @@ # TODO: Update this file if the contents of the source file are not present in # the dest file, will need to make sure to ignore things that could be added - name: Configure root user kubeconfig - command: cp /var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig /root/.kube/.kubeconfig + command: cp {{ openshift_cert_dir }}/openshift-client/.kubeconfig /root/.kube/.kubeconfig args: creates: /root/.kube/.kubeconfig diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml new file mode 100644 index 000000000..c52d957ac --- /dev/null +++ b/roles/openshift_master/vars/main.yml @@ -0,0 +1,5 @@ +--- +openshift_master_config: /etc/openshift/master.yaml +openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca" +openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt" +openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key" diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index e3c04585b..3d56bdd67 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -13,17 +13,22 @@ failed_when: not result.stat.exists register: result with_items: - - "{{ cert_path }}" - - "{{ cert_path }}/cert.crt" - - "{{ cert_path }}/key.key" - - "{{ cert_path }}/.kubeconfig" - - "{{ cert_path }}/server.crt" - - "{{ cert_path }}/server.key" - - "{{ cert_parent_path }}/ca/cert.crt" - #- "{{ cert_path }}/node.yaml" + - "{{ openshift_node_cert_dir }}" + - "{{ openshift_node_cert_dir }}/ca.crt" + - "{{ openshift_node_cert_dir }}/client.crt" + - "{{ openshift_node_cert_dir }}/client.key" + - "{{ openshift_node_cert_dir }}/.kubeconfig" + - "{{ openshift_node_cert_dir }}/node-config.yaml" + - "{{ openshift_node_cert_dir }}/server.crt" + - "{{ openshift_node_cert_dir }}/server.key" - name: Install OpenShift Node package yum: pkg=openshift-node state=installed + register: install_result + +- name: Reload systemd units + command: systemctl daemon-reload + when: install_result | changed # --create-certs=false is a temporary workaround until # https://github.com/openshift/origin/pull/1361 is merged upstream and it is @@ -32,16 +37,7 @@ lineinfile: dest: /etc/sysconfig/openshift-node regexp: '^OPTIONS=' - line: "OPTIONS=\"--hostname={{ openshift.common.hostname }} --loglevel={{ openshift.node.debug_level }} --create-certs=false\"" - notify: - - restart openshift-node - -- name: Set default registry url - lineinfile: - dest: /etc/sysconfig/openshift-node - regexp: '^IMAGES=' - line: "IMAGES={{ openshift_registry_url }}" - when: openshift_registry_url is defined + line: "OPTIONS=\"--loglevel={{ openshift.node.debug_level }} --config={{ openshift_node_cert_dir }}/node-config.yaml\"" notify: - restart openshift-node diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml new file mode 100644 index 000000000..c6be83139 --- /dev/null +++ b/roles/openshift_node/vars/main.yml @@ -0,0 +1,2 @@ +--- +openshift_node_cert_dir: /etc/openshift/node diff --git a/roles/openshift_register_nodes/defaults/main.yml b/roles/openshift_register_nodes/defaults/main.yml index 3501e8922..a0befab44 100644 --- 
a/roles/openshift_register_nodes/defaults/main.yml +++ b/roles/openshift_register_nodes/defaults/main.yml @@ -1,5 +1,2 @@ --- openshift_kube_api_version: v1beta1 -openshift_cert_dir: openshift.local.certificates -openshift_cert_dir_parent: /var/lib/openshift -openshift_cert_dir_abs: "{{ openshift_cert_dir_parent ~ '/' ~ openshift_cert_dir }}" diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py index 8ebeb087a..1ec977716 100755 --- a/roles/openshift_register_nodes/library/kubernetes_register_node.py +++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py @@ -97,10 +97,8 @@ class ClientConfigException(Exception): class ClientConfig: def __init__(self, client_opts, module): - _, output, error = module.run_command(["/usr/bin/openshift", "ex", - "config", "view", "-o", - "json"] + client_opts, - check_rc = True) + kubectl = module.params['kubectl_cmd'] + _, output, error = module.run_command(kubectl + ["config", "view", "-o", "json"] + client_opts, check_rc = True) self.config = json.loads(output) if not (bool(self.config['clusters']) or @@ -146,6 +144,9 @@ class ClientConfig: def get_cluster_for_context(self, context): return self.get_value_for_context(context, 'cluster') + def get_namespace_for_context(self, context): + return self.get_value_for_context(context, 'namespace') + class Util: @staticmethod def remove_empty_elements(mapping): @@ -247,15 +248,15 @@ class Node: return Util.remove_empty_elements(node) def exists(self): - _, output, error = self.module.run_command(["/usr/bin/osc", "get", - "nodes"] + self.client_opts, - check_rc = True) + kubectl = self.module.params['kubectl_cmd'] + _, output, error = self.module.run_command(kubectl + ["get", "nodes"] + self.client_opts, check_rc = True) if re.search(self.module.params['name'], output, re.MULTILINE): return True return False def create(self): - cmd = ['/usr/bin/osc'] + self.client_opts + ['create', 'node', '-f', '-'] + kubectl = self.module.params['kubectl_cmd'] + cmd = kubectl + self.client_opts + ['create', '-f', '-'] rc, output, error = self.module.run_command(cmd, data=self.module.jsonify(self.get_node())) if rc != 0: @@ -273,24 +274,26 @@ class Node: def main(): module = AnsibleModule( - argument_spec = dict( - name = dict(required = True, type = 'str'), - host_ip = dict(type = 'str'), - hostnames = dict(type = 'list', default = []), - external_ips = dict(type = 'list', default = []), - internal_ips = dict(type = 'list', default = []), - api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3 - choices = ['v1beta1', 'v1beta3']), - cpu = dict(type = 'str'), - memory = dict(type = 'str'), - labels = dict(type = 'dict', default = {}), # TODO: needs documented - annotations = dict(type = 'dict', default = {}), # TODO: needs documented - pod_cidr = dict(type = 'str'), # TODO: needs documented - external_id = dict(type = 'str'), # TODO: needs documented - client_config = dict(type = 'str'), # TODO: needs documented - client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented - client_context = dict(type = 'str', default = 'master'), # TODO: needs documented - client_user = dict(type = 'str', default = 'admin') # TODO: needs documented + argument_spec = dict( + name = dict(required = True, type = 'str'), + host_ip = dict(type = 'str'), + hostnames = dict(type = 'list', default = []), + external_ips = dict(type = 'list', default = []), + internal_ips = 
dict(type = 'list', default = []), + api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3 + choices = ['v1beta1', 'v1beta3']), + cpu = dict(type = 'str'), + memory = dict(type = 'str'), + labels = dict(type = 'dict', default = {}), # TODO: needs documented + annotations = dict(type = 'dict', default = {}), # TODO: needs documented + pod_cidr = dict(type = 'str'), # TODO: needs documented + external_id = dict(type = 'str'), # TODO: needs documented + client_config = dict(type = 'str'), # TODO: needs documented + client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented + client_context = dict(type = 'str', default = 'default'), # TODO: needs documented + client_namespace = dict(type = 'str', default = 'default'), # TODO: needs documented + client_user = dict(type = 'str', default = 'system:openshift-client'), # TODO: needs documented + kubectl_cmd = dict(type = 'list', default = ['kubectl']) # TODO: needs documented ), mutually_exclusive = [ ['host_ip', 'external_ips'], @@ -333,14 +336,16 @@ def main(): client_cluster = module.params['client_cluster'] if config.has_cluster(client_cluster): - if client_cluster != config.get_cluster_for_context(client_cluster): + if client_cluster != config.get_cluster_for_context(client_context): client_opts.append("--cluster=%s" % client_cluster) else: module.fail_json(msg="Cluster %s not found in client config" % client_cluster) - # TODO: provide sane defaults for some (like hostname, externalIP, - # internalIP, etc) + client_namespace = module.params['client_namespace'] + if client_namespace != config.get_namespace_for_context(client_context): + client_opts.append("--namespace=%s" % client_namespace) + node = Node(module, client_opts, module.params['api_version'], module.params['name'], module.params['host_ip'], module.params['hostnames'], module.params['external_ips'], diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml index 7319b88b1..85f490f70 100644 --- a/roles/openshift_register_nodes/tasks/main.yml +++ b/roles/openshift_register_nodes/tasks/main.yml @@ -3,53 +3,37 @@ # TODO: recreate master/node configs if settings that affect the configs # change (hostname, public_hostname, ip, public_ip, etc) -# TODO: create a failed_when condition -- name: Create node server certificates - command: > - /usr/bin/openshift admin create-server-cert - --overwrite=false - --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.crt - --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.key - --hostnames={{ [item.openshift.common.hostname, - item.openshift.common.public_hostname]|unique|join(",") }} - args: - chdir: "{{ openshift_cert_dir_parent }}" - creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/server.crt" - with_items: openshift_nodes - register: server_cert_result - -# TODO: create a failed_when condition -- name: Create node client certificates - command: > - /usr/bin/openshift admin create-node-cert - --overwrite=false - --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt - --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key - --node-name={{ item.openshift.common.hostname }} - args: - chdir: "{{ openshift_cert_dir_parent }}" - creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/cert.crt" - with_items: openshift_nodes - register: node_cert_result +# TODO: use a 
template lookup here # TODO: create a failed_when condition -- name: Create kubeconfigs for nodes +- name: Use enterprise default for openshift_registry_url if not set + set_fact: + openshift_registry_url: "openshift3_beta/ose-${component}:${version}" + when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined +- name: Create node config command: > - /usr/bin/openshift admin create-kubeconfig - --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt - --client-key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key - --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/.kubeconfig - --master={{ openshift.master.api_url }} - --public-master={{ openshift.master.public_api_url }} + /usr/bin/openshift admin create-node-config + --node-dir={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }} + --node={{ item.openshift.common.hostname }} + --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname]|unique|join(",") }} + --dns-domain={{ openshift.dns.domain }} + --dns-ip={{ openshift.dns.ip }} + --master={{ openshift.master.api_url }} + --signer-key={{ openshift_master_ca_key }} + --signer-cert={{ openshift_master_ca_cert }} + --certificate-authority={{ openshift_master_ca_cert }} + --signer-serial={{ openshift_master_ca_dir }}/serial.txt + --node-client-certificate-authority={{ openshift_master_ca_cert }} + {{ ('--images=' ~ openshift_registry_url) if openshift_registry_url is defined else '' }} + --listen=https://0.0.0.0:10250 args: - chdir: "{{ openshift_cert_dir_parent }}" - creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/.kubeconfig" + chdir: "{{ openshift_cert_parent_dir }}" + creates: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}" with_items: openshift_nodes - register: kubeconfig_result - name: Register unregistered nodes kubernetes_register_node: - client_user: openshift-client + kubectl_cmd: ['openshift', 'kube'] name: "{{ item.openshift.common.hostname }}" api_version: "{{ openshift_kube_api_version }}" cpu: "{{ item.openshift.node.resources_cpu | default(None) }}" @@ -61,7 +45,5 @@ external_id: "{{ item.openshift.node.external_id }}" # TODO: support customizing other attributes such as: client_config, # client_cluster, client_context, client_user - # TODO: update for v1beta3 changes after rebase: hostnames, external_ips, - # internal_ips, external_id with_items: openshift_nodes register: register_result diff --git a/roles/openshift_register_nodes/vars/main.yml b/roles/openshift_register_nodes/vars/main.yml new file mode 100644 index 000000000..bd497f08f --- /dev/null +++ b/roles/openshift_register_nodes/vars/main.yml @@ -0,0 +1,7 @@ +--- +openshift_cert_parent_dir: /var/lib/openshift +openshift_cert_relative_dir: openshift.local.certificates +openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}" +openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca" +openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt" +openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key" diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md index 6713e11fc..6bbedd839 100644 --- a/roles/openshift_repos/README.md +++ b/roles/openshift_repos/README.md @@ -14,7 +14,7 @@ Role Variables | Name | Default value | | |-------------------------------|---------------|----------------------------------------------| -| 
openshift_deployment_type | online | Possible values enterprise, origin, online | +| openshift_deployment_type | None | Possible values enterprise, origin, online | | openshift_additional_repos | {} | TODO | Dependencies diff --git a/roles/openshift_repos/defaults/main.yaml b/roles/openshift_repos/defaults/main.yaml index 1730207f4..7c5a14cd7 100644 --- a/roles/openshift_repos/defaults/main.yaml +++ b/roles/openshift_repos/defaults/main.yaml @@ -1,7 +1,2 @@ --- -# TODO: once we are able to configure/deploy origin using the openshift roles, -# then we should default to origin - -# TODO: push the defaulting of these values to the openshift_facts module -openshift_deployment_type: online openshift_additional_repos: {} diff --git a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta deleted file mode 100644 index 7b40671a4..000000000 --- a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta +++ /dev/null @@ -1,61 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.2.6 (GNU/Linux) - -mQINBEmkAzABEAC2/c7bP1lHQ3XScxbIk0LQWe1YOiibQBRLwf8Si5PktgtuPibT -kKpZjw8p4D+fM7jD1WUzUE0X7tXg2l/eUlMM4dw6XJAQ1AmEOtlwSg7rrMtTvM0A -BEtI7Km6fC6sU6RtBMdcqD1cH/6dbsfh8muznVA7UlX+PRBHVzdWzj6y8h84dBjo -gzcbYu9Hezqgj/lLzicqsSZPz9UdXiRTRAIhp8V30BD8uRaaa0KDDnD6IzJv3D9P -xQWbFM4Z12GN9LyeZqmD7bpKzZmXG/3drvfXVisXaXp3M07t3NlBa3Dt8NFIKZ0D -FRXBz5bvzxRVmdH6DtkDWXDPOt+Wdm1rZrCOrySFpBZQRpHw12eo1M1lirANIov7 -Z+V1Qh/aBxj5EUu32u9ZpjAPPNtQF6F/KjaoHHHmEQAuj4DLex4LY646Hv1rcv2i -QFuCdvLKQGSiFBrfZH0j/IX3/0JXQlZzb3MuMFPxLXGAoAV9UP/Sw/WTmAuTzFVm -G13UYFeMwrToOiqcX2VcK0aC1FCcTP2z4JW3PsWvU8rUDRUYfoXovc7eg4Vn5wHt -0NBYsNhYiAAf320AUIHzQZYi38JgVwuJfFu43tJZE4Vig++RQq6tsEx9Ftz3EwRR -fJ9z9mEvEiieZm+vbOvMvIuimFVPSCmLH+bI649K8eZlVRWsx3EXCVb0nQARAQAB -tDBSZWQgSGF0LCBJbmMuIChiZXRhIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0LmNv -bT6JAjYEEwECACAFAkpSM+cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCT -ioDK8hVB6/9tEAC0+KmzeKceXQ/GTUoU6jy9vtkFCFrmv+c7ol4XpdTt0QhqBOwy -6m2mKWwmm8KfYfy0cADQ4y/EcoXl7FtFBwYmkCuEQGXhTDn9DvVjhooIq59LEMBQ -OW879RwwzRIZ8ebbjMUjDPF5MfPQqP2LBu9N4KvXlZp4voykwuuaJ+cbsKZR6pZ6 -0RQKPHKP+NgUFC0fff7XY9cuOZZWFAeKRhLN2K7bnRHKxp+kELWb6R9ZfrYwZjWc -MIPbTd1khE53L4NTfpWfAnJRtkPSDOKEGVlVLtLq4HEAxQt07kbslqISRWyXER3u -QOJj64D1ZiIMz6t6uZ424VE4ry9rBR0Jz55cMMx5O/ni9x3xzFUgH8Su2yM0r3jE -Rf24+tbOaPf7tebyx4OKe+JW95hNVstWUDyGbs6K9qGfI/pICuO1nMMFTo6GqzQ6 -DwLZvJ9QdXo7ujEtySZnfu42aycaQ9ZLC2DOCQCUBY350Hx6FLW3O546TAvpTfk0 -B6x+DV7mJQH7MGmRXQsE7TLBJKjq28Cn4tVp04PmybQyTxZdGA/8zY6pPl6xyVMH -V68hSBKEVT/rlouOHuxfdmZva1DhVvUC6Xj7+iTMTVJUAq/4Uyn31P1OJmA2a0PT -CAqWkbJSgKFccsjPoTbLyxhuMSNkEZFHvlZrSK9vnPzmfiRH0Orx3wYpMQ== -=21pb ------END PGP PUBLIC KEY BLOCK----- -The following public key can be used to verify RPM packages built and -signed by Red Hat, Inc. for this beta using `rpm -K' using the GNU GPG -package. Questions about this key should be sent to security@redhat.com. 
- - ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.0.6 (GNU/Linux) -Comment: For info see http://www.gnupg.org - -mQGiBDySTqsRBACzc7xuCIp10oj5B2PAV4XzDeVxprv/WTMreSNSK+iC0bEz0IBp -Vnn++qtyiXfH+bGIE9jqZgIEnpttWhUOaU5LhcLFzy+m8NWfngIFP9QfGmGAe9Gd -LFeAdhj4RmSG/vgr7vDd83Hz22dv403Ar/sliWO4vDOrMmZBG57WGYTWtwCgkMsi -UUQuJ6slbzKn82w+bYxOlL0EAIylWJGaTkKOTL5DqVR3ik9aT0Dt3FNVYiuhcKBe -II4E3KOIVA9kO8in1IZjx2gs6K2UV+GsoAVANdfKL7l9O+k+J8OxhE74oycvYJxW -QzCgXMZkNcvW5wyXwEMcr6TVd/5BGztcMw8oT3/l2MtAEG/vn1XaWToRSO1XDMDz -+AjUA/4m0mTkN8S4wjzJG8lqN7+quW3UOaiCe8J3SFrrrhE0XbY9cTJI/9nuXHU1 -VjqOSmXQYH2Db7UOroFTBiWhlAedA4O4yuK52AJnvSsHbnJSEmn9rpo5z1Q8F+qI -mDlzriJdrIrVLeDiUeTlpH3kpG38D7007GhXBV72k1gpMoMcpbQ3UmVkIEhhdCwg -SW5jLiAoQmV0YSBUZXN0IFNvZnR3YXJlKSA8cmF3aGlkZUByZWRoYXQuY29tPohX -BBMRAgAXBQI8l5p/BQsHCgMEAxUDAgMWAgECF4AACgkQ/TcmiYl9oHqdeQCfZjw4 -F9sir3XfRAjVe9kYNcQ8hnIAn0WgyT7H5RriWYTOCfauOmd+cAW4iEYEEBECAAYF -AjyXmqQACgkQIZGAzdtCpg5nDQCfepuRUyuVJvhuQkPWySETYvRw+WoAnjAWhx6q -0npMx4OE1JGFi8ymKXktuQENBDySTq4QBADKL/mK7S8E3synxISlu7R6fUvu07Oc -RoX96n0Di6T+BS99hC44XzHjMDhUX2ZzVvYS88EZXoUDDkB/8g7SwZrOJ/QE1zrI -JmSVciNhSYWwqeT40Evs88ajZUfDiNbS/cSC6oui98iS4vxd7sE7IPY+FSx9vuAR -xOa9vBnJY/dx0wADBQQAosm+Iltt2uigC6LJzxNOoIdB5r0GqTC1o5sHCeNqXJhU -ExAG8m74uzMlYVLOpGZi4y4NwwAWvCWC0MWWnnu+LGFy1wKiJKRjhv5F+WkFutY5 -WHV5L44vp9jSIlBCRG+84jheTh8xqhndM9wOfPwWdYYu1vxrB8Tn6kA17PcYfHSI -RgQYEQIABgUCPJJergAKCRD9NyaJiX2geiCPAJ4nEM4NtI9Uj8lONDk6FU86PmoL -yACfb68fBd2pWEzLKsOk9imIobHHpzE= -=gpIn ------END PGP PUBLIC KEY BLOCK----- diff --git a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release deleted file mode 100644 index 0f83b622d..000000000 --- a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release +++ /dev/null @@ -1,63 +0,0 @@ -The following public key can be used to verify RPM packages built and -signed by Red Hat, Inc. This key is used for packages in Red Hat -products shipped after November 2009, and for all updates to those -products. - -Questions about this key should be sent to security@redhat.com. - -pub 4096R/FD431D51 2009-10-22 Red Hat, Inc. 
(release key 2) - ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.2.6 (GNU/Linux) - -mQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF -0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF -0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c -u7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh -XGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H -5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW -9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj -/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1 -PcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY -HVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF -buhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB -tDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0 -LmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK -CRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC -2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf -C/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5 -un3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E -0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE -IGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh -8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL -Ght5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki -JUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25 -OFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq -dzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw== -=zbHE ------END PGP PUBLIC KEY BLOCK----- -The following public key can be used to verify RPM packages built and -signed by Red Hat, Inc. This key is a supporting (auxiliary) key for -Red Hat products shipped after November 2006 and for all updates to -those products. - -Questions about this key should be sent to security@redhat.com. 
- ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.2.6 (GNU/Linux) - -mQGiBEVwDGkRBACwPhZIpvkjI8wV9sFTDoqyPLx1ub8Sd/w+YuI5Ovm49mvvEQVT -VLg8FgE5JlST59AbsLDyVtRa9CxIvN5syBVrWWWtHtDnnylFBcqG/A6J3bI4E9/A -UtSL5Zxbav0+utP6f3wOpxQrxc+WIDVgpurdBKAQ3dsobGBqypeX6FXZ5wCgou6C -yZpGIBqosJaDWLzNeOfb/70D/1thLkQyhW3JJ6cHCYJHNfBShvbLWBf6S231mgmu -MyMlt8Kmipc9bw+saaAkSkVsQ/ZbfjrWB7e5kbMruKLVrH+nGhamlHYUGyAPtsPg -Uj/NUSj5BmrCsOkMpn43ngTLssE9MLhSPj2nIHGFv9B+iVLvomDdwnaBRgQ1aK8z -z6MAA/406yf5yVJ/MlTWs1/68VwDhosc9BtU1V5IE0NXgZUAfBJzzfVzzKQq6zJ2 -eZsMLhr96wbsW13zUZt1ing+ulwh2ee4meuJq6h/971JspFY/XBhcfq4qCNqVjsq -SZnWoGdCO6J8CxPIemD2IUHzjoyyeEj3RVydup6pcWZAmhzkKrQzUmVkIEhhdCwg -SW5jLiAoYXV4aWxpYXJ5IGtleSkgPHNlY3VyaXR5QHJlZGhhdC5jb20+iF4EExEC -AB4FAkVwDGkCGwMGCwkIBwMCAxUCAwMWAgECHgECF4AACgkQRWiciC+mWOC1rQCg -ooNLCFOzNPcvhd9Za8C801HmnsYAniCw3yzrCqtjYnxDDxlufH0FVTwX -=d/bm ------END PGP PUBLIC KEY BLOCK----- - diff --git a/roles/openshift_repos/files/online/epel7-kubernetes.repo b/roles/openshift_repos/files/online/epel7-kubernetes.repo deleted file mode 100644 index 1deae2939..000000000 --- a/roles/openshift_repos/files/online/epel7-kubernetes.repo +++ /dev/null @@ -1,6 +0,0 @@ -[maxamillion-epel7-kubernetes] -name=Copr repo for epel7-kubernetes owned by maxamillion -baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/epel7-kubernetes/epel-7-$basearch/ -skip_if_unavailable=True -gpgcheck=0 -enabled=1 diff --git a/roles/openshift_repos/files/online/epel7-openshift.repo b/roles/openshift_repos/files/online/epel7-openshift.repo deleted file mode 100644 index c7629872d..000000000 --- a/roles/openshift_repos/files/online/epel7-openshift.repo +++ /dev/null @@ -1,6 +0,0 @@ -[maxamillion-origin-next] -name=Copr repo for origin-next owned by maxamillion -baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/ -skip_if_unavailable=False -gpgcheck=0 -enabled=1 diff --git a/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-beta b/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-beta new file mode 100644 index 000000000..7b40671a4 --- /dev/null +++ b/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-beta @@ -0,0 +1,61 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.2.6 (GNU/Linux) + +mQINBEmkAzABEAC2/c7bP1lHQ3XScxbIk0LQWe1YOiibQBRLwf8Si5PktgtuPibT +kKpZjw8p4D+fM7jD1WUzUE0X7tXg2l/eUlMM4dw6XJAQ1AmEOtlwSg7rrMtTvM0A +BEtI7Km6fC6sU6RtBMdcqD1cH/6dbsfh8muznVA7UlX+PRBHVzdWzj6y8h84dBjo +gzcbYu9Hezqgj/lLzicqsSZPz9UdXiRTRAIhp8V30BD8uRaaa0KDDnD6IzJv3D9P +xQWbFM4Z12GN9LyeZqmD7bpKzZmXG/3drvfXVisXaXp3M07t3NlBa3Dt8NFIKZ0D +FRXBz5bvzxRVmdH6DtkDWXDPOt+Wdm1rZrCOrySFpBZQRpHw12eo1M1lirANIov7 +Z+V1Qh/aBxj5EUu32u9ZpjAPPNtQF6F/KjaoHHHmEQAuj4DLex4LY646Hv1rcv2i +QFuCdvLKQGSiFBrfZH0j/IX3/0JXQlZzb3MuMFPxLXGAoAV9UP/Sw/WTmAuTzFVm +G13UYFeMwrToOiqcX2VcK0aC1FCcTP2z4JW3PsWvU8rUDRUYfoXovc7eg4Vn5wHt +0NBYsNhYiAAf320AUIHzQZYi38JgVwuJfFu43tJZE4Vig++RQq6tsEx9Ftz3EwRR +fJ9z9mEvEiieZm+vbOvMvIuimFVPSCmLH+bI649K8eZlVRWsx3EXCVb0nQARAQAB +tDBSZWQgSGF0LCBJbmMuIChiZXRhIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0LmNv +bT6JAjYEEwECACAFAkpSM+cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCT +ioDK8hVB6/9tEAC0+KmzeKceXQ/GTUoU6jy9vtkFCFrmv+c7ol4XpdTt0QhqBOwy +6m2mKWwmm8KfYfy0cADQ4y/EcoXl7FtFBwYmkCuEQGXhTDn9DvVjhooIq59LEMBQ +OW879RwwzRIZ8ebbjMUjDPF5MfPQqP2LBu9N4KvXlZp4voykwuuaJ+cbsKZR6pZ6 +0RQKPHKP+NgUFC0fff7XY9cuOZZWFAeKRhLN2K7bnRHKxp+kELWb6R9ZfrYwZjWc +MIPbTd1khE53L4NTfpWfAnJRtkPSDOKEGVlVLtLq4HEAxQt07kbslqISRWyXER3u +QOJj64D1ZiIMz6t6uZ424VE4ry9rBR0Jz55cMMx5O/ni9x3xzFUgH8Su2yM0r3jE 
+Rf24+tbOaPf7tebyx4OKe+JW95hNVstWUDyGbs6K9qGfI/pICuO1nMMFTo6GqzQ6 +DwLZvJ9QdXo7ujEtySZnfu42aycaQ9ZLC2DOCQCUBY350Hx6FLW3O546TAvpTfk0 +B6x+DV7mJQH7MGmRXQsE7TLBJKjq28Cn4tVp04PmybQyTxZdGA/8zY6pPl6xyVMH +V68hSBKEVT/rlouOHuxfdmZva1DhVvUC6Xj7+iTMTVJUAq/4Uyn31P1OJmA2a0PT +CAqWkbJSgKFccsjPoTbLyxhuMSNkEZFHvlZrSK9vnPzmfiRH0Orx3wYpMQ== +=21pb +-----END PGP PUBLIC KEY BLOCK----- +The following public key can be used to verify RPM packages built and +signed by Red Hat, Inc. for this beta using `rpm -K' using the GNU GPG +package. Questions about this key should be sent to security@redhat.com. + + +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.0.6 (GNU/Linux) +Comment: For info see http://www.gnupg.org + +mQGiBDySTqsRBACzc7xuCIp10oj5B2PAV4XzDeVxprv/WTMreSNSK+iC0bEz0IBp +Vnn++qtyiXfH+bGIE9jqZgIEnpttWhUOaU5LhcLFzy+m8NWfngIFP9QfGmGAe9Gd +LFeAdhj4RmSG/vgr7vDd83Hz22dv403Ar/sliWO4vDOrMmZBG57WGYTWtwCgkMsi +UUQuJ6slbzKn82w+bYxOlL0EAIylWJGaTkKOTL5DqVR3ik9aT0Dt3FNVYiuhcKBe +II4E3KOIVA9kO8in1IZjx2gs6K2UV+GsoAVANdfKL7l9O+k+J8OxhE74oycvYJxW +QzCgXMZkNcvW5wyXwEMcr6TVd/5BGztcMw8oT3/l2MtAEG/vn1XaWToRSO1XDMDz ++AjUA/4m0mTkN8S4wjzJG8lqN7+quW3UOaiCe8J3SFrrrhE0XbY9cTJI/9nuXHU1 +VjqOSmXQYH2Db7UOroFTBiWhlAedA4O4yuK52AJnvSsHbnJSEmn9rpo5z1Q8F+qI +mDlzriJdrIrVLeDiUeTlpH3kpG38D7007GhXBV72k1gpMoMcpbQ3UmVkIEhhdCwg +SW5jLiAoQmV0YSBUZXN0IFNvZnR3YXJlKSA8cmF3aGlkZUByZWRoYXQuY29tPohX +BBMRAgAXBQI8l5p/BQsHCgMEAxUDAgMWAgECF4AACgkQ/TcmiYl9oHqdeQCfZjw4 +F9sir3XfRAjVe9kYNcQ8hnIAn0WgyT7H5RriWYTOCfauOmd+cAW4iEYEEBECAAYF +AjyXmqQACgkQIZGAzdtCpg5nDQCfepuRUyuVJvhuQkPWySETYvRw+WoAnjAWhx6q +0npMx4OE1JGFi8ymKXktuQENBDySTq4QBADKL/mK7S8E3synxISlu7R6fUvu07Oc +RoX96n0Di6T+BS99hC44XzHjMDhUX2ZzVvYS88EZXoUDDkB/8g7SwZrOJ/QE1zrI +JmSVciNhSYWwqeT40Evs88ajZUfDiNbS/cSC6oui98iS4vxd7sE7IPY+FSx9vuAR +xOa9vBnJY/dx0wADBQQAosm+Iltt2uigC6LJzxNOoIdB5r0GqTC1o5sHCeNqXJhU +ExAG8m74uzMlYVLOpGZi4y4NwwAWvCWC0MWWnnu+LGFy1wKiJKRjhv5F+WkFutY5 +WHV5L44vp9jSIlBCRG+84jheTh8xqhndM9wOfPwWdYYu1vxrB8Tn6kA17PcYfHSI +RgQYEQIABgUCPJJergAKCRD9NyaJiX2geiCPAJ4nEM4NtI9Uj8lONDk6FU86PmoL +yACfb68fBd2pWEzLKsOk9imIobHHpzE= +=gpIn +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-release b/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-release new file mode 100644 index 000000000..0f83b622d --- /dev/null +++ b/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-release @@ -0,0 +1,63 @@ +The following public key can be used to verify RPM packages built and +signed by Red Hat, Inc. This key is used for packages in Red Hat +products shipped after November 2009, and for all updates to those +products. + +Questions about this key should be sent to security@redhat.com. + +pub 4096R/FD431D51 2009-10-22 Red Hat, Inc. 
(release key 2) + +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.2.6 (GNU/Linux) + +mQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF +0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF +0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c +u7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh +XGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H +5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW +9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj +/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1 +PcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY +HVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF +buhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB +tDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0 +LmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK +CRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC +2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf +C/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5 +un3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E +0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE +IGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh +8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL +Ght5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki +JUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25 +OFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq +dzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw== +=zbHE +-----END PGP PUBLIC KEY BLOCK----- +The following public key can be used to verify RPM packages built and +signed by Red Hat, Inc. This key is a supporting (auxiliary) key for +Red Hat products shipped after November 2006 and for all updates to +those products. + +Questions about this key should be sent to security@redhat.com. 
+ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.2.6 (GNU/Linux) + +mQGiBEVwDGkRBACwPhZIpvkjI8wV9sFTDoqyPLx1ub8Sd/w+YuI5Ovm49mvvEQVT +VLg8FgE5JlST59AbsLDyVtRa9CxIvN5syBVrWWWtHtDnnylFBcqG/A6J3bI4E9/A +UtSL5Zxbav0+utP6f3wOpxQrxc+WIDVgpurdBKAQ3dsobGBqypeX6FXZ5wCgou6C +yZpGIBqosJaDWLzNeOfb/70D/1thLkQyhW3JJ6cHCYJHNfBShvbLWBf6S231mgmu +MyMlt8Kmipc9bw+saaAkSkVsQ/ZbfjrWB7e5kbMruKLVrH+nGhamlHYUGyAPtsPg +Uj/NUSj5BmrCsOkMpn43ngTLssE9MLhSPj2nIHGFv9B+iVLvomDdwnaBRgQ1aK8z +z6MAA/406yf5yVJ/MlTWs1/68VwDhosc9BtU1V5IE0NXgZUAfBJzzfVzzKQq6zJ2 +eZsMLhr96wbsW13zUZt1ing+ulwh2ee4meuJq6h/971JspFY/XBhcfq4qCNqVjsq +SZnWoGdCO6J8CxPIemD2IUHzjoyyeEj3RVydup6pcWZAmhzkKrQzUmVkIEhhdCwg +SW5jLiAoYXV4aWxpYXJ5IGtleSkgPHNlY3VyaXR5QHJlZGhhdC5jb20+iF4EExEC +AB4FAkVwDGkCGwMGCwkIBwMCAxUCAwMWAgECHgECF4AACgkQRWiciC+mWOC1rQCg +ooNLCFOzNPcvhd9Za8C801HmnsYAniCw3yzrCqtjYnxDDxlufH0FVTwX +=d/bm +-----END PGP PUBLIC KEY BLOCK----- + diff --git a/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo deleted file mode 100644 index cfe41f691..000000000 --- a/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo +++ /dev/null @@ -1,23 +0,0 @@ -[oso-rhui-rhel-server-extras] -name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/ - https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/ -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta -failovermethod=priority -sslverify=False -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem - -[oso-rhui-rhel-server-extras-htb] -name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras HTB -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/ - https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/ -enabled=0 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta -failovermethod=priority -sslverify=False -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem diff --git a/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo deleted file mode 100644 index ddc93193d..000000000 --- a/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo +++ /dev/null @@ -1,21 +0,0 @@ -[oso-rhui-rhel-server-releases] -name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/ - https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/ -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release -sslverify=False -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem - -[oso-rhui-rhel-server-releases-optional] -name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 - Optional -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/ - https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/ -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release -sslverify=False -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem diff --git a/roles/openshift_repos/files/online/repos/epel7-openshift.repo b/roles/openshift_repos/files/online/repos/epel7-openshift.repo new file mode 100644 
index 000000000..c7629872d --- /dev/null +++ b/roles/openshift_repos/files/online/repos/epel7-openshift.repo @@ -0,0 +1,6 @@ +[maxamillion-origin-next] +name=Copr repo for origin-next owned by maxamillion +baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/ +skip_if_unavailable=False +gpgcheck=0 +enabled=1 diff --git a/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-extras.repo new file mode 100644 index 000000000..cfe41f691 --- /dev/null +++ b/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-extras.repo @@ -0,0 +1,23 @@ +[oso-rhui-rhel-server-extras] +name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/ + https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta +failovermethod=priority +sslverify=False +sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem + +[oso-rhui-rhel-server-extras-htb] +name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras HTB +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/ + https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta +failovermethod=priority +sslverify=False +sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem diff --git a/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-server.repo new file mode 100644 index 000000000..ddc93193d --- /dev/null +++ b/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-server.repo @@ -0,0 +1,21 @@ +[oso-rhui-rhel-server-releases] +name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/ + https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release +sslverify=False +sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem + +[oso-rhui-rhel-server-releases-optional] +name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 - Optional +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/ + https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release +sslverify=False +sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem diff --git a/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo new file mode 100644 index 000000000..b4215679f --- /dev/null +++ b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo @@ -0,0 +1,11 @@ +[rhel-7-libra-candidate] +name=rhel-7-libra-candidate - \$basearch +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ + https://mirror.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ +gpgkey=https://mirror.ops.rhcloud.com/libra/RPM-GPG-KEY-redhat-openshifthosted +skip_if_unavailable=True +gpgcheck=0 +enabled=1 
+sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem +sslverify=False diff --git a/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo deleted file mode 100644 index b4215679f..000000000 --- a/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo +++ /dev/null @@ -1,11 +0,0 @@ -[rhel-7-libra-candidate] -name=rhel-7-libra-candidate - \$basearch -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ - https://mirror.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ -gpgkey=https://mirror.ops.rhcloud.com/libra/RPM-GPG-KEY-redhat-openshifthosted -skip_if_unavailable=True -gpgcheck=0 -enabled=1 -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem -sslverify=False diff --git a/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo b/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo new file mode 100644 index 000000000..0b21e0a65 --- /dev/null +++ b/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo @@ -0,0 +1,7 @@ +[maxamillion-origin-next] +name=Copr repo for origin-next owned by maxamillion +baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/ +skip_if_unavailable=True +gpgcheck=1 +gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg +enabled=1 diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index bb1551d37..12e98b7a1 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -10,10 +10,6 @@ - assert: that: openshift_deployment_type in known_openshift_deployment_types -# TODO: remove this when origin support actually works -- fail: msg="OpenShift Origin support is not currently enabled" - when: openshift_deployment_type == 'origin' - - name: Ensure libselinux-python is installed yum: pkg: libselinux-python @@ -36,17 +32,15 @@ path: "/etc/yum.repos.d/{{ item | basename }}" state: absent with_fileglob: - - '*/*' - when: not (item | search("/files/" + openshift_deployment_type + "/")) and (item | search(".repo$")) + - '*/repos/*' + when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos")) - name: Configure gpg keys if needed copy: src={{ item }} dest=/etc/pki/rpm-gpg/ with_fileglob: - - "{{ openshift_deployment_type }}/*" - when: item | basename | match("RPM-GPG-KEY-") + - "{{ openshift_deployment_type }}/gpg_keys/*" - name: Configure yum repositories copy: src={{ item }} dest=/etc/yum.repos.d/ with_fileglob: - - "{{ openshift_deployment_type }}/*" - when: item | basename | search(".*\.repo$") + - "{{ openshift_deployment_type }}/repos/*" diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2 index 7ea2c7460..2d9243545 100644 --- a/roles/openshift_repos/templates/yum_repo.j2 +++ b/roles/openshift_repos/templates/yum_repo.j2 @@ -1,4 +1,3 @@ -# {{ ansible_managed }} {% for repo in openshift_additional_repos %} [{{ repo.id }}] name={{ repo.name | default(repo.id) }} diff --git a/roles/openshift_sdn_master/tasks/main.yml b/roles/openshift_sdn_master/tasks/main.yml index f2d61043b..77e7a80ba 100644 --- a/roles/openshift_sdn_master/tasks/main.yml +++ b/roles/openshift_sdn_master/tasks/main.yml @@ -12,12 +12,21 @@ yum: pkg: openshift-sdn-master state: installed + register: install_result +- name: Reload systemd units + 
command: systemctl daemon-reload + when: install_result | changed + +# TODO: we should probably generate certs specifically for sdn - name: Configure openshift-sdn-master settings lineinfile: dest: /etc/sysconfig/openshift-sdn-master regexp: '^OPTIONS=' - line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }}\"" + line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }} -etcd-endpoints={{ openshift_sdn_master_url }} + -etcd-cafile={{ openshift_cert_dir }}/ca/ca.crt + -etcd-certfile={{ openshift_cert_dir }}/openshift-client/cert.crt + -etcd-keyfile={{ openshift_cert_dir }}/openshift-client/key.key\"" notify: - restart openshift-sdn-master diff --git a/roles/openshift_sdn_node/tasks/main.yml b/roles/openshift_sdn_node/tasks/main.yml index 729c28879..c2329dd6f 100644 --- a/roles/openshift_sdn_node/tasks/main.yml +++ b/roles/openshift_sdn_node/tasks/main.yml @@ -9,9 +9,15 @@ yum: pkg: openshift-sdn-node state: installed + register: install_result + +- name: Reload systemd units + command: systemctl daemon-reload + when: install_result | changed # TODO: we are specifying -hostname= for OPTIONS as a workaround for # openshift-sdn-node not properly detecting the hostname. +# TODO: we should probably generate certs specifically for sdn - name: Configure openshift-sdn-node settings lineinfile: dest: /etc/sysconfig/openshift-sdn-node @@ -20,7 +26,10 @@ backrefs: yes with_items: - regex: '^(OPTIONS=)' - line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}"' + line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }} + -etcd-cafile={{ openshift_node_cert_dir }}/ca.crt + -etcd-certfile={{ openshift_node_cert_dir }}/client.crt + -etcd-keyfile={{ openshift_node_cert_dir }}/client.key"' - regex: '^(MASTER_URL=)' line: '\1"{{ openshift_sdn_master_url }}"' - regex: '^(MINION_IP=)' diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py index 90588d2ae..9d0af497d 100755 --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/os_firewall/library/os_firewall_manage_iptables.py @@ -270,4 +270,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() -- cgit v1.2.3 From 34326ef782bcba8632738a40d3948bb23a3915dc Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 15 Apr 2015 09:40:38 -0400 Subject: fix missed absolute path reference to mktemp --- playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml index 1cd83f7be..a320e681e 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml @@ -6,7 +6,7 @@ failed_when: "net_info_result.rc != 0 and 'error: Network not found:' not in net_info_result.stderr" - name: Create a temp directory for the template xml file - command: "/usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX" + command: "mktemp -d /tmp/openshift-ansible-XXXXXXX" register: mktemp when: net_info_result.rc == 1 -- cgit v1.2.3 From bc0b703b70d9f36b3ad841409454dd21b1934d92 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 15 Apr 2015 17:09:30 -0400 Subject: explicitly use python2 for libvirt dynamic inventory ---
inventory/libvirt/hosts/libvirt_generic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py index 0a98e2af3..2e1437903 100755 --- a/inventory/libvirt/hosts/libvirt_generic.py +++ b/inventory/libvirt/hosts/libvirt_generic.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 """ libvirt external inventory script -- cgit v1.2.3 From 83ed87d41536f7006b3858a65a587263e3fd2b14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9na=C3=AFc=20Huard?= Date: Wed, 15 Apr 2015 23:17:30 +0200 Subject: Move `virsh pool-refresh` The `pool-refresh` command is used to ask libvirt to rescan the content of a volume pool. This is used to make `libvirt` take into account volumes that were created outside of libvirt's control, i.e. not with a `virsh` command. `pool-refresh` is useless after a `pool-create`, as the content is scanned at creation. `pool-refresh` is mandatory after having created files inside an existing pool. --- .../openshift-cluster/tasks/configure_libvirt_storage_pool.yml | 4 ---- playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 3 +++ 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml index 817acb250..b49879c6a 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml @@ -21,7 +21,3 @@ - name: Create the libvirt storage pool for openshift command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}' when: pool_info_result.rc == 1 - -- name: Refresh the libvirt storage pool for openshift - command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}' - when: pool_info_result.rc == 1 diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index 96d440096..359d0b2f3 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -36,6 +36,9 @@ creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' with_items: instances +- name: Refresh the libvirt storage pool for openshift + command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}' + - name: Create VMs drives command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2' with_items: instances -- cgit v1.2.3 From b71037de41baf06889b7a875a0e8914f940ecc2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9na=C3=AFc=20Huard?= Date: Wed, 15 Apr 2015 23:37:24 +0200 Subject: Make the error message checks locale proof MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On a computer which has a locale set, the error messages look like this: ``` $ virsh net-info foo erreur :impossible de récupérer le réseau « foo » erreur :Réseau non trouvé : no network with matching name 'foo' ``` ``` $ virsh pool-info foo erreur :impossible de récupérer le pool « foo » erreur :Pool de stockage introuvable : no storage pool with matching name 'foo' ``` The classical way to make those tests locale-proof is to force a given locale.
Like this: ``` $ LANG=POSIX virsh net-info foo error: failed to get network 'foo' error: Réseau non trouvé : no network with matching name 'foo' ``` ``` $ LANG=POSIX virsh pool-info foo error: failed to get pool 'foo' error: Pool de stockage introuvable : no storage pool with matching name 'foo' ``` It looks like the "Network not found" or "Storage pool not found" parts of the message are generated by the `libvirtd` daemon and are not subject to the locale of the `virsh` client. The clean fix consists of patching `libvirt` so that `virsh` sends its locale to the `libvirtd` daemon. But in the meantime, it is safer to have our playbook match the part of the message which is not subject to the daemon locale. --- playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml | 2 +- .../libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml index a320e681e..3117d9edc 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml @@ -3,7 +3,7 @@ command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}" register: net_info_result changed_when: False - failed_when: "net_info_result.rc != 0 and 'error: Network not found:' not in net_info_result.stderr" + failed_when: "net_info_result.rc != 0 and 'no network with matching name' not in net_info_result.stderr" - name: Create a temp directory for the template xml file command: "mktemp -d /tmp/openshift-ansible-XXXXXXX" diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml index 817acb250..10715f2b5 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml @@ -16,7 +16,7 @@ command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}" register: pool_info_result changed_when: False - failed_when: "pool_info_result.rc != 0 and 'error: Storage pool not found:' not in pool_info_result.stderr" + failed_when: "pool_info_result.rc != 0 and 'no storage pool with matching name' not in pool_info_result.stderr" - name: Create the libvirt storage pool for openshift command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}' -- cgit v1.2.3 From aaee17b0fc8feddf31d4e5b46a1bfe2f8dabf16b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9na=C3=AFc=20Huard?= Date: Thu, 16 Apr 2015 00:26:45 +0200 Subject: Fix libvirt metadata used to store ansible tags MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to https://libvirt.org/formatdomain.html#elementsMetadata, the `metadata` tag can contain only one top-level element per namespace. Because of that, libvirt stored only the `deployment-type-{{ deployment_type }}` tag. As a consequence, the dynamic inventory reported no `env-{{ cluster }}` group. This is problematic for the `terminate.yml` playbook, which iterates over `groups['tag-env-{{ cluster-id }}']`. The symptom is that `oo_hosts_to_terminate` was not defined.
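As a minimal sketch of the constraint (not part of the original commit; the domain XML and tag values below are made up for illustration), the namespaced lookup performed by the dynamic inventory only sees the tags once they sit under a single top-level `<ansible:tags>` element:

```python
# Standalone sketch of the lookup done by inventory/libvirt/hosts/libvirt_generic.py.
# The domain XML mimics what the fixed domain.xml template renders; the tag
# values here are illustrative only.
import xml.etree.ElementTree as ET

xml_desc = """
<domain>
  <metadata>
    <ansible:tags xmlns:ansible="https://github.com/ansible/ansible">
      <ansible:tag>deployment-type-origin</ansible:tag>
      <ansible:tag>env-example</ansible:tag>
    </ansible:tags>
  </metadata>
</domain>
"""

root = ET.fromstring(xml_desc)
ns = {'ansible': 'https://github.com/ansible/ansible'}
# One top-level element per namespace under <metadata>, with the individual
# tags nested beneath it -- the same XPath the fixed inventory script uses.
for tag_elem in root.findall('./metadata/ansible:tags/ansible:tag', ns):
    print(tag_elem.text)
```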
In the end, as Ansible couldn’t iterate on the value of `groups['oo_hosts_to_terminate']`, it iterated on its letters: ``` TASK: [Destroy VMs] *********************************************************** failed: [localhost] => (item=['g', 'destroy']) => {"failed": true, "item": ["g", "destroy"]} msg: virtual machine g not found failed: [localhost] => (item=['g', 'undefine']) => {"failed": true, "item": ["g", "undefine"]} msg: virtual machine g not found failed: [localhost] => (item=['r', 'destroy']) => {"failed": true, "item": ["r", "destroy"]} msg: virtual machine r not found failed: [localhost] => (item=['r', 'undefine']) => {"failed": true, "item": ["r", "undefine"]} msg: virtual machine r not found failed: [localhost] => (item=['o', 'destroy']) => {"failed": true, "item": ["o", "destroy"]} msg: virtual machine o not found failed: [localhost] => (item=['o', 'undefine']) => {"failed": true, "item": ["o", "undefine"]} msg: virtual machine o not found failed: [localhost] => (item=['u', 'destroy']) => {"failed": true, "item": ["u", "destroy"]} msg: virtual machine u not found failed: [localhost] => (item=['u', 'undefine']) => {"failed": true, "item": ["u", "undefine"]} msg: virtual machine u not found failed: [localhost] => (item=['p', 'destroy']) => {"failed": true, "item": ["p", "destroy"]} msg: virtual machine p not found failed: [localhost] => (item=['p', 'undefine']) => {"failed": true, "item": ["p", "undefine"]} msg: virtual machine p not found failed: [localhost] => (item=['s', 'destroy']) => {"failed": true, "item": ["s", "destroy"]} msg: virtual machine s not found failed: [localhost] => (item=['s', 'undefine']) => {"failed": true, "item": ["s", "undefine"]} msg: virtual machine s not found failed: [localhost] => (item=['[', 'destroy']) => {"failed": true, "item": ["[", "destroy"]} msg: virtual machine [ not found failed: [localhost] => (item=['[', 'undefine']) => {"failed": true, "item": ["[", "undefine"]} msg: virtual machine [ not found failed: [localhost] => (item=["'", 'destroy']) => {"failed": true, "item": ["'", "destroy"]} msg: virtual machine ' not found failed: [localhost] => (item=["'", 'undefine']) => {"failed": true, "item": ["'", "undefine"]} msg: virtual machine ' not found failed: [localhost] => (item=['o', 'destroy']) => {"failed": true, "item": ["o", "destroy"]} msg: virtual machine o not found failed: [localhost] => (item=['o', 'undefine']) => {"failed": true, "item": ["o", "undefine"]} msg: virtual machine o not found etc… ``` --- inventory/libvirt/hosts/libvirt_generic.py | 4 ++-- playbooks/libvirt/openshift-cluster/templates/domain.xml | 10 ++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py index 0a98e2af3..4652f112e 100755 --- a/inventory/libvirt/hosts/libvirt_generic.py +++ b/inventory/libvirt/hosts/libvirt_generic.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 """ libvirt external inventory script @@ -131,7 +131,7 @@ class LibvirtInventory(object): root = ET.fromstring(domain.XMLDesc()) ns = {'ansible': 'https://github.com/ansible/ansible'} - for tag_elem in root.findall('./metadata/ansible:tag', ns): + for tag_elem in root.findall('./metadata/ansible:tags/ansible:tag', ns): tag = tag_elem.text self.push(inventory, "tag_%s" % tag, domain_name) self.push(hostvars, 'libvirt_tags', tag) diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml index 
8cb017367..7656249da 100644 --- a/playbooks/libvirt/openshift-cluster/templates/domain.xml +++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml @@ -2,10 +2,12 @@ <name>{{ item }}</name> <memory unit='GiB'>1</memory> <metadata xmlns:ansible="https://github.com/ansible/ansible"> - <ansible:tag>deployment-type-{{ deployment_type }}</ansible:tag> - <ansible:tag>env-{{ cluster }}</ansible:tag> - <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag> - <ansible:tag>host-type-{{ type }}</ansible:tag> + <ansible:tags> + <ansible:tag>deployment-type-{{ deployment_type }}</ansible:tag> + <ansible:tag>env-{{ cluster }}</ansible:tag> + <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag> + <ansible:tag>host-type-{{ type }}</ansible:tag> + </ansible:tags> </metadata> <currentMemory unit='GiB'>1</currentMemory> <vcpu>2</vcpu> -- cgit v1.2.3 From dbb252bc04a6488c1fde05dbc325b246fd4a651e Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 15 Apr 2015 20:52:38 -0400 Subject: Fixup typos --- playbooks/aws/openshift-cluster/launch.yml | 2 +- playbooks/gce/openshift-cluster/launch.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml index e7125ea0c..a0de00fc3 100644 --- a/playbooks/aws/openshift-cluster/launch.yml +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -7,7 +7,7 @@ - vars.yml tasks: - fail: - msg: Deployment type not supported for libvirt provider yet + msg: Deployment type not supported for aws provider yet when: deployment_type == 'enterprise' - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index 34a5a0b94..771f51e91 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -6,7 +6,7 @@ vars_files: - vars.yml tasks: - - fail: msg="Deployment type not supported for libvirt provider yet" + - fail: msg="Deployment type not supported for gce provider yet" when: deployment_type == 'enterprise' -- cgit v1.2.3
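A closing aside on the `os_firewall_manage_iptables.py` hunk earlier in the series: wrapping the bare `main()` call in an `if __name__ == '__main__':` guard keeps the module importable without side effects. A minimal sketch (the file name and function body are hypothetical, for illustration only):

```python
# example_module.py -- hypothetical module, for illustration only
def main():
    print("side effects happen here")

# Without the guard, `import example_module` would run main() at import
# time; with it, main() runs only when the file is executed directly,
# e.g. `python example_module.py`.
if __name__ == '__main__':
    main()
```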