45 files changed, 1180 insertions(+), 144 deletions(-)
@@ -35,7 +35,7 @@ trap upload_journals ERR
 
 # run the actual installer
 # FIXME: override openshift_image_tag defined in the inventory until
 # https://github.com/openshift/openshift-ansible/issues/4478 is fixed.
-ansible-playbook -vvv -i .papr.inventory playbooks/byo/config.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG"
+ansible-playbook -vvv -i .papr.inventory playbooks/deploy_cluster.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG"
 
 ### DISABLING TESTS FOR NOW, SEE:
 ### https://github.com/openshift/openshift-ansible/pull/6132
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 9db0b5c98..4bd643260 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.8.0-0.13.0 ./
+3.9.0-0.4.0 ./
diff --git a/ansible.cfg b/ansible.cfg
index 9900d28f8..e4d72553e 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -30,8 +30,8 @@ inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
 # work around privilege escalation timeouts in ansible:
 timeout = 30
 
-# Uncomment to use the provided BYO inventory
-#inventory = inventory/byo/hosts.example
+# Uncomment to use the provided example inventory
+#inventory = inventory/hosts.example
 
 [inventory]
 # fail more helpfully when the inventory file does not parse (Ansible 2.4+)
diff --git a/images/installer/root/exports/manifest.json b/images/installer/root/exports/manifest.json
index 8b984d7a3..53696b03e 100644
--- a/images/installer/root/exports/manifest.json
+++ b/images/installer/root/exports/manifest.json
@@ -4,7 +4,7 @@
         "OPTS": "",
         "VAR_LIB_OPENSHIFT_INSTALLER" : "/var/lib/openshift-installer",
         "VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
-        "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml",
+        "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml",
         "HOME_ROOT": "/root",
         "ANSIBLE_CONFIG": "/usr/share/atomic-openshift-utils/ansible.cfg",
         "INVENTORY_FILE": "/dev/null"
diff --git a/inventory/byo/.gitignore b/inventory/.gitignore
index 6ff331c7e..6ff331c7e 100644
--- a/inventory/byo/.gitignore
+++ b/inventory/.gitignore
diff --git a/inventory/README.md b/inventory/README.md
index 5e26e3c32..2e348194f 100644
--- a/inventory/README.md
+++ b/inventory/README.md
@@ -1,5 +1 @@
-# OpenShift Ansible inventory config files
-
-You can install OpenShift on:
-
-* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your pre-existing hosts
+# OpenShift Ansible example inventory config files
diff --git a/inventory/byo/hosts.example b/inventory/hosts.example
index e3b56d7a1..c18a53671 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/hosts.example
@@ -1,4 +1,4 @@
-# This is an example of a bring your own (byo) host inventory
+# This is an example of an OpenShift-Ansible host inventory
 
 # Create an OSEv3 group that contains the masters and nodes groups
 [OSEv3:children]
@@ -1047,7 +1047,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # name and password AND are trying to use integration scripts.
 #
 # For example, adding this cluster as a container provider,
-# playbooks/byo/openshift-management/add_container_provider.yml
+# playbooks/openshift-management/add_container_provider.yml
 #openshift_management_username: admin
 #openshift_management_password: smartvm
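Throughout this patch the byo/config.yml entry point gives way to playbooks/deploy_cluster.yml, and the inventory/byo/ examples move up to inventory/. For tooling that still references the old path, the shim removed later in this patch (playbooks/byo/config.yml) shows the pattern: a one-line delegating playbook. A minimal sketch, assuming the wrapper lives one directory below playbooks/ as the old file did:

```
---
# Hypothetical wrapper kept only for callers that still expect the old
# byo entry point; it simply delegates to the consolidated playbook.
# The relative path is an assumption: one level below playbooks/.
- import_playbook: ../deploy_cluster.yml
```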
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/hosts.glusterfs.external.example
index acf68266e..bf2557cf0 100644
--- a/inventory/byo/hosts.byo.glusterfs.external.example
+++ b/inventory/hosts.glusterfs.external.example
@@ -1,11 +1,11 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
 # with natively hosted, containerized GlusterFS storage.
 #
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
 # cluster with GlusterFS storage, which will use that storage to create a
 # volume that will provide backend storage for a hosted Docker registry.
 #
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
 # deploy GlusterFS storage on an existing cluster. With this playbook, the
 # registry backend volume will be created but the administrator must then
 # either deploy a hosted registry or change an existing hosted registry to use
@@ -13,7 +13,7 @@
 #
 # There are additional configuration parameters that can be specified to
 # control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
 # roles/openshift_storage_glusterfs/README.md for additional details.
 
 [OSEv3:children]
@@ -44,7 +44,7 @@ node2   openshift_schedulable=True
 master
 
 # Specify the glusterfs group, which contains the nodes of the external
-# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname" 
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
 # and "glusterfs_devices" variables defined.
 #
 # The first variable indicates the hostname of the external GLusterFS node,
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/hosts.glusterfs.mixed.example
index a559dc377..8a20a037e 100644
--- a/inventory/byo/hosts.byo.glusterfs.mixed.example
+++ b/inventory/hosts.glusterfs.mixed.example
@@ -1,11 +1,11 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
 # with natively hosted, containerized GlusterFS storage.
 #
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
 # cluster with GlusterFS storage, which will use that storage to create a
 # volume that will provide backend storage for a hosted Docker registry.
 #
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
 # deploy GlusterFS storage on an existing cluster. With this playbook, the
 # registry backend volume will be created but the administrator must then
 # either deploy a hosted registry or change an existing hosted registry to use
@@ -13,7 +13,7 @@
 #
 # There are additional configuration parameters that can be specified to
 # control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
 # roles/openshift_storage_glusterfs/README.md for additional details.
 
 [OSEv3:children]
@@ -47,7 +47,7 @@ node2   openshift_schedulable=True
 master
 
 # Specify the glusterfs group, which contains the nodes of the external
-# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname" 
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
 # and "glusterfs_devices" variables defined.
 #
 # The first variable indicates the hostname of the external GLusterFS node,
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/hosts.glusterfs.native.example
index ca4765c53..59acf1194 100644
--- a/inventory/byo/hosts.byo.glusterfs.native.example
+++ b/inventory/hosts.glusterfs.native.example
@@ -1,16 +1,16 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
 # with natively hosted, containerized GlusterFS storage for applications. It
-# will also autmatically create a StorageClass for this purpose.
+# will also automatically create a StorageClass for this purpose.
 #
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
 # cluster with GlusterFS storage.
 #
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
 # deploy GlusterFS storage on an existing cluster.
 #
 # There are additional configuration parameters that can be specified to
 # control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
 # roles/openshift_storage_glusterfs/README.md for additional details.
 
 [OSEv3:children]
diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/hosts.glusterfs.registry-only.example
index 32040f593..6f33e9f6d 100644
--- a/inventory/byo/hosts.byo.glusterfs.registry-only.example
+++ b/inventory/hosts.glusterfs.registry-only.example
@@ -1,12 +1,12 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
 # with natively hosted, containerized GlusterFS storage for exclusive use
 # as storage for a natively hosted Docker registry.
 #
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
 # cluster with GlusterFS storage, which will use that storage to create a
 # volume that will provide backend storage for a hosted Docker registry.
 #
-# This inventory may also be used with byo/openshift-glusterfs/registry.yml to
+# This inventory may also be used with openshift-glusterfs/registry.yml to
 # deploy GlusterFS storage on an existing cluster. With this playbook, the
 # registry backend volume will be created but the administrator must then
 # either deploy a hosted registry or change an existing hosted registry to use
@@ -14,7 +14,7 @@
 #
 # There are additional configuration parameters that can be specified to
 # control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
 # roles/openshift_storage_glusterfs/README.md for additional details.
 
 [OSEv3:children]
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/hosts.glusterfs.storage-and-registry.example
index 9bd37cbf6..1f3a4282a 100644
--- a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
+++ b/inventory/hosts.glusterfs.storage-and-registry.example
@@ -1,12 +1,12 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
 # with natively hosted, containerized GlusterFS storage for both general
 # application use and a natively hosted Docker registry. It will also create a
 # StorageClass for the general storage.
 #
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
 # cluster with GlusterFS storage.
 #
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
 # deploy GlusterFS storage on an existing cluster. With this playbook, the
 # registry backend volume will be created but the administrator must then
 # either deploy a hosted registry or change an existing hosted registry to use
@@ -14,7 +14,7 @@
 #
 # There are additional configuration parameters that can be specified to
 # control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
 # roles/openshift_storage_glusterfs/README.md for additional details.
 
 [OSEv3:children]
diff --git a/inventory/byo/hosts.openstack b/inventory/hosts.openstack
index c648078c4..d928c2b86 100644
--- a/inventory/byo/hosts.openstack
+++ b/inventory/hosts.openstack
@@ -1,4 +1,4 @@
-# This is an example of a bring your own (byo) host inventory
+# This is an example of an OpenShift-Ansible host inventory
 
 # Create an OSEv3 group that contains the masters and nodes groups
 [OSEv3:children]
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 7d543afdd..31ae55b30 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
 Name:           openshift-ansible
 Version:        3.9.0
-Release:        0.0.0%{?dist}
+Release:        0.4.0%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
 URL:            https://github.com/openshift/openshift-ansible
@@ -67,7 +67,7 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
 # openshift-ansible-docs install
 # Install example inventory into docs/examples
 mkdir -p docs/example-inventories
-cp inventory/byo/* docs/example-inventories/
+cp inventory/* docs/example-inventories/
 # openshift-ansible-files install
 cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/
@@ -285,14 +285,144 @@ Atomic OpenShift Utilities includes
 %changelog
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.4.0
+- Copying upstream fix for ansible 2.4 ec2_group module. (kwoodson@redhat.com)
+- Add missing dependencies on openshift_facts role (sdodson@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.3.0
+- remove integration tests from tox (lmeyer@redhat.com)
+- correct ansible-playbook command syntax (jdiaz@redhat.com)
+- Add openshift_facts to upgrade plays for service_type (mgugino@redhat.com)
+- Check for openshift attribute before using it during CNS install.
+  (jmencak@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.2.0
+- GlusterFS: Add playbook doc note (jarrpa@redhat.com)
+- Fix openshift hosted registry rollout (rteague@redhat.com)
+- Remove container_runtime from the openshift_version (sdodson@redhat.com)
+
+* Fri Dec 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.1.0
+- Cleanup byo references (rteague@redhat.com)
+- openshift_node: reintroduce restart of CRI-O. (gscrivan@redhat.com)
+- container-engine: skip openshift_docker_log_driver when it is False
+  (gscrivan@redhat.com)
+- container-engine: log-opts is a dictionary in the daemon.json file
+  (gscrivan@redhat.com)
+- openshift_version: add dependency to openshift_facts (gscrivan@redhat.com)
+- openshift_version: define openshift_use_crio_only (gscrivan@redhat.com)
+- openshift_version: add dependency to container_runtime (gscrivan@redhat.com)
+- crio: define and use l_is_node_system_container (gscrivan@redhat.com)
+- Update deprecation checks - include: (rteague@redhat.com)
+- Add os_firewall to prerequisites.yml (mgugino@redhat.com)
+- add 3.8 templates for gluster ep and svc (lmeyer@redhat.com)
+- Remove openshift.common.service_type (mgugino@redhat.com)
+- Remove unused openshift_env_structures and openshift_env (mgugino@redhat.com)
+- Fix incorrect register name master registry auth (mgugino@redhat.com)
+- Include Deprecation: Convert to import_playbook (rteague@redhat.com)
+- add 3.8 templates for gluster ep and svc (m.judeikis@gmail.com)
+- Remove all uses of openshift.common.admin_binary (sdodson@redhat.com)
+- Implement container_runtime playbooks and changes (mgugino@redhat.com)
+- Playbook Consolidation - byo/config.yml (rteague@redhat.com)
+- openshift_logging_kibana: fix mixing paren (lmeyer@redhat.com)
+- Fix ami building. (kwoodson@redhat.com)
+- Include Deprecation: Convert to include_tasks (rteague@redhat.com)
+- Add missing symlinks in openshift-logging (rteague@redhat.com)
+- Fix generate_pv_pvcs_list plugin undef (mgugino@redhat.com)
+- Playbook Consolidation - etcd Upgrade (rteague@redhat.com)
+- bug 1519622. Disable rollback of ES DCs (jcantril@redhat.com)
+- Remove all references to pacemaker (pcs, pcsd) and
+  openshift.master.cluster_method. (abutcher@redhat.com)
+- Remove entry point files no longer needed by CI (rteague@redhat.com)
+- Don't check for the deployment_type (tomas@sedovic.cz)
+- Get the correct value out of openshift_release (tomas@sedovic.cz)
+- Fix oreg_auth_credentials_create register var (mgugino@redhat.com)
+- Fix and cleanup not required dns bits (bdobreli@redhat.com)
+- Fix hosted vars (mgugino@redhat.com)
+- Remove duplicate init import in network_manager.yml (rteague@redhat.com)
+- Document testing repos for dev purposes (bdobreli@redhat.com)
+- Remove unused protected_facts_to_overwrite (mgugino@redhat.com)
+- Use openshift testing repos for openstack (bdobreli@redhat.com)
+- Use openshift_release instead of ose_version (tomas@sedovic.cz)
+- Remove the ose_version check (tomas@sedovic.cz)
+- Allow number of retries in openshift_management to be configurable
+  (ealfassa@redhat.com)
+- Bumping to 3.9 (smunilla@redhat.com)
+- Cleanup unused openstack provider code (bdobreli@redhat.com)
+- Adding 3.9 tito releaser (smunilla@redhat.com)
+- Implement container runtime role (mgugino@redhat.com)
+- Fix glusterfs checkpoint info (rteague@redhat.com)
+- storage_glusterfs: fix typo (lmeyer@redhat.com)
+- Playbook Consolidation - Redeploy Certificates (rteague@redhat.com)
+- Fix tox (tomas@sedovic.cz)
+- Remove shell environment lookup (tomas@sedovic.cz)
+- Revert "Fix syntax error caused by an extra paren" (tomas@sedovic.cz)
+- Revert "Fix the env lookup fallback in rhel_subscribe" (tomas@sedovic.cz)
+- Remove reading shell environment in rhel_subscribe (tomas@sedovic.cz)
+- retry package operations (lmeyer@redhat.com)
+- Add v3.9 support (sdodson@redhat.com)
+- Playbook Consolidation - openshift-logging (rteague@redhat.com)
+- Do not escalate privileges in jks generation tasks (iacopo.rozzo@amadeus.com)
+- Fix inventory symlinks in origin-ansible container. (dgoodwin@redhat.com)
+- Initial upgrade for scale groups. (kwoodson@redhat.com)
+- Update the doc text (tomas@sedovic.cz)
+- Optionally subscribe OpenStack RHEL nodes (tomas@sedovic.cz)
+- Fix the env lookup fallback in rhel_subscribe (tomas@sedovic.cz)
+- Fix syntax error caused by an extra paren (tomas@sedovic.cz)
+- Fix no_log warnings for custom module (mgugino@redhat.com)
+- Add external_svc_subnet for k8s loadbalancer type service
+  (jihoon.o@samsung.com)
+- Remove openshift_facts project_cfg_facts (mgugino@redhat.com)
+- Remove dns_port fact (mgugino@redhat.com)
+- Bug 1512793- Fix idempotence issues in ASB deploy (fabian@fabianism.us)
+- Remove unused task file from etcd role (rteague@redhat.com)
+- fix type in authroize (jchaloup@redhat.com)
+- Use IP addresses for OpenStack nodes (tomas@sedovic.cz)
+- Update prometheus to 2.0.0 GA (zgalor@redhat.com)
+- remove schedulable from openshift_facts (mgugino@redhat.com)
+- inventory: Add example for service catalog vars (smilner@redhat.com)
+- Correct usage of include_role (rteague@redhat.com)
+- Remove openshift.common.cli_image (mgugino@redhat.com)
+- Fix openshift_env fact creation within openshift_facts. (abutcher@redhat.com)
+- Combine openshift_node and openshift_node_dnsmasq (mgugino@redhat.com)
+- GlusterFS: Remove extraneous line from glusterblock template
+  (jarrpa@redhat.com)
+- Remove openshift_clock from meta depends (mgugino@redhat.com)
+- Simplify is_master_system_container logic (mgugino@redhat.com)
+- dist.iteritems() no longer exists in Python 3. (jpazdziora@redhat.com)
+- Remove spurrious file committed by error (diego.abelenda@camptocamp.com)
+- Fix name of the service pointed to by hostname
+  (diego.abelenda@camptocamp.com)
+- Missed the default value after the variable name change...
+  (diego.abelenda@camptocamp.com)
+- Change the name of the variable and explicitely document the names
+  (diego.abelenda@camptocamp.com)
+- Allow to set the hostname for routes to prometheus and alertmanager
+  (diego.abelenda@camptocamp.com)
+- Allow openshift_install_examples to be false (michael.fraenkel@gmail.com)
+- Include Deprecation - openshift-service-catalog (rteague@redhat.com)
+- Remove is_openvswitch_system_container from facts (mgugino@redhat.com)
+- Workaround the fact that package state=present with dnf fails for already
+  installed but excluded packages. (jpazdziora@redhat.com)
+- With dnf repoquery and excluded packages, --disableexcludes=all is needed to
+  list the package with --installed. (jpazdziora@redhat.com)
+- Add support for external glusterfs as registry backend (m.judeikis@gmail.com)
+- cri-o: honor additional and insecure registries again (gscrivan@redhat.com)
+- docker: copy Docker metadata to the alternative storage path
+  (gscrivan@redhat.com)
+- Add check for gluterFS DS to stop restarts (m.judeikis@gmail.com)
+- Bug 1514417 - Adding correct advertise-client-urls (shawn.hurley21@gmail.com)
+- Uninstall tuned-profiles-atomic-openshift-node as defined in origin.spec
+  (jmencak@redhat.com)
+- Mod startup script to publish all frontend binds (cwilkers@redhat.com)
 
 * Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.13.0
--
+-
 
 * Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.12.0
--
+-
 
 * Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.11.0
--
+-
 
 * Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.10.0
 - tox.ini: simplify unit test reqs (lmeyer@redhat.com)
@@ -341,16 +471,16 @@ Atomic OpenShift Utilities includes
 - Include Deprecation - Init Playbook Paths (rteague@redhat.com)
 
 * Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.8.0
--
+-
 
 * Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.7.0
--
+-
 
 * Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.6.0
--
+-
 
 * Sun Nov 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.5.0
--
+-
 
 * Sun Nov 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.4.0
 - bug 1498398. Enclose content between store tag (rromerom@redhat.com)
@@ -643,10 +773,10 @@ Atomic OpenShift Utilities includes
 - Allow cluster IP for docker-registry service to be set (hansmi@vshn.ch)
 
 * Thu Nov 09 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.5-1
--
+-
 
 * Wed Nov 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.4-1
--
+-
 
 * Wed Nov 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.3-1
 - Adding configuration for keeping transient namespace on error.
@@ -816,10 +946,10 @@ Atomic OpenShift Utilities includes
 - GlusterFS: Remove image option from heketi command (jarrpa@redhat.com)
 
 * Mon Oct 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.187.0
--
+-
 
 * Sun Oct 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.186.0
--
+-
 
 * Sat Oct 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.185.0
 - bug 1506073. Lower cpu request for logging when it exceeds limit
@@ -849,7 +979,7 @@ Atomic OpenShift Utilities includes
 - Refactor health check playbooks (rteague@redhat.com)
 
 * Fri Oct 27 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.183.0
--
+-
 
 * Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.182.0
 - Fixing documentation for the cert_key_path variable name.
@@ -923,16 +1053,16 @@ Atomic OpenShift Utilities includes
   (hansmi@vshn.ch)
 
 * Mon Oct 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.175.0
--
+-
 
 * Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.174.0
--
+-
 
 * Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.173.0
--
+-
 
 * Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.172.0
--
+-
 
 * Sat Oct 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.171.0
 - Use "requests" for CPU resources instead of limits
@@ -956,16 +1086,16 @@ Atomic OpenShift Utilities includes
   (dymurray@redhat.com)
 
 * Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.168.0
--
+-
 
 * Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.167.0
--
+-
 
 * Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.166.0
--
+-
 
 * Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.165.0
--
+-
 
 * Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.164.0
 - Change to service-signer.crt for template_service_broker CA_BUNDLE
@@ -988,7 +1118,7 @@ Atomic OpenShift Utilities includes
 - Remove unneeded master config updates during upgrades (mgugino@redhat.com)
 
 * Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.161.0
--
+-
 
 * Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.160.0
 - Fix pvc selector default to be empty dict instead of string
@@ -1030,16 +1160,16 @@ Atomic OpenShift Utilities includes
   (jchaloup@redhat.com)
 
 * Sun Oct 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.155.0
--
+-
 
 * Sat Oct 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.154.0
--
+-
 
 * Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.153.0
 - default groups.oo_new_etcd_to_config to an empty list (jchaloup@redhat.com)
 
 * Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.152.0
--
+-
 
 * Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.151.0
 - updated dynamic provision section for openshift metrics to support storage
@@ -1448,7 +1578,7 @@ Atomic OpenShift Utilities includes
 - oc_atomic_container: support Skopeo output (gscrivan@redhat.com)
 
 * Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.125.0
--
+-
 
 * Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.124.0
 - Fix ansible_syntax check (rteague@redhat.com)
@@ -1475,7 +1605,7 @@ Atomic OpenShift Utilities includes
   (miciah.masters@gmail.com)
 
 * Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.123.0
--
+-
 
 * Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.122.0
 - Update openshift_hosted_routers example to be in ini format.
@@ -1537,10 +1667,10 @@ Atomic OpenShift Utilities includes
 - Add missing hostnames to registry cert (sdodson@redhat.com)
 
 * Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.115.0
--
+-
 
 * Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.114.0
--
+-
 
 * Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.113.0
 - openshift_version: enterprise accepts new style pre-release
@@ -1558,13 +1688,13 @@ Atomic OpenShift Utilities includes
 - Setup tuned profiles in /etc/tuned (jmencak@redhat.com)
 
 * Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.109.0
--
+-
 
 * Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.108.0
--
+-
 
 * Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.107.0
--
+-
 
 * Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.106.0
 - Add dotnet 2.0 to v3.6 (sdodson@redhat.com)
@@ -1601,13 +1731,13 @@ Atomic OpenShift Utilities includes
   (sdodson@redhat.com)
 
 * Sat Aug 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.103.0
--
+-
 
 * Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.102.0
--
+-
 
 * Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.101.0
--
+-
 
 * Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.100.0
 - Change memory requests and limits units (mak@redhat.com)
@@ -1906,13 +2036,13 @@ Atomic OpenShift Utilities includes
   (kwoodson@redhat.com)
 
 * Mon Jul 17 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.152-1
--
+-
 
 * Sun Jul 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.151-1
--
+-
 
 * Sun Jul 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.150-1
--
+-
 
 * Sat Jul 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.149-1
 - Config was missed before replace. (jkaur@redhat.com)
@@ -1935,7 +2065,7 @@ Atomic OpenShift Utilities includes
 - GlusterFS: Fix SSH-based heketi configuration (jarrpa@redhat.com)
 
 * Wed Jul 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.143-1
--
+-
 
 * Wed Jul 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.142-1
 - add scheduled pods check (jvallejo@redhat.com)
@@ -1960,7 +2090,7 @@ Atomic OpenShift Utilities includes
 - updating fetch tasks to be flat paths (ewolinet@redhat.com)
 
 * Mon Jul 10 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.140-1
--
+-
 
 * Sat Jul 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.139-1
 - increase implicit 300s default timeout to explicit 600s (jchaloup@redhat.com)
@@ -2008,7 +2138,7 @@ Atomic OpenShift Utilities includes
 - Fully qualify ocp ansible_service_broker_image_prefix (sdodson@redhat.com)
 
 * Wed Jul 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.134-1
--
+-
 
 * Tue Jul 04 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.133-1
 - etcd, syscontainer: fix copy of existing datastore (gscrivan@redhat.com)
@@ -2020,7 +2150,7 @@ Atomic OpenShift Utilities includes
 - Fixes to storage migration (sdodson@redhat.com)
 
 * Mon Jul 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.132-1
--
+-
 
 * Sun Jul 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.131-1
 - Fix upgrade (sdodson@redhat.com)
@@ -2161,7 +2291,7 @@ Atomic OpenShift Utilities includes
 - bug 1457642. Use same SG index to avoid seeding timeout (jcantril@redhat.com)
 
 * Wed Jun 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.122-1
--
+-
 
 * Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.121-1
 - Updating default from null to "" (ewolinet@redhat.com)
@@ -2205,7 +2335,7 @@ Atomic OpenShift Utilities includes
 - CloudForms 4.5 templates (simaishi@redhat.com)
 
 * Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.114-1
--
+-
 
 * Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.113-1
 - Make rollout status check best-effort, add poll (skuznets@redhat.com)
@@ -2267,7 +2397,7 @@ Atomic OpenShift Utilities includes
 - singletonize some role tasks that repeat a lot (lmeyer@redhat.com)
 
 * Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.109-1
--
+-
 
 * Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.108-1
 - Upgraded Calico to 2.2.1 Release (vincent.schwarzer@yahoo.de)
@@ -2323,7 +2453,7 @@ Atomic OpenShift Utilities includes
 - Install default storageclass in AWS & GCE envs (hekumar@redhat.com)
 
 * Fri Jun 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.98-1
--
+-
 
 * Fri Jun 09 2017 Scott Dodson <sdodson@redhat.com> 3.6.97-1
 - Updated to using oo_random_word for secret gen (ewolinet@redhat.com)
@@ -2355,7 +2485,7 @@ Atomic OpenShift Utilities includes
   loopback kubeconfigs. (abutcher@redhat.com)
 
 * Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.2-1
--
+-
 
 * Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.1-1
 - Updating image for registry_console (ewolinet@redhat.com)
@@ -2602,13 +2732,13 @@ Atomic OpenShift Utilities includes
 - Fix additional master cert & client config creation. (abutcher@redhat.com)
 
 * Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.62-1
--
+-
 
 * Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.61-1
--
+-
 
 * Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.60-1
--
+-
 
 * Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.59-1
 - Updating logging and metrics to restart api, ha and controllers when updating
@@ -2621,10 +2751,10 @@ Atomic OpenShift Utilities includes
 - Moving Dockerfile content to images dir (jupierce@redhat.com)
 
 * Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.57-1
--
+-
 
 * Sun May 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.56-1
--
+-
 
 * Sat May 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.55-1
 - Fix 1448368, and some other minors issues (ghuang@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index ed7a7bd1a..9f044c089 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -123,7 +123,7 @@
         - origin-clients
         - origin-node
         - origin-sdn-ovs
-        - tuned-profiles-openshift-node
+        - tuned-profiles-atomic-openshift-node
        - tuned-profiles-origin-node
         register: result
         until: result | success
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 417fb539a..d203b9cda 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -75,7 +75,7 @@ If customization is required for the instances, scale groups, or any other confi
 In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory.  This enables us to create the AMI using the openshift-ansible node roles.
 This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as deploying a cluster
 using the normal openshift-ansible method.  See provisioning-inventory.example.ini for an example.
 
-There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
+There are more examples of cluster inventory settings [`here`](../../inventory/).
 
 #### Step 0 (optional)
@@ -134,11 +134,11 @@ At this point we have successfully created the infrastructure including the mast
 Now it is time to install Openshift using the openshift-ansible installer.  This can be achieved by running the following playbook:
 
 ```
-$ ansible-playbook -i inventory.yml install.yml @provisioning_vars.yml
+$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml
 ```
 This playbook accomplishes the following:
 1. Builds a dynamic inventory file by querying AWS.
-2. Runs the [`byo`](../../common/openshift-cluster/config.yml)
+2. Runs the [`deploy_cluster.yml`](../deploy_cluster.yml)
 
 Once this playbook completes, the cluster masters should be installed and configured.
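The corrected command above passes the variables file with `-e @provisioning_vars.yml`, Ansible's syntax for loading extra variables from a YAML file. A minimal sketch of such a file; the variable names are illustrative assumptions, not the authoritative list (the AWS playbooks ship their own provisioning vars example):

```
---
# Illustrative only: these variable names are assumptions for the sketch;
# consult the provisioning vars example under playbooks/aws/ for real keys.
openshift_deployment_type: origin   # or openshift-enterprise
openshift_aws_clusterid: mycluster
openshift_aws_region: us-east-1
```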
diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml
deleted file mode 100644
index 4b74e5bce..000000000
--- a/playbooks/byo/config.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# TODO (rteague): Temporarily leaving this playbook to allow CI tests to operate until CI jobs are updated.
-- import_playbook: ../deploy_cluster.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 5c6def484..fcb828808 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -32,6 +32,7 @@
   any_errors_fatal: true
 
   roles:
+  - openshift_facts
   - lib_openshift
 
   tasks:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 0c1a99272..9ec788e76 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -121,6 +121,8 @@
 - name: Cycle all controller services to force new leader election mode
   hosts: oo_masters_to_config
   gather_facts: no
+  roles:
+  - role: openshift_facts
   tasks:
   - name: Stop {{ openshift_service_type }}-master-controllers
     systemd:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 9dcad352c..ad67b6c44 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -125,6 +125,8 @@
 - name: Cycle all controller services to force new leader election mode
   hosts: oo_masters_to_config
   gather_facts: no
+  roles:
+  - role: openshift_facts
   tasks:
   - name: Stop {{ openshift_service_type }}-master-controllers
     systemd:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index ead2efbd0..60ec79df5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -121,6 +121,8 @@
 - name: Cycle all controller services to force new leader election mode
   hosts: oo_masters_to_config
   gather_facts: no
+  roles:
+  - role: openshift_facts
   tasks:
   - name: Stop {{ openshift_service_type }}-master-controllers
     systemd:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index ae37b1359..c1a3f64f2 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -125,6 +125,8 @@
 - name: Cycle all controller services to force new leader election mode
   hosts: oo_masters_to_config
   gather_facts: no
+  roles:
+  - role: openshift_facts
   tasks:
   - name: Stop {{ openshift_service_type }}-master-controllers
     systemd:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index eb688f189..1e704b66c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -127,6 +127,8 @@
 - name: Cycle all controller services to force new leader election mode
   hosts: oo_masters_to_config
   gather_facts: no
+  roles:
+  - role: openshift_facts
   tasks:
   - name: Stop {{ openshift.common.service_type }}-master-controllers
     systemd:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 983bb4a63..a9689da1f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -131,6 +131,8 @@
 - name: Cycle all controller services to force new leader election mode
   hosts: oo_masters_to_config
   gather_facts: no
+  roles:
+  - role: openshift_facts
   tasks:
   - name: Stop {{ openshift.common.service_type }}-master-controllers
     systemd:
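All of the upgrade plays above run with `gather_facts: no` yet still template `{{ openshift_service_type }}` (or `{{ openshift.common.service_type }}`), which is why each now lists the openshift_facts role explicitly. The resulting play shape, as a sketch; the `systemd:` task body is an assumption, since the hunks truncate right after it:

```
- name: Cycle all controller services to force new leader election mode
  hosts: oo_masters_to_config
  gather_facts: no
  roles:
  - role: openshift_facts   # supplies openshift_service_type despite gather_facts: no
  tasks:
  - name: Stop {{ openshift_service_type }}-master-controllers
    systemd:
      # assumed parameters -- the diff hunks end at "systemd:"
      name: "{{ openshift_service_type }}-master-controllers"
      state: stopped
```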
diff --git a/playbooks/openshift-glusterfs/README.md b/playbooks/openshift-glusterfs/README.md
index f62aea229..107bbfff6 100644
--- a/playbooks/openshift-glusterfs/README.md
+++ b/playbooks/openshift-glusterfs/README.md
@@ -26,6 +26,9 @@ file. The hosts in this group are the nodes of the GlusterFS cluster.
    devices but you must specify the following variables in `[OSEv3:vars]`:
    * `openshift_storage_glusterfs_is_missing=False`
    * `openshift_storage_glusterfs_heketi_is_missing=False`
+ * If GlusterFS will be running natively, the target hosts must also be listed
+   in the `nodes` group. They must also already be configured as OpenShift
+   nodes before this playbook runs.
 
 By default, pods for a native GlusterFS cluster will be created in the
 `default` namespace. To change this, specify
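The note added to the README means a natively hosted GlusterFS host is listed twice: once under `nodes` (already configured as an OpenShift node) and once under `glusterfs`. A hypothetical fragment, written as a YAML-format inventory for compactness (the shipped examples use INI; host and device names are made up):

```
all:
  children:
    nodes:
      hosts:
        node11.example.com: {}          # must already be an OpenShift node
    glusterfs:
      hosts:
        node11.example.com:             # same host, repeated for GlusterFS
          glusterfs_devices: '[ "/dev/vdb" ]'
```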
diff --git a/playbooks/openshift-logging/config.yml b/playbooks/openshift-logging/config.yml
index d71b4f1c5..83d330284 100644
--- a/playbooks/openshift-logging/config.yml
+++ b/playbooks/openshift-logging/config.yml
@@ -1,7 +1,7 @@
 ---
 #
 # This playbook is a preview of upcoming changes for installing
-# Hosted logging on.  See inventory/byo/hosts.*.example for the
+# Hosted logging on.  See inventory/hosts.example for the
 # currently supported method.
 #
 - import_playbook: ../init/main.yml
diff --git a/playbooks/openshift-master/private/redeploy-openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
index 2a190935e..9f5502141 100644
--- a/playbooks/openshift-master/private/redeploy-openshift-ca.yml
+++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
@@ -56,7 +56,7 @@
     - groups.oo_etcd_to_config | default([]) | length == 0
     - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt'
   # Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate.
-  # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml
+  # This change will be reverted in playbooks/redeploy-certificates.yml
   - modify_yaml:
       dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
       yaml_key: servingInfo.clientCA
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index f567242cd..d361d6278 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -226,7 +226,7 @@ advanced configuration:
 [hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware
 [origin]: https://www.openshift.org/
 [centos7]: https://www.centos.org/
-[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.example
+[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example
 [advanced-configuration]: ./advanced-configuration.md
 [accessing-openshift]: ./advanced-configuration.md#accessing-the-openshift-cluster
 [uninstall-openshift]: ./advanced-configuration.md#removing-the-openshift-cluster
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index db2a13d38..403e0e1a7 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -343,7 +343,7 @@ installation for example by specifying the authentication.
 The full list of options is available in this sample inventory:
 
-https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example
+https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example
 
 Note, that in order to deploy OpenShift origin, you should update the following
 variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`:
@@ -604,7 +604,7 @@ A library of custom post-provision actions exists in `openshift-ansible-contrib/
 Once it succeeds, you can install openshift by running:
 
-    ansible-playbook openshift-ansible/playbooks/byo/config.yml
+    ansible-playbook openshift-ansible/playbooks/deploy_cluster.yml
 
 ## Access UI
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 879ca4f4e..f2e1fc310 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -19,3 +19,4 @@ dependencies:
 - role: lib_openshift
 - role: lib_os_firewall
 - role: lib_utils
+- role: openshift_facts
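The vendored module that follows backports the Ansible 2.4 `ec2_group` rule syntax described in its DOCUMENTATION block: its rules_expand_ports() and rules_expand_sources() helpers flatten one compact rule into one rule per (port range, source) pair. A hypothetical illustration of that expansion:

```
# One compact rule...
- proto: tcp
  ports:
    - 80
    - 8080-8099
  cidr_ip:
    - 10.0.0.0/8
    - 192.168.0.0/16
# ...expands to the cartesian product of port ranges and sources:
- { proto: tcp, from_port: 80,   to_port: 80,   cidr_ip: 10.0.0.0/8 }
- { proto: tcp, from_port: 80,   to_port: 80,   cidr_ip: 192.168.0.0/16 }
- { proto: tcp, from_port: 8080, to_port: 8099, cidr_ip: 10.0.0.0/8 }
- { proto: tcp, from_port: 8080, to_port: 8099, cidr_ip: 192.168.0.0/16 }
```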
diff --git a/roles/lib_utils/library/oo_ec2_group.py b/roles/lib_utils/library/oo_ec2_group.py
new file mode 100644
index 000000000..615021ac5
--- /dev/null
+++ b/roles/lib_utils/library/oo_ec2_group.py
@@ -0,0 +1,903 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: skip-file
+# flake8: noqa
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: ec2_group
+author: "Andrew de Quincey (@adq)"
+version_added: "1.3"
+requirements: [ boto3 ]
+short_description: maintain an ec2 VPC security group.
+description:
+    - maintains ec2 security groups. This module has a dependency on python-boto >= 2.5
+options:
+  name:
+    description:
+      - Name of the security group.
+      - One of and only one of I(name) or I(group_id) is required.
+      - Required if I(state=present).
+    required: false
+  group_id:
+    description:
+      - Id of group to delete (works only with absent).
+      - One of and only one of I(name) or I(group_id) is required.
+    required: false
+    version_added: "2.4"
+  description:
+    description:
+      - Description of the security group. Required when C(state) is C(present).
+    required: false
+  vpc_id:
+    description:
+      - ID of the VPC to create the group in.
+    required: false
+  rules:
+    description:
+      - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
+        no inbound rules will be enabled. Rules list may include its own name in `group_name`.
+        This allows idempotent loopback additions (e.g. allow group to access itself).
+        Rule sources list support was added in version 2.4. This allows to define multiple sources per
+        source type as well as multiple source types per rule. Prior to 2.4 an individual source is allowed.
+    required: false
+  rules_egress:
+    description:
+      - List of firewall outbound rules to enforce in this group (see example). If none are supplied,
+        a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
+        Rule Egress sources list support was added in version 2.4.
+    required: false
+    version_added: "1.6"
+  state:
+    version_added: "1.4"
+    description:
+      - Create or delete a security group
+    required: false
+    default: 'present'
+    choices: [ "present", "absent" ]
+    aliases: []
+  purge_rules:
+    version_added: "1.8"
+    description:
+      - Purge existing rules on security group that are not found in rules
+    required: false
+    default: 'true'
+    aliases: []
+  purge_rules_egress:
+    version_added: "1.8"
+    description:
+      - Purge existing rules_egress on security group that are not found in rules_egress
+    required: false
+    default: 'true'
+    aliases: []
+  tags:
+    version_added: "2.4"
+    description:
+      - A dictionary of one or more tags to assign to the security group.
+    required: false
+  purge_tags:
+    version_added: "2.4"
+    description:
+      - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then
+        tags will not be modified.
+    required: false
+    default: yes
+    choices: [ 'yes', 'no' ]
+
+extends_documentation_fragment:
+    - aws
+    - ec2
+
+notes:
+  - If a rule declares a group_name and that group doesn't exist, it will be
+    automatically created. In that case, group_desc should be provided as well.
+    The module will refuse to create a depended-on group without a description.
+'''
+
+EXAMPLES = '''
+- name: example ec2 group
+  ec2_group:
+    name: example
+    description: an example EC2 group
+    vpc_id: 12345
+    region: eu-west-1
+    aws_secret_key: SECRET
+    aws_access_key: ACCESS
+    rules:
+      - proto: tcp
+        from_port: 80
+        to_port: 80
+        cidr_ip: 0.0.0.0/0
+      - proto: tcp
+        from_port: 22
+        to_port: 22
+        cidr_ip: 10.0.0.0/8
+      - proto: tcp
+        from_port: 443
+        to_port: 443
+        group_id: amazon-elb/sg-87654321/amazon-elb-sg
+      - proto: tcp
+        from_port: 3306
+        to_port: 3306
+        group_id: 123412341234/sg-87654321/exact-name-of-sg
+      - proto: udp
+        from_port: 10050
+        to_port: 10050
+        cidr_ip: 10.0.0.0/8
+      - proto: udp
+        from_port: 10051
+        to_port: 10051
+        group_id: sg-12345678
+      - proto: icmp
+        from_port: 8 # icmp type, -1 = any type
+        to_port:  -1 # icmp subtype, -1 = any subtype
+        cidr_ip: 10.0.0.0/8
+      - proto: all
+        # the containing group name may be specified here
+        group_name: example
+    rules_egress:
+      - proto: tcp
+        from_port: 80
+        to_port: 80
+        cidr_ip: 0.0.0.0/0
+        cidr_ipv6: 64:ff9b::/96
+        group_name: example-other
+        # description to use if example-other needs to be created
+        group_desc: other example EC2 group
+
+- name: example2 ec2 group
+  ec2_group:
+    name: example2
+    description: an example2 EC2 group
+    vpc_id: 12345
+    region: eu-west-1
+    rules:
+      # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
+      - proto: tcp
+        ports: 22
+        group_name: example-vpn
+      - proto: tcp
+        ports:
+          - 80
+          - 443
+          - 8080-8099
+        cidr_ip: 0.0.0.0/0
+      # Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule.
+      - proto: tcp
+        ports:
+          - 6379
+          - 26379
+        group_name:
+          - example-vpn
+          - example-redis
+      - proto: tcp
+        ports: 5665
+        group_name: example-vpn
+        cidr_ip:
+          - 172.16.1.0/24
+          - 172.16.17.0/24
+        cidr_ipv6:
+          - 2607:F8B0::/32
+          - 64:ff9b::/96
+        group_id:
+          - sg-edcd9784
+
+- name: "Delete group by its id"
+  ec2_group:
+    group_id: sg-33b4ee5b
+    state: absent
+'''
+
+RETURN = '''
+group_name:
+  description: Security group name
+  sample: My Security Group
+  type: string
+  returned: on create/update
+group_id:
+  description: Security group id
+  sample: sg-abcd1234
+  type: string
+  returned: on create/update
+description:
+  description: Description of security group
+  sample: My Security Group
+  type: string
+  returned: on create/update
+tags:
+  description: Tags associated with the security group
+  sample:
+    Name: My Security Group
+    Purpose: protecting stuff
+  type: dict
+  returned: on create/update
+vpc_id:
+  description: ID of VPC to which the security group belongs
+  sample: vpc-abcd1234
+  type: string
+  returned: on create/update
+ip_permissions:
+  description: Inbound rules associated with the security group.
+  sample:
+    - from_port: 8182
+      ip_protocol: tcp
+      ip_ranges:
+        - cidr_ip: "1.1.1.1/32"
+      ipv6_ranges: []
+      prefix_list_ids: []
+      to_port: 8182
+      user_id_group_pairs: []
+  type: list
+  returned: on create/update
+ip_permissions_egress:
+  description: Outbound rules associated with the security group.
+  sample:
+    - ip_protocol: -1
+      ip_ranges:
+        - cidr_ip: "0.0.0.0/0"
+          ipv6_ranges: []
+          prefix_list_ids: []
+          user_id_group_pairs: []
+  type: list
+  returned: on create/update
+owner_id:
+  description: AWS Account ID of the security group
+  sample: 123456789012
+  type: int
+  returned: on create/update
+'''
+
+import json
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn
+from ansible.module_utils.ec2 import get_aws_connection_info
+from ansible.module_utils.ec2 import ec2_argument_spec
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.ec2 import HAS_BOTO3
+from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
+from ansible.module_utils.ec2 import AWSRetry
+import traceback
+
+try:
+    import botocore
+except ImportError:
+    pass  # caught by imported HAS_BOTO3
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_security_groups_with_backoff(connection, **kwargs):
+    return connection.describe_security_groups(**kwargs)
+
+
+def deduplicate_rules_args(rules):
+    """Returns unique rules"""
+    if rules is None:
+        return None
+    return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
+
+
+def make_rule_key(prefix, rule, group_id, cidr_ip):
+    if 'proto' in rule:
+        proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')]
+    elif 'IpProtocol' in rule:
+        proto, from_port, to_port = [rule.get(x, None) for x in ('IpProtocol', 'FromPort', 'ToPort')]
+    if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1:
+        from_port = 'none'
+        to_port = 'none'
+    key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip)
+    return key.lower().replace('-none', '-None')
+
+
+def add_rules_to_lookup(ipPermissions, group_id, prefix, dict):
+    for rule in ipPermissions:
+        for groupGrant in rule.get('UserIdGroupPairs', []):
+            dict[make_rule_key(prefix, rule, group_id, groupGrant.get('GroupId'))] = (rule, groupGrant)
+        for ipv4Grants in rule.get('IpRanges', []):
+            dict[make_rule_key(prefix, rule, group_id, ipv4Grants.get('CidrIp'))] = (rule, ipv4Grants)
+        for ipv6Grants in rule.get('Ipv6Ranges', []):
+            dict[make_rule_key(prefix, rule, group_id, ipv6Grants.get('CidrIpv6'))] = (rule, ipv6Grants)
+
+
+def validate_rule(module, rule):
+    VALID_PARAMS = ('cidr_ip', 'cidr_ipv6',
+                    'group_id', 'group_name', 'group_desc',
+                    'proto', 'from_port', 'to_port')
+    if not isinstance(rule, dict):
+        module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
+    for k in rule:
+        if k not in VALID_PARAMS:
+            module.fail_json(msg='Invalid rule parameter \'{}\''.format(k))
+
+    if 'group_id' in rule and 'cidr_ip' in rule:
+        module.fail_json(msg='Specify group_id OR cidr_ip, not both')
+    elif 'group_name' in rule and 'cidr_ip' in rule:
+        module.fail_json(msg='Specify group_name OR cidr_ip, not both')
+    elif 'group_id' in rule and 'cidr_ipv6' in rule:
+        module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+    elif 'group_name' in rule and 'cidr_ipv6' in rule:
+        module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+    elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+        module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+    elif 'group_id' in rule and 'group_name' in rule:
+        module.fail_json(msg='Specify group_id OR group_name, not both')
+
+
+def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
+    """
+    Returns tuple of (group_id, ip) after validating rule params.
+
+    rule: Dict describing a rule.
+    name: Name of the security group being managed.
+    groups: Dict of all available security groups.
+
+    AWS accepts an ip range or a security group as target of a rule. This
+    function validate the rule specification and return either a non-None
+    group_id or a non-None ip range.
+    """
+
+    FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)'
+    group_id = None
+    group_name = None
+    ip = None
+    ipv6 = None
+    target_group_created = False
+
+    if 'group_id' in rule and 'cidr_ip' in rule:
+        module.fail_json(msg="Specify group_id OR cidr_ip, not both")
+    elif 'group_name' in rule and 'cidr_ip' in rule:
+        module.fail_json(msg="Specify group_name OR cidr_ip, not both")
+    elif 'group_id' in rule and 'cidr_ipv6' in rule:
+        module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+    elif 'group_name' in rule and 'cidr_ipv6' in rule:
+        module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+    elif 'group_id' in rule and 'group_name' in rule:
+        module.fail_json(msg="Specify group_id OR group_name, not both")
+    elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+        module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+    elif rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
+        # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
+        owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
+        group_instance = dict(GroupId=group_id, GroupName=group_name)
+        groups[group_id] = group_instance
+        groups[group_name] = group_instance
+    elif 'group_id' in rule:
+        group_id = rule['group_id']
+    elif 'group_name' in rule:
+        group_name = rule['group_name']
+        if group_name == name:
+            group_id = group['GroupId']
+            groups[group_id] = group
+            groups[group_name] = group
+        elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
+            # both are VPC groups, this is ok
+            group_id = groups[group_name]['GroupId']
+        elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
+            # both are EC2 classic, this is ok
+            group_id = groups[group_name]['GroupId']
+        else:
+            # if we got here, either the target group does not exist, or there
+            # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
+            # is bad, so we have to create a new SG because no compatible group
+            # exists
+            if not rule.get('group_desc', '').strip():
+                module.fail_json(msg="group %s will be automatically created by rule %s and "
+                                     "no description was provided" % (group_name, rule))
+            if not module.check_mode:
+                params = dict(GroupName=group_name, Description=rule['group_desc'])
+                if vpc_id:
+                    params['VpcId'] = vpc_id
+                auto_group = client.create_security_group(**params)
+                group_id = auto_group['GroupId']
+                groups[group_id] = auto_group
+                groups[group_name] = auto_group
+            target_group_created = True
+    elif 'cidr_ip' in rule:
+        ip = rule['cidr_ip']
+    elif 'cidr_ipv6' in rule:
+        ipv6 = rule['cidr_ipv6']
+
+    return group_id, ip, ipv6, target_group_created
+
+
+def ports_expand(ports):
+    # takes a list of ports and returns a list of (port_from, port_to)
+    ports_expanded = []
+    for port in ports:
+        if not isinstance(port, str):
+            ports_expanded.append((port,) * 2)
+        elif '-' in port:
+            ports_expanded.append(tuple(p.strip() for p in port.split('-', 1)))
+        else:
+            ports_expanded.append((port.strip(),) * 2)
+
+    return ports_expanded
+
+
+def rule_expand_ports(rule):
+    # takes a rule dict and returns a list of expanded rule dicts
+    if 'ports' not in rule:
+        return [rule]
+
+    ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
+
+    rule_expanded = []
+    for from_to in ports_expand(ports):
+        temp_rule = rule.copy()
+        del temp_rule['ports']
+        temp_rule['from_port'], temp_rule['to_port'] = from_to
+        rule_expanded.append(temp_rule)
+
+    return rule_expanded
+
+
+def rules_expand_ports(rules):
+    # takes a list of rules and expands it based on 'ports'
+    if not rules:
+        return rules
+
+    return [rule for rule_complex in rules
+            for rule in rule_expand_ports(rule_complex)]
+
+
+def rule_expand_source(rule, source_type):
+    # takes a rule dict and returns a list of expanded rule dicts for specified source_type
+    sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
isinstance(rule[source_type], list) else [rule[source_type]]
+    source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name')
+
+    rule_expanded = []
+    for source in sources:
+        temp_rule = rule.copy()
+        for s in source_types_all:
+            temp_rule.pop(s, None)
+        temp_rule[source_type] = source
+        rule_expanded.append(temp_rule)
+
+    return rule_expanded
+
+
+def rule_expand_sources(rule):
+    # takes a rule dict and returns a list of expanded rule dicts
+    source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name') if stype in rule)
+
+    return [r for stype in source_types
+            for r in rule_expand_source(rule, stype)]
+
+
+def rules_expand_sources(rules):
+    # takes a list of rules and expands it based on 'cidr_ip', 'cidr_ipv6', 'group_id', 'group_name'
+    if not rules:
+        return rules
+
+    return [rule for rule_complex in rules
+            for rule in rule_expand_sources(rule_complex)]
+
+
+def authorize_ip(type, changed, client, group, groupRules,
+                 ip, ip_permission, module, rule, ethertype):
+    # If rule already exists, don't later delete it
+    for thisip in ip:
+        rule_id = make_rule_key(type, rule, group['GroupId'], thisip)
+        if rule_id in groupRules:
+            del groupRules[rule_id]
+        else:
+            if not module.check_mode:
+                ip_permission = serialize_ip_grant(rule, thisip, ethertype)
+                if ip_permission:
+                    try:
+                        if type == "in":
+                            client.authorize_security_group_ingress(GroupId=group['GroupId'],
+                                                                    IpPermissions=[ip_permission])
+                        elif type == "out":
+                            client.authorize_security_group_egress(GroupId=group['GroupId'],
+                                                                   IpPermissions=[ip_permission])
+                    except botocore.exceptions.ClientError as e:
+                        module.fail_json(msg="Unable to authorize %s for ip %s security group '%s' - %s" %
+                                             (type, thisip, group['GroupName'], e),
+                                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+            changed = True
+    return changed, ip_permission
+
+
+def serialize_group_grant(group_id, rule):
+    permission = {'IpProtocol': rule['proto'],
+                  'FromPort': rule['from_port'],
+                  'ToPort': rule['to_port'],
+                  'UserIdGroupPairs': [{'GroupId': group_id}]}
+
+    return fix_port_and_protocol(permission)
+
+
+def serialize_revoke(grant, rule):
+    permission = dict()
+    fromPort = rule['FromPort'] if 'FromPort' in rule else None
+    toPort = rule['ToPort'] if 'ToPort' in rule else None
+    if 'GroupId' in grant:
+        permission = {'IpProtocol': rule['IpProtocol'],
+                      'FromPort': fromPort,
+                      'ToPort': toPort,
+                      'UserIdGroupPairs': [{'GroupId': grant['GroupId']}]
+                      }
+    elif 'CidrIp' in grant:
+        permission = {'IpProtocol': rule['IpProtocol'],
+                      'FromPort': fromPort,
+                      'ToPort': toPort,
+                      'IpRanges': [grant]
+                      }
+    elif 'CidrIpv6' in grant:
+        permission = {'IpProtocol': rule['IpProtocol'],
+                      'FromPort': fromPort,
+                      'ToPort': toPort,
+                      'Ipv6Ranges': [grant]
+                      }
+    return fix_port_and_protocol(permission)
+
+
+def serialize_ip_grant(rule, thisip, ethertype):
+    permission = {'IpProtocol': rule['proto'],
+                  'FromPort': rule['from_port'],
+                  'ToPort': rule['to_port']}
+    if ethertype == "ipv4":
+        permission['IpRanges'] = [{'CidrIp': thisip}]
+    elif ethertype == "ipv6":
+        permission['Ipv6Ranges'] = [{'CidrIpv6': thisip}]
+
+    return fix_port_and_protocol(permission)
+
+
+def fix_port_and_protocol(permission):
+    for key in ['FromPort', 'ToPort']:
+        if key in permission:
+            if permission[key] is None:
+                del permission[key]
+            else:
+                permission[key] = int(permission[key])
+
+    permission['IpProtocol'] = str(permission['IpProtocol'])
+
+    return permission
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        name=dict(),
+        group_id=dict(),
+        description=dict(),
+        vpc_id=dict(),
+        rules=dict(type='list'),
+        rules_egress=dict(type='list'),
+        state=dict(default='present', type='str', choices=['present', 'absent']),
+        purge_rules=dict(default=True, required=False, type='bool'),
+        purge_rules_egress=dict(default=True, required=False, type='bool'),
+        tags=dict(required=False, type='dict', aliases=['resource_tags']),
+        purge_tags=dict(default=True, required=False, type='bool')
+    )
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_one_of=[['name', 'group_id']],
+        required_if=[['state', 'present', ['name']]],
+    )
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 required for this module')
+
+    name = module.params['name']
+    group_id = module.params['group_id']
+    description = module.params['description']
+    vpc_id = module.params['vpc_id']
+    rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules'])))
+    rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules_egress'])))
+    state = module.params.get('state')
+    purge_rules = module.params['purge_rules']
+    purge_rules_egress = module.params['purge_rules_egress']
+    tags = module.params['tags']
+    purge_tags = module.params['purge_tags']
+
+    if state == 'present' and not description:
+        module.fail_json(msg='Must provide description when state is present.')
+
+    changed = False
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+    if not region:
+        module.fail_json(msg="The AWS region must be specified as an "
+                             "environment variable or in the AWS credentials "
+                             "profile.")
+    client = boto3_conn(module, conn_type='client', resource='ec2', endpoint=ec2_url, region=region, **aws_connect_params)
+    group = None
+    groups = dict()
+    security_groups = []
+    # fetch all existing security groups up front
+    # so we can tell whether the requested group is already present
+    try:
+        response = get_security_groups_with_backoff(client)
+        security_groups = response.get('SecurityGroups', [])
+    except botocore.exceptions.NoCredentialsError:
+        module.fail_json(msg="Error in describe_security_groups: Unable to locate credentials",
exception=traceback.format_exc()) +    except botocore.exceptions.ClientError as e: +        module.fail_json(msg="Error in describe_security_groups: %s" % e, exception=traceback.format_exc(), +                         **camel_dict_to_snake_dict(e.response)) + +    for sg in security_groups: +        groups[sg['GroupId']] = sg +        groupName = sg['GroupName'] +        if groupName in groups: +            # Prioritise groups from the current VPC +            # even if current VPC is EC2-Classic +            if groups[groupName].get('VpcId') == vpc_id: +                # Group saved already matches current VPC, change nothing +                pass +            elif vpc_id is None and groups[groupName].get('VpcId') is None: +                # We're in EC2 classic, and the group already saved is as well +                # No VPC groups can be used alongside EC2 classic groups +                pass +            else: +                # the current SG stored has no direct match, so we can replace it +                groups[groupName] = sg +        else: +            groups[groupName] = sg + +        if group_id and sg['GroupId'] == group_id: +            group = sg +        elif groupName == name and (vpc_id is None or sg.get('VpcId') == vpc_id): +            group = sg + +    # Ensure requested group is absent +    if state == 'absent': +        if group: +            # found a match, delete it +            try: +                if not module.check_mode: +                    client.delete_security_group(GroupId=group['GroupId']) +            except botocore.exceptions.ClientError as e: +                module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e), +                                 exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) +            else: +                group = None +                changed = True +        else: +            # no match found, no changes required +            pass + +    # Ensure requested group is present +    elif state == 'present': +        if group: +            # existing group +            if group['Description'] != description: +                module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting " +                            "and re-creating the security group. Try using state=absent to delete, then rerunning this task.") + +        # if the group doesn't exist, create it now +        else: +            # no match found, create it +            if not module.check_mode: +                params = dict(GroupName=name, Description=description) +                if vpc_id: +                    params['VpcId'] = vpc_id +                group = client.create_security_group(**params) +                # When a group is created, an egress_rule ALLOW ALL +                # to 0.0.0.0/0 is added automatically but it's not +                # reflected in the object returned by the AWS API +                # call. 
We re-read the group to get an updated object
+                # amazon sometimes takes a couple seconds to update the security group so wait till it exists
+                while True:
+                    group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+                    if group.get('IpPermissionsEgress') or not group.get('VpcId'):
+                        break
+
+            changed = True
+
+        if tags is not None:
+            current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
+            tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
+            if tags_to_delete:
+                try:
+                    client.delete_tags(Resources=[group['GroupId']], Tags=[{'Key': tag} for tag in tags_to_delete])
+                except botocore.exceptions.ClientError as e:
+                    module.fail_json(msg=str(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+                changed = True
+
+            # Add/update tags
+            if tags_need_modify:
+                try:
+                    client.create_tags(Resources=[group['GroupId']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
+                except botocore.exceptions.ClientError as e:
+                    module.fail_json(msg=str(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+                changed = True
+
+    else:
+        module.fail_json(msg="Unsupported state requested: %s" % state)
+
+    # create a lookup for all existing rules on the group
+    ip_permission = []
+    if group:
+        # Manage ingress rules
+        groupRules = {}
+        add_rules_to_lookup(group['IpPermissions'], group['GroupId'], 'in', groupRules)
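The purge mechanism used in this loop (and mirrored in the egress section below) hinges on each existing rule being keyed into the groupRules lookup: every requested rule deletes its matching key, and whatever remains at the end is revoked. A minimal sketch of the keying scheme, assuming the dash-joined format implied by the default_egress_rule string built later in this function (make_rule_key_sketch is a hypothetical stand-in, not the module's actual make_rule_key helper):

```python
def make_rule_key_sketch(prefix, rule, group_id, target):
    # Hypothetical stand-in: join direction, protocol, port range, owning
    # group and target (CIDR block or group id) with dashes.
    return "%s-%s-%s-%s-%s-%s" % (prefix, rule.get('proto'),
                                  rule.get('from_port'), rule.get('to_port'),
                                  group_id, target)


rule = {'proto': 'tcp', 'from_port': 443, 'to_port': 443, 'cidr_ip': '10.0.0.0/8'}
print(make_rule_key_sketch('in', rule, 'sg-0123456789abcdef0', rule['cidr_ip']))
# prints: in-tcp-443-443-sg-0123456789abcdef0-10.0.0.0/8
```

+        # Now, go through all provided rules and ensure they are there.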
+        if rules is not None: +            for rule in rules: +                validate_rule(module, rule) +                group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name, +                                                                                group, groups, vpc_id) +                if target_group_created: +                    changed = True + +                if rule['proto'] in ('all', '-1', -1): +                    rule['proto'] = -1 +                    rule['from_port'] = None +                    rule['to_port'] = None + +                if group_id: +                    rule_id = make_rule_key('in', rule, group['GroupId'], group_id) +                    if rule_id in groupRules: +                        del groupRules[rule_id] +                    else: +                        if not module.check_mode: +                            ip_permission = serialize_group_grant(group_id, rule) +                            if ip_permission: +                                ips = ip_permission +                                if vpc_id: +                                    [useridpair.update({'VpcId': vpc_id}) for useridpair in +                                     ip_permission.get('UserIdGroupPairs', [])] +                                try: +                                    client.authorize_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ips]) +                                except botocore.exceptions.ClientError as e: +                                    module.fail_json( +                                        msg="Unable to authorize ingress for group %s security group '%s' - %s" % +                                            (group_id, group['GroupName'], e), +                                        exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) +                        changed = True +                elif ip: +                    # Convert ip to list we can iterate over +                    if ip and not isinstance(ip, list): +                        ip = [ip] + +                    changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ip, ip_permission, +                                                          module, rule, "ipv4") +                elif ipv6: +                    # Convert ip to list we can iterate over +                    if not isinstance(ipv6, list): +                        ipv6 = [ipv6] +                    # If rule already exists, don't later delete it +                    changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ipv6, ip_permission, +                                                          module, rule, "ipv6") +        # Finally, remove anything left in the groupRules -- these will be defunct rules +        if purge_rules: +            for (rule, grant) in groupRules.values(): +                ip_permission = serialize_revoke(grant, rule) +                if not module.check_mode: +                    try: +                        client.revoke_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ip_permission]) +                    except botocore.exceptions.ClientError as e: +                        module.fail_json( +                            msg="Unable to revoke ingress for security group '%s' - %s" % +                                (group['GroupName'], e), +                            exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) +            
    changed = True + +        # Manage egress rules +        groupRules = {} +        add_rules_to_lookup(group['IpPermissionsEgress'], group['GroupId'], 'out', groupRules) +        # Now, go through all provided rules and ensure they are there. +        if rules_egress is not None: +            for rule in rules_egress: +                validate_rule(module, rule) +                group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name, +                                                                                group, groups, vpc_id) +                if target_group_created: +                    changed = True + +                if rule['proto'] in ('all', '-1', -1): +                    rule['proto'] = -1 +                    rule['from_port'] = None +                    rule['to_port'] = None + +                if group_id: +                    rule_id = make_rule_key('out', rule, group['GroupId'], group_id) +                    if rule_id in groupRules: +                        del groupRules[rule_id] +                    else: +                        if not module.check_mode: +                            ip_permission = serialize_group_grant(group_id, rule) +                            if ip_permission: +                                ips = ip_permission +                                if vpc_id: +                                    [useridpair.update({'VpcId': vpc_id}) for useridpair in +                                     ip_permission.get('UserIdGroupPairs', [])] +                                try: +                                    client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ips]) +                                except botocore.exceptions.ClientError as e: +                                    module.fail_json( +                                        msg="Unable to authorize egress for group %s security group '%s' - %s" % +                                            (group_id, group['GroupName'], e), +                                        exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) +                        changed = True +                elif ip: +                    # Convert ip to list we can iterate over +                    if not isinstance(ip, list): +                        ip = [ip] +                    changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ip, +                                                          ip_permission, module, rule, "ipv4") +                elif ipv6: +                    # Convert ip to list we can iterate over +                    if not isinstance(ipv6, list): +                        ipv6 = [ipv6] +                    # If rule already exists, don't later delete it +                    changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ipv6, +                                                          ip_permission, module, rule, "ipv6") +        elif vpc_id is not None: +            # when no egress rules are specified and we're in a VPC, +            # we add in a default allow all out rule, which was the +            # default behavior before egress rules were added +            default_egress_rule = 'out--1-None-None-' + group['GroupId'] + '-0.0.0.0/0' +            if default_egress_rule not in groupRules: +                if not module.check_mode: +                    ip_permission = [{'IpProtocol': '-1', +                                      'IpRanges': 
[{'CidrIp': '0.0.0.0/0'}]
+                                      }
+                                     ]
+                    try:
+                        client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=ip_permission)
+                    except botocore.exceptions.ClientError as e:
+                        module.fail_json(msg="Unable to authorize egress for ip %s security group '%s' - %s" %
+                                             ('0.0.0.0/0',
+                                              group['GroupName'],
+                                              e),
+                                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+                changed = True
+            else:
+                # make sure the default egress rule is not removed
+                del groupRules[default_egress_rule]
+
+        # Finally, remove anything left in the groupRules -- these will be defunct rules
+        if purge_rules_egress and vpc_id is not None:
+            for (rule, grant) in groupRules.values():
+                # we shouldn't be revoking 0.0.0.0 egress
+                if grant != '0.0.0.0/0':
+                    ip_permission = serialize_revoke(grant, rule)
+                    if not module.check_mode:
+                        try:
+                            client.revoke_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
+                        except botocore.exceptions.ClientError as e:
+                            module.fail_json(msg="Unable to revoke egress for ip %s security group '%s' - %s" %
+                                                 (grant, group['GroupName'], e),
+                                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+                    changed = True
+
+    if group:
+        security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+        security_group = camel_dict_to_snake_dict(security_group)
+        security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []),
+                                                                tag_name_key_name='key', tag_value_key_name='value')
+        module.exit_json(changed=changed, **security_group)
+    else:
+        module.exit_json(changed=changed, group_id=None)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/roles/openshift_aws/tasks/security_group_create.yml b/roles/openshift_aws/tasks/security_group_create.yml
index ef6060555..cafd09d82 100644
--- a/roles/openshift_aws/tasks/security_group_create.yml
+++ b/roles/openshift_aws/tasks/security_group_create.yml
@@ -1,6 +1,6 @@
 ---
 - name: create the node group sgs
-  ec2_group:
+  oo_ec2_group:
     name: "{{ item.value.name}}"
     description: "{{ item.value.desc }}"
     rules: "{{ item.value.rules if 'rules' in item.value else [] }}"
@@ -9,7 +9,7 @@
   with_dict: "{{ l_security_groups }}"
 - name: create the k8s sgs for the node group
-  ec2_group:
+  oo_ec2_group:
     name: "{{ item.value.name }}_k8s"
     description: "{{ item.value.desc }} for k8s"
     region: "{{ openshift_aws_region }}"
diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml
index f8b784a63..81b49ce60 100644
--- a/roles/openshift_ca/meta/main.yml
+++ b/roles/openshift_ca/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
   - system
 dependencies:
 - role: openshift_cli
+- role: openshift_facts
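The 'ports' and source expansion helpers in the new oo_ec2_group module above are easiest to follow with a worked example. The function body below is copied verbatim from the module's ports_expand; the printed result is what it returns for that input:

```python
def ports_expand(ports):
    # Copied from oo_ec2_group: normalize each entry to a (from_port, to_port)
    # tuple; integers map to themselves, 'a-b' strings become a range.
    ports_expanded = []
    for port in ports:
        if not isinstance(port, str):
            ports_expanded.append((port,) * 2)
        elif '-' in port:
            ports_expanded.append(tuple(p.strip() for p in port.split('-', 1)))
        else:
            ports_expanded.append((port.strip(),) * 2)
    return ports_expanded


print(ports_expand([22, '80-81']))
# prints: [(22, 22), ('80', '81')]
```

rule_expand_ports then emits one rule per (from_port, to_port) pair, and rules_expand_sources multiplies each of those by every entry in its cidr_ip/cidr_ipv6/group_id/group_name list, so a single rule carrying the two ports above and two cidr_ip entries becomes four single-port, single-source rules before deduplicate_rules_args runs.

diff --git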
a/roles/openshift_health_checker/HOWTO_CHECKS.md b/roles/openshift_health_checker/HOWTO_CHECKS.md index 6c5662a4e..94961f2d4 100644 --- a/roles/openshift_health_checker/HOWTO_CHECKS.md +++ b/roles/openshift_health_checker/HOWTO_CHECKS.md @@ -12,7 +12,7 @@ Checks are typically implemented as two parts:  The checks are called from Ansible playbooks via the `openshift_health_check`  action plugin. See -[playbooks/byo/openshift-preflight/check.yml](../../playbooks/byo/openshift-preflight/check.yml) +[playbooks/openshift-checks/pre-install.yml](../../playbooks/openshift-checks/pre-install.yml)  for an example.  The action plugin dynamically discovers all checks and executes only those diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml index de302c740..429f0c514 100644 --- a/roles/openshift_hosted/tasks/registry.yml +++ b/roles/openshift_hosted/tasks/registry.yml @@ -126,7 +126,7 @@      selector: "{{ openshift_hosted_registry_selector }}"      replicas: "{{ openshift_hosted_registry_replicas | default(l_default_replicas) }}"      service_account: "{{ openshift_hosted_registry_serviceaccount }}" -    images: "{{ penshift_hosted_registry_registryurl }}" +    images: "{{ openshift_hosted_registry_registryurl }}"      env_vars: "{{ openshift_hosted_registry_env_vars }}"      volume_mounts: "{{ openshift_hosted_registry_volumes }}"      edits: "{{ openshift_hosted_registry_edits }}" diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index 0bfa9e85b..bf04094a3 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -9,6 +9,7 @@ metadata:      logging-infra: "{{logging_component}}"  spec:    replicas: {{es_replicas|default(1)}} +  revisionHistoryLimit: 0    selector:      provider: openshift      component: "{{component}}" diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md index 96de82669..974d9781a 100644 --- a/roles/openshift_management/README.md +++ b/roles/openshift_management/README.md @@ -164,14 +164,14 @@ away.  If you want to install CFME/MIQ at the same time you install your  OCP/Origin cluster, ensure that `openshift_management_install_management` is set  to `true` in your inventory. Call the standard -`playbooks/byo/config.yml` playbook to begin the cluster and CFME/MIQ +`playbooks/deploy_cluster.yml` playbook to begin the cluster and CFME/MIQ  installation.  If you are installing CFME/MIQ on an *already provisioned cluster*  then you can call the CFME/MIQ playbook directly:  ``` -$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/config.yml +$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/config.yml  ```  *Note: Use `miq-template` in the following examples for ManageIQ installs* @@ -489,7 +489,7 @@ This playbook will:  ``` -$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml +$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/add_container_provider.yml  ```  ## Multiple Providers @@ -567,7 +567,7 @@ the config file path.  ```  $ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \ -      playbooks/byo/openshift-management/add_many_container_providers.yml +      playbooks/openshift-management/add_many_container_providers.yml  ```  Afterwards you will find two new container providers in your @@ -579,7 +579,7 @@ to see an overview. 
 This role includes a playbook to uninstall and erase the CFME/MIQ  installation: -* `playbooks/byo/openshift-management/uninstall.yml` +* `playbooks/openshift-management/uninstall.yml`  NFS export definitions and data stored on NFS exports are not  automatically removed. You are urged to manually erase any data from diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml index e768961ce..b5e234b7f 100644 --- a/roles/openshift_management/defaults/main.yml +++ b/roles/openshift_management/defaults/main.yml @@ -88,7 +88,7 @@ openshift_management_storage_nfs_local_hostname: false  # name and password AND are trying to use integration scripts.  #  # For example, adding this cluster as a container provider, -# playbooks/byo/openshift-management/add_container_provider.yml +# playbooks/openshift-management/add_container_provider.yml  openshift_management_username: admin  openshift_management_password: smartvm diff --git a/roles/openshift_management/meta/main.yml b/roles/openshift_management/meta/main.yml index 07ad51126..9f19704a8 100644 --- a/roles/openshift_management/meta/main.yml +++ b/roles/openshift_management/meta/main.yml @@ -16,3 +16,4 @@ galaxy_info:  dependencies:  - role: lib_openshift  - role: lib_utils +- role: openshift_facts diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml index 30e83e79b..0c2fcb2c5 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml @@ -23,7 +23,7 @@      state: absent      labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"    with_items: "{{ groups.all }}" -  when: glusterfs_wipe +  when: "'openshift' in hostvars[item] and glusterfs_wipe"  - name: Delete pre-existing GlusterFS config    file: diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml index 2d317700a..d0ad4b7d2 100644 --- a/roles/openshift_version/meta/main.yml +++ b/roles/openshift_version/meta/main.yml @@ -13,5 +13,4 @@ galaxy_info:    - cloud  dependencies:  - role: lib_utils -- role: container_runtime  - role: openshift_facts @@ -345,35 +345,29 @@ class OpenShiftAnsibleSyntaxCheck(Command):              print('-' * 60)              print('Syntax checking playbook: {}'.format(playbook)) -            # Error on any entry points in 'common' -            if 'common' in playbook: -                print('{}Invalid entry point playbook. 
All playbooks must' -                      ' start in playbooks/byo{}'.format(self.FAIL, self.ENDC)) -                has_errors = True              # --syntax-check each entry point playbook -            else: -                try: -                    # Create a host group list to avoid WARNING on unmatched host patterns -                    host_group_list = [ -                        'etcd,masters,nodes,OSEv3', -                        'oo_all_hosts', -                        'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,' -                        'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate', -                        'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes', -                        'oo_nodes_to_config,oo_nodes_to_upgrade', -                        'oo_nodes_use_kuryr,oo_nodes_use_flannel', -                        'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv', -                        'oo_lb_to_config', -                        'oo_nfs_to_config', -                        'glusterfs,glusterfs_registry,'] -                    subprocess.check_output( -                        ['ansible-playbook', '-i ' + ','.join(host_group_list), -                         '--syntax-check', playbook] -                    ) -                except subprocess.CalledProcessError as cpe: -                    print('{}Execution failed: {}{}'.format( -                        self.FAIL, cpe, self.ENDC)) -                    has_errors = True +            try: +                # Create a host group list to avoid WARNING on unmatched host patterns +                host_group_list = [ +                    'etcd,masters,nodes,OSEv3', +                    'oo_all_hosts', +                    'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,' +                    'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate', +                    'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes', +                    'oo_nodes_to_config,oo_nodes_to_upgrade', +                    'oo_nodes_use_kuryr,oo_nodes_use_flannel', +                    'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv', +                    'oo_lb_to_config', +                    'oo_nfs_to_config', +                    'glusterfs,glusterfs_registry,'] +                subprocess.check_output( +                    ['ansible-playbook', '-i ' + ','.join(host_group_list), +                     '--syntax-check', playbook] +                ) +            except subprocess.CalledProcessError as cpe: +                print('{}Execution failed: {}{}'.format( +                    self.FAIL, cpe, self.ENDC)) +                has_errors = True          if has_errors:              raise SystemExit(1) diff --git a/test/integration/build-images.sh b/test/integration/build-images.sh index 74a55fa51..74a55fa51 100755..100644 --- a/test/integration/build-images.sh +++ b/test/integration/build-images.sh diff --git a/test/integration/run-tests.sh b/test/integration/run-tests.sh index 680b64602..680b64602 100755..100644 --- a/test/integration/run-tests.sh +++ b/test/integration/run-tests.sh @@ -3,7 +3,6 @@ minversion=2.3.1  envlist =      py{27,35}-{flake8,pylint,unit}      py27-{yamllint,ansible_syntax,generate_validation} -    integration  skipsdist=True  skip_missing_interpreters=True @@ -14,7 +13,6 @@ deps =      -rtest-requirements.txt      unit: -eutils      py35-flake8: flake8-bugbear==17.3.0 -    integration: 
docker-py==1.10.6
 commands =
     unit: pytest {posargs}
@@ -23,4 +21,3 @@ commands =
     yamllint: python setup.py yamllint
     generate_validation: python setup.py generate_validation
     ansible_syntax: python setup.py ansible_syntax
-    integration: python -c 'print("run test/integration/run-tests.sh")'
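For anyone reproducing the ansible_syntax step outside of tox, the setup.py change above reduces it to running ansible-playbook --syntax-check over every entry-point playbook with a synthetic inventory of host groups. A minimal sketch, assuming ansible-playbook is on PATH and using a trimmed host group list (setup.py passes the full set to avoid unmatched host pattern warnings):

```python
import subprocess

# Trimmed host group list; see setup.py above for the complete set.
host_group_list = ['etcd,masters,nodes,OSEv3', 'oo_all_hosts']
subprocess.check_output(
    ['ansible-playbook', '-i ' + ','.join(host_group_list),
     '--syntax-check', 'playbooks/deploy_cluster.yml'])
```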
