146 files changed, 1520 insertions, 2009 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index a7076c210..9a5acc500 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.7.0-0.125.0 ./ +3.7.0-0.126.0 ./ diff --git a/DEPLOYMENT_TYPES.md b/DEPLOYMENT_TYPES.md index 009a1d95c..e52e47202 100644 --- a/DEPLOYMENT_TYPES.md +++ b/DEPLOYMENT_TYPES.md @@ -12,6 +12,6 @@ The table below outlines the defaults per `openshift_deployment_type`: |-----------------------------------------------------------------|------------------------------------------|----------------------------------------| | **openshift.common.service_type** (also used for package names) | origin | atomic-openshift | | **openshift.common.config_base** | /etc/origin | /etc/origin | -| **openshift.common.data_dir** | /var/lib/origin | /var/lib/origin | +| **openshift_data_dir** | /var/lib/origin | /var/lib/origin | | **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} | | **Image Streams** | centos | rhel | @@ -38,7 +38,7 @@ Follow this release pattern and you can't go wrong: | Origin/OCP | OpenShift-Ansible version | openshift-ansible branch | | ------------- | ----------------- |----------------------------------| | 1.3 / 3.3 | 3.3 | release-1.3 | -| 1.4 / 3.4 | 3.4 | releaes-1.4 | +| 1.4 / 3.4 | 3.4 | release-1.4 | | 1.5 / 3.5 | 3.5 | release-1.5 | | 3.*X* | 3.*X* | release-3.x | diff --git a/filter_plugins/openshift_node.py b/filter_plugins/openshift_node.py index cad95ea6d..50c360e97 100644 --- a/filter_plugins/openshift_node.py +++ b/filter_plugins/openshift_node.py @@ -25,18 +25,7 @@ class FilterModule(object): # We always use what they've specified if they've specified a value if openshift_dns_ip is not None: return openshift_dns_ip - - if bool(hostvars['openshift']['common']['use_dnsmasq']): - return hostvars['ansible_default_ipv4']['address'] - elif bool(hostvars['openshift']['common']['version_gte_3_1_or_1_1']): - if 'openshift_master_cluster_vip' in hostvars: - return hostvars['openshift_master_cluster_vip'] - else: - if 'openshift_master_cluster_vip' in hostvars: - return hostvars['openshift_master_cluster_vip'] - elif 'openshift_node_first_master_ip' in hostvars: - return hostvars['openshift_node_first_master_ip'] - return None + return hostvars['ansible_default_ipv4']['address'] def filters(self): ''' returns a mapping of filters to methods ''' diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index ad69bd587..dbe57bbd2 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -118,7 +118,7 @@ openshift_release=v3.6 # Force the registry to use for the docker/crio system container. By default the registry # will be built off of the deployment type and ansible_distribution. Only # use this option if you are sure you know what you are doing! 
-#openshift_docker_systemcontainer_image_registry_override="registry.example.com" +#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest" #openshift_crio_systemcontainer_image_registry_override="registry.example.com" # Items added, as is, to end of /etc/sysconfig/docker OPTIONS # Default value: "--log-driver=journald" @@ -613,6 +613,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting # docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS # environment variable located in /etc/sysconfig/docker-network. +# When upgrading these must be specified! #osm_cluster_network_cidr=10.128.0.0/14 #openshift_portal_net=172.30.0.0/16 @@ -634,6 +635,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure number of bits to allocate to each host’s subnet e.g. 9 # would mean a /23 network on the host. +# When upgrading this must be specified! #osm_host_subnet_length=9 # Configure master API and console ports. @@ -719,11 +721,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Setting this variable to true will override that check. #openshift_override_hostname_check=true -# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq -# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults -# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot -# be used with 1.0 and 3.0. +# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail +# in versions >= 3.6 #openshift_use_dnsmasq=False + # Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf # This is useful for POC environments where DNS may not actually be available yet or to set # options like 'strict-order' to alter dnsmasq configuration. diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index b52806bc7..0d60de6d2 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -118,7 +118,7 @@ openshift_release=v3.6 # Force the registry to use for the container-engine/crio system container. By default the registry # will be built off of the deployment type and ansible_distribution. Only # use this option if you are sure you know what you are doing! -#openshift_docker_systemcontainer_image_registry_override="registry.example.com" +#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest" #openshift_crio_systemcontainer_image_registry_override="registry.example.com" # Items added, as is, to end of /etc/sysconfig/docker OPTIONS # Default value: "--log-driver=journald" @@ -621,6 +621,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting # docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS # environment variable located in /etc/sysconfig/docker-network. +# When upgrading these must be specified! #osm_cluster_network_cidr=10.128.0.0/14 #openshift_portal_net=172.30.0.0/16 @@ -642,6 +643,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure number of bits to allocate to each host’s subnet e.g. 9 # would mean a /23 network on the host. +# When upgrading this must be specified! 
#osm_host_subnet_length=9 # Configure master API and console ports. @@ -727,10 +729,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Setting this variable to true will override that check. #openshift_override_hostname_check=true -# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq -# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults -# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot -# be used with 1.0 and 3.0. +# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail +# in versions >= 3.6 #openshift_use_dnsmasq=False # Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf # This is useful for POC environments where DNS may not actually be available yet or to set diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 095f43dd8..3be13145e 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -10,7 +10,7 @@ Name: openshift-ansible Version: 3.7.0 -Release: 0.125.0%{?dist} +Release: 0.126.0%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 URL: https://github.com/openshift/openshift-ansible @@ -280,6 +280,47 @@ Atomic OpenShift Utilities includes %changelog +* Mon Sep 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.126.0 +- Fix rpm version logic for hosts (mgugino@redhat.com) +- Revert back to hostnamectl and previous default of not setting hostname + (sdodson@redhat.com) +- Correct include path to not follow symlink (rteague@redhat.com) +- Fix include path for docker upgrade tasks (rteague@redhat.com) +- Fix issue with etcd_common when using pre_upgrade tag (rteague@redhat.com) +- inventory: Denote new required upgrade variables (smilner@redhat.com) +- upgrade: Verify required network items are set (smilner@redhat.com) +- ami build process calls openshift-node/config.yml (kwoodson@redhat.com) + +* Fri Sep 08 2017 Scott Dodson <sdodson@redhat.com> 3.7.0-0.125.1 +- Consolidating AWS roles and variables underneath openshift_aws role. + (kwoodson@redhat.com) +- Fix README.md typo (mgugino@redhat.com) +- Fixing variables and allowing custom ami. (kwoodson@redhat.com) +- Remove openshift-common (mgugino@redhat.com) +- Fix openshift_master_config_dir (sdodson@redhat.com) +- remove experimental-cri flag from node config (sjenning@redhat.com) +- cri-o: Split RHEL and CentOS images (smilner@redhat.com) +- openshift_checks aos_version: also check installed under yum + (lmeyer@redhat.com) +- Create ansible role for deploying prometheus on openshift (zgalor@redhat.com) +- Fix: set openshift_master_config_dir to the correct value. + (mgugino@redhat.com) +- Bump ansible requirement to 2.3 (sdodson@redhat.com) +- Move master additional config out of base (rteague@redhat.com) +- Import dnf only if importing yum fails (jhadvig@redhat.com) +- output skopeo image check command (nakayamakenjiro@gmail.com) +- skip openshift_cfme_nfs_server if not using nfs (sdw35@cornell.edu) +- bug 1487573. 
Bump the allowed ES versions (jcantril@redhat.com) +- update env in etcd.conf.j2 to reflect the latest naming (jchaloup@redhat.com) +- logging set memory request to limit (jcantril@redhat.com) +- Use the proper pod subnet instead the services one (edu@redhat.com) +- elasticsearch: reintroduce readiness probe (jwozniak@redhat.com) +- cri-o: add support for additional registries (gscrivan@redhat.com) +- reverse order between router cert generation (mewt.fr@gmail.com) +- ensured to always use a certificate for the router (mewt.fr@gmail.com) +- Adding proxy env vars for dc/docker-registry (kwoodson@redhat.com) +- oc_atomic_container: support Skopeo output (gscrivan@redhat.com) + * Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.125.0 - diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 58b3a7835..5072d10fa 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -35,11 +35,9 @@ - /etc/dnsmasq.d/origin-upstream-dns.conf - /etc/dnsmasq.d/openshift-ansible.conf - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh - when: openshift_use_dnsmasq | default(true) | bool - service: name: NetworkManager state: restarted - when: openshift_use_dnsmasq | default(true) | bool - name: Stop services service: name={{ item }} state=stopped with_items: diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index 0fb29ca06..2b3d4329e 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -32,91 +32,54 @@ Before any provisioning may occur, AWS account credentials must be present in th ### Let's Provision! The newly added playbooks are the following: -- build_ami.yml -- provision.yml -- provision_nodes.yml +- build_ami.yml - Builds a custom AMI. This currently requires the user to supply a valid AMI with access to repositories that contain OpenShift packages. +- provision.yml - Creates a vpc, elbs, security groups, launch config, asg's, etc. +- install.yml - Calls the openshift-ansible installer on the newly created instances +- provision_nodes.yml - Creates the infra and compute node scale groups +- accept.yml - This is a playbook to accept infra and compute nodes into the cluster +- provision_install.yml - This is a combination of the above playbooks (provision, install, provision_nodes, and accept) -The current expected work flow should be to provide the `vars.yml` file with the -desired settings for cluster instances. These settings are AWS specific and should -be tailored to the consumer's AWS custom account settings. +The current expected workflow is to provide an AMI with access to OpenShift repositories. There should be a repository specified in the `openshift_additional_repos` parameter of the inventory file. The next expectation is a minimal set of values in the `provisioning_vars.yml` file to configure the desired settings for cluster instances. These settings are AWS-specific and should be tailored to the consumer's AWS custom account settings. 
```yaml -clusterid: mycluster -region: us-east-1 - -provision: - clusterid: "{{ clusterid }}" - region: "{{ region }}" - - build: - base_image: ami-bdd5d6ab # base image for AMI to build from - # when creating an encrypted AMI please specify use_encryption - use_encryption: False - - # for s3 registry backend - openshift_registry_s3: True - - # if using custom certificates these are required for the ELB - iam_cert_ca: - name: test_openshift - cert_path: '/path/to/wildcard.<clusterid>.example.com.crt' - key_path: '/path/to/wildcard.<clusterid>.example.com.key' - chain_path: '/path/to/cert.ca.crt' - - instance_users: - - key_name: myuser_key - username: myuser - pub_key: | - ssh-rsa aaa<place public ssh key here>aaaaa user@<clusterid> - - node_group_config: - tags: - clusterid: "{{ clusterid }}" - environment: stg - ssh_key_name: myuser_key # name of the ssh key from above - - # configure master settings here - master: - instance_type: m4.xlarge - ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: False - health_check: - period: 60 - type: EC2 - # Set the following number to be the same for masters. - min_size: 3 - max_size: 3 - desired_size: 3 - tags: - host-type: master - sub-host-type: default - wait_for_instances: True -... - vpc: - # name: mycluster # If missing; will default to clusterid - cidr: 172.31.0.0/16 - subnets: - us-east-1: # These are us-east-1 region defaults. Ensure this matches your region - - cidr: 172.31.48.0/20 - az: "us-east-1c" - - cidr: 172.31.32.0/20 - az: "us-east-1e" - - cidr: 172.31.16.0/20 - az: "us-east-1a" - +--- +# when creating an AMI set this to True +# when installing a cluster set this to False +openshift_node_bootstrap: True + +# specify a clusterid +# openshift_aws_clusterid: default + +# specify a region +# openshift_aws_region: us-east-1 + +# must specify a base_ami when building an AMI +# openshift_aws_base_ami: # base image for AMI to build from +# specify when using a custom AMI +# openshift_aws_ami: + +# when creating an encrypted AMI please specify use_encryption +# openshift_aws_ami_encrypt: False + +# custom certificates are required for the ELB +# openshift_aws_iam_cert_path: '/path/to/cert/wildcard.<clusterid>.<domain>.com.crt' +# openshift_aws_iam_cert_key_path: '/path/to/key/wildcard.<clusterid>.<domain>.com.key' +# openshift_aws_iam_cert_chain_path: '/path/to/ca_cert_file/ca.crt' + +# This is required for any ec2 instances +# openshift_aws_ssh_key_name: myuser_key + +# This will ensure these users are created +#openshift_aws_users: +#- key_name: myuser_key +# username: myuser +# pub_key: | +# ssh-rsa AAAA ``` -Repeat the following setup for the infra and compute node groups. This most likely - will not need editing but if the install requires further customization then these parameters - can be updated. - -#### Step 1 +If customization is required for the instances, scale groups, or any other configurable option please see the ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`. -Create an openshift-ansible inventory file to use for a byo installation. The exception here is that there will be no hosts specified by the inventory file. Here is an example: +In order to create the bootstrap-able AMI we need to create an openshift-ansible inventory file. 
This file enables us to create the AMI using the openshift-ansible node roles. The exception here is that there will be no hosts specified by the inventory file. Here is an example: ```ini [OSEv3:children] @@ -133,6 +96,13 @@ etcd ################################################################################ # Ensure these variables are set for bootstrap ################################################################################ +# openshift_deployment_type is required for installation +openshift_deployment_type=origin + +# required when building an AMI. This will +# be dependent on the version provided by the yum repository +openshift_pkg_version=-3.6.0 + openshift_master_bootstrap_enabled=True openshift_hosted_router_wait=False @@ -153,77 +123,94 @@ openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', There are more examples of cluster inventory settings [`here`](../../inventory/byo/). -In order to create the bootstrapable AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles. - - -#### Step 2 +#### Step 1 -Once the vars.yml file has been updated with the correct settings for the desired AWS account then we are ready to build an AMI. +Once the `inventory` and the `provisioning_vars.yml` files have been updated with the correct settings for the desired AWS account then we are ready to build an AMI. ``` -$ ansible-playbook -i inventory.yml build_ami.yml +$ ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml ``` 1. This script will build a VPC. Default name will be clusterid if not specified. 2. Create an ssh key required for the instance. -3. Create an instance. -4. Run some setup roles to ensure packages and services are correctly configured. -5. Create the AMI. -6. If encryption is desired +3. Create a security group. +4. Create an instance using the key from step 2 or a specified key. +5. Run openshift-ansible setup roles to ensure packages and services are correctly configured. +6. Create the AMI. +7. If encryption is desired - A KMS key is created with the name of $clusterid - An encrypted AMI will be produced with $clusterid KMS key -7. Terminate the instance used to configure the AMI. +8. Terminate the instance used to configure the AMI. +More AMI-specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify `openshift_aws_ami_encrypt`: +``` +# openshift_aws_ami_encrypt: True # defaults to false +``` -#### Step 3 +**Note**: This will take the most recently created AMI and encrypt it for later use. If encryption is not desired then set the value to false (defaults to false). The AMI id will be fetched and used according to its most recent creation date. -Now that we have created an AMI for our Openshift installation, that AMI id needs to be placed in the `vars.yml` file. To do so update the following fields (The AMI can be captured from the output of the previous step or found in the ec2 console under AMIs): +#### Step 2 + +Now that we have created an AMI for our OpenShift installation, there are two ways to use the AMI. + +1. By default, the id of the most recently created AMI will be found and used. +2. The `openshift_aws_ami` option can be specified. This will allow the user to override the behavior of the role and use a custom AMI specified in the `openshift_aws_ami` variable. +We are now ready to provision and install the cluster. 
This can be accomplished by calling all of the following steps at once or one-by-one. The all-in-one can be called like this: ``` - # when creating an encrypted AMI please specify use_encryption - use_encryption: False # defaults to false +$ ansible-playbook -i inventory.yml provision_install.yml -e @provisioning_vars.yml ``` -**Note**: If using encryption, specify with `use_encryption: True`. This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false. The AMI id will be fetched and used according to its most recent creation date. +If this is the first time running through this process, please attempt the following steps one-by-one and ensure the setup works correctly. +#### Step 3 -#### Step 4 - -We are ready to create the master instances and install Openshift. +We are ready to create the master instances. ``` -$ ansible-playbook -i <inventory from step 1> provision.yml +$ ansible-playbook provision.yml -e @provisioning_vars.yml ``` This playbook runs through the following steps: -1. Ensures a VPC is created -2. Ensures a SSH key exists -3. Creates an s3 bucket for the registry named $clusterid -4. Create master security groups -5. Create a master launch config -6. Create the master auto scaling groups -7. If certificates are desired for ELB, they will be uploaded -8. Create internal and external master ELBs -9. Add newly created masters to the correct groups -10. Set a couple of important facts for the masters -11. Run the [`byo`](../../common/openshift-cluster/config.yml) +1. Ensures a VPC is created. +2. Ensures a SSH key exists. +3. Creates an s3 bucket for the registry named $clusterid-docker-registry. +4. Creates master security groups. +5. Creates a master launch config. +6. Creates the master auto scaling groups. +7. If certificates are desired for ELB, they will be uploaded. +8. Creates internal and external master ELBs. +9. Adds newly created masters to the correct groups. +10. Sets a couple of important facts for the masters. + +At this point we have successfully created the infrastructure including the master nodes. -At this point we have created a successful cluster with only the master nodes. +#### Step 4 + +Now it is time to install OpenShift using the openshift-ansible installer. This can be achieved by running the following playbook: + +``` +$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml +``` +This playbook accomplishes the following: +1. Builds a dynamic inventory file by querying AWS. +2. Runs the [`byo`](../../common/openshift-cluster/config.yml) installer playbook. +Once this playbook completes, the cluster masters should be installed and configured. #### Step 5 -Now that we have a cluster deployed it might be more interesting to create some node types. This can be done easily with the following playbook: +Now that we have a cluster deployed, it is time to create some node types. This can be done easily with the following playbook: ``` -$ ansible-playbook provision_nodes.yml +$ ansible-playbook provision_nodes.yml -e @provisioning_vars.yml ``` Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator. #### Step 6 -The registration of our nodes can be automated by running the following script `accept.yml`. This script can handle the registration in a few different ways. 
+To facilitate the node registration process, nodes may be registered by running the `accept.yml` playbook. This playbook can handle registration in a few different ways. - approve_all - **Note**: this option is for development and test environments. Security is bypassed - nodes - A list of node names that will be accepted into the cluster @@ -233,10 +220,11 @@ The registration of our nodes can be automated by running the following script ` nodes: < list of nodes here > timeout: 0 ``` + Once the desired accept method is chosen, run the following playbook `accept.yml`: 1. Run the following playbook. ``` -$ ansible-playbook accept.yml +$ ansible-playbook accept.yml -e @provisioning_vars.yml ``` Login to a master and run the following command: @@ -263,6 +251,6 @@ ip-172-31-49-148.ec2.internal Ready 1h v1.6.1+5115d At this point your cluster should be ready for workloads. Proceed to deploy applications on your cluster. -### Still to compute +### Still to come There are more enhancements that are arriving for provisioning. These will include more playbooks that enhance the provisioning capabilities. diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml index d43c84205..ffc367f9f 100755 --- a/playbooks/aws/openshift-cluster/accept.yml +++ b/playbooks/aws/openshift-cluster/accept.yml @@ -1,12 +1,17 @@ +#!/usr/bin/ansible-playbook --- - name: Setup the vpc and the master node group - #hosts: oo_first_master hosts: localhost remote_user: root gather_facts: no tasks: - - name: get provisioning vars - include_vars: vars.yml + - name: Alert user to variables needed - clusterid + debug: + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" + + - name: Alert user to variables needed - region + debug: + msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" - name: bring lib_openshift into scope include_role: @@ -14,9 +19,9 @@ - name: fetch masters ec2_remote_facts: - region: "{{ provision.region }}" + region: "{{ openshift_aws_region | default('us-east-1') }}" filters: - "tag:clusterid": "{{ provision.clusterid }}" + "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}" "tag:host-type": master instance-state-name: running register: mastersout @@ -26,9 +31,9 @@ - name: fetch new node instances ec2_remote_facts: - region: "{{ provision.region }}" + region: "{{ openshift_aws_region | default('us-east-1') }}" filters: - "tag:clusterid": "{{ provision.clusterid }}" + "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}" "tag:host-type": node instance-state-name: running register: instancesout diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml index d27874200..fc11205d8 100644 --- a/playbooks/aws/openshift-cluster/build_ami.yml +++ b/playbooks/aws/openshift-cluster/build_ami.yml @@ -3,67 +3,49 @@ connection: local gather_facts: no tasks: - - name: get the necessary vars for ami building - include_vars: vars.yml - - - name: create a vpc with the name <clusterid> + - name: Require openshift_aws_base_ami + fail: + msg: "A base AMI is required for AMI building. Please ensure `openshift_aws_base_ami` is defined." 
+ when: openshift_aws_base_ami is undefined + + - name: "Alert user to variables needed and their values - {{ item.name }}" + debug: + msg: "{{ item.msg }}" + with_items: + - name: openshift_aws_clusterid + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" + - name: openshift_aws_region + msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" + + - name: create an instance and prepare for ami include_role: - name: openshift_aws_vpc + name: openshift_aws + tasks_from: build_ami.yml vars: - r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}" - r_openshift_aws_vpc_cidr: "{{ provision.vpc.cidr }}" - r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}" - r_openshift_aws_vpc_region: "{{ provision.region }}" - r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}" - r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}" + openshift_aws_node_group_type: compute - - name: create aws ssh keypair - include_role: - name: openshift_aws_ssh_keys - vars: - r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}" - r_openshift_aws_ssh_keys_region: "{{ provision.region }}" - - - name: fetch the default subnet id - ec2_vpc_subnet_facts: - region: "{{ provision.region }}" + - name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region | default('us-east-1') }}" filters: - "tag:Name": "{{ provision.vpc.subnets[provision.region][0].az }}" - register: subnetout - - - name: create instance for ami creation - ec2: - assign_public_ip: yes - region: "{{ provision.region }}" - key_name: "{{ provision.node_group_config.ssh_key_name }}" - group: "{{ provision.clusterid }}" - instance_type: m4.xlarge - vpc_subnet_id: "{{ subnetout.subnets[0].id }}" - image: "{{ provision.build.base_image }}" - volumes: - - device_name: /dev/sdb - volume_type: gp2 - volume_size: 100 - delete_on_termination: true - wait: yes - exact_count: 1 - count_tag: - Name: ami_base - instance_tags: - Name: ami_base - register: amibase + "tag:Name": "{{ openshift_aws_base_ami_name | default('ami_base') }}" + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 - name: wait for ssh to become available wait_for: port: 22 - host: "{{ amibase.tagged_instances.0.public_ip }}" + host: "{{ instancesout.instances[0].public_ip_address }}" timeout: 300 search_regex: OpenSSH - name: add host to nodes add_host: groups: nodes - name: "{{ amibase.tagged_instances.0.public_dns_name }}" + name: "{{ instancesout.instances[0].public_dns_name }}" - name: set the user to perform installation set_fact: @@ -81,70 +63,16 @@ - name: run the std_include include: ../../common/openshift-cluster/initialize_openshift_repos.yml -- hosts: nodes - remote_user: root - tasks: - - name: get the necessary vars for ami building - include_vars: vars.yml - - - set_fact: - openshift_node_bootstrap: True - - - name: run openshift image preparation - include_role: - name: openshift_node +- name: install node config + include: ../../common/openshift-node/config.yml - hosts: localhost connection: local become: no tasks: - - name: bundle ami - ec2_ami: - instance_id: "{{ amibase.tagged_instances.0.id }}" - region: "{{ provision.region }}" - state: present - description: "This was provisioned {{ ansible_date_time.iso8601 }}" - name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}" - tags: "{{ provision.build.openshift_ami_tags }}" - wait: yes - register: amioutput - - - 
debug: var=amioutput - - - when: provision.build.use_encryption | default(False) - block: - - name: setup kms key for encryption - include_role: - name: openshift_aws_iam_kms - vars: - r_openshift_aws_iam_kms_region: "{{ provision.region }}" - r_openshift_aws_iam_kms_alias: "alias/{{ provision.clusterid }}_kms" - - - name: augment the encrypted ami tags with source-ami - set_fact: - source_tag: - source-ami: "{{ amioutput.image_id }}" - - - name: copy the ami for encrypted disks - include_role: - name: openshift_aws_ami_copy - vars: - r_openshift_aws_ami_copy_region: "{{ provision.region }}" - r_openshift_aws_ami_copy_name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}-encrypted" - r_openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}" - r_openshift_aws_ami_copy_kms_alias: "alias/{{ provision.clusterid }}_kms" - r_openshift_aws_ami_copy_tags: "{{ source_tag | combine(provision.build.openshift_ami_tags) }}" - r_openshift_aws_ami_copy_encrypt: "{{ provision.build.use_encryption }}" - # this option currently fails due to boto waiters - # when supported this need to be reapplied - #r_openshift_aws_ami_copy_wait: True - - - name: Display newly created encrypted ami id - debug: - msg: "{{ r_openshift_aws_ami_copy_retval_custom_ami }}" - - - name: terminate temporary instance - ec2: - state: absent - region: "{{ provision.region }}" - instance_ids: "{{ amibase.tagged_instances.0.id }}" + - name: seal the ami + include_role: + name: openshift_aws + tasks_from: seal_ami.yml + vars: + openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}" diff --git a/playbooks/aws/openshift-cluster/build_node_group.yml b/playbooks/aws/openshift-cluster/build_node_group.yml deleted file mode 100644 index 3ef492238..000000000 --- a/playbooks/aws/openshift-cluster/build_node_group.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -- name: fetch recently created AMI - ec2_ami_find: - region: "{{ provision.region }}" - sort: creationDate - sort_order: descending - name: "{{ provision.build.ami_name }}*" - ami_tags: "{{ provision.build.openshift_ami_tags }}" - #no_result_action: fail - register: amiout - -- block: - - name: "Create {{ openshift_build_node_type }} sgs" - include_role: - name: openshift_aws_sg - vars: - r_openshift_aws_sg_clusterid: "{{ provision.clusterid }}" - r_openshift_aws_sg_region: "{{ provision.region }}" - r_openshift_aws_sg_type: "{{ openshift_build_node_type }}" - - - name: "generate a launch config name for {{ openshift_build_node_type }}" - set_fact: - launch_config_name: "{{ provision.clusterid }}-{{ openshift_build_node_type }}-{{ ansible_date_time.epoch }}" - - - name: create "{{ openshift_build_node_type }} launch config" - include_role: - name: openshift_aws_launch_config - vars: - r_openshift_aws_launch_config_name: "{{ launch_config_name }}" - r_openshift_aws_launch_config_clusterid: "{{ provision.clusterid }}" - r_openshift_aws_launch_config_region: "{{ provision.region }}" - r_openshift_aws_launch_config: "{{ provision.node_group_config }}" - r_openshift_aws_launch_config_type: "{{ openshift_build_node_type }}" - r_openshift_aws_launch_config_custom_image: "{{ '' if 'results' not in amiout else amiout.results[0].ami_id }}" - r_openshift_aws_launch_config_bootstrap_token: "{{ (local_bootstrap['content'] |b64decode) if local_bootstrap is defined else '' }}" - - - name: "create {{ openshift_build_node_type }} node groups" - include_role: - name: openshift_aws_node_group - vars: - r_openshift_aws_node_group_name: "{{ provision.clusterid }} 
openshift {{ openshift_build_node_type }}" - r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}" - r_openshift_aws_node_group_clusterid: "{{ provision.clusterid }}" - r_openshift_aws_node_group_region: "{{ provision.region }}" - r_openshift_aws_node_group_config: "{{ provision.node_group_config }}" - r_openshift_aws_node_group_type: "{{ openshift_build_node_type }}" - r_openshift_aws_node_group_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}" diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml new file mode 100644 index 000000000..86d58a68e --- /dev/null +++ b/playbooks/aws/openshift-cluster/install.yml @@ -0,0 +1,74 @@ +--- +- name: Setup the vpc and the master node group + hosts: localhost + tasks: + - name: Alert user to variables needed - clusterid + debug: + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" + + - name: Alert user to variables needed - region + debug: + msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" + + - name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region | default('us-east-1') }}" + filters: + "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}" + "tag:host-type": master + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + + - name: add new master to masters group + add_host: + groups: "masters,etcd,nodes" + name: "{{ item.public_ip_address }}" + hostname: "{{ openshift_aws_clusterid | default('default') }}-master-{{ item.id[:-5] }}" + with_items: "{{ instancesout.instances }}" + + - name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ item.public_ip_address }}" + timeout: 300 + search_regex: OpenSSH + with_items: "{{ instancesout.instances }}" + +- name: set the master facts for hostname to elb + hosts: masters + gather_facts: no + remote_user: root + tasks: + - name: fetch elbs + ec2_elb_facts: + region: "{{ openshift_aws_region | default('us-east-1') }}" + names: + - "{{ item }}" + with_items: + - "{{ openshift_aws_clusterid | default('default') }}-master-external" + - "{{ openshift_aws_clusterid | default('default') }}-master-internal" + delegate_to: localhost + register: elbs + + - debug: var=elbs + + - name: set fact + set_fact: + openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}" + osm_custom_cors_origins: + - "{{ elbs.results[1].elbs[0].dns_name }}" + - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com" + - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com" + with_items: "{{ groups['masters'] }}" + +- name: normalize groups + include: ../../byo/openshift-cluster/initialize_groups.yml + +- name: run the std_include + include: ../../common/openshift-cluster/std_include.yml + +- name: run the config + include: ../../common/openshift-cluster/config.yml diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml index dfbf61cc7..db7afac6f 100644 --- a/playbooks/aws/openshift-cluster/provision.yml +++ b/playbooks/aws/openshift-cluster/provision.yml @@ -2,156 +2,16 @@ - name: Setup the vpc and the master node group hosts: localhost tasks: - - name: get provisioning vars - include_vars: vars.yml - - name: create default vpc - include_role: - name: openshift_aws_vpc - vars: - r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}" - r_openshift_aws_vpc_cidr: "{{ 
provision.vpc.cidr }}" - r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}" - r_openshift_aws_vpc_region: "{{ provision.region }}" - r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}" - r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}" - - - name: create aws ssh keypair - include_role: - name: openshift_aws_ssh_keys - vars: - r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}" - r_openshift_aws_ssh_keys_region: "{{ provision.region }}" - - - when: provision.openshift_registry_s3 | default(false) - name: create s3 bucket for registry - include_role: - name: openshift_aws_s3 - vars: - r_openshift_aws_s3_clusterid: "{{ provision.clusterid }}-docker-registry" - r_openshift_aws_s3_region: "{{ provision.region }}" - r_openshift_aws_s3_mode: create + - name: Alert user to variables needed - clusterid + debug: + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" - - name: include scale group creation for master - include: build_node_group.yml - vars: - openshift_build_node_type: master + - name: Alert user to variables needed - region + debug: + msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" - - name: fetch new master instances - ec2_remote_facts: - region: "{{ provision.region }}" - filters: - "tag:clusterid": "{{ provision.clusterid }}" - "tag:host-type": master - instance-state-name: running - register: instancesout - retries: 20 - delay: 3 - until: instancesout.instances|length > 0 - - - name: bring iam_cert23 into scope - include_role: - name: lib_utils - - - name: upload certificates to AWS IAM - iam_cert23: - state: present - name: "{{ provision.clusterid }}-master-external" - cert: "{{ provision.iam_cert_ca.cert_path }}" - key: "{{ provision.iam_cert_ca.key_path }}" - cert_chain: "{{ provision.iam_cert_ca.chain_path | default(omit) }}" - register: elb_cert_chain - failed_when: - - "'failed' in elb_cert_chain" - - elb_cert_chain.failed - - "'msg' in elb_cert_chain" - - "'already exists' not in elb_cert_chain.msg" - when: provision.iam_cert_ca is defined - - - debug: var=elb_cert_chain - - - name: create our master external and internal load balancers + - name: create default vpc include_role: - name: openshift_aws_elb - vars: - r_openshift_aws_elb_clusterid: "{{ provision.clusterid }}" - r_openshift_aws_elb_region: "{{ provision.region }}" - r_openshift_aws_elb_instance_filter: - "tag:clusterid": "{{ provision.clusterid }}" - "tag:host-type": master - instance-state-name: running - r_openshift_aws_elb_type: master - r_openshift_aws_elb_direction: "{{ elb_item }}" - r_openshift_aws_elb_idle_timout: 400 - r_openshift_aws_elb_scheme: internet-facing - r_openshift_aws_elb_security_groups: - - "{{ provision.clusterid }}" - - "{{ provision.clusterid }}_master" - r_openshift_aws_elb_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}" - r_openshift_aws_elb_name: "{{ provision.clusterid }}-master-{{ elb_item }}" - r_openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}" - with_items: - - internal - - external - loop_control: - loop_var: elb_item - - - name: add new master to masters group - add_host: - groups: "masters,etcd,nodes" - name: "{{ item.public_ip_address }}" - hostname: "{{ provision.clusterid }}-master-{{ item.id[:-5] }}" - with_items: "{{ instancesout.instances }}" - - - name: set facts for group normalization - set_fact: - cluster_id: "{{ provision.clusterid }}" - cluster_env: "{{ provision.node_group_config.tags.environment | default('dev') }}" - - - 
name: wait for ssh to become available - wait_for: - port: 22 - host: "{{ item.public_ip_address }}" - timeout: 300 - search_regex: OpenSSH - with_items: "{{ instancesout.instances }}" - - -- name: set the master facts for hostname to elb - hosts: masters - gather_facts: no - remote_user: root - tasks: - - name: include vars - include_vars: vars.yml - - - name: fetch elbs - ec2_elb_facts: - region: "{{ provision.region }}" - names: - - "{{ item }}" - with_items: - - "{{ provision.clusterid }}-master-external" - - "{{ provision.clusterid }}-master-internal" - delegate_to: localhost - register: elbs - - - debug: var=elbs - - - name: set fact - set_fact: - openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}" - osm_custom_cors_origins: - - "{{ elbs.results[1].elbs[0].dns_name }}" - - "console.{{ provision.clusterid }}.openshift.com" - - "api.{{ provision.clusterid }}.openshift.com" - with_items: "{{ groups['masters'] }}" - -- name: normalize groups - include: ../../byo/openshift-cluster/initialize_groups.yml - -- name: run the std_include - include: ../../common/openshift-cluster/std_include.yml - -- name: run the config - include: ../../common/openshift-cluster/config.yml + name: openshift_aws + tasks_from: provision.yml diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml new file mode 100644 index 000000000..e787deced --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_install.yml @@ -0,0 +1,16 @@ +--- +# Once an AMI is built then this script is used for +# the one-stop shop to provision and install a cluster +# this playbook is run with the following parameters: +# ansible-playbook -i openshift-ansible-inventory provision_install.yml +- name: Include the provision.yml playbook to create cluster + include: provision.yml + +- name: Include the install.yml playbook to install cluster + include: install.yml + +- name: Include the provision_nodes.yml playbook to provision the node scale groups + include: provision_nodes.yml + +- name: Include the accept.yml playbook to accept nodes into the cluster + include: accept.yml diff --git a/playbooks/aws/openshift-cluster/provision_nodes.yml b/playbooks/aws/openshift-cluster/provision_nodes.yml index 5428fb307..44c686e08 100644 --- a/playbooks/aws/openshift-cluster/provision_nodes.yml +++ b/playbooks/aws/openshift-cluster/provision_nodes.yml @@ -1,47 +1,18 @@ --- -# Get bootstrap config token -# bootstrap should be created on first master -# need to fetch it and shove it into cloud data - name: create the node scale groups hosts: localhost connection: local gather_facts: yes tasks: - - name: get provisioning vars - include_vars: vars.yml + - name: Alert user to variables needed - clusterid + debug: + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" - - name: fetch master instances - ec2_remote_facts: - region: "{{ provision.region }}" - filters: - "tag:clusterid": "{{ provision.clusterid }}" - "tag:host-type": master - instance-state-name: running - register: instancesout - retries: 20 - delay: 3 - until: instancesout.instances|length > 0 + - name: Alert user to variables needed - region + debug: + msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" - - name: slurp down the bootstrap.kubeconfig - slurp: - src: /etc/origin/master/bootstrap.kubeconfig - delegate_to: "{{ instancesout.instances[0].public_ip_address }}" - remote_user: root - register: bootstrap - - - name: set_fact on localhost for kubeconfig - set_fact: 
- local_bootstrap: "{{ bootstrap }}" - launch_config_name: - infra: "infra-{{ ansible_date_time.epoch }}" - compute: "compute-{{ ansible_date_time.epoch }}" - - - name: include build node group - include: build_node_group.yml - vars: - openshift_build_node_type: infra - - - name: include build node group - include: build_node_group.yml - vars: - openshift_build_node_type: compute + - name: create the node groups + include_role: + name: openshift_aws + tasks_from: provision_nodes.yml diff --git a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml new file mode 100644 index 000000000..28eb9c993 --- /dev/null +++ b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml @@ -0,0 +1,28 @@ +--- +# when creating an AMI set this option to True +# when installing the cluster, set this to False +openshift_node_bootstrap: True + +# specify a clusterid +#openshift_aws_clusterid: default + +# must specify a base_ami when building an AMI +#openshift_aws_base_ami: + +# when creating an encrypted AMI please specify use_encryption +#openshift_aws_ami_encrypt: False + +# custom certificates are required for the ELB +#openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt' +#openshift_aws_iam_key_path: '/path/to/wildcard.<clusterid>.example.com.key' +#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt' + +# This is required for any ec2 instances +#openshift_aws_ssh_key_name: myuser_key + +# This will ensure these users are created +#openshift_aws_users: +#- key_name: myuser_key +# username: myuser +# pub_key: | +# ssh-rsa AAAA diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml deleted file mode 100644 index 47da03cb7..000000000 --- a/playbooks/aws/openshift-cluster/vars.yml +++ /dev/null @@ -1,113 +0,0 @@ ---- - -clusterid: mycluster -region: us-east-1 - -provision: - clusterid: "{{ clusterid }}" - region: "{{ region }}" - - build: # build specific variables here - ami_name: "openshift-gi-" - base_image: ami-bdd5d6ab # base image for AMI to build from - - # when creating an encrypted AMI please specify use_encryption - use_encryption: False - - openshift_ami_tags: - bootstrap: "true" - openshift-created: "true" - clusterid: "{{ clusterid }}" - - # Use s3 backed registry storage - openshift_registry_s3: True - - # if using custom certificates these are required for the ELB - iam_cert_ca: - name: "{{ clusterid }}_openshift" - cert_path: '/path/to/wildcard.<clusterid>.example.com.crt' - key_path: '/path/to/wildcard.<clusterid>.example.com.key' - chain_path: '/path/to/cert.ca.crt' - - instance_users: - - key_name: myuser_key - username: myuser - pub_key: | - ssh-rsa AAAA== myuser@system - - node_group_config: - tags: - clusterid: "{{ clusterid }}" - environment: stg - - ssh_key_name: myuser_key - - # master specific cluster node settings - master: - instance_type: m4.xlarge - ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: False - health_check: - period: 60 - type: EC2 - min_size: 3 - max_size: 3 - desired_size: 3 - tags: - host-type: master - sub-host-type: default - wait_for_instances: True - - # compute specific cluster node settings - compute: - instance_type: m4.xlarge - ami: ami-cdeec8b6 - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: True - health_check: - period: 60 - type: EC2 - 
min_size: 3 - max_size: 100 - desired_size: 3 - tags: - host-type: node - sub-host-type: compute - - # infra specific cluster node settings - infra: - instance_type: m4.xlarge - ami: ami-cdeec8b6 - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: True - health_check: - period: 60 - type: EC2 - min_size: 2 - max_size: 20 - desired_size: 2 - tags: - host-type: node - sub-host-type: infra - - # vpc settings - vpc: - cidr: 172.31.0.0/16 - subnets: - us-east-1: # These are us-east-1 region defaults. Ensure this matches your region - - cidr: 172.31.48.0/20 - az: "us-east-1c" - - cidr: 172.31.32.0/20 - az: "us-east-1e" - - cidr: 172.31.16.0/20 - az: "us-east-1a" diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index acf5469bf..60fa44c5b 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -9,6 +9,4 @@ - include: ../../common/openshift-cluster/config.yml vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}" diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml index bbec3a4c2..a523bb47f 100644 --- a/playbooks/byo/openshift-cluster/openshift-logging.yml +++ b/playbooks/byo/openshift-cluster/openshift-logging.yml @@ -13,6 +13,3 @@ - always - include: ../../common/openshift-cluster/openshift_logging.yml - vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml index 6f95b4e2d..40a7606e7 100644 --- a/playbooks/byo/openshift-cluster/service-catalog.yml +++ b/playbooks/byo/openshift-cluster/service-catalog.yml @@ -13,6 +13,3 @@ - always - include: ../../common/openshift-cluster/service_catalog.yml - vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/byo/openshift-master/additional_config.yml b/playbooks/byo/openshift-master/additional_config.yml new file mode 100644 index 000000000..b3d7b5731 --- /dev/null +++ b/playbooks/byo/openshift-master/additional_config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-master/additional_config.yml diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml index 755852017..a09edd55a 100644 --- a/playbooks/byo/openshift-master/scaleup.yml +++ b/playbooks/byo/openshift-master/scaleup.yml @@ -18,6 +18,3 @@ - include: ../../common/openshift-cluster/std_include.yml - include: ../../common/openshift-master/scaleup.yml - vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 5f420a76c..bbd5a0185 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -22,6 +22,15 @@ tags: - always +- name: Set hostname + hosts: oo_masters_to_config:oo_nodes_to_config + tasks: + # TODO: switch back to hostname module once we depend 
on ansible-2.4 + # https://github.com/ansible/ansible/pull/25906 + - name: Set hostname + command: "hostnamectl set-hostname {{ openshift.common.hostname }}" + when: openshift_set_hostname | default(false,true) | bool + - include: ../openshift-etcd/config.yml - include: ../openshift-nfs/config.yml @@ -34,6 +43,8 @@ - include: ../openshift-master/config.yml +- include: ../openshift-master/additional_config.yml + - include: ../openshift-node/config.yml tags: - node diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml index 50351588f..be14b06f0 100644 --- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml +++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml @@ -27,9 +27,6 @@ role: "{{ item.role }}" local_facts: "{{ item.local_facts }}" with_items: - - role: common - local_facts: - use_dnsmasq: True - role: master local_facts: dns_port: '8053' @@ -50,9 +47,6 @@ role: "{{ item.role }}" local_facts: "{{ item.local_facts }}" with_items: - - role: common - local_facts: - use_dnsmasq: True - role: node local_facts: dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index c9f37109b..16a733899 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -48,6 +48,7 @@ when: - g_etcd_hosts | default([]) | length == 0 - not openshift_master_unsupported_all_in_one | default(False) + - not openshift_node_bootstrap | default(False) - name: Evaluate oo_all_hosts add_host: diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml index 9eaf3bc34..0723575c2 100644 --- a/playbooks/common/openshift-cluster/initialize_facts.yml +++ b/playbooks/common/openshift-cluster/initialize_facts.yml @@ -126,11 +126,9 @@ openshift_facts: role: common local_facts: - debug_level: "{{ openshift_debug_level | default(2) }}" deployment_type: "{{ openshift_deployment_type }}" deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}" cli_image: "{{ osm_image | default(None) }}" - cluster_id: "{{ openshift_cluster_id | default('default') }}" hostname: "{{ openshift_hostname | default(None) }}" ip: "{{ openshift_ip | default(None) }}" is_containerized: "{{ l_is_containerized | default(None) }}" @@ -148,8 +146,6 @@ no_proxy: "{{ openshift_no_proxy | default(None) }}" generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}" no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}" - sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}" - use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}" - name: initialize_facts set_fact repoquery command set_fact: diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 7112a6084..7af6b25bc 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -12,5 +12,10 @@ hosts: oo_all_hosts:!oo_first_master vars: openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}" + pre_tasks: + - set_fact: + openshift_pkg_version: -{{ openshift_version }} + when: openshift_pkg_version is not defined + - debug: 
msg="openshift_pkg_version set to {{ openshift_pkg_version }}" roles: - openshift_version diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index a391b963a..75339f6df 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -29,7 +29,6 @@ - role: openshift_default_storage_class when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce') - role: openshift_hosted - r_openshift_hosted_use_calico: "{{ openshift.common.use_calico | default(false) | bool }}" - role: openshift_metrics when: openshift_hosted_metrics_deploy | default(false) | bool - role: openshift_logging diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml new file mode 100644 index 000000000..26716a92d --- /dev/null +++ b/playbooks/common/openshift-cluster/sanity_checks.yml @@ -0,0 +1,51 @@ +--- +- name: Verify Requirements + hosts: oo_all_hosts + tasks: + - fail: + msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel + when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool + + - fail: + msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage + when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool + + - fail: + msg: Nuage sdn can not be used with flannel + when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool + + - fail: + msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv + when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool + + - fail: + msg: Contiv can not be used with flannel + when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool + + - fail: + msg: Contiv can not be used with nuage + when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool + + - fail: + msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico + when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool + + - fail: + msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both. + when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool + + - fail: + msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both + when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool + + - fail: + msg: Calico can not be used with Contiv in Openshift. 
Set either openshift_use_calico or openshift_use_contiv, but not both + when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool + + - fail: + msg: openshift_hostname must be 63 characters or less + when: openshift_hostname is defined and openshift_hostname | length > 63 + + - fail: + msg: openshift_public_hostname must be 63 characters or less + when: openshift_public_hostname is defined and openshift_public_hostname | length > 63 diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml index 6cc56889a..cef0072f3 100644 --- a/playbooks/common/openshift-cluster/std_include.yml +++ b/playbooks/common/openshift-cluster/std_include.yml @@ -7,6 +7,10 @@ tags: - always +- include: sanity_checks.yml + tags: + - always + - include: validate_hostnames.yml tags: - node diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml index 1a6580795..eb118365a 100644 --- a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml +++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml @@ -3,7 +3,7 @@ - name: Generate etcd instance names(s) set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" register: etcd_names_output with_sequence: count={{ num_etcd }} diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml index 36d7b7870..783f70f50 100644 --- a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml +++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml @@ -3,7 +3,7 @@ - name: Generate master instance names(s) set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" register: master_names_output with_sequence: count={{ num_masters }} diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml index 278942f8b..c103e40a9 100644 --- a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml +++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml @@ -5,7 +5,7 @@ - name: Generate node instance names(s) set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" + scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" register: node_names_output with_sequence: count={{ number_nodes }} diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml index 7cc13137f..98953f72e 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -4,7 +4,6 @@ # Do not allow adding hosts during upgrade. 
g_new_master_hosts: [] g_new_node_hosts: [] - openshift_cluster_id: "{{ cluster_id | default('default') }}" - include: ../initialize_nodes_to_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index 0f421928b..c98065cf4 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -4,7 +4,6 @@ # Do not allow adding hosts during upgrade. g_new_master_hosts: [] g_new_node_hosts: [] - openshift_cluster_id: "{{ cluster_id | default('default') }}" - include: ../initialize_oo_option_facts.yml diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml index 9d8b73cff..6d8503879 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml @@ -1,8 +1,10 @@ --- # Only check if docker upgrade is required if docker_upgrade is not # already set to False. -- include: ../docker/upgrade_check.yml - when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool +- include: ../../docker/upgrade_check.yml + when: + - docker_upgrade is not defined or (docker_upgrade | bool) + - not (openshift.common.is_atomic | bool) # Additional checks for Atomic hosts: diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml index 9a959a959..78923e04f 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml @@ -9,6 +9,21 @@ deployment types when: deployment_type not in ['origin','openshift-enterprise', 'online'] + # osm_cluster_network_cidr, osm_host_subnet_length, and openshift_portal_net are + # required when upgrading to avoid changes that may occur between releases + # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1451023 + - assert: + that: + - "osm_cluster_network_cidr is defined" + - "osm_host_subnet_length is defined" + - "openshift_portal_net is defined" + msg: > + osm_cluster_network_cidr, osm_host_subnet_length, and openshift_portal_net are required inventory + variables when upgrading. These variables should match what is currently used in the cluster. If + you don't remember what these values are, you can find them in /etc/origin/master/master-config.yaml + on a master with the names clusterNetworkCIDR (osm_cluster_network_cidr), + hostSubnetLength (osm_host_subnet_length), and serviceNetworkCIDR (openshift_portal_net). + # Error out in situations where the user has older versions specified in their # inventory in any of the openshift_release, openshift_image_tag, and # openshift_pkg_version variables. 
These must be removed or updated to proceed diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml index c0ea93d2c..7468c78f0 100644 --- a/playbooks/common/openshift-master/additional_config.yml +++ b/playbooks/common/openshift-master/additional_config.yml @@ -11,13 +11,13 @@ when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - role: openshift_examples registry_url: "{{ openshift.master.registry_url }}" - when: openshift.common.install_examples | bool + when: openshift_install_examples | default(True) - role: openshift_hosted_templates registry_url: "{{ openshift.master.registry_url }}" - role: openshift_manageiq - when: openshift.common.use_manageiq | bool + when: openshift_use_manageiq | default(false) | bool - role: cockpit when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and (osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' ) - role: flannel_register - when: openshift.common.use_flannel | bool + when: openshift_use_flannel | default(false) | bool diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index b29b9ef4f..c77d7bb87 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -208,18 +208,15 @@ openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}" openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}" - role: nuage_master - when: openshift.common.use_nuage | bool + when: openshift_use_nuage | default(false) | bool - role: calico_master - when: openshift.common.use_calico | bool + when: openshift_use_calico | default(false) | bool post_tasks: - name: Create group for deployment type group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} changed_when: False -- include: additional_config.yml - when: not g_openshift_master_is_scaleup - - name: Re-enable excluder if it was previously enabled hosts: oo_masters_to_config gather_facts: no diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index 6fec346c3..4d73b8124 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -7,7 +7,7 @@ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" serial: 1 handlers: - - include: roles/openshift_master/handlers/main.yml + - include: ../../../roles/openshift_master/handlers/main.yml static: yes roles: - openshift_facts diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index c13417714..0801c41ff 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -71,17 +71,18 @@ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/node" - when: openshift.common.use_flannel | bool + when: openshift_use_flannel | default(false) | bool - role: calico - when: openshift.common.use_calico | bool + when: openshift_use_calico | default(false) | bool - role: nuage_node - when: openshift.common.use_nuage | bool + when: openshift_use_nuage | default(false) | bool - role: contiv 
contiv_role: netplugin - when: openshift.common.use_contiv | bool + when: openshift_use_contiv | default(false) | bool - role: nickhammond.logrotate - role: openshift_manage_node openshift_master_host: "{{ groups.oo_first_master.0 }}" + when: not openshift_node_bootstrap | default(False) tasks: - name: Create group for deployment type group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }} diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index 451b8498f..24ca0d9f8 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -97,10 +97,16 @@ l_crio_image_prepend: "docker.io/gscrivano" l_crio_image_name: "crio-o-fedora" - - name: Use Centos based image when distribution is Red Hat or CentOS + - name: Use CentOS based image when distribution is CentOS set_fact: l_crio_image_name: "cri-o-centos" - when: ansible_distribution in ['RedHat', 'CentOS'] + when: ansible_distribution == "CentOS" + + - name: Use RHEL based image when distribution is Red Hat + set_fact: + l_crio_image_prepend: "registry.access.redhat.com" + l_crio_image_name: "cri-o" + when: ansible_distribution == "RedHat" # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504 - name: Use a testing registry if requested diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml index 57a84bc2c..146e5f430 100644 --- a/roles/docker/tasks/systemcontainer_docker.yml +++ b/roles/docker/tasks/systemcontainer_docker.yml @@ -100,18 +100,22 @@ l_docker_image_prepend: "registry.fedoraproject.org/f25" when: ansible_distribution == 'Fedora' - # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504 - - name: Use a testing registry if requested - set_fact: - l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}" - when: - - openshift_docker_systemcontainer_image_registry_override is defined - - openshift_docker_systemcontainer_image_registry_override != "" - - name: Set the full image name set_fact: l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest" + # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959 + - name: Use a specific image if requested + set_fact: + l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}" + when: + - openshift_docker_systemcontainer_image_override is defined + - openshift_docker_systemcontainer_image_override != "" + + # Be nice and let the user see the variable result + - debug: + var: l_docker_image + # NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released - name: Pre-pull Container Engine System Container image command: "atomic pull --storage ostree {{ l_docker_image }}" diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml index f5bcd03ee..6ed87e6c7 100644 --- a/roles/etcd_common/tasks/main.yml +++ b/roles/etcd_common/tasks/main.yml @@ -6,4 +6,4 @@ - name: Include main action task file include: "{{ r_etcd_common_action }}.yml" - when: '"noop" not in r_etcd_common_action' + when: r_etcd_common_action != "noop" diff --git a/roles/etcd_common/tasks/noop.yml b/roles/etcd_common/tasks/noop.yml new file mode 100644 index 000000000..a88d78235 --- /dev/null +++ b/roles/etcd_common/tasks/noop.yml @@ -0,0 +1,4 @@ +--- +# This file is here because the usage of tags, specifically `pre_upgrade` +# breaks 
the functionality of this role. +# See https://bugzilla.redhat.com/show_bug.cgi?id=1464025 diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md new file mode 100644 index 000000000..696efbea5 --- /dev/null +++ b/roles/openshift_aws/README.md @@ -0,0 +1,84 @@ +openshift_aws +================================== + +Provision AWS infrastructure helpers. + +Requirements +------------ + +* Ansible 2.3 +* Boto + +Role Variables +-------------- + +From this role: + +| Name | Default value +|---------------------------------------------------|----------------------- +| openshift_aws_clusterid | default +| openshift_aws_elb_scheme | internet-facing +| openshift_aws_launch_config_bootstrap_token | '' +| openshift_aws_node_group_config | {'master': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_master_volumes }}', 'tags': {'host-type': 'master', 'sub-host-type': 'default'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'wait_for_instances': True, 'max_size': 3}, 'tags': '{{ openshift_aws_node_group_config_tags }}', 'compute': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'compute'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'max_size': 100}, 'infra': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'infra'}, 'min_size': 2, 'instance_type': 'm4.xlarge', 'desired_size': 2, 'max_size': 20}} +| openshift_aws_ami_copy_wait | False +| openshift_aws_users | [] +| openshift_aws_launch_config_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }} +| openshift_aws_create_vpc | False +| openshift_aws_node_group_type | master +| openshift_aws_elb_cert_arn | '' +| openshift_aws_kubernetes_cluster_status | owned +| openshift_aws_s3_mode | create +| openshift_aws_vpc | {'subnets': {'us-east-1': [{'cidr': '172.31.48.0/20', 'az': 'us-east-1c'}, {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'}, {'cidr': '172.31.16.0/20', 'az': 'us-east-1a'}]}, 'cidr': '172.31.0.0/16', 'name': '{{ openshift_aws_vpc_name }}'} +| openshift_aws_create_ssh_keys | False +| openshift_aws_iam_kms_alias | alias/{{ openshift_aws_clusterid }}_kms +| openshift_aws_use_custom_ami | False +| openshift_aws_ami_copy_src_region | {{ openshift_aws_region }} +| openshift_aws_s3_bucket_name | {{ openshift_aws_clusterid }} +| openshift_aws_elb_health_check | {'response_timeout': 5, 'ping_port': 443, 'ping_protocol': 'tcp', 'interval': 30, 'healthy_threshold': 2, 'unhealthy_threshold': 2} +| openshift_aws_node_security_groups | {'default': {'rules': [{'to_port': 22, 'from_port': 22, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 'all', 'from_port': 'all', 'proto': 'all', 'group_name': '{{ openshift_aws_clusterid }}'}], 'name': '{{ openshift_aws_clusterid }}', 'desc': '{{ openshift_aws_clusterid }} default'}, 'master': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_master', 'desc': '{{ openshift_aws_clusterid }} master instances'}, 'compute': {'name': '{{ openshift_aws_clusterid }}_compute', 'desc': '{{ openshift_aws_clusterid }} compute node instances'}, 
'etcd': {'name': '{{ openshift_aws_clusterid }}_etcd', 'desc': '{{ openshift_aws_clusterid }} etcd instances'}, 'infra': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 32000, 'from_port': 30000, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_infra', 'desc': '{{ openshift_aws_clusterid }} infra node instances'}} +| openshift_aws_elb_security_groups | ['{{ openshift_aws_clusterid }}', '{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}'] +| openshift_aws_vpc_tags | {'Name': '{{ openshift_aws_vpc_name }}'} +| openshift_aws_create_security_groups | False +| openshift_aws_create_iam_cert | False +| openshift_aws_create_scale_group | True +| openshift_aws_ami_encrypt | False +| openshift_aws_node_group_config_node_volumes | [{'volume_size': 100, 'delete_on_termination': True, 'device_type': 'gp2', 'device_name': '/dev/sdb'}] +| openshift_aws_elb_instance_filter | {'tag:host-type': '{{ openshift_aws_node_group_type }}', 'tag:clusterid': '{{ openshift_aws_clusterid }}', 'instance-state-name': 'running'} +| openshift_aws_region | us-east-1 +| openshift_aws_elb_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }} +| openshift_aws_elb_idle_timout | 400 +| openshift_aws_subnet_name | us-east-1c +| openshift_aws_node_group_config_tags | {{ openshift_aws_clusterid | openshift_aws_build_instance_tags(openshift_aws_kubernetes_cluster_status) }} +| openshift_aws_create_launch_config | True +| openshift_aws_ami_tags | {'bootstrap': 'true', 'clusterid': '{{ openshift_aws_clusterid }}', 'openshift-created': 'true'} +| openshift_aws_ami_name | openshift-gi +| openshift_aws_node_group_config_master_volumes | [{'volume_size': 100, 'delete_on_termination': False, 'device_type': 'gp2', 'device_name': '/dev/sdb'}] +| openshift_aws_vpc_name | {{ openshift_aws_clusterid }} +| openshift_aws_elb_listeners | {'master': {'internal': [{'instance_port': 80, 'instance_protocol': 'tcp', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'tcp', 'load_balancer_port': 443, 'protocol': 'tcp'}], 'external': [{'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 443, 'ssl_certificate_id': '{{ openshift_aws_elb_cert_arn }}', 'protocol': 'ssl'}]}} +| + + +Dependencies +------------ + + +Example Playbook +---------------- + +```yaml +- include_role: + name: openshift_aws + tasks_from: vpc.yml + vars: + openshift_aws_clusterid: test + openshift_aws_region: us-east-1 + openshift_aws_create_vpc: true +``` + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml new file mode 100644 index 000000000..4e7f54f79 --- /dev/null +++ b/roles/openshift_aws/defaults/main.yml @@ -0,0 +1,209 @@ +--- +openshift_aws_create_vpc: True +openshift_aws_create_s3: True +openshift_aws_create_iam_cert: True +openshift_aws_create_security_groups: True +openshift_aws_create_launch_config: True +openshift_aws_create_scale_group: True +openshift_aws_kubernetes_cluster_status: owned # or shared +openshift_aws_node_group_type: master + +openshift_aws_wait_for_ssh: True + +openshift_aws_clusterid: default +openshift_aws_region: us-east-1 +openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}" + 
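+# Editor's note: the defaults below compose resource names from openshift_aws_clusterid.
+# A hypothetical inventory that only sets openshift_aws_clusterid=mycluster would
+# therefore resolve to (illustrative values, not shipped defaults):
+#   openshift_aws_vpc_name:         mycluster
+#   openshift_aws_iam_cert_name:    mycluster-master-external
+#   openshift_aws_scale_group_name: mycluster openshift master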
+openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external" +openshift_aws_iam_cert_path: '' +openshift_aws_iam_cert_chain_path: '' +openshift_aws_iam_cert_key_path: '' +openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}" + +openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms" +openshift_aws_ami: '' +openshift_aws_ami_copy_wait: False +openshift_aws_ami_encrypt: False +openshift_aws_ami_copy_src_region: "{{ openshift_aws_region }}" +openshift_aws_ami_name: openshift-gi +openshift_aws_base_ami_name: ami_base + +openshift_aws_launch_config_bootstrap_token: '' +openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-{{ ansible_date_time.epoch }}" + +openshift_aws_users: [] + +openshift_aws_ami_tags: + bootstrap: "true" + openshift-created: "true" + clusterid: "{{ openshift_aws_clusterid }}" + +openshift_aws_s3_mode: create +openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry" + +openshift_aws_elb_health_check: + ping_protocol: tcp + ping_port: 443 + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 2 + +openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}" +openshift_aws_elb_idle_timout: 400 +openshift_aws_elb_scheme: internet-facing +openshift_aws_elb_cert_arn: '' + +openshift_aws_elb_listeners: + master: + external: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: ssl + instance_port: 443 + - protocol: ssl + load_balancer_port: 443 + instance_protocol: ssl + instance_port: 443 + # ssl certificate required for https or ssl + ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}" + internal: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: tcp + instance_port: 80 + - protocol: tcp + load_balancer_port: 443 + instance_protocol: tcp + instance_port: 443 + +openshift_aws_node_group_config_master_volumes: +- device_name: /dev/sdb + volume_size: 100 + device_type: gp2 + delete_on_termination: False + +openshift_aws_node_group_config_node_volumes: +- device_name: /dev/sdb + volume_size: 100 + device_type: gp2 + delete_on_termination: True + +openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}" + +openshift_aws_node_group_config: + tags: "{{ openshift_aws_node_group_config_tags }}" + master: + instance_type: m4.xlarge + ami: "{{ openshift_aws_ami }}" + volumes: "{{ openshift_aws_node_group_config_master_volumes }}" + health_check: + period: 60 + type: EC2 + min_size: 3 + max_size: 3 + desired_size: 3 + tags: + host-type: master + sub-host-type: default + wait_for_instances: True + compute: + instance_type: m4.xlarge + ami: "{{ openshift_aws_ami }}" + volumes: "{{ openshift_aws_node_group_config_node_volumes }}" + health_check: + period: 60 + type: EC2 + min_size: 3 + max_size: 100 + desired_size: 3 + tags: + host-type: node + sub-host-type: compute + infra: + instance_type: m4.xlarge + ami: "{{ openshift_aws_ami }}" + volumes: "{{ openshift_aws_node_group_config_node_volumes }}" + health_check: + period: 60 + type: EC2 + min_size: 2 + max_size: 20 + desired_size: 2 + tags: + host-type: node + sub-host-type: infra + +openshift_aws_elb_security_groups: +- "{{ openshift_aws_clusterid }}" +- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" + +openshift_aws_elb_instance_filter: + "tag:clusterid": "{{ openshift_aws_clusterid }}" + "tag:host-type": 
"{{ openshift_aws_node_group_type }}" + instance-state-name: running + +openshift_aws_node_security_groups: + default: + name: "{{ openshift_aws_clusterid }}" + desc: "{{ openshift_aws_clusterid }} default" + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + - proto: all + from_port: all + to_port: all + group_name: "{{ openshift_aws_clusterid }}" + master: + name: "{{ openshift_aws_clusterid }}_master" + desc: "{{ openshift_aws_clusterid }} master instances" + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 443 + to_port: 443 + cidr_ip: 0.0.0.0/0 + compute: + name: "{{ openshift_aws_clusterid }}_compute" + desc: "{{ openshift_aws_clusterid }} compute node instances" + infra: + name: "{{ openshift_aws_clusterid }}_infra" + desc: "{{ openshift_aws_clusterid }} infra node instances" + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 443 + to_port: 443 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 30000 + to_port: 32000 + cidr_ip: 0.0.0.0/0 + etcd: + name: "{{ openshift_aws_clusterid }}_etcd" + desc: "{{ openshift_aws_clusterid }} etcd instances" + +openshift_aws_vpc_tags: + Name: "{{ openshift_aws_vpc_name }}" + +openshift_aws_subnet_name: us-east-1c + +openshift_aws_vpc: + name: "{{ openshift_aws_vpc_name }}" + cidr: 172.31.0.0/16 + subnets: + us-east-1: + - cidr: 172.31.48.0/20 + az: "us-east-1c" + - cidr: 172.31.32.0/20 + az: "us-east-1e" + - cidr: 172.31.16.0/20 + az: "us-east-1a" diff --git a/roles/openshift_aws/filter_plugins/filters.py b/roles/openshift_aws/filter_plugins/filters.py new file mode 100644 index 000000000..06e1f9602 --- /dev/null +++ b/roles/openshift_aws/filter_plugins/filters.py @@ -0,0 +1,28 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' +Custom filters for use in openshift_aws +''' + + +class FilterModule(object): + ''' Custom ansible filters for use by openshift_aws role''' + + @staticmethod + def build_instance_tags(clusterid, status='owned'): + ''' This function will return a dictionary of the instance tags. + + The main desire to have this inside of a filter_plugin is that we + need to build the following key. 
+ + {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'} + + ''' + tags = {'clusterid': clusterid, + 'kubernetes.io/cluster/{}'.format(clusterid): status} + + return tags + + def filters(self): + ''' returns a mapping of filters to methods ''' + return {'build_instance_tags': self.build_instance_tags} diff --git a/roles/openshift_aws/meta/main.yml b/roles/openshift_aws/meta/main.yml new file mode 100644 index 000000000..875efcb8f --- /dev/null +++ b/roles/openshift_aws/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- lib_utils diff --git a/roles/openshift_aws/tasks/ami_copy.yml b/roles/openshift_aws/tasks/ami_copy.yml new file mode 100644 index 000000000..07020dd62 --- /dev/null +++ b/roles/openshift_aws/tasks/ami_copy.yml @@ -0,0 +1,34 @@ +--- +- fail: + msg: "{{ item }} needs to be defined" + when: item is not defined + with_items: + - openshift_aws_ami_copy_src_ami + - openshift_aws_ami_copy_name + +- name: Create IAM KMS key with alias + oo_iam_kms: + state: present + alias: "{{ openshift_aws_iam_kms_alias }}" + region: "{{ openshift_aws_region }}" + register: created_kms + +- debug: var=created_kms.results + +- name: "Create copied AMI image and wait: {{ openshift_aws_ami_copy_wait }}" + ec2_ami_copy: + name: "{{ openshift_aws_ami_copy_name }}" + region: "{{ openshift_aws_region }}" + source_region: "{{ openshift_aws_ami_copy_src_region }}" + source_image_id: "{{ openshift_aws_ami_copy_src_ami }}" + encrypted: "{{ openshift_aws_ami_encrypt | bool }}" + kms_key_id: "{{ created_kms.results.KeyArn | default(omit) }}" + wait: "{{ openshift_aws_ami_copy_wait | default(omit) }}" + tags: "{{ openshift_aws_ami_tags }}" + register: copy_result + +- debug: var=copy_result + +- name: return AMI ID with setfact + set_fact: + openshift_aws_ami_copy_custom_ami: "{{ copy_result.image_id }}" diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/build_ami.yml new file mode 100644 index 000000000..8d4e5ac43 --- /dev/null +++ b/roles/openshift_aws/tasks/build_ami.yml @@ -0,0 +1,48 @@ +--- +- when: openshift_aws_create_vpc | bool + name: create a vpc + include: vpc.yml + +- when: openshift_aws_users | length > 0 + name: create aws ssh keypair + include: ssh_keys.yml + +- when: openshift_aws_create_security_groups | bool + name: Create compute security_groups + include: security_group.yml + +- name: query vpc + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + 'tag:Name': "{{ openshift_aws_vpc_name }}" + register: vpcout + +- name: fetch the default subnet id + ec2_vpc_subnet_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_subnet_name }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" + register: subnetout + +- name: create instance for ami creation + ec2: + assign_public_ip: yes + region: "{{ openshift_aws_region }}" + key_name: "{{ openshift_aws_ssh_key_name }}" + group: "{{ openshift_aws_clusterid }}" + instance_type: m4.xlarge + vpc_subnet_id: "{{ subnetout.subnets[0].id }}" + image: "{{ openshift_aws_base_ami }}" + volumes: + - device_name: /dev/sdb + volume_type: gp2 + volume_size: 100 + delete_on_termination: true + wait: yes + exact_count: 1 + count_tag: + Name: "{{ openshift_aws_base_ami_name }}" + instance_tags: + Name: "{{ openshift_aws_base_ami_name }}" diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml new file mode 100644 index 000000000..0dac1c23d --- /dev/null +++ b/roles/openshift_aws/tasks/build_node_group.yml @@ -0,0 +1,34 @@ +--- +# 
When openshift_aws_ami is '' we +# retrieve the most recently built AMI. +# Then set openshift_aws_ami to that image. +- when: openshift_aws_ami == '' + block: + - name: fetch recently created AMI + ec2_ami_find: + region: "{{ openshift_aws_region }}" + sort: creationDate + sort_order: descending + name: "{{ openshift_aws_ami_name }}*" + ami_tags: "{{ openshift_aws_ami_tags }}" + no_result_action: fail + register: amiout + + - name: Set the openshift_aws_ami + set_fact: + openshift_aws_ami: "{{ amiout.results[0].ami_id }}" + when: + - "'results' in amiout" + - amiout.results|length > 0 + +- when: openshift_aws_create_security_groups + name: "Create {{ openshift_aws_node_group_type }} security groups" + include: security_group.yml + +- when: openshift_aws_create_launch_config + name: "Create {{ openshift_aws_node_group_type }} launch config" + include: launch_config.yml + +- when: openshift_aws_create_scale_group + name: "Create {{ openshift_aws_node_group_type }} node group" + include: scale_group.yml diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml new file mode 100644 index 000000000..a1fdd66fc --- /dev/null +++ b/roles/openshift_aws/tasks/elb.yml @@ -0,0 +1,68 @@ +--- +- name: query vpc + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + 'tag:Name': "{{ openshift_aws_vpc_name }}" + register: vpcout + +- name: debug + debug: var=vpcout + +- name: fetch the remote instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: "{{ openshift_aws_elb_instance_filter }}" + register: instancesout + +- name: fetch the default subnet id + ec2_vpc_subnet_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_subnet_name }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" + register: subnetout + +- name: + debug: + msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction] + if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type + else openshift_aws_elb_listeners }}" + +- name: "Create ELB {{ openshift_aws_elb_name }}" + ec2_elb_lb: + name: "{{ openshift_aws_elb_name }}" + state: present + security_group_names: "{{ openshift_aws_elb_security_groups }}" + idle_timeout: "{{ openshift_aws_elb_idle_timout }}" + region: "{{ openshift_aws_region }}" + subnets: + - "{{ subnetout.subnets[0].id }}" + health_check: "{{ openshift_aws_elb_health_check }}" + listeners: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction] + if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type + else openshift_aws_elb_listeners }}" + scheme: "{{ openshift_aws_elb_scheme }}" + tags: + KubernetesCluster: "{{ openshift_aws_clusterid }}" + register: new_elb + +# It is necessary to ignore_errors here because the instances are not in 'ready' +# state when first added to ELB +- name: "Add instances to ELB {{ openshift_aws_elb_name }}" + ec2_elb: + instance_id: "{{ item.id }}" + ec2_elbs: "{{ openshift_aws_elb_name }}" + state: present + region: "{{ openshift_aws_region }}" + wait: False + with_items: "{{ instancesout.instances }}" + ignore_errors: True + retries: 10 + register: elb_call + until: elb_call|succeeded + +- debug: + msg: "{{ item }}" + with_items: + - "{{ new_elb }}" diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml new file mode 100644 index 000000000..cd9772a25 --- /dev/null +++ b/roles/openshift_aws/tasks/iam_cert.yml @@ -0,0 +1,29 @@ 
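+# Editor's sketch (hypothetical values): the certificate upload below only runs
+# when an inventory supplies the paths, e.g.
+#   openshift_aws_create_iam_cert: True
+#   openshift_aws_iam_cert_path: /local/path/to/master.crt
+#   openshift_aws_iam_cert_key_path: /local/path/to/master.key
+# and openshift_aws_elb_cert_arn is left at its default of ''.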
+--- +- name: upload certificates to AWS IAM + iam_cert23: + state: present + name: "{{ openshift_aws_iam_cert_name }}" + cert: "{{ openshift_aws_iam_cert_path }}" + key: "{{ openshift_aws_iam_cert_key_path }}" + cert_chain: "{{ openshift_aws_iam_cert_chain_path | default(omit) }}" + register: elb_cert_chain + failed_when: + - "'failed' in elb_cert_chain" + - elb_cert_chain.failed + - "'msg' in elb_cert_chain" + - "'already exists and has a different certificate body' in elb_cert_chain.msg" + - "'BotoServerError' in elb_cert_chain.msg" + when: + - openshift_aws_create_iam_cert | bool + - openshift_aws_iam_cert_path != '' + - openshift_aws_iam_cert_key_path != '' + - openshift_aws_elb_cert_arn == '' + +- name: set_fact openshift_aws_elb_cert_arn + set_fact: + openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}" + +- name: wait for cert to propagate + pause: + seconds: 5 + when: elb_cert_chain.changed diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml new file mode 100644 index 000000000..65c5a6cc0 --- /dev/null +++ b/roles/openshift_aws/tasks/launch_config.yml @@ -0,0 +1,45 @@ +--- +- fail: + msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image." + when: + - openshift_aws_ami is undefined + +- name: fetch the security groups for launch config + ec2_group_facts: + filters: + group-name: + - "{{ openshift_aws_clusterid }}" # default sg + - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg + - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s + region: "{{ openshift_aws_region }}" + register: ec2sgs + +# Create the scale group config +- name: Create the node scale group launch config + ec2_lc: + name: "{{ openshift_aws_launch_config_name }}" + region: "{{ openshift_aws_region }}" + image_id: "{{ openshift_aws_ami }}" + instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}" + security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}" + user_data: |- + #cloud-config + {% if openshift_aws_node_group_type != 'master' %} + write_files: + - path: /root/csr_kubeconfig + owner: root:root + permissions: '0640' + content: {{ openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }} + - path: /root/openshift_settings + owner: root:root + permissions: '0640' + content: + openshift_type: "{{ openshift_aws_node_group_type }}" + runcmd: + - [ systemctl, enable, atomic-openshift-node] + - [ systemctl, start, atomic-openshift-node] + {% endif %} + key_name: "{{ openshift_aws_ssh_key_name }}" + ebs_optimized: False + volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}" + assign_public_ip: True diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml new file mode 100644 index 000000000..189caeaee --- /dev/null +++ b/roles/openshift_aws/tasks/provision.yml @@ -0,0 +1,54 @@ +--- +- when: openshift_aws_create_vpc | bool + name: create default vpc + include: vpc.yml + +- when: openshift_aws_create_iam_cert | bool + name: create the iam_cert for elb certificate + include: iam_cert.yml + +- when: openshift_aws_users | length > 0 + name: create aws ssh keypair + include: ssh_keys.yml + +- when: openshift_aws_create_s3 | bool + name: create s3 bucket for registry + include: s3.yml + +- name: include scale group creation for master + include: build_node_group.yml + +- name: fetch newly 
created instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:clusterid": "{{ openshift_aws_clusterid }}" + "tag:host-type": "{{ openshift_aws_node_group_type }}" + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + +- name: create our master internal load balancers + include: elb.yml + vars: + openshift_aws_elb_direction: internal + openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-internal" + openshift_aws_elb_scheme: internal + +- name: create our master external load balancers + include: elb.yml + vars: + openshift_aws_elb_direction: external + openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-external" + openshift_aws_elb_scheme: internet-facing + +- name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ item.public_ip_address }}" + timeout: 300 + search_regex: OpenSSH + with_items: "{{ instancesout.instances }}" + when: openshift_aws_wait_for_ssh | bool diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml new file mode 100644 index 000000000..fc4996c68 --- /dev/null +++ b/roles/openshift_aws/tasks/provision_nodes.yml @@ -0,0 +1,66 @@ +--- +# Get bootstrap config token +# bootstrap should be created on first master +# need to fetch it and shove it into cloud data +- name: fetch master instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:clusterid": "{{ openshift_aws_clusterid }}" + "tag:host-type": master + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + +- name: slurp down the bootstrap.kubeconfig + slurp: + src: /etc/origin/master/bootstrap.kubeconfig + delegate_to: "{{ instancesout.instances[0].public_ip_address }}" + remote_user: root + register: bootstrap + +- name: set_fact for kubeconfig token + set_fact: + openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}" + +- name: include build node group for infra + include: build_node_group.yml + vars: + openshift_aws_node_group_type: infra + openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift infra" + openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-infra-{{ ansible_date_time.epoch }}" + +- name: include build node group for compute + include: build_node_group.yml + vars: + openshift_aws_node_group_type: compute + openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift compute" + openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-compute-{{ ansible_date_time.epoch }}" + +- when: openshift_aws_wait_for_ssh | bool + block: + - name: pause and allow for instances to scale before we query them + pause: + seconds: 10 + + - name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:clusterid": "{{ openshift_aws_clusterid }}" + "tag:host-type": node + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + + - name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ item.public_ip_address }}" + timeout: 300 + search_regex: OpenSSH + with_items: "{{ instancesout.instances }}" diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml new file mode 100644 index 000000000..9cf37c840 --- /dev/null +++ 
b/roles/openshift_aws/tasks/s3.yml @@ -0,0 +1,7 @@ +--- +- name: Create an s3 bucket + s3: + bucket: "{{ openshift_aws_s3_bucket_name }}" + mode: "{{ openshift_aws_s3_mode }}" + region: "{{ openshift_aws_region }}" + when: openshift_aws_create_s3 | bool diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml new file mode 100644 index 000000000..3e969fc43 --- /dev/null +++ b/roles/openshift_aws/tasks/scale_group.yml @@ -0,0 +1,32 @@ +--- +- name: query vpc + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + 'tag:Name': "{{ openshift_aws_vpc_name }}" + register: vpcout + +- name: fetch the subnet to use in scale group + ec2_vpc_subnet_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_subnet_name }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" + register: subnetout + +- name: Create the scale group + ec2_asg: + name: "{{ openshift_aws_scale_group_name }}" + launch_config_name: "{{ openshift_aws_launch_config_name }}" + health_check_period: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.period }}" + health_check_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.type }}" + min_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].min_size }}" + max_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].max_size }}" + desired_capacity: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].desired_size }}" + region: "{{ openshift_aws_region }}" + termination_policies: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].termination_policy if 'termination_policy' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}" + load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}" + wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}" + vpc_zone_identifier: "{{ subnetout.subnets[0].id }}" + tags: + - "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}" diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml new file mode 100644 index 000000000..0cb749dcc --- /dev/null +++ b/roles/openshift_aws/tasks/seal_ami.yml @@ -0,0 +1,49 @@ +--- +- name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_base_ami_name }}" + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + +- name: bundle ami + ec2_ami: + instance_id: "{{ instancesout.instances.0.id }}" + region: "{{ openshift_aws_region }}" + state: present + description: "This was provisioned {{ ansible_date_time.iso8601 }}" + name: "{{ openshift_aws_ami_name }}" + tags: "{{ openshift_aws_ami_tags }}" + wait: yes + register: amioutput + +- debug: var=amioutput + +- when: openshift_aws_ami_encrypt | bool + block: + - name: augment the encrypted ami tags with source-ami + set_fact: + source_tag: + source-ami: "{{ amioutput.image_id }}" + + - name: copy the ami for encrypted disks + include: ami_copy.yml + vars: + openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted" + openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}" + # TODO: How does the kms 
alias get passed to ec2_ami_copy + openshift_aws_ami_copy_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms" + openshift_aws_ami_copy_tags: "{{ source_tag | combine(openshift_aws_ami_tags) }}" + # this option currently fails due to boto waiters + # when supported this need to be reapplied + #openshift_aws_ami_copy_wait: True + +- name: terminate temporary instance + ec2: + state: absent + region: "{{ openshift_aws_region }}" + instance_ids: "{{ instancesout.instances.0.id }}" diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml new file mode 100644 index 000000000..161e72fb4 --- /dev/null +++ b/roles/openshift_aws/tasks/security_group.yml @@ -0,0 +1,45 @@ +--- +- name: Fetch the VPC for the vpc.id + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_clusterid }}" + register: vpcout + +- name: Create default security group for cluster + ec2_group: + name: "{{ openshift_aws_node_security_groups.default.name }}" + description: "{{ openshift_aws_node_security_groups.default.desc }}" + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + rules: "{{ openshift_aws_node_security_groups.default.rules | default(omit, True)}}" + register: sg_default_created + +- name: create the node group sgs + ec2_group: + name: "{{ item.name}}" + description: "{{ item.desc }}" + rules: "{{ item.rules if 'rules' in item else [] }}" + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + register: sg_create + with_items: + - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}" + +- name: create the k8s sgs for the node group + ec2_group: + name: "{{ item.name }}_k8s" + description: "{{ item.desc }} for k8s" + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + register: k8s_sg_create + with_items: + - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}" + +- name: tag sg groups with proper tags + ec2_tag: + tags: + KubernetesCluster: "{{ openshift_aws_clusterid }}" + resource: "{{ item.group_id }}" + region: "{{ openshift_aws_region }}" + with_items: "{{ k8s_sg_create.results }}" diff --git a/roles/openshift_aws_ssh_keys/tasks/main.yml b/roles/openshift_aws/tasks/ssh_keys.yml index 232cf20ed..f439ce74e 100644 --- a/roles/openshift_aws_ssh_keys/tasks/main.yml +++ b/roles/openshift_aws/tasks/ssh_keys.yml @@ -3,6 +3,6 @@ ec2_key: name: "{{ item.key_name }}" key_material: "{{ item.pub_key }}" - region: "{{ r_openshift_aws_ssh_keys_region }}" - with_items: "{{ r_openshift_aws_ssh_keys_users }}" + region: "{{ openshift_aws_region }}" + with_items: "{{ openshift_aws_users }}" no_log: True diff --git a/roles/openshift_aws_vpc/tasks/main.yml b/roles/openshift_aws/tasks/vpc.yml index cfe08dae5..ce2c8eac5 100644 --- a/roles/openshift_aws_vpc/tasks/main.yml +++ b/roles/openshift_aws/tasks/vpc.yml @@ -2,13 +2,12 @@ - name: Create AWS VPC ec2_vpc_net: state: present - cidr_block: "{{ r_openshift_aws_vpc_cidr }}" + cidr_block: "{{ openshift_aws_vpc.cidr }}" dns_support: True dns_hostnames: True - region: "{{ r_openshift_aws_vpc_region }}" - name: "{{ r_openshift_aws_vpc_clusterid }}" - tags: - Name: "{{ r_openshift_aws_vpc_clusterid }}" + region: "{{ openshift_aws_region }}" + name: "{{ openshift_aws_clusterid }}" + tags: "{{ openshift_aws_vpc_tags }}" register: vpc - name: Sleep to avoid a race condition when creating the vpc @@ -18,23 +17,23 @@ - name: assign the vpc igw ec2_vpc_igw: - region: "{{ 
r_openshift_aws_vpc_region }}" + region: "{{ openshift_aws_region }}" vpc_id: "{{ vpc.vpc.id }}" register: igw - name: assign the vpc subnets ec2_vpc_subnet: - region: "{{ r_openshift_aws_vpc_region }}" + region: "{{ openshift_aws_region }}" vpc_id: "{{ vpc.vpc.id }}" cidr: "{{ item.cidr }}" az: "{{ item.az }}" resource_tags: Name: "{{ item.az }}" - with_items: "{{ r_openshift_aws_vpc_subnets[r_openshift_aws_vpc_region] }}" + with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}" - name: Grab the route tables from our VPC ec2_vpc_route_table_facts: - region: "{{ r_openshift_aws_vpc_region }}" + region: "{{ openshift_aws_region }}" filters: vpc-id: "{{ vpc.vpc.id }}" register: route_table @@ -44,9 +43,9 @@ lookup: id route_table_id: "{{ route_table.route_tables[0].id }}" vpc_id: "{{ vpc.vpc.id }}" - region: "{{ r_openshift_aws_vpc_region }}" + region: "{{ openshift_aws_region }}" tags: - Name: "{{ r_openshift_aws_vpc_name }}" + Name: "{{ openshift_aws_vpc_name }}" routes: - dest: 0.0.0.0/0 gateway_id: igw diff --git a/roles/openshift_aws_ami_copy/README.md b/roles/openshift_aws_ami_copy/README.md deleted file mode 100644 index 111818451..000000000 --- a/roles/openshift_aws_ami_copy/README.md +++ /dev/null @@ -1,50 +0,0 @@ -openshift_aws_ami_perms -========= - -Ansible role for copying an AMI - -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- - -- openshift_aws_ami_copy_src_ami: source AMI id to copy from -- openshift_aws_ami_copy_region: region where the AMI is found -- openshift_aws_ami_copy_name: name to assign to new AMI -- openshift_aws_ami_copy_kms_arn: AWS IAM KMS arn of the key to use for encryption -- openshift_aws_ami_copy_tags: dict with desired tags -- openshift_aws_ami_copy_wait: wait for the ami copy to achieve available status. This fails due to boto waiters. 
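Editor's note: the `r_openshift_aws_ami_copy_*` variables above are replaced by the `openshift_aws_*` names used in `roles/openshift_aws/tasks/ami_copy.yml` earlier in this diff. A minimal sketch of the equivalent call against the consolidated role (values illustrative only):

```yaml
- include_role:
    name: openshift_aws
    tasks_from: ami_copy.yml
  vars:
    openshift_aws_region: us-east-1
    openshift_aws_ami_copy_src_ami: ami-1234
    openshift_aws_ami_copy_name: myami
    openshift_aws_ami_encrypt: False
```

Note that the new task file creates its own KMS key from `openshift_aws_iam_kms_alias`, so no `kms_arn` needs to be passed in.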
- -Dependencies ------------- - - -Example Playbook ----------------- -```yaml - - name: copy the ami for encrypted disks - include_role: - name: openshift_aws_ami_copy - vars: - r_openshift_aws_ami_copy_region: us-east-1 - r_openshift_aws_ami_copy_name: myami - r_openshift_aws_ami_copy_src_ami: ami-1234 - r_openshift_aws_ami_copy_kms_arn: arn:xxxx - r_openshift_aws_ami_copy_tags: {} - r_openshift_aws_ami_copy_encrypt: False - -``` - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_ami_copy/tasks/main.yml b/roles/openshift_aws_ami_copy/tasks/main.yml deleted file mode 100644 index bcccd4042..000000000 --- a/roles/openshift_aws_ami_copy/tasks/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- fail: - msg: "{{ item }} needs to be defined" - when: item is not defined - with_items: - - r_openshift_aws_ami_copy_src_ami - - r_openshift_aws_ami_copy_name - - r_openshift_aws_ami_copy_region - -- name: "Create copied AMI image and wait: {{ r_openshift_aws_ami_copy_wait | default(False) }}" - ec2_ami_copy: - region: "{{ r_openshift_aws_ami_copy_region }}" - source_region: "{{ r_openshift_aws_ami_copy_region }}" - name: "{{ r_openshift_aws_ami_copy_name }}" - source_image_id: "{{ r_openshift_aws_ami_copy_src_ami }}" - encrypted: "{{ r_openshift_aws_ami_copy_encrypt | default(False) }}" - kms_key_id: "{{ r_openshift_aws_ami_copy_kms_arn | default(omit) }}" - wait: "{{ r_openshift_aws_ami_copy_wait | default(omit) }}" - tags: "{{ r_openshift_aws_ami_copy_tags }}" - register: copy_result - -- debug: var=copy_result - -- name: return AMI ID with setfact - openshift_aws_ami_copy_retval_custom_ami - set_fact: - r_openshift_aws_ami_copy_retval_custom_ami: "{{ copy_result.image_id }}" diff --git a/roles/openshift_aws_elb/README.md b/roles/openshift_aws_elb/README.md deleted file mode 100644 index ecc45fa14..000000000 --- a/roles/openshift_aws_elb/README.md +++ /dev/null @@ -1,75 +0,0 @@ -openshift_aws_elb -========= - -Ansible role to provision and manage AWS ELB's for Openshift. - -Requirements ------------- - -Ansible Modules: - -- ec2_elb -- ec2_elb_lb - -python package: - -python-boto - -Role Variables --------------- - -- r_openshift_aws_elb_instances: instances to put in ELB -- r_openshift_aws_elb_elb_name: name of elb -- r_openshift_aws_elb_security_group_names: list of SGs (by name) that the ELB will belong to -- r_openshift_aws_elb_region: AWS Region -- r_openshift_aws_elb_health_check: definition of the ELB health check. See ansible docs for ec2_elb -```yaml - ping_protocol: tcp - ping_port: 443 - response_timeout: 5 - interval: 30 - unhealthy_threshold: 2 - healthy_threshold: 2 -``` -- r_openshift_aws_elb_listeners: definition of the ELB listeners. 
See ansible docs for ec2_elb -```yaml -- protocol: tcp - load_balancer_port: 80 - instance_protocol: ssl - instance_port: 443 -- protocol: ssl - load_balancer_port: 443 - instance_protocol: ssl - instance_port: 443 - # ssl certificate required for https or ssl - ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}" -``` - -Dependencies ------------- - - -Example Playbook ----------------- -```yaml -- include_role: - name: openshift_aws_elb - vars: - r_openshift_aws_elb_instances: aws_instances_to_put_in_elb - r_openshift_aws_elb_elb_name: elb_name - r_openshift_aws_elb_security_groups: security_group_names - r_openshift_aws_elb_region: aws_region - r_openshift_aws_elb_health_check: "{{ elb_health_check_definition }}" - r_openshift_aws_elb_listeners: "{{ elb_listeners_definition }}" -``` - - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_elb/defaults/main.yml b/roles/openshift_aws_elb/defaults/main.yml deleted file mode 100644 index ed5d38079..000000000 --- a/roles/openshift_aws_elb/defaults/main.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -r_openshift_aws_elb_health_check: - ping_protocol: tcp - ping_port: 443 - response_timeout: 5 - interval: 30 - unhealthy_threshold: 2 - healthy_threshold: 2 - -r_openshift_aws_elb_cert_arn: '' - -r_openshift_aws_elb_listeners: - master: - external: - - protocol: tcp - load_balancer_port: 80 - instance_protocol: ssl - instance_port: 443 - - protocol: ssl - load_balancer_port: 443 - instance_protocol: ssl - instance_port: 443 - # ssl certificate required for https or ssl - ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}" - internal: - - protocol: tcp - load_balancer_port: 80 - instance_protocol: tcp - instance_port: 80 - - protocol: tcp - load_balancer_port: 443 - instance_protocol: tcp - instance_port: 443 diff --git a/roles/openshift_aws_elb/meta/main.yml b/roles/openshift_aws_elb/meta/main.yml deleted file mode 100644 index 58be652a5..000000000 --- a/roles/openshift_aws_elb/meta/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -galaxy_info: - author: OpenShift - description: Openshift ELB provisioning - company: Red Hat, Inc - license: ASL 2.0 - min_ansible_version: 1.2 - platforms: - - name: EL - versions: - - 7 -dependencies: [] diff --git a/roles/openshift_aws_elb/tasks/main.yml b/roles/openshift_aws_elb/tasks/main.yml deleted file mode 100644 index 64ec18545..000000000 --- a/roles/openshift_aws_elb/tasks/main.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- name: fetch the default subnet id - ec2_remote_facts: - region: "{{ r_openshift_aws_elb_region }}" - filters: "{{ r_openshift_aws_elb_instance_filter }}" - register: instancesout - -- name: fetch the default subnet id - ec2_vpc_subnet_facts: - region: "{{ r_openshift_aws_elb_region }}" - filters: - "tag:Name": "{{ r_openshift_aws_elb_subnet_name }}" - register: subnetout - -- name: - debug: - msg: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction] - if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type - else r_openshift_aws_elb_listeners }}" - -- name: "Create ELB {{ r_openshift_aws_elb_name }}" - ec2_elb_lb: - name: "{{ r_openshift_aws_elb_name }}" - state: present - security_group_names: "{{ r_openshift_aws_elb_security_groups }}" - idle_timeout: "{{ r_openshift_aws_elb_idle_timout }}" - region: "{{ r_openshift_aws_elb_region }}" - subnets: - - "{{ subnetout.subnets[0].id }}" - health_check: "{{ r_openshift_aws_elb_health_check }}" - listeners: "{{ 
r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction] - if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type - else r_openshift_aws_elb_listeners }}" - scheme: "{{ r_openshift_aws_elb_scheme }}" - tags: - KubernetesCluster: "{{ r_openshift_aws_elb_clusterid }}" - register: new_elb - -# It is necessary to ignore_errors here because the instances are not in 'ready' -# state when first added to ELB -- name: "Add instances to ELB {{ r_openshift_aws_elb_name }}" - ec2_elb: - instance_id: "{{ item.id }}" - ec2_elbs: "{{ r_openshift_aws_elb_name }}" - state: present - region: "{{ r_openshift_aws_elb_region }}" - wait: False - with_items: "{{ instancesout.instances }}" - ignore_errors: True - retries: 10 - register: elb_call - until: elb_call|succeeded - -- debug: - msg: "{{ item }}" - with_items: - - "{{ new_elb }}" diff --git a/roles/openshift_aws_iam_kms/README.md b/roles/openshift_aws_iam_kms/README.md deleted file mode 100644 index 9468e785c..000000000 --- a/roles/openshift_aws_iam_kms/README.md +++ /dev/null @@ -1,43 +0,0 @@ -openshift_aws_iam_kms -========= - -Ansible role to create AWS IAM KMS keys for encryption - -Requirements ------------- - -Ansible Modules: - -oo_iam_kms - -Role Variables --------------- - -- r_openshift_aws_iam_kms_region: AWS region to create KMS key -- r_openshift_aws_iam_kms_alias: Alias name to assign to created KMS key - -Dependencies ------------- - -lib_utils - -Example Playbook ----------------- -```yaml -- include_role: - name: openshift_aws_iam_kms - vars: - r_openshift_aws_iam_kms_region: 'us-east-1' - r_openshift_aws_iam_kms_alias: 'alias/clusterABC_kms' -``` - - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_iam_kms/defaults/main.yml b/roles/openshift_aws_iam_kms/defaults/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_aws_iam_kms/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_aws_iam_kms/meta/main.yml b/roles/openshift_aws_iam_kms/meta/main.yml deleted file mode 100644 index e29aaf96b..000000000 --- a/roles/openshift_aws_iam_kms/meta/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -galaxy_info: - author: OpenShift - description: AWS IAM KMS setup and management - company: Red Hat, Inc - license: ASL 2.0 - min_ansible_version: 1.2 - platforms: - - name: EL - versions: - - 7 -dependencies: -- lib_utils diff --git a/roles/openshift_aws_iam_kms/tasks/main.yml b/roles/openshift_aws_iam_kms/tasks/main.yml deleted file mode 100644 index 32aac2666..000000000 --- a/roles/openshift_aws_iam_kms/tasks/main.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- fail: - msg: "{{ item.name }} needs to be defined." 
- when: item.cond | bool - with_items: - - name: "{{ r_openshift_aws_iam_kms_alias }}" - cond: "{{ r_openshift_aws_iam_kms_alias is undefined }}" - - name: "{{ r_openshift_aws_iam_kms_region }}" - cond: "{{ r_openshift_aws_iam_kms_region is undefined }}" - -- name: Create IAM KMS key with alias - oo_iam_kms: - state: present - alias: "{{ r_openshift_aws_iam_kms_alias }}" - region: "{{ r_openshift_aws_iam_kms_region }}" - register: created_kms - -- debug: var=created_kms.results diff --git a/roles/openshift_aws_launch_config/README.md b/roles/openshift_aws_launch_config/README.md deleted file mode 100644 index 52b7e83b6..000000000 --- a/roles/openshift_aws_launch_config/README.md +++ /dev/null @@ -1,72 +0,0 @@ -openshift_aws_launch_config -========= - -Ansible role to create an AWS launch config for a scale group. - -This includes the AMI, volumes, user_data, etc. - -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- -- r_openshift_aws_launch_config_name: "{{ launch_config_name }}" -- r_openshift_aws_launch_config_clusterid: "{{ clusterid }}" -- r_openshift_aws_launch_config_region: "{{ region }}" -- r_openshift_aws_launch_config: "{{ node_group_config }}" -```yaml - master: - instance_type: m4.xlarge - ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: False - health_check: - period: 60 - type: EC2 - min_size: 3 - max_size: 3 - desired_size: 3 - tags: - host-type: master - sub-host-type: default - wait_for_instances: True -``` -- r_openshift_aws_launch_config_type: compute -- r_openshift_aws_launch_config_custom_image: ami-xxxxx -- r_openshift_aws_launch_config_bootstrap_token: <string of kubeconfig> - -Dependencies ------------- - - -Example Playbook ----------------- -```yaml - - name: create compute nodes config - include_role: - name: openshift_aws_launch_config - vars: - r_openshift_aws_launch_config_name: "{{ launch_config_name }}" - r_openshift_aws_launch_config_clusterid: "{{ clusterid }}" - r_openshift_aws_launch_config_region: "{{ region }}" - r_openshift_aws_launch_config: "{{ node_group_config }}" - r_openshift_aws_launch_config_type: compute - r_openshift_aws_launch_config_custom_image: ami-1234 - r_openshift_aws_launch_config_bootstrap_token: abcd -``` - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_launch_config/defaults/main.yml b/roles/openshift_aws_launch_config/defaults/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_aws_launch_config/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_aws_launch_config/meta/main.yml b/roles/openshift_aws_launch_config/meta/main.yml deleted file mode 100644 index e61670cc2..000000000 --- a/roles/openshift_aws_launch_config/meta/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -galaxy_info: - author: OpenShift - description: Openshift AWS VPC creation - company: Red Hat, Inc - license: ASL 2.0 - min_ansible_version: 2.3 - platforms: - - name: EL - versions: - - 7 -dependencies: [] diff --git a/roles/openshift_aws_launch_config/tasks/main.yml b/roles/openshift_aws_launch_config/tasks/main.yml deleted file mode 100644 index 437cf1f71..000000000 --- a/roles/openshift_aws_launch_config/tasks/main.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -- name: fail when params are not set - fail: - msg: Please specify the role parameters. 
- when: - - r_openshift_aws_launch_config_cluseterid is undefined - - r_openshift_aws_launch_config_type is undefined - - r_openshift_aws_launch_config_region is undefined - - r_openshift_aws_launch_config is undefined - -- name: fetch the security groups for launch config - ec2_group_facts: - filters: - group-name: - - "{{ r_openshift_aws_launch_config_clusterid }}" # default sg - - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}" # node type sg - - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}_k8s" # node type sg k8s - region: "{{ r_openshift_aws_launch_config_region }}" - register: ec2sgs - -# Create the scale group config -- name: Create the node scale group config - ec2_lc: - name: "{{ r_openshift_aws_launch_config_name }}" - region: "{{ r_openshift_aws_launch_config_region }}" - image_id: "{{ r_openshift_aws_launch_config_custom_image if 'ami-' in r_openshift_aws_launch_config_custom_image else r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].ami }}" - instance_type: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].instance_type }}" - security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}" - user_data: |- - #cloud-config - {% if r_openshift_aws_launch_config_type != 'master' %} - write_files: - - path: /root/csr_kubeconfig - owner: root:root - permissions: '0640' - content: {{ r_openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }} - - path: /root/openshift_settings - owner: root:root - permissions: '0640' - content: - openshift_type: "{{ r_openshift_aws_launch_config_type }}" - runcmd: - - [ systemctl, enable, atomic-openshift-node] - - [ systemctl, start, atomic-openshift-node] - {% endif %} - key_name: "{{ r_openshift_aws_launch_config.ssh_key_name }}" - ebs_optimized: False - volumes: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].volumes }}" - assign_public_ip: True - register: test diff --git a/roles/openshift_aws_launch_config/templates/cloud-init.j2 b/roles/openshift_aws_launch_config/templates/cloud-init.j2 deleted file mode 100644 index 1a1e29550..000000000 --- a/roles/openshift_aws_launch_config/templates/cloud-init.j2 +++ /dev/null @@ -1,9 +0,0 @@ -{% if r_openshift_aws_launch_config_bootstrap_token is defined and r_openshift_aws_launch_config_bootstrap_token is not '' %} -#cloud-config -write_files: -- path: /root/csr_kubeconfig - owner: root:root - permissions: '0640' - content: |- - {{ r_openshift_aws_launch_config_bootstrap_token }} -{% endif %} diff --git a/roles/openshift_aws_node_group/README.md b/roles/openshift_aws_node_group/README.md deleted file mode 100644 index c32c57bc5..000000000 --- a/roles/openshift_aws_node_group/README.md +++ /dev/null @@ -1,77 +0,0 @@ -openshift_aws_node_group -========= - -Ansible role to create an aws node group. - -This includes the security group, launch config, and scale group. 
- -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- -```yaml -- r_openshift_aws_node_group_name: myscalegroup -- r_openshift_aws_node_group_clusterid: myclusterid -- r_openshift_aws_node_group_region: us-east-1 -- r_openshift_aws_node_group_lc_name: launch_config -- r_openshift_aws_node_group_type: master|infra|compute -- r_openshift_aws_node_group_config: "{{ node_group_config }}" -```yaml -master: - instance_type: m4.xlarge - ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: False - health_check: - period: 60 - type: EC2 - min_size: 3 - max_size: 3 - desired_size: 3 - tags: - host-type: master - sub-host-type: default - wait_for_instances: True -``` -- r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}" - -```yaml -us-east-1a # name of subnet -``` - -Dependencies ------------- - - -Example Playbook ----------------- -```yaml - - name: "create {{ openshift_build_node_type }} node groups" - include_role: - name: openshift_aws_node_group - vars: - r_openshift_aws_node_group_name: "{{ clusterid }} openshift compute" - r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}" - r_openshift_aws_node_group_clusterid: "{{ clusterid }}" - r_openshift_aws_node_group_region: "{{ region }}" - r_openshift_aws_node_group_config: "{{ node_group_config }}" - r_openshift_aws_node_group_type: compute - r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}" -``` - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_node_group/defaults/main.yml b/roles/openshift_aws_node_group/defaults/main.yml deleted file mode 100644 index 44c5116a1..000000000 --- a/roles/openshift_aws_node_group/defaults/main.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -r_openshift_aws_node_group_type: master - -r_openshift_aws_node_group_config: - tags: - clusterid: "{{ r_openshift_aws_node_group_clusterid }}" - master: - instance_type: m4.xlarge - ami: "{{ r_openshift_aws_node_group_ami }}" - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: False - health_check: - period: 60 - type: EC2 - min_size: 3 - max_size: 3 - desired_size: 3 - tags: - host-type: master - sub-host-type: default - wait_for_instances: True - compute: - instance_type: m4.xlarge - ami: "{{ r_openshift_aws_node_group_ami }}" - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: True - health_check: - period: 60 - type: EC2 - min_size: 3 - max_size: 100 - desired_size: 3 - tags: - host-type: node - sub-host-type: compute - infra: - instance_type: m4.xlarge - ami: "{{ r_openshift_aws_node_group_ami }}" - volumes: - - device_name: /dev/sdb - volume_size: 100 - device_type: gp2 - delete_on_termination: True - health_check: - period: 60 - type: EC2 - min_size: 2 - max_size: 20 - desired_size: 2 - tags: - host-type: node - sub-host-type: infra diff --git a/roles/openshift_aws_node_group/tasks/main.yml b/roles/openshift_aws_node_group/tasks/main.yml deleted file mode 100644 index 6f5364b03..000000000 --- a/roles/openshift_aws_node_group/tasks/main.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -- name: validate role inputs - fail: - msg: Please pass in the required role variables - when: - - r_openshift_aws_node_group_clusterid is not defined - - r_openshift_aws_node_group_region is not defined - - r_openshift_aws_node_group_subnet_name is not defined - -- 
name: fetch the subnet to use in scale group - ec2_vpc_subnet_facts: - region: "{{ r_openshift_aws_node_group_region }}" - filters: - "tag:Name": "{{ r_openshift_aws_node_group_subnet_name }}" - register: subnetout - -- name: Create the scale group - ec2_asg: - name: "{{ r_openshift_aws_node_group_name }}" - launch_config_name: "{{ r_openshift_aws_node_group_lc_name }}" - health_check_period: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.period }}" - health_check_type: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.type }}" - min_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].min_size }}" - max_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].max_size }}" - desired_capacity: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].desired_size }}" - region: "{{ r_openshift_aws_node_group_region }}" - termination_policies: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].termination_policy if 'termination_policy' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}" - load_balancers: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].elbs if 'elbs' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}" - wait_for_instances: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].wait_for_instances | default(False)}}" - vpc_zone_identifier: "{{ subnetout.subnets[0].id }}" - tags: - - "{{ r_openshift_aws_node_group_config.tags | combine(r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].tags) }}" diff --git a/roles/openshift_aws_s3/README.md b/roles/openshift_aws_s3/README.md deleted file mode 100644 index afafe61cf..000000000 --- a/roles/openshift_aws_s3/README.md +++ /dev/null @@ -1,43 +0,0 @@ -openshift_aws_s3 -========= - -Ansible role to create an s3 bucket - -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- - -- r_openshift_aws_s3_clusterid: myclusterid -- r_openshift_aws_s3_region: us-east-1 -- r_openshift_aws_s3_mode: create|delete - -Dependencies ------------- - - -Example Playbook ----------------- -```yaml -- name: create an s3 bucket - include_role: - name: openshift_aws_s3 - vars: - r_openshift_aws_s3_clusterid: mycluster - r_openshift_aws_s3_region: us-east-1 - r_openshift_aws_s3_mode: create -``` - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_s3/tasks/main.yml b/roles/openshift_aws_s3/tasks/main.yml deleted file mode 100644 index 46bd781bd..000000000 --- a/roles/openshift_aws_s3/tasks/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Create an s3 bucket - s3: - bucket: "{{ r_openshift_aws_s3_clusterid }}" - mode: "{{ r_openshift_aws_s3_mode }}" - region: "{{ r_openshift_aws_s3_region }}" diff --git a/roles/openshift_aws_sg/README.md b/roles/openshift_aws_sg/README.md deleted file mode 100644 index eeb76bbb6..000000000 --- a/roles/openshift_aws_sg/README.md +++ /dev/null @@ -1,59 +0,0 @@ -openshift_aws_sg -========= - -Ansible role to create an aws security groups - -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- - -- r_openshift_aws_sg_clusterid: myclusterid -- r_openshift_aws_sg_region: us-east-1 -- r_openshift_aws_sg_type: master|infra|compute -```yaml -# defaults/main.yml - default: - name: "{{ r_openshift_aws_sg_clusterid }}" - desc: "{{ 
r_openshift_aws_sg_clusterid }} default" - rules: - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 0.0.0.0/0 - - proto: all - from_port: all - to_port: all - group_name: "{{ r_openshift_aws_sg_clusterid }}" -``` - - -Dependencies ------------- - - -Example Playbook ----------------- -```yaml -- name: create security groups for master - include_role: - name: openshift_aws_sg - vars: - r_openshift_aws_sg_clusterid: mycluster - r_openshift_aws_sg_region: us-east-1 - r_openshift_aws_sg_type: master -``` - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_sg/defaults/main.yml b/roles/openshift_aws_sg/defaults/main.yml deleted file mode 100644 index 9c480d337..000000000 --- a/roles/openshift_aws_sg/defaults/main.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -r_openshift_aws_sg_sg: - default: - name: "{{ r_openshift_aws_sg_clusterid }}" - desc: "{{ r_openshift_aws_sg_clusterid }} default" - rules: - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 0.0.0.0/0 - - proto: all - from_port: all - to_port: all - group_name: "{{ r_openshift_aws_sg_clusterid }}" - master: - name: "{{ r_openshift_aws_sg_clusterid }}_master" - desc: "{{ r_openshift_aws_sg_clusterid }} master instances" - rules: - - proto: tcp - from_port: 80 - to_port: 80 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 443 - to_port: 443 - cidr_ip: 0.0.0.0/0 - compute: - name: "{{ r_openshift_aws_sg_clusterid }}_compute" - desc: "{{ r_openshift_aws_sg_clusterid }} compute node instances" - infra: - name: "{{ r_openshift_aws_sg_clusterid }}_infra" - desc: "{{ r_openshift_aws_sg_clusterid }} infra node instances" - rules: - - proto: tcp - from_port: 80 - to_port: 80 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 443 - to_port: 443 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 30000 - to_port: 32000 - cidr_ip: 0.0.0.0/0 - etcd: - name: "{{ r_openshift_aws_sg_clusterid }}_etcd" - desc: "{{ r_openshift_aws_sg_clusterid }} etcd instances" diff --git a/roles/openshift_aws_sg/tasks/main.yml b/roles/openshift_aws_sg/tasks/main.yml deleted file mode 100644 index 2294fdcc9..000000000 --- a/roles/openshift_aws_sg/tasks/main.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -- name: Validate role inputs - fail: - msg: Please ensure to pass the correct variables - when: - - r_openshift_aws_sg_region is undefined - - r_openshift_aws_sg_region is undefined - - -- name: Fetch the VPC for vpc.id - ec2_vpc_net_facts: - region: "{{ r_openshift_aws_sg_region }}" - filters: - "tag:Name": "{{ r_openshift_aws_sg_clusterid }}" - register: vpcout - -- name: Create default security group for cluster - ec2_group: - name: "{{ r_openshift_aws_sg_sg.default.name }}" - description: "{{ r_openshift_aws_sg_sg.default.desc }}" - region: "{{ r_openshift_aws_sg_region }}" - vpc_id: "{{ vpcout.vpcs[0].id }}" - rules: "{{ r_openshift_aws_sg_sg.default.rules | default(omit, True)}}" - register: sg_default_created - -- name: create the node group sgs - ec2_group: - name: "{{ item.name}}" - description: "{{ item.desc }}" - rules: "{{ item.rules if 'rules' in item else [] }}" - region: "{{ r_openshift_aws_sg_region }}" - vpc_id: "{{ vpcout.vpcs[0].id }}" - register: sg_create - with_items: - - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type]}}" - -- name: create the k8s sgs for the node group - ec2_group: - name: "{{ item.name }}_k8s" - description: "{{ item.desc }} for k8s" - region: "{{ r_openshift_aws_sg_region }}" - vpc_id: "{{ vpcout.vpcs[0].id }}" - register: k8s_sg_create - with_items: - - "{{ 
r_openshift_aws_sg_sg[r_openshift_aws_sg_type] }}"
-
-- name: tag sg groups with proper tags
-  ec2_tag:
-    tags:
-      KubernetesCluster: "{{ r_openshift_aws_sg_clusterid }}"
-    resource: "{{ item.group_id }}"
-    region: "{{ r_openshift_aws_sg_region }}"
-  with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws_ssh_keys/README.md b/roles/openshift_aws_ssh_keys/README.md
deleted file mode 100644
index 4f8667918..000000000
--- a/roles/openshift_aws_ssh_keys/README.md
+++ /dev/null
@@ -1,49 +0,0 @@
-openshift_aws_ssh_keys
-=========
-
-Ansible role for installing SSH keys
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_ssh_keys_users: list of dicts of users
-- r_openshift_aws_ssh_keys_region: ec2_region to install the keys
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-users:
-- username: user1
-  pub_key: <user1 ssh public key>
-- username: user2
-  pub_key: <user2 ssh public key>
-
-region: us-east-1
-
-- include_role:
-    name: openshift_aws_ssh_keys
-  vars:
-    r_openshift_aws_ssh_keys_users: "{{ users }}"
-    r_openshift_aws_ssh_keys_region: "{{ region }}"
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_vpc/README.md b/roles/openshift_aws_vpc/README.md
deleted file mode 100644
index d88cf0581..000000000
--- a/roles/openshift_aws_vpc/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-openshift_aws_vpc
-=========
-
-Ansible role to create a default AWS VPC
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_vpc_clusterid: "{{ clusterid }}"
-- r_openshift_aws_vpc_cidr: 172.31.48.0/20
-- r_openshift_aws_vpc_subnets: "{{ subnets }}"
-```yaml
-  subnets:
-    us-east-1:  # These are us-east-1 region defaults.
Ensure this matches your region - - cidr: 172.31.48.0/20 - az: "us-east-1c" - - cidr: 172.31.32.0/20 - az: "us-east-1e" - - cidr: 172.31.16.0/20 - az: "us-east-1a" -``` -- r_openshift_aws_vpc_region: "{{ region }}" -- r_openshift_aws_vpc_tags: dict of tags to apply to vpc -- r_openshift_aws_vpc_name: "{{ vpc_name | default(clusterid) }}" - -Dependencies ------------- - - -Example Playbook ----------------- - -```yaml - - name: create default vpc - include_role: - name: openshift_aws_vpc - vars: - r_openshift_aws_vpc_clusterid: mycluster - r_openshift_aws_vpc_cidr: 172.31.48.0/20 - r_openshift_aws_vpc_subnets: "{{ subnets }}" - r_openshift_aws_vpc_region: us-east-1 - r_openshift_aws_vpc_tags: {} - r_openshift_aws_vpc_name: mycluster - -``` - - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_vpc/defaults/main.yml b/roles/openshift_aws_vpc/defaults/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_aws_vpc/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_cfme/meta/main.yml b/roles/openshift_cfme/meta/main.yml index 9200f2c3c..162d817f0 100644 --- a/roles/openshift_cfme/meta/main.yml +++ b/roles/openshift_cfme/meta/main.yml @@ -16,5 +16,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: lib_utils -- role: openshift_common - role: openshift_master_facts diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml index 04a1ce873..29ed82783 100644 --- a/roles/openshift_cli/meta/main.yml +++ b/roles/openshift_cli/meta/main.yml @@ -14,5 +14,4 @@ galaxy_info: dependencies: - role: openshift_docker when: not skip_docker_role | default(False) | bool -- role: openshift_common - role: openshift_facts diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md deleted file mode 100644 index 2a271854b..000000000 --- a/roles/openshift_common/README.md +++ /dev/null @@ -1,45 +0,0 @@ -OpenShift/Atomic Enterprise Common -=================================== - -OpenShift/Atomic Enterprise common installation and configuration tasks. - -Requirements ------------- - -A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms, -rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos. 
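A host can be prepared with a short play along these lines (a minimal sketch, assuming the repo IDs listed above; adjust them to your subscription):

```yaml
# Minimal sketch: enable the repos this role expects before running it.
- hosts: all
  become: yes
  tasks:
  - name: Enable required RHEL repos
    command: subscription-manager repos --enable={{ item }}
    with_items:
    - rhel-7-server-rpms
    - rhel-7-server-extra-rpms
    - rhel-7-server-ose-3.0-rpms
```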
- -Role Variables --------------- - -| Name | Default value | | -|---------------------------|-------------------|---------------------------------------------| -| openshift_cluster_id | default | Cluster name if multiple OpenShift clusters | -| openshift_debug_level | 2 | Global openshift debug log verbosity | -| openshift_hostname | UNDEF | Internal hostname to use for this host (this value will set the hostname on the system) | -| openshift_ip | UNDEF | Internal IP address to use for this host | -| openshift_public_hostname | UNDEF | Public hostname to use for this host | -| openshift_public_ip | UNDEF | Public IP address to use for this host | -| openshift_portal_net | UNDEF | Service IP CIDR | - -Dependencies ------------- - -os_firewall -openshift_facts -openshift_repos - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License, Version 2.0 - -Author Information ------------------- - -Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml deleted file mode 100644 index 267c03605..000000000 --- a/roles/openshift_common/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -openshift_cluster_id: 'default' -openshift_debug_level: 2 diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml deleted file mode 100644 index 7cc95d8fa..000000000 --- a/roles/openshift_common/meta/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: OpenShift Common - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 1.7 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud -dependencies: -- role: openshift_facts diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml deleted file mode 100644 index a0bd6c860..000000000 --- a/roles/openshift_common/tasks/main.yml +++ /dev/null @@ -1,78 +0,0 @@ ---- -- fail: - msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool - -- fail: - msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool - -- fail: - msg: Nuage sdn can not be used with flannel - when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool - -- fail: - msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool - -- fail: - msg: Contiv can not be used with flannel - when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool - -- fail: - msg: Contiv can not be used with nuage - when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool - -- fail: - msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool - -- fail: - msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both. 
- when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool - -- fail: - msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both - when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool - -- fail: - msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both - when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool - -- fail: - msg: openshift_hostname must be 63 characters or less - when: openshift_hostname is defined and openshift_hostname | length > 63 - -- name: Set common Cluster facts - openshift_facts: - role: common - local_facts: - install_examples: "{{ openshift_install_examples | default(True) }}" - use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}" - sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}" - use_flannel: "{{ openshift_use_flannel | default(None) }}" - use_calico: "{{openshift_use_calico | default(None) }}" - use_nuage: "{{ openshift_use_nuage | default(None) }}" - use_contiv: "{{ openshift_use_contiv | default(None) }}" - use_manageiq: "{{ openshift_use_manageiq | default(None) }}" - data_dir: "{{ openshift_data_dir | default(None) }}" - use_dnsmasq: "{{ openshift_use_dnsmasq | default(None) }}" - -- name: Install the base package for versioning - package: - name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" - state: present - when: not openshift.common.is_containerized | bool - -- name: Set version facts - openshift_facts: - -# For enterprise versions < 3.1 and origin versions < 1.1 we want to set the -# hostname by default. 
-- set_fact: - set_hostname_default: "{{ not openshift.common.version_gte_3_1_or_1_1 }}" - -- name: Set hostname - command: > - hostnamectl set-hostname {{ openshift.common.hostname }} - when: openshift_set_hostname | default(set_hostname_default) | bool diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml index 5cfda1c89..f3fe2dcbe 100644 --- a/roles/openshift_examples/meta/main.yml +++ b/roles/openshift_examples/meta/main.yml @@ -11,5 +11,4 @@ galaxy_info: - 7 categories: - cloud -dependencies: -- role: openshift_common +dependencies: [] diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index cf78b4a75..ebfa6bb8f 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -449,78 +449,6 @@ def normalize_provider_facts(provider, metadata): return facts -def set_flannel_facts_if_unset(facts): - """ Set flannel facts if not already present in facts dict - dict: the facts dict updated with the flannel facts if - missing - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with the flannel - facts if they were not already present - - """ - if 'common' in facts: - if 'use_flannel' not in facts['common']: - use_flannel = False - facts['common']['use_flannel'] = use_flannel - return facts - - -def set_calico_facts_if_unset(facts): - """ Set calico facts if not already present in facts dict - dict: the facts dict updated with the calico facts if - missing - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with the calico - facts if they were not already present - - """ - if 'common' in facts: - if 'use_calico' not in facts['common']: - use_calico = False - facts['common']['use_calico'] = use_calico - return facts - - -def set_nuage_facts_if_unset(facts): - """ Set nuage facts if not already present in facts dict - dict: the facts dict updated with the nuage facts if - missing - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with the nuage - facts if they were not already present - - """ - if 'common' in facts: - if 'use_nuage' not in facts['common']: - use_nuage = False - facts['common']['use_nuage'] = use_nuage - return facts - - -def set_contiv_facts_if_unset(facts): - """ Set contiv facts if not already present in facts dict - dict: the facts dict updated with the contiv facts if - missing - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with the contiv - facts if they were not already present - - """ - if 'common' in facts: - if 'use_contiv' not in facts['common']: - use_contiv = False - facts['common']['use_contiv'] = use_contiv - return facts - - def set_node_schedulability(facts): """ Set schedulable facts if not already present in facts dict Args: @@ -590,13 +518,8 @@ def set_dnsmasq_facts_if_unset(facts): """ if 'common' in facts: - if 'use_dnsmasq' not in facts['common']: - facts['common']['use_dnsmasq'] = bool(safe_get_bool(facts['common']['version_gte_3_2_or_1_2'])) if 'master' in facts and 'dns_port' not in facts['master']: - if safe_get_bool(facts['common']['use_dnsmasq']): - facts['master']['dns_port'] = 8053 - else: - facts['master']['dns_port'] = 53 + facts['master']['dns_port'] = 8053 return facts @@ -968,27 +891,6 @@ def set_version_facts_if_unset(facts): return facts -def set_manageiq_facts_if_unset(facts): - """ Set manageiq facts. This currently includes common.use_manageiq. 
- - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with version facts. - Raises: - OpenShiftFactsInternalError: - """ - if 'common' not in facts: - if 'version_gte_3_1_or_1_1' not in facts['common']: - raise OpenShiftFactsInternalError( - "Invalid invocation: The required facts are not set" - ) - if 'use_manageiq' not in facts['common']: - facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1'] - - return facts - - def set_sdn_facts_if_unset(facts, system_facts): """ Set sdn facts if not already present in facts dict @@ -999,15 +901,6 @@ def set_sdn_facts_if_unset(facts, system_facts): dict: the facts dict updated with the generated sdn facts if they were not already present """ - # pylint: disable=too-many-branches - if 'common' in facts: - use_sdn = facts['common']['use_openshift_sdn'] - if not (use_sdn == '' or isinstance(use_sdn, bool)): - use_sdn = safe_get_bool(use_sdn) - facts['common']['use_openshift_sdn'] = use_sdn - if 'sdn_network_plugin_name' not in facts['common']: - plugin = 'redhat/openshift-ovs-subnet' if use_sdn else '' - facts['common']['sdn_network_plugin_name'] = plugin if 'master' in facts: # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length @@ -1996,10 +1889,6 @@ class OpenShiftFacts(object): facts['current_config'] = get_current_config(facts) facts = set_url_facts_if_unset(facts) facts = set_project_cfg_facts_if_unset(facts) - facts = set_flannel_facts_if_unset(facts) - facts = set_calico_facts_if_unset(facts) - facts = set_nuage_facts_if_unset(facts) - facts = set_contiv_facts_if_unset(facts) facts = set_node_schedulability(facts) facts = set_selectors(facts) facts = set_identity_providers_if_unset(facts) @@ -2011,7 +1900,6 @@ class OpenShiftFacts(object): facts = build_api_server_args(facts) facts = set_version_facts_if_unset(facts) facts = set_dnsmasq_facts_if_unset(facts) - facts = set_manageiq_facts_if_unset(facts) facts = set_aggregate_facts(facts) facts = set_etcd_facts_if_unset(facts) facts = set_proxy_facts(facts) @@ -2039,7 +1927,7 @@ class OpenShiftFacts(object): self.system_facts['ansible_fqdn']] hostname = choose_hostname(hostname_values, ip_addr) - defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr, + defaults['common'] = dict(ip=ip_addr, public_ip=ip_addr, deployment_type=deployment_type, deployment_subtype=deployment_subtype, @@ -2048,10 +1936,8 @@ class OpenShiftFacts(object): portal_net='172.30.0.0/16', client_binary='oc', admin_binary='oadm', dns_domain='cluster.local', - install_examples=True, debug_level=2, - config_base='/etc/origin', - data_dir='/var/lib/origin') + config_base='/etc/origin') if 'master' in roles: defaults['master'] = dict(api_use_ssl=True, api_port='8443', diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 857a80c74..866c74d7c 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -32,6 +32,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # we use python-docker-py to check local docker for images, and skopeo # to look for images available remotely without waiting to pull them. 
dependencies = ["python-docker-py", "skopeo"] + skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false" def is_active(self): """Skip hosts with unsupported deployment types.""" @@ -67,8 +68,10 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): "failed": True, "msg": ( "One or more required Docker images are not available:\n {}\n" - "Configured registries: {}" - ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries)), + "Configured registries: {}\n" + "Checked by: {}" + ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries), + self.skopeo_img_check_command), } return {} @@ -169,8 +172,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): for registry in registries: args = { - "_raw_params": "timeout 10 skopeo inspect --tls-verify=false " - "docker://{}/{}".format(registry, image) + "_raw_params": self.skopeo_img_check_command + " docker://{}/{}".format(registry, image) } result = self.execute_module("command", args) if result.get("rc", 0) == 0 and not result.get("failed"): diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md index 3e5d7f860..29ae58556 100644 --- a/roles/openshift_hosted/README.md +++ b/roles/openshift_hosted/README.md @@ -39,7 +39,6 @@ variables also control configuration behavior: Dependencies ------------ -* openshift_common * openshift_hosted_facts Example Playbook diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index c26df3afa..08c1d849e 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -47,3 +47,9 @@ r_openshift_hosted_registry_os_firewall_allow: - service: Docker Registry Port port: 5000/tcp cond: "{{ r_openshift_hosted_use_calico }}" + +# NOTE +# r_openshift_hosted_use_calico_default may be defined external to this role. +# openshift_use_calico, if defined, may affect other roles or play behavior. 
+r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}" +r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}" diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index e57ed733e..68ec7233e 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router/router.yml @@ -18,6 +18,15 @@ openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}" openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}" +- name: Get the certificate contents for router + copy: + backup: True + dest: "/etc/origin/master/{{ item | basename }}" + src: "{{ item }}" + with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') | + oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" + when: ( not openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {} + # This is for when we desire a cluster signed cert # The certificate is generated and placed in master_config_dir/ - block: @@ -43,15 +52,6 @@ # End Block when: ( openshift_hosted_router_create_certificate | bool ) and openshift_hosted_router_certificate == {} -- name: Get the certificate contents for router - copy: - backup: True - dest: "/etc/origin/master/{{ item | basename }}" - src: "{{ item }}" - with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') | - oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" - when: not openshift_hosted_router_create_certificate | bool - - name: Create the router service account(s) oc_serviceaccount: name: "{{ item.serviceaccount }}" diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml index 044c8043c..ab07a77c1 100644 --- a/roles/openshift_hosted_logging/meta/main.yaml +++ b/roles/openshift_hosted_logging/meta/main.yaml @@ -1,4 +1,3 @@ --- dependencies: - - { role: openshift_common } - { role: openshift_master_facts } diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml index 9c12865bf..4027f524b 100644 --- a/roles/openshift_hosted_templates/meta/main.yml +++ b/roles/openshift_hosted_templates/meta/main.yml @@ -11,5 +11,4 @@ galaxy_info: - 7 categories: - cloud -dependencies: -- role: openshift_common +dependencies: [] diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml index 41a2b12a2..239b16427 100644 --- a/roles/openshift_loadbalancer/defaults/main.yml +++ b/roles/openshift_loadbalancer/defaults/main.yml @@ -24,4 +24,10 @@ r_openshift_loadbalancer_os_firewall_allow: port: "{{ openshift_master_api_port | default(8443) }}/tcp" - service: nuage mon port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp" - cond: "{{ openshift_use_nuage | default(false) | bool }}" + cond: "{{ r_openshift_lb_use_nuage | bool }}" + +# NOTE +# r_openshift_lb_use_nuage_default may be defined external to this role. +# openshift_use_nuage, if defined, may affect other roles or play behavior. 
+r_openshift_lb_use_nuage_default: "{{ openshift_use_nuage | default(False) }}" +r_openshift_lb_use_nuage: "{{ r_openshift_lb_use_nuage_default }}" diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml index 97525479e..95bf462d1 100644 --- a/roles/openshift_logging_curator/vars/main.yml +++ b/roles/openshift_logging_curator/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_curator_version: "3_5" -__allowed_curator_versions: ["3_5", "3_6"] +__latest_curator_version: "3_6" +__allowed_curator_versions: ["3_5", "3_6", "3_7"] diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 index 0c06a7677..65b08d970 100644 --- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 +++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 @@ -24,7 +24,8 @@ network: cloud: kubernetes: - service: ${SERVICE_DNS} + pod_label: ${POD_LABEL} + pod_port: 9300 namespace: ${NAMESPACE} discovery: diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index 5f2932541..3c8f390c4 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -90,6 +90,12 @@ spec: name: "RECOVER_AFTER_TIME" value: "{{openshift_logging_elasticsearch_recover_after_time}}" - + name: "READINESS_PROBE_TIMEOUT" + value: "30" + - + name: "POD_LABEL" + value: "component={{component}}" + - name: "IS_MASTER" value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}" @@ -106,6 +112,13 @@ spec: readOnly: true - name: elasticsearch-storage mountPath: /elasticsearch/persistent + readinessProbe: + exec: + command: + - "/usr/share/java/elasticsearch/probe/readiness.sh" + initialDelaySeconds: 10 + timeoutSeconds: 30 + periodSeconds: 5 volumes: - name: elasticsearch secret: diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml index 20fa63543..09e2ee4d0 100644 --- a/roles/openshift_logging_elasticsearch/vars/main.yml +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -1,6 +1,6 @@ --- -__latest_es_version: "3_5" -__allowed_es_versions: ["3_5", "3_6"] +__latest_es_version: "3_6" +__allowed_es_versions: ["3_5", "3_6", "3_7"] __allowed_es_types: ["data-master", "data-client", "master", "client"] __es_log_appenders: ['file', 'console'] __kibana_index_modes: ["unique", "shared_ops"] diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml index ec8e565c3..92a426952 100644 --- a/roles/openshift_logging_fluentd/vars/main.yml +++ b/roles/openshift_logging_fluentd/vars/main.yml @@ -1,5 +1,5 @@ --- -__latest_fluentd_version: "3_5" -__allowed_fluentd_versions: ["3_5", "3_6"] +__latest_fluentd_version: "3_6" +__allowed_fluentd_versions: ["3_5", "3_6", "3_7"] __allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"] __allowed_mux_client_modes: ["minimal", "maximal"] diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml index 87b281c4b..241877a02 100644 --- a/roles/openshift_logging_kibana/vars/main.yml +++ b/roles/openshift_logging_kibana/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_kibana_version: "3_5" -__allowed_kibana_versions: ["3_5", "3_6"] +__latest_kibana_version: "3_6" +__allowed_kibana_versions: ["3_5", "3_6", "3_7"] diff --git 
a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml index 4234b74e2..e7b57f4b5 100644 --- a/roles/openshift_logging_mux/vars/main.yml +++ b/roles/openshift_logging_mux/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_mux_version: "3_5" -__allowed_mux_versions: ["3_5", "3_6"] +__latest_mux_version: "3_6" +__allowed_mux_versions: ["3_5", "3_6", "3_7"] diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index fbf69c270..86fa57b50 100644 --- a/roles/openshift_master/README.md +++ b/roles/openshift_master/README.md @@ -17,7 +17,6 @@ From this role: | Name | Default value | | |---------------------------------------------------|-----------------------|-------------------------------------------------------------------------------| -| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master | | openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up | | oreg_url | UNDEF | Default docker registry to use | | oreg_url_master | UNDEF | Default docker registry to use, specifically on the master | @@ -29,18 +28,10 @@ From this role: | openshift_master_public_console_url | UNDEF | | | openshift_master_saconfig_limit_secret_references | false | | -From openshift_common: - -| Name | Default Value | | -|-------------------------------|----------------|----------------------------------------| -| openshift_debug_level | 2 | Global openshift debug log verbosity | -| openshift_public_ip | UNDEF | Public IP address to use for this host | -| openshift_hostname | UNDEF | hostname to use for this instance | Dependencies ------------ -openshift_common Example Playbook ---------------- diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index d70106276..71bb09a76 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -22,5 +22,24 @@ r_openshift_master_os_firewall_allow: oreg_url: '' oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" -oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker" +oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker" oreg_auth_credentials_replace: False + + +# NOTE +# r_openshift_master_*_default may be defined external to this role. +# openshift_use_*, if defined, may affect other roles or play behavior. 
+r_openshift_master_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}" +r_openshift_master_use_openshift_sdn: "{{ r_openshift_master_use_openshift_sdn_default }}" + +r_openshift_master_use_nuage_default: "{{ openshift_use_nuage | default(False) }}" +r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}" + +r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}" +r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}" + +r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}" +r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}" + +r_openshift_master_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}" +r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_plugin_name_default }}" diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index bd2383f61..b0237141b 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -29,4 +29,4 @@ dependencies: - role: nickhammond.logrotate - role: contiv contiv_role: netmaster - when: openshift.common.use_contiv | bool + when: openshift_use_contiv | default(False) | bool diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index a06defdb9..121261e94 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -47,9 +47,9 @@ when: - not openshift.common.is_containerized | bool -- name: Create openshift.common.data_dir +- name: Create r_openshift_master_data_dir file: - path: "{{ openshift.common.data_dir }}" + path: "{{ r_openshift_master_data_dir }}" state: directory mode: 0755 owner: root @@ -169,7 +169,7 @@ register: l_already_set - set_fact: - openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" + openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" - name: Set fact of all etcd host IPs openshift_facts: diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index c480d8223..7a918c57e 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -10,6 +10,13 @@ openshift_master_config_dir: '/etc/origin/master' when: openshift_master_config_dir is not defined +# This play may be consumed outside the role, we need to ensure that +# r_openshift_master_data_dir is set. 
+- name: Set r_openshift_master_data_dir if unset + set_fact: + r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}" + when: r_openshift_master_data_dir is not defined + - name: Remove the legacy master service if it exists include: clean_systemd_units.yml diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index e8f7c47b0..f06448d71 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -12,12 +12,12 @@ Requires={{ openshift.docker.service_name }}.service EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api Environment=GOTRACEBACK=crash ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api LimitNOFILE=131072 LimitCORE=infinity -WorkingDirectory={{ openshift.common.data_dir }} +WorkingDirectory={{ r_openshift_master_data_dir }} SyslogIdentifier={{ openshift.common.service_type }}-master-api Restart=always RestartSec=5s diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index 69db62f16..b7f36491b 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -11,12 +11,12 @@ PartOf={{ openshift.docker.service_name }}.service EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers Environment=GOTRACEBACK=crash ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers 
--env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
 ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
 LimitNOFILE=131072
 LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
 SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
 Restart=always
 RestartSec=5s
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index c14579435..d045b402b 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -106,7 +106,7 @@ etcdConfig:
     clientCA: ca.crt
 {% endif %}
     keyFile: etcd.server.key
-  storageDirectory: {{ openshift.common.data_dir }}/openshift.local.etcd
+  storageDirectory: {{ r_openshift_master_data_dir }}/openshift.local.etcd
 {% endif %}
 etcdStorageConfig:
   kubernetesStoragePrefix: kubernetes.io
@@ -179,8 +179,8 @@ masterPublicURL: {{ openshift.master.public_api_url }}
 networkConfig:
   clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
   hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage or openshift.common.use_contiv or openshift.common.sdn_network_plugin_name == 'cni' %}
-  networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_sdn_network_plugin_name == 'cni' %}
+  networkPluginName: {{ r_openshift_master_sdn_network_plugin_name }}
 {% endif %}
 # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
   serviceNetworkCIDR: {{ openshift.common.portal_net }}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
index 0e78d2d23..02bfd6f62 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
@@ -13,7 +13,7 @@ Environment=GOTRACEBACK=crash
 ExecStart=/usr/bin/openshift start master api
--config=${CONFIG_FILE} $OPTIONS LimitNOFILE=131072 LimitCORE=infinity -WorkingDirectory={{ openshift.common.data_dir }} +WorkingDirectory={{ r_openshift_master_data_dir }} SyslogIdentifier=atomic-openshift-master-api Restart=always RestartSec=5s diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 index 94928f88c..e284413f7 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 @@ -17,7 +17,7 @@ Environment=GOTRACEBACK=crash ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS LimitNOFILE=131072 LimitCORE=infinity -WorkingDirectory={{ openshift.common.data_dir }} +WorkingDirectory={{ r_openshift_master_data_dir }} SyslogIdentifier={{ openshift.common.service_type }}-master-controllers Restart=always RestartSec=5s diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index fb0b494da..32670b18e 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -17,22 +17,12 @@ From this role: | Name | Default value | | |----------------------------|-----------------------|----------------------------------------------------------| -| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node | | oreg_url | UNDEF (Optional) | Default docker registry to use | | oreg_url_node | UNDEF (Optional) | Default docker registry to use, specifically on the node | -From openshift_common: - -| Name | Default Value | | -|-------------------------------|---------------------|---------------------| -| openshift_debug_level | 2 | Global openshift debug log verbosity | -| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | -| openshift_hostname | UNDEF (Required) | hostname to use for this instance | - Dependencies ------------ -openshift_common Example Playbook ---------------- diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index a7dad5b1f..f1e64f3aa 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -69,10 +69,10 @@ r_openshift_node_os_firewall_allow: port: 443/tcp - service: OpenShift OVS sdn port: 4789/udp - cond: openshift.common.use_openshift_sdn | default(true) | bool + cond: openshift_use_openshift_sdn | bool - service: Calico BGP Port port: 179/tcp - cond: "{{ openshift.common.use_calico | bool }}" + cond: "{{ openshift_node_use_calico }}" - service: Kubernetes service NodePort TCP port: "{{ openshift_node_port_range | default('') }}/tcp" cond: "{{ openshift_node_port_range is defined }}" @@ -82,5 +82,27 @@ r_openshift_node_os_firewall_allow: oreg_url: '' oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" -oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker" +oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker" oreg_auth_credentials_replace: False + + +# NOTE +# r_openshift_node_*_default may be defined external to this role. +# openshift_use_*, if defined, may affect other roles or play behavior. 
+openshift_node_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}" +openshift_node_use_openshift_sdn: "{{ openshift_node_use_openshift_sdn_default }}" + +openshift_node_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}" +openshift_node_sdn_network_plugin_name: "{{ openshift_node_sdn_network_plugin_name_default }}" + +openshift_node_use_calico_default: "{{ openshift_use_calico | default(False) }}" +openshift_node_use_calico: "{{ openshift_node_use_calico_default }}" + +openshift_node_use_nuage_default: "{{ openshift_use_nuage | default(False) }}" +openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}" + +openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}" +openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}" + +openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}" +openshift_node_data_dir: "{{ openshift_node_data_dir_default }}" diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 14ba48aba..855b0a8d8 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -3,7 +3,7 @@ systemd: name: openvswitch state: restarted - when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | default(true) | bool + when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift_node_use_openshift_sdn | bool register: l_openshift_node_stop_openvswitch_result until: not l_openshift_node_stop_openvswitch_result | failed retries: 3 diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index 3db980514..ce5ecb9d0 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -15,11 +15,9 @@ dependencies: - role: openshift_node_facts - role: lib_openshift - role: lib_os_firewall -- role: openshift_common - role: openshift_clock - role: openshift_docker - role: openshift_node_certificates when: not openshift_node_bootstrap - role: openshift_cloud_provider - role: openshift_node_dnsmasq - when: openshift.common.use_dnsmasq | bool diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml index cb1440283..b83b2c452 100644 --- a/roles/openshift_node/tasks/bootstrap.yml +++ b/roles/openshift_node/tasks/bootstrap.yml @@ -42,14 +42,25 @@ path: /etc/origin/.config_managed register: rpmgenerated_config -- name: Remove RPM generated config files if present - file: - path: "/etc/origin/{{ item }}" - state: absent - when: - - rpmgenerated_config.stat.exists - - openshift_deployment_type in ['openshift-enterprise', 'atomic-enterprise'] - with_items: - - master - - node - - .config_managed +- when: rpmgenerated_config.stat.exists + block: + - name: Remove RPM generated config files if present + file: + path: "/etc/origin/{{ item }}" + state: absent + with_items: + - master + + # with_fileglob doesn't work correctly due to a few issues. + # Could change this to fileglob when it gets fixed. 
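+ # Editor's sketch of the intended flow (assuming the 'and' condition below):
+ # 'find' registers a list of dicts, each carrying a .path key; every path is
+ # then removed unless it names resolv.conf or node-dnsmasq.conf, which the
+ # node's local dnsmasq setup still needs.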
+ - name: Find all files in /etc/origin/node so we can remove them + find: + path: /etc/origin/node/ + register: find_results + + - name: Remove everything except the resolv.conf required for node + file: + path: "{{ item.path }}" + state: absent + when: "'resolv.conf' not in item.path and 'node-dnsmasq.conf' not in item.path" + with_items: "{{ find_results.files }}" diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index 8210fd881..7af3f54b5 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -22,7 +22,7 @@ daemon_reload: yes when: - openshift.common.is_containerized | bool - - openshift.common.use_openshift_sdn | default(true) | bool + - openshift_node_use_openshift_sdn | default(true) | bool register: ovs_start_result until: not ovs_start_result | failed retries: 3 diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml index 9bf4ed879..02b8ee67c 100644 --- a/roles/openshift_node/tasks/install.yml +++ b/roles/openshift_node/tasks/install.yml @@ -13,7 +13,7 @@ name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}" state: present when: - - openshift.common.use_openshift_sdn | default(true) | bool + - openshift_node_use_openshift_sdn | bool - name: Install conntrack-tools package package: diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 60a25dcc6..22ff6dfd2 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -49,6 +49,13 @@ state: restarted when: openshift_use_crio | default(false) +- name: Restart NetworkManager to ensure resolv.conf is present + systemd: + name: NetworkManager + enabled: yes + state: restarted + when: openshift_node_bootstrap | bool + # The atomic-openshift-node service will set this parameter on # startup, but if the network service is restarted this setting is # lost.
Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388 @@ -121,4 +128,4 @@ ##### END Storage ##### - include: config/workaround-bz1331590-ovs-oom-fix.yml - when: openshift.common.use_openshift_sdn | default(true) | bool + when: openshift_node_use_openshift_sdn | default(true) | bool diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index 4687400cd..6b4490f61 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -26,7 +26,7 @@ - name: Install OpenvSwitch system containers include: openvswitch_system_container.yml when: - - openshift.common.use_openshift_sdn | default(true) | bool + - openshift_node_use_openshift_sdn | bool - openshift.common.is_openvswitch_system_container | bool - block: @@ -39,7 +39,7 @@ - include: config/install-ovs-docker-service-file.yml when: - openshift.common.is_containerized | bool - - openshift.common.use_openshift_sdn | default(true) | bool + - openshift_node_use_openshift_sdn | bool - not openshift.common.is_openvswitch_system_container | bool - include: config/configure-node-settings.yml diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index f59aa6fb4..7049f7189 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -37,15 +37,15 @@ masterClientConnectionOverrides: qps: 100 {% endif %} masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig -{% if openshift.common.use_openshift_sdn | bool %} -networkPluginName: {{ openshift.common.sdn_network_plugin_name }} +{% if openshift_node_use_openshift_sdn | bool %} +networkPluginName: {{ openshift_node_sdn_network_plugin_name }} {% endif %} # networkConfig struct introduced in origin 1.0.6 and OSE 3.0.2 which # deprecates networkPluginName above. The two should match. 
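# Editor's note, a sketch of the rendered result under the role defaults
# (values are illustrative; mtu actually comes from openshift.node.sdn_mtu):
#   networkPluginName: redhat/openshift-ovs-subnet
#   networkConfig:
#     mtu: 1450
#     networkPluginName: redhat/openshift-ovs-subnet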
networkConfig: mtu: {{ openshift.node.sdn_mtu }} -{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool or openshift.common.use_contiv | bool or openshift.common.sdn_network_plugin_name == 'cni' %} - networkPluginName: {{ openshift.common.sdn_network_plugin_name }} +{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_sdn_network_plugin_name == 'cni' %} + networkPluginName: {{ openshift_node_sdn_network_plugin_name }} {% endif %} {% if openshift.node.set_node_ip | bool %} nodeIP: {{ openshift.common.ip }} @@ -66,7 +66,7 @@ servingInfo: - {{ cipher_suite }} {% endfor %} {% endif %} -volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes +volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes proxyArguments: proxy-mode: - {{ openshift.node.proxy_mode }} diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index 639b6f6c8..57094f28e 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -4,7 +4,7 @@ After={{ openshift.docker.service_name }}.service After=openvswitch.service PartOf={{ openshift.docker.service_name }}.service Requires={{ openshift.docker.service_name }}.service -{% if openshift.common.use_openshift_sdn %} +{% if openshift_node_use_openshift_sdn %} Wants=openvswitch.service After=ovsdb-server.service After=ovs-vswitchd.service @@ -21,7 +21,7 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/ ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1 -ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION} +ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e 
HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION} ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf diff --git a/roles/openshift_node_dnsmasq/meta/main.yml b/roles/openshift_node_dnsmasq/meta/main.yml index 84035b88c..d80ed1b72 100644 --- a/roles/openshift_node_dnsmasq/meta/main.yml +++ b/roles/openshift_node_dnsmasq/meta/main.yml @@ -12,5 +12,4 @@ galaxy_info: categories: - cloud dependencies: -- role: openshift_common - role: openshift_node_facts diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md index 4e6229bfb..5ad994df9 100644 --- a/roles/openshift_node_upgrade/README.md +++ b/roles/openshift_node_upgrade/README.md @@ -32,14 +32,12 @@ From openshift.common: | Name | Default Value | | |------------------------------------|---------------------|---------------------| | openshift.common.config_base |---------------------|---------------------| -| openshift.common.data_dir |---------------------|---------------------| | openshift.common.hostname |---------------------|---------------------| | openshift.common.http_proxy |---------------------|---------------------| | openshift.common.is_atomic |---------------------|---------------------| | openshift.common.is_containerized |---------------------|---------------------| | openshift.common.portal_net |---------------------|---------------------| | openshift.common.service_type |---------------------|---------------------| -| openshift.common.use_openshift_sdn |---------------------|---------------------| From openshift.master: @@ -58,7 +56,7 @@ From openshift.node: Dependencies ------------ -openshift_common + TODO diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml index ed97d539c..3d8704308 100644 --- a/roles/openshift_node_upgrade/defaults/main.yml +++ b/roles/openshift_node_upgrade/defaults/main.yml @@ -1 +1,6 @@ --- +openshift_use_openshift_sdn: True +os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet" + +openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}" +openshift_node_data_dir: "{{ openshift_node_data_dir_default }}" diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml index d31b899cf..90d80855e 100644 --- a/roles/openshift_node_upgrade/handlers/main.yml +++ b/roles/openshift_node_upgrade/handlers/main.yml @@ -6,7 +6,7 @@ when: - not skip_node_svc_handlers | default(False) | bool - not (ovs_service_status_changed | default(false) | bool) - - 
openshift.common.use_openshift_sdn | default(true) | bool + - openshift_use_openshift_sdn | bool register: l_openshift_node_upgrade_stop_openvswitch_result until: not l_openshift_node_upgrade_stop_openvswitch_result | failed retries: 3 diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml index 2a36d8945..a810b01dc 100644 --- a/roles/openshift_node_upgrade/meta/main.yml +++ b/roles/openshift_node_upgrade/meta/main.yml @@ -11,4 +11,3 @@ galaxy_info: - 7 dependencies: - role: lib_utils -- role: openshift_common diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml index bc092c26c..e34319186 100644 --- a/roles/openshift_node_upgrade/tasks/main.yml +++ b/roles/openshift_node_upgrade/tasks/main.yml @@ -44,7 +44,7 @@ changed_when: "'Downloaded newer image' in pull_result.stdout" when: - openshift.common.is_containerized | bool - - openshift.common.use_openshift_sdn | default(true) | bool + - openshift_use_openshift_sdn | bool - include: docker/upgrade.yml vars: @@ -142,7 +142,7 @@ # End Disable Swap Block - name: Reset selinux context - command: restorecon -RF {{ openshift.common.data_dir }}/openshift.local.volumes + command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes when: - ansible_selinux is defined - ansible_selinux.status == 'enabled' diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml index 4e9550150..afff2f8ba 100644 --- a/roles/openshift_node_upgrade/tasks/systemd_units.yml +++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml @@ -4,7 +4,7 @@ # - openshift_image_tag # - openshift.common.is_containerized # - openshift.node.ovs_image -# - openshift.common.use_openshift_sdn +# - openshift_use_openshift_sdn # - openshift.common.service_type # - openshift.node.debug_level # - openshift.common.config_base @@ -28,10 +28,10 @@ when: openshift.common.is_containerized | bool - include: config/workaround-bz1331590-ovs-oom-fix.yml - when: openshift.common.use_openshift_sdn | default(true) | bool + when: openshift_use_openshift_sdn | bool - include: config/install-ovs-docker-service-file.yml - when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | default(true) | bool + when: openshift.common.is_containerized | bool and openshift_use_openshift_sdn | bool - include: config/configure-node-settings.yml - include: config/configure-proxy-settings.yml diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service index 639b6f6c8..451412ab0 100644 --- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service +++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service @@ -4,7 +4,7 @@ After={{ openshift.docker.service_name }}.service After=openvswitch.service PartOf={{ openshift.docker.service_name }}.service Requires={{ openshift.docker.service_name }}.service -{% if openshift.common.use_openshift_sdn %} +{% if openshift_use_openshift_sdn %} Wants=openvswitch.service After=ovsdb-server.service After=ovs-vswitchd.service @@ -21,7 +21,7 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/ ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq 
uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1 -ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION} +ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION} ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf diff --git a/roles/openshift_persistent_volumes/README.md b/roles/openshift_persistent_volumes/README.md index 1489cb0bd..0407d6ef1 100644 --- a/roles/openshift_persistent_volumes/README.md +++ b/roles/openshift_persistent_volumes/README.md @@ -17,13 +17,6 @@ From this role: | persistent_volume_claims | [] | List of persistent volume claim dictionaries, keys: name, capacity, access_modes | -From openshift_common: - -| Name | Default Value | | -|-------------------------------|----------------|----------------------------------------| -| openshift_debug_level | 2 | Global openshift debug log verbosity | - - Dependencies ------------ diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml index 25e5a38dd..8d3d010e4 100644 --- 
a/roles/openshift_persistent_volumes/meta/main.yml +++ b/roles/openshift_persistent_volumes/meta/main.yml @@ -10,5 +10,4 @@ galaxy_info: versions: - 7 dependencies: -- role: openshift_common - role: openshift_hosted_facts diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml index 01ee2544d..7c848cb12 100644 --- a/roles/openshift_service_catalog/defaults/main.yml +++ b/roles/openshift_service_catalog/defaults/main.yml @@ -1,3 +1,7 @@ --- openshift_service_catalog_remove: false openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"} + +openshift_use_openshift_sdn: True +# os_sdn_network_plugin_name: "{% if openshift_use_openshift_sdn %}redhat/openshift-ovs-subnet{% else %}{% endif %}" +os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet" diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index 64f94347b..746c73eaf 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -28,7 +28,7 @@ - name: Make kube-service-catalog project network global command: > oc adm pod-network make-projects-global kube-service-catalog - when: os_sdn_network_plugin_name | default('') == 'redhat/openshift-ovs-multitenant' + when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant' - include: generate_certs.yml diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index c0ea00f34..204abe27e 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -5,6 +5,12 @@ is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}" is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}" +- name: Install the base package for versioning + package: + name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" + state: present + when: not is_containerized | bool + # Block attempts to install origin without specifying some kind of version information. # This is because the latest tags for origin are usually alpha builds, which should not # be used by default. Users must indicate what they want.
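Editor's note (a sketch, not part of the patch): after these changes the node and master data directory and the SDN toggles are driven by plain inventory variables rather than openshift.common facts. Assuming a BYO inventory, the relevant knobs look like:

    [OSEv3:vars]
    # defaults shown; all three are consumed by the role defaults above
    openshift_data_dir=/var/lib/origin
    openshift_use_openshift_sdn=True
    os_sdn_network_plugin_name=redhat/openshift-ovs-subnet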