Diffstat (limited to 'playbooks')
-rw-r--r--   playbooks/adhoc/uninstall.yml                                        |   2
-rw-r--r--   playbooks/aws/README.md                                              | 220
-rwxr-xr-x   playbooks/aws/openshift-cluster/accept.yml                           |  19
-rw-r--r--   playbooks/aws/openshift-cluster/build_ami.yml                        | 146
-rw-r--r--   playbooks/aws/openshift-cluster/build_node_group.yml                 |  47
-rw-r--r--   playbooks/aws/openshift-cluster/install.yml                          |  74
-rw-r--r--   playbooks/aws/openshift-cluster/provision.yml                        | 158
-rw-r--r--   playbooks/aws/openshift-cluster/provision_install.yml                |  16
-rw-r--r--   playbooks/aws/openshift-cluster/provision_nodes.yml                  |  49
-rw-r--r--   playbooks/aws/openshift-cluster/provisioning_vars.example.yml        |  28
-rw-r--r--   playbooks/aws/openshift-cluster/vars.yml                             | 113
l---------   playbooks/byo/openshift-checks/roles                                 |   1
-rw-r--r--   playbooks/byo/openshift-cluster/config.yml                           |   2
-rw-r--r--   playbooks/byo/openshift-cluster/openshift-logging.yml                |   3
-rw-r--r--   playbooks/byo/openshift-cluster/service-catalog.yml                  |   3
-rw-r--r--   playbooks/byo/openshift-master/additional_config.yml                 |   6
-rw-r--r--   playbooks/byo/openshift-master/scaleup.yml                           |   7
-rw-r--r--   playbooks/common/openshift-cluster/config.yml                        |  11
-rw-r--r--   playbooks/common/openshift-cluster/enable_dnsmasq.yml                |   6
-rw-r--r--   playbooks/common/openshift-cluster/evaluate_groups.yml               |   1
-rw-r--r--   playbooks/common/openshift-cluster/initialize_facts.yml              |   4
-rw-r--r--   playbooks/common/openshift-cluster/initialize_openshift_version.yml  |   5
-rw-r--r--   playbooks/common/openshift-cluster/openshift_hosted.yml              |   1
-rw-r--r--   playbooks/common/openshift-cluster/sanity_checks.yml                 |  51
-rw-r--r--   playbooks/common/openshift-cluster/std_include.yml                   |   4
-rw-r--r--   playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml   |   2
-rw-r--r--   playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml |   2
-rw-r--r--   playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml   |   2
-rw-r--r--   playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml |  1
-rw-r--r--   playbooks/common/openshift-cluster/upgrades/etcd/backup.yml          |   2
-rw-r--r--   playbooks/common/openshift-cluster/upgrades/init.yml                 |   1
-rw-r--r--   playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml | 6
-rw-r--r--   playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml | 15
-rw-r--r--   playbooks/common/openshift-master/additional_config.yml              |   6
-rw-r--r--   playbooks/common/openshift-master/config.yml                         |   7
-rw-r--r--   playbooks/common/openshift-master/restart.yml                        |   2
-rw-r--r--   playbooks/common/openshift-node/config.yml                           |   9
37 files changed, 405 insertions(+), 627 deletions(-)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 58b3a7835..5072d10fa 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -35,11 +35,9 @@
- /etc/dnsmasq.d/origin-upstream-dns.conf
- /etc/dnsmasq.d/openshift-ansible.conf
- /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
- when: openshift_use_dnsmasq | default(true) | bool
- service:
name: NetworkManager
state: restarted
- when: openshift_use_dnsmasq | default(true) | bool
- name: Stop services
service: name={{ item }} state=stopped
with_items:
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 0fb29ca06..2b3d4329e 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -32,91 +32,54 @@ Before any provisioning may occur, AWS account credentials must be present in th
### Let's Provision!
The newly added playbooks are the following:
-- build_ami.yml
-- provision.yml
-- provision_nodes.yml
+- build_ami.yml - Builds a custom AMI. This currently requires the user to supply a valid base AMI with access to repositories that contain OpenShift packages.
+- provision.yml - Creates the VPC, ELBs, security groups, launch configurations, auto scaling groups, etc.
+- install.yml - Calls the openshift-ansible installer on the newly created instances
+- provision_nodes.yml - Creates the infra and compute node scale groups
+- accept.yml - Accepts the infra and compute nodes into the cluster
+- provision_install.yml - Runs provision.yml, install.yml, provision_nodes.yml, and accept.yml in sequence
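+
+Each of these playbooks follows the same basic invocation pattern, shown step-by-step in the sections below (the playbook name here is a placeholder):
+
+```
+$ ansible-playbook -i inventory.yml <playbook>.yml -e @provisioning_vars.yml
+```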
-The current expected work flow should be to provide the `vars.yml` file with the
-desired settings for cluster instances. These settings are AWS specific and should
-be tailored to the consumer's AWS custom account settings.
+The expected workflow is to provide an AMI with access to the OpenShift repositories. A repository should be specified in the `openshift_additional_repos` parameter of the inventory file (an example entry is shown in the inventory section below). The next expectation is a minimal set of values in the `provisioning_vars.yml` file that configure the desired settings for the cluster instances. These settings are AWS specific and should be tailored to the consumer's AWS account settings.
```yaml
-clusterid: mycluster
-region: us-east-1
-
-provision:
- clusterid: "{{ clusterid }}"
- region: "{{ region }}"
-
- build:
- base_image: ami-bdd5d6ab # base image for AMI to build from
- # when creating an encrypted AMI please specify use_encryption
- use_encryption: False
-
- # for s3 registry backend
- openshift_registry_s3: True
-
- # if using custom certificates these are required for the ELB
- iam_cert_ca:
- name: test_openshift
- cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
- key_path: '/path/to/wildcard.<clusterid>.example.com.key'
- chain_path: '/path/to/cert.ca.crt'
-
- instance_users:
- - key_name: myuser_key
- username: myuser
- pub_key: |
- ssh-rsa aaa<place public ssh key here>aaaaa user@<clusterid>
-
- node_group_config:
- tags:
- clusterid: "{{ clusterid }}"
- environment: stg
- ssh_key_name: myuser_key # name of the ssh key from above
-
- # configure master settings here
- master:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- # Set the following number to be the same for masters.
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
-...
- vpc:
- # name: mycluster # If missing; will default to clusterid
- cidr: 172.31.0.0/16
- subnets:
- us-east-1: # These are us-east-1 region defaults. Ensure this matches your region
- - cidr: 172.31.48.0/20
- az: "us-east-1c"
- - cidr: 172.31.32.0/20
- az: "us-east-1e"
- - cidr: 172.31.16.0/20
- az: "us-east-1a"
-
+---
+# when creating an AMI set this to True
+# when installing a cluster set this to False
+openshift_node_bootstrap: True
+
+# specify a clusterid
+# openshift_aws_clusterid: default
+
+# specify a region
+# openshift_aws_region: us-east-1
+
+# must specify a base_ami when building an AMI
+# openshift_aws_base_ami: # base image for AMI to build from
+# specify when using a custom AMI
+# openshift_aws_ami:
+
+# when creating an encrypted AMI please specify use_encryption
+# openshift_aws_ami_encrypt: False
+
+# custom certificates are required for the ELB
+# openshift_aws_iam_cert_path: '/path/to/cert/wildcard.<clusterid>.<domain>.com.crt'
+# openshift_aws_iam_cert_key_path: '/path/to/key/wildcard.<clusterid>.<domain>.com.key'
+# openshift_aws_iam_cert_chain_path: '/path/to/ca_cert_file/ca.crt'
+
+# This is required for any ec2 instances
+# openshift_aws_ssh_key_name: myuser_key
+
+# This will ensure these users are created
+#openshift_aws_users:
+#- key_name: myuser_key
+# username: myuser
+# pub_key: |
+# ssh-rsa AAAA
```
-Repeat the following setup for the infra and compute node groups. This most likely
- will not need editing but if the install requires further customization then these parameters
- can be updated.
-
-#### Step 1
+If customization is required for the instances, scale groups, or any other configurable option, please see ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in `provisioning_vars.yml`, the `inventory`, or `group_vars`.
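+
+For example, a hypothetical override in `provisioning_vars.yml` might look like this (both variables appear in the example file above; the values here are placeholders):
+
+```yaml
+# override the role defaults for cluster id and region
+openshift_aws_clusterid: mycluster
+openshift_aws_region: us-west-2
+```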
-Create an openshift-ansible inventory file to use for a byo installation. The exception here is that there will be no hosts specified by the inventory file. Here is an example:
+In order to create the bootstrap-able AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles. The exception here is that there will be no hosts specified by the inventory file. Here is an example:
```ini
[OSEv3:children]
@@ -133,6 +96,13 @@ etcd
################################################################################
# Ensure these variables are set for bootstrap
################################################################################
+# openshift_deployment_type is required for installation
+openshift_deployment_type=origin
+
+# required when building an AMI. This will
+# be dependent on the version provided by the yum repository
+openshift_pkg_version=-3.6.0
+
openshift_master_bootstrap_enabled=True
openshift_hosted_router_wait=False
@@ -153,77 +123,94 @@ openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo',
There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
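+
+A hypothetical `openshift_additional_repos` entry in the inventory might look like the following (the baseurl is a placeholder; point it at a repository that carries OpenShift packages):
+
+```ini
+openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', 'baseurl': 'https://example.com/openshift/repo', 'enabled': 1, 'gpgcheck': 0}]
+```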
-In order to create the bootstrapable AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles.
-
-
-#### Step 2
+#### Step 1
-Once the vars.yml file has been updated with the correct settings for the desired AWS account then we are ready to build an AMI.
+Once the `inventory` and the `provisioning_vars.yml` file have been updated with the correct settings for the desired AWS account, we are ready to build an AMI.
```
-$ ansible-playbook -i inventory.yml build_ami.yml
+$ ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml
```
1. This script will build a VPC. Default name will be clusterid if not specified.
2. Create an ssh key required for the instance.
-3. Create an instance.
-4. Run some setup roles to ensure packages and services are correctly configured.
-5. Create the AMI.
-6. If encryption is desired
+3. Create a security group.
+4. Create an instance using the key from step 2 or a specified key.
+5. Run openshift-ansible setup roles to ensure packages and services are correctly configured.
+6. Create the AMI.
+7. If encryption is desired
- A KMS key is created with the name of $clusterid
- An encrypted AMI will be produced with $clusterid KMS key
-7. Terminate the instance used to configure the AMI.
+8. Terminate the instance used to configure the AMI.
+More AMI-specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify use_encryption:
+```
+# openshift_aws_ami_encrypt: True # defaults to false
+```
-#### Step 3
+**Note**: When enabled, this takes the recently created AMI and encrypts it for later use. If encryption is not desired then leave the value set to false (the default). The AMI id will be fetched and used according to its most recent creation date.
-Now that we have created an AMI for our Openshift installation, that AMI id needs to be placed in the `vars.yml` file. To do so update the following fields (The AMI can be captured from the output of the previous step or found in the ec2 console under AMIs):
+#### Step 2
+
+Now that we have created an AMI for our OpenShift installation, there are two ways to use it.
+
+1. By default, the AMI id of the most recently created AMI is looked up and used.
+2. The `openshift_aws_ami` variable can be set to override this behavior and use a specific custom AMI.
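+
+A minimal sketch of pinning a custom AMI in `provisioning_vars.yml` (the AMI id below is a placeholder; use the id reported when your AMI was built):
+
+```yaml
+# hypothetical id; setting this skips the most-recently-created lookup
+openshift_aws_ami: ami-cdeec8b6
+```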
We are now ready to provision and install the cluster. This can be accomplished by calling all of the following steps at once or one-by-one. The all-in-one playbook can be invoked like this:
```
- # when creating an encrypted AMI please specify use_encryption
- use_encryption: False # defaults to false
+$ ansible-playbook -i inventory.yml provision_install.yml -e @provisioning_vars.yml
```
-**Note**: If using encryption, specify with `use_encryption: True`. This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false. The AMI id will be fetched and used according to its most recent creation date.
+If this is the first time running through this process, please attempt the following steps one-by-one and ensure the setup works correctly.
+#### Step 3
-#### Step 4
-
-We are ready to create the master instances and install Openshift.
+We are ready to create the master instances.
```
-$ ansible-playbook -i <inventory from step 1> provision.yml
+$ ansible-playbook provision.yml -e @provisioning_vars.yml
```
This playbook runs through the following steps:
-1. Ensures a VPC is created
-2. Ensures a SSH key exists
-3. Creates an s3 bucket for the registry named $clusterid
-4. Create master security groups
-5. Create a master launch config
-6. Create the master auto scaling groups
-7. If certificates are desired for ELB, they will be uploaded
-8. Create internal and external master ELBs
-9. Add newly created masters to the correct groups
-10. Set a couple of important facts for the masters
-11. Run the [`byo`](../../common/openshift-cluster/config.yml)
+1. Ensures a VPC is created.
+2. Ensures a SSH key exists.
+3. Creates an s3 bucket for the registry named $clusterid-docker-registry
+4. Create master security groups.
+5. Create a master launch config.
+6. Create the master auto scaling groups.
+7. If certificates are desired for ELB, they will be uploaded.
+8. Create internal and external master ELBs.
+9. Add newly created masters to the correct groups.
+10. Set a couple of important facts for the masters.
+
+At this point we have successfully created the infrastructure including the master nodes.
-At this point we have created a successful cluster with only the master nodes.
+#### Step 4
+
+Now it is time to install OpenShift using the openshift-ansible installer. This can be achieved by running the following playbook:
+
+```
+$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml
+```
+This playbook accomplishes the following:
+1. Builds a dynamic inventory file by querying AWS.
+2. Runs the [`byo`](../../common/openshift-cluster/config.yml)
+Once this playbook completes, the cluster masters should be installed and configured.
#### Step 5
-Now that we have a cluster deployed it might be more interesting to create some node types. This can be done easily with the following playbook:
+Now that we have a cluster deployed, it is time to create some node types. This can be done easily with the following playbook:
```
-$ ansible-playbook provision_nodes.yml
+$ ansible-playbook provision_nodes.yml -e @provisioning_vars.yml
```
Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator.
#### Step 6
-The registration of our nodes can be automated by running the following script `accept.yml`. This script can handle the registration in a few different ways.
+To facilitate the node registration process, nodes may be registered by running the `accept.yml` playbook. This playbook can handle registration in a few different ways.
- approve_all - **Note**: this option is for development and test environments. Security is bypassed
- nodes - A list of node names that will be accepted into the cluster
@@ -233,10 +220,11 @@ The registration of our nodes can be automated by running the following script `
nodes: < list of nodes here >
timeout: 0
```
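+
+A hypothetical filled-in version of the snippet above (the node name is copied from the `oc get nodes` output shown further below):
+
+```yaml
+nodes:
+- ip-172-31-49-148.ec2.internal
+timeout: 0
+```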
+
Once the desired accept method is chosen, run the following playbook `accept.yml`:
1. Run the following playbook.
```
-$ ansible-playbook accept.yml
+$ ansible-playbook accept.yml -e @provisioning_vars.yml
```
Login to a master and run the following command:
@@ -263,6 +251,6 @@ ip-172-31-49-148.ec2.internal Ready 1h v1.6.1+5115d
At this point your cluster should be ready for workloads. Proceed to deploy applications on your cluster.
-### Still to compute
+### Still to come
There are more enhancements that are arriving for provisioning. These will include more playbooks that enhance the provisioning capabilities.
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index d43c84205..ffc367f9f 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -1,12 +1,17 @@
+#!/usr/bin/ansible-playbook
---
- name: Setup the vpc and the master node group
- #hosts: oo_first_master
hosts: localhost
remote_user: root
gather_facts: no
tasks:
- - name: get provisioning vars
- include_vars: vars.yml
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- name: bring lib_openshift into scope
include_role:
@@ -14,9 +19,9 @@
- name: fetch masters
ec2_remote_facts:
- region: "{{ provision.region }}"
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
- "tag:clusterid": "{{ provision.clusterid }}"
+ "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
"tag:host-type": master
instance-state-name: running
register: mastersout
@@ -26,9 +31,9 @@
- name: fetch new node instances
ec2_remote_facts:
- region: "{{ provision.region }}"
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
- "tag:clusterid": "{{ provision.clusterid }}"
+ "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
"tag:host-type": node
instance-state-name: running
register: instancesout
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index d27874200..fc11205d8 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -3,67 +3,49 @@
connection: local
gather_facts: no
tasks:
- - name: get the necessary vars for ami building
- include_vars: vars.yml
-
- - name: create a vpc with the name <clusterid>
+ - name: Require openshift_aws_base_ami
+ fail:
+ msg: "A base AMI is required for AMI building. Please ensure `openshift_aws_base_ami` is defined."
+ when: openshift_aws_base_ami is undefined
+
+ - name: "Alert user to variables needed and their values - {{ item.name }}"
+ debug:
+ msg: "{{ item.msg }}"
+ with_items:
+ - name: openshift_aws_clusterid
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+ - name: openshift_aws_region
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+ - name: create an instance and prepare for ami
include_role:
- name: openshift_aws_vpc
+ name: openshift_aws
+ tasks_from: build_ami.yml
vars:
- r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_vpc_cidr: "{{ provision.vpc.cidr }}"
- r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}"
- r_openshift_aws_vpc_region: "{{ provision.region }}"
- r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}"
- r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}"
+ openshift_aws_node_group_type: compute
- - name: create aws ssh keypair
- include_role:
- name: openshift_aws_ssh_keys
- vars:
- r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}"
- r_openshift_aws_ssh_keys_region: "{{ provision.region }}"
-
- - name: fetch the default subnet id
- ec2_vpc_subnet_facts:
- region: "{{ provision.region }}"
+ - name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
- "tag:Name": "{{ provision.vpc.subnets[provision.region][0].az }}"
- register: subnetout
-
- - name: create instance for ami creation
- ec2:
- assign_public_ip: yes
- region: "{{ provision.region }}"
- key_name: "{{ provision.node_group_config.ssh_key_name }}"
- group: "{{ provision.clusterid }}"
- instance_type: m4.xlarge
- vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
- image: "{{ provision.build.base_image }}"
- volumes:
- - device_name: /dev/sdb
- volume_type: gp2
- volume_size: 100
- delete_on_termination: true
- wait: yes
- exact_count: 1
- count_tag:
- Name: ami_base
- instance_tags:
- Name: ami_base
- register: amibase
+ "tag:Name": "{{ openshift_aws_base_ami_name | default('ami_base') }}"
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
- name: wait for ssh to become available
wait_for:
port: 22
- host: "{{ amibase.tagged_instances.0.public_ip }}"
+ host: "{{ instancesout.instances[0].public_ip_address }}"
timeout: 300
search_regex: OpenSSH
- name: add host to nodes
add_host:
groups: nodes
- name: "{{ amibase.tagged_instances.0.public_dns_name }}"
+ name: "{{ instancesout.instances[0].public_dns_name }}"
- name: set the user to perform installation
set_fact:
@@ -81,70 +63,16 @@
- name: run the std_include
include: ../../common/openshift-cluster/initialize_openshift_repos.yml
-- hosts: nodes
- remote_user: root
- tasks:
- - name: get the necessary vars for ami building
- include_vars: vars.yml
-
- - set_fact:
- openshift_node_bootstrap: True
-
- - name: run openshift image preparation
- include_role:
- name: openshift_node
+- name: install node config
+ include: ../../common/openshift-node/config.yml
- hosts: localhost
connection: local
become: no
tasks:
- - name: bundle ami
- ec2_ami:
- instance_id: "{{ amibase.tagged_instances.0.id }}"
- region: "{{ provision.region }}"
- state: present
- description: "This was provisioned {{ ansible_date_time.iso8601 }}"
- name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
- tags: "{{ provision.build.openshift_ami_tags }}"
- wait: yes
- register: amioutput
-
- - debug: var=amioutput
-
- - when: provision.build.use_encryption | default(False)
- block:
- - name: setup kms key for encryption
- include_role:
- name: openshift_aws_iam_kms
- vars:
- r_openshift_aws_iam_kms_region: "{{ provision.region }}"
- r_openshift_aws_iam_kms_alias: "alias/{{ provision.clusterid }}_kms"
-
- - name: augment the encrypted ami tags with source-ami
- set_fact:
- source_tag:
- source-ami: "{{ amioutput.image_id }}"
-
- - name: copy the ami for encrypted disks
- include_role:
- name: openshift_aws_ami_copy
- vars:
- r_openshift_aws_ami_copy_region: "{{ provision.region }}"
- r_openshift_aws_ami_copy_name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}-encrypted"
- r_openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
- r_openshift_aws_ami_copy_kms_alias: "alias/{{ provision.clusterid }}_kms"
- r_openshift_aws_ami_copy_tags: "{{ source_tag | combine(provision.build.openshift_ami_tags) }}"
- r_openshift_aws_ami_copy_encrypt: "{{ provision.build.use_encryption }}"
- # this option currently fails due to boto waiters
- # when supported this need to be reapplied
- #r_openshift_aws_ami_copy_wait: True
-
- - name: Display newly created encrypted ami id
- debug:
- msg: "{{ r_openshift_aws_ami_copy_retval_custom_ami }}"
-
- - name: terminate temporary instance
- ec2:
- state: absent
- region: "{{ provision.region }}"
- instance_ids: "{{ amibase.tagged_instances.0.id }}"
+ - name: seal the ami
+ include_role:
+ name: openshift_aws
+ tasks_from: seal_ami.yml
+ vars:
+ openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
diff --git a/playbooks/aws/openshift-cluster/build_node_group.yml b/playbooks/aws/openshift-cluster/build_node_group.yml
deleted file mode 100644
index 3ef492238..000000000
--- a/playbooks/aws/openshift-cluster/build_node_group.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: fetch recently created AMI
- ec2_ami_find:
- region: "{{ provision.region }}"
- sort: creationDate
- sort_order: descending
- name: "{{ provision.build.ami_name }}*"
- ami_tags: "{{ provision.build.openshift_ami_tags }}"
- #no_result_action: fail
- register: amiout
-
-- block:
- - name: "Create {{ openshift_build_node_type }} sgs"
- include_role:
- name: openshift_aws_sg
- vars:
- r_openshift_aws_sg_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_sg_region: "{{ provision.region }}"
- r_openshift_aws_sg_type: "{{ openshift_build_node_type }}"
-
- - name: "generate a launch config name for {{ openshift_build_node_type }}"
- set_fact:
- launch_config_name: "{{ provision.clusterid }}-{{ openshift_build_node_type }}-{{ ansible_date_time.epoch }}"
-
- - name: create "{{ openshift_build_node_type }} launch config"
- include_role:
- name: openshift_aws_launch_config
- vars:
- r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
- r_openshift_aws_launch_config_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_launch_config_region: "{{ provision.region }}"
- r_openshift_aws_launch_config: "{{ provision.node_group_config }}"
- r_openshift_aws_launch_config_type: "{{ openshift_build_node_type }}"
- r_openshift_aws_launch_config_custom_image: "{{ '' if 'results' not in amiout else amiout.results[0].ami_id }}"
- r_openshift_aws_launch_config_bootstrap_token: "{{ (local_bootstrap['content'] |b64decode) if local_bootstrap is defined else '' }}"
-
- - name: "create {{ openshift_build_node_type }} node groups"
- include_role:
- name: openshift_aws_node_group
- vars:
- r_openshift_aws_node_group_name: "{{ provision.clusterid }} openshift {{ openshift_build_node_type }}"
- r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}"
- r_openshift_aws_node_group_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_node_group_region: "{{ provision.region }}"
- r_openshift_aws_node_group_config: "{{ provision.node_group_config }}"
- r_openshift_aws_node_group_type: "{{ openshift_build_node_type }}"
- r_openshift_aws_node_group_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}"
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
new file mode 100644
index 000000000..86d58a68e
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -0,0 +1,74 @@
+---
+- name: Setup the vpc and the master node group
+ hosts: localhost
+ tasks:
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+ - name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
+ "tag:host-type": master
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+ - name: add new master to masters group
+ add_host:
+ groups: "masters,etcd,nodes"
+ name: "{{ item.public_ip_address }}"
+ hostname: "{{ openshift_aws_clusterid | default('default') }}-master-{{ item.id[:-5] }}"
+ with_items: "{{ instancesout.instances }}"
+
+ - name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ item.public_ip_address }}"
+ timeout: 300
+ search_regex: OpenSSH
+ with_items: "{{ instancesout.instances }}"
+
+- name: set the master facts for hostname to elb
+ hosts: masters
+ gather_facts: no
+ remote_user: root
+ tasks:
+ - name: fetch elbs
+ ec2_elb_facts:
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
+ names:
+ - "{{ item }}"
+ with_items:
+ - "{{ openshift_aws_clusterid | default('default') }}-master-external"
+ - "{{ openshift_aws_clusterid | default('default') }}-master-internal"
+ delegate_to: localhost
+ register: elbs
+
+ - debug: var=elbs
+
+ - name: set fact
+ set_fact:
+ openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
+ osm_custom_cors_origins:
+ - "{{ elbs.results[1].elbs[0].dns_name }}"
+ - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
+ - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
+ with_items: "{{ groups['masters'] }}"
+
+- name: normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/std_include.yml
+
+- name: run the config
+ include: ../../common/openshift-cluster/config.yml
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index dfbf61cc7..db7afac6f 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -2,156 +2,16 @@
- name: Setup the vpc and the master node group
hosts: localhost
tasks:
- - name: get provisioning vars
- include_vars: vars.yml
- - name: create default vpc
- include_role:
- name: openshift_aws_vpc
- vars:
- r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_vpc_cidr: "{{ provision.vpc.cidr }}"
- r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}"
- r_openshift_aws_vpc_region: "{{ provision.region }}"
- r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}"
- r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}"
-
- - name: create aws ssh keypair
- include_role:
- name: openshift_aws_ssh_keys
- vars:
- r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}"
- r_openshift_aws_ssh_keys_region: "{{ provision.region }}"
-
- - when: provision.openshift_registry_s3 | default(false)
- name: create s3 bucket for registry
- include_role:
- name: openshift_aws_s3
- vars:
- r_openshift_aws_s3_clusterid: "{{ provision.clusterid }}-docker-registry"
- r_openshift_aws_s3_region: "{{ provision.region }}"
- r_openshift_aws_s3_mode: create
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
- - name: include scale group creation for master
- include: build_node_group.yml
- vars:
- openshift_build_node_type: master
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- - name: fetch new master instances
- ec2_remote_facts:
- region: "{{ provision.region }}"
- filters:
- "tag:clusterid": "{{ provision.clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: instancesout.instances|length > 0
-
- - name: bring iam_cert23 into scope
- include_role:
- name: lib_utils
-
- - name: upload certificates to AWS IAM
- iam_cert23:
- state: present
- name: "{{ provision.clusterid }}-master-external"
- cert: "{{ provision.iam_cert_ca.cert_path }}"
- key: "{{ provision.iam_cert_ca.key_path }}"
- cert_chain: "{{ provision.iam_cert_ca.chain_path | default(omit) }}"
- register: elb_cert_chain
- failed_when:
- - "'failed' in elb_cert_chain"
- - elb_cert_chain.failed
- - "'msg' in elb_cert_chain"
- - "'already exists' not in elb_cert_chain.msg"
- when: provision.iam_cert_ca is defined
-
- - debug: var=elb_cert_chain
-
- - name: create our master external and internal load balancers
+ - name: create default vpc
include_role:
- name: openshift_aws_elb
- vars:
- r_openshift_aws_elb_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_elb_region: "{{ provision.region }}"
- r_openshift_aws_elb_instance_filter:
- "tag:clusterid": "{{ provision.clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- r_openshift_aws_elb_type: master
- r_openshift_aws_elb_direction: "{{ elb_item }}"
- r_openshift_aws_elb_idle_timout: 400
- r_openshift_aws_elb_scheme: internet-facing
- r_openshift_aws_elb_security_groups:
- - "{{ provision.clusterid }}"
- - "{{ provision.clusterid }}_master"
- r_openshift_aws_elb_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}"
- r_openshift_aws_elb_name: "{{ provision.clusterid }}-master-{{ elb_item }}"
- r_openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
- with_items:
- - internal
- - external
- loop_control:
- loop_var: elb_item
-
- - name: add new master to masters group
- add_host:
- groups: "masters,etcd,nodes"
- name: "{{ item.public_ip_address }}"
- hostname: "{{ provision.clusterid }}-master-{{ item.id[:-5] }}"
- with_items: "{{ instancesout.instances }}"
-
- - name: set facts for group normalization
- set_fact:
- cluster_id: "{{ provision.clusterid }}"
- cluster_env: "{{ provision.node_group_config.tags.environment | default('dev') }}"
-
- - name: wait for ssh to become available
- wait_for:
- port: 22
- host: "{{ item.public_ip_address }}"
- timeout: 300
- search_regex: OpenSSH
- with_items: "{{ instancesout.instances }}"
-
-
-- name: set the master facts for hostname to elb
- hosts: masters
- gather_facts: no
- remote_user: root
- tasks:
- - name: include vars
- include_vars: vars.yml
-
- - name: fetch elbs
- ec2_elb_facts:
- region: "{{ provision.region }}"
- names:
- - "{{ item }}"
- with_items:
- - "{{ provision.clusterid }}-master-external"
- - "{{ provision.clusterid }}-master-internal"
- delegate_to: localhost
- register: elbs
-
- - debug: var=elbs
-
- - name: set fact
- set_fact:
- openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
- osm_custom_cors_origins:
- - "{{ elbs.results[1].elbs[0].dns_name }}"
- - "console.{{ provision.clusterid }}.openshift.com"
- - "api.{{ provision.clusterid }}.openshift.com"
- with_items: "{{ groups['masters'] }}"
-
-- name: normalize groups
- include: ../../byo/openshift-cluster/initialize_groups.yml
-
-- name: run the std_include
- include: ../../common/openshift-cluster/std_include.yml
-
-- name: run the config
- include: ../../common/openshift-cluster/config.yml
+ name: openshift_aws
+ tasks_from: provision.yml
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
new file mode 100644
index 000000000..e787deced
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -0,0 +1,16 @@
+---
+# Once an AMI is built, this playbook is the one-stop shop
+# to provision and install a cluster.
+# This playbook is run with the following parameters:
+# ansible-playbook -i openshift-ansible-inventory provision_install.yml
+- name: Include the provision.yml playbook to create cluster
+ include: provision.yml
+
+- name: Include the install.yml playbook to install cluster
+ include: install.yml
+
+- name: Include the provision_nodes.yml playbook to provision the infra and compute nodes
+ include: provision_nodes.yml
+
+- name: Include the accept.yml playbook to accept nodes into the cluster
+ include: accept.yml
diff --git a/playbooks/aws/openshift-cluster/provision_nodes.yml b/playbooks/aws/openshift-cluster/provision_nodes.yml
index 5428fb307..44c686e08 100644
--- a/playbooks/aws/openshift-cluster/provision_nodes.yml
+++ b/playbooks/aws/openshift-cluster/provision_nodes.yml
@@ -1,47 +1,18 @@
---
-# Get bootstrap config token
-# bootstrap should be created on first master
-# need to fetch it and shove it into cloud data
- name: create the node scale groups
hosts: localhost
connection: local
gather_facts: yes
tasks:
- - name: get provisioning vars
- include_vars: vars.yml
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
- - name: fetch master instances
- ec2_remote_facts:
- region: "{{ provision.region }}"
- filters:
- "tag:clusterid": "{{ provision.clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: instancesout.instances|length > 0
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- - name: slurp down the bootstrap.kubeconfig
- slurp:
- src: /etc/origin/master/bootstrap.kubeconfig
- delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
- remote_user: root
- register: bootstrap
-
- - name: set_fact on localhost for kubeconfig
- set_fact:
- local_bootstrap: "{{ bootstrap }}"
- launch_config_name:
- infra: "infra-{{ ansible_date_time.epoch }}"
- compute: "compute-{{ ansible_date_time.epoch }}"
-
- - name: include build node group
- include: build_node_group.yml
- vars:
- openshift_build_node_type: infra
-
- - name: include build node group
- include: build_node_group.yml
- vars:
- openshift_build_node_type: compute
+ - name: create the node groups
+ include_role:
+ name: openshift_aws
+ tasks_from: provision_nodes.yml
diff --git a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
new file mode 100644
index 000000000..28eb9c993
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
@@ -0,0 +1,28 @@
+---
+# when creating an AMI set this option to True
+# when installing the cluster, set this to False
+openshift_node_bootstrap: True
+
+# specify a clusterid
+#openshift_aws_clusterid: default
+
+# must specify a base_ami when building an AMI
+#openshift_aws_base_ami:
+
+# when creating an encrypted AMI please specify use_encryption
+#openshift_aws_ami_encrypt: False
+
+# custom certificates are required for the ELB
+#openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
+#openshift_aws_iam_key_path: '/path/to/wildcard.<clusterid>.example.com.key'
+#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
+
+# This is required for any ec2 instances
+#openshift_aws_ssh_key_name: myuser_key
+
+# This will ensure these users are created
+#openshift_aws_users:
+#- key_name: myuser_key
+# username: myuser
+# pub_key: |
+# ssh-rsa AAAA
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
deleted file mode 100644
index 47da03cb7..000000000
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ /dev/null
@@ -1,113 +0,0 @@
----
-
-clusterid: mycluster
-region: us-east-1
-
-provision:
- clusterid: "{{ clusterid }}"
- region: "{{ region }}"
-
- build: # build specific variables here
- ami_name: "openshift-gi-"
- base_image: ami-bdd5d6ab # base image for AMI to build from
-
- # when creating an encrypted AMI please specify use_encryption
- use_encryption: False
-
- openshift_ami_tags:
- bootstrap: "true"
- openshift-created: "true"
- clusterid: "{{ clusterid }}"
-
- # Use s3 backed registry storage
- openshift_registry_s3: True
-
- # if using custom certificates these are required for the ELB
- iam_cert_ca:
- name: "{{ clusterid }}_openshift"
- cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
- key_path: '/path/to/wildcard.<clusterid>.example.com.key'
- chain_path: '/path/to/cert.ca.crt'
-
- instance_users:
- - key_name: myuser_key
- username: myuser
- pub_key: |
- ssh-rsa AAAA== myuser@system
-
- node_group_config:
- tags:
- clusterid: "{{ clusterid }}"
- environment: stg
-
- ssh_key_name: myuser_key
-
- # master specific cluster node settings
- master:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
-
- # compute specific cluster node settings
- compute:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: True
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 100
- desired_size: 3
- tags:
- host-type: node
- sub-host-type: compute
-
- # infra specific cluster node settings
- infra:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: True
- health_check:
- period: 60
- type: EC2
- min_size: 2
- max_size: 20
- desired_size: 2
- tags:
- host-type: node
- sub-host-type: infra
-
- # vpc settings
- vpc:
- cidr: 172.31.0.0/16
- subnets:
- us-east-1: # These are us-east-1 region defaults. Ensure this matches your region
- - cidr: 172.31.48.0/20
- az: "us-east-1c"
- - cidr: 172.31.32.0/20
- az: "us-east-1e"
- - cidr: 172.31.16.0/20
- az: "us-east-1a"
diff --git a/playbooks/byo/openshift-checks/roles b/playbooks/byo/openshift-checks/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-checks/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index acf5469bf..60fa44c5b 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -9,6 +9,4 @@
- include: ../../common/openshift-cluster/config.yml
vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml
index bbec3a4c2..a523bb47f 100644
--- a/playbooks/byo/openshift-cluster/openshift-logging.yml
+++ b/playbooks/byo/openshift-cluster/openshift-logging.yml
@@ -13,6 +13,3 @@
- always
- include: ../../common/openshift-cluster/openshift_logging.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml
index 6f95b4e2d..40a7606e7 100644
--- a/playbooks/byo/openshift-cluster/service-catalog.yml
+++ b/playbooks/byo/openshift-cluster/service-catalog.yml
@@ -13,6 +13,3 @@
- always
- include: ../../common/openshift-cluster/service_catalog.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift-master/additional_config.yml b/playbooks/byo/openshift-master/additional_config.yml
new file mode 100644
index 000000000..b3d7b5731
--- /dev/null
+++ b/playbooks/byo/openshift-master/additional_config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-master/additional_config.yml
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
index e3ef704e5..a09edd55a 100644
--- a/playbooks/byo/openshift-master/scaleup.yml
+++ b/playbooks/byo/openshift-master/scaleup.yml
@@ -1,7 +1,7 @@
---
- include: ../openshift-cluster/initialize_groups.yml
-- name: Ensure there are new_masters
+- name: Ensure there are new_masters or new_nodes
hosts: localhost
connection: local
become: no
@@ -13,11 +13,8 @@
add hosts to the new_masters and new_nodes host groups to add
masters.
when:
- - (g_new_master_hosts | default([]) | length == 0) or (g_new_node_hosts | default([]) | length == 0)
+ - (g_new_master_hosts | default([]) | length == 0) and (g_new_node_hosts | default([]) | length == 0)
- include: ../../common/openshift-cluster/std_include.yml
- include: ../../common/openshift-master/scaleup.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 5f420a76c..bbd5a0185 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -22,6 +22,15 @@
tags:
- always
+- name: Set hostname
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ tasks:
+ # TODO: switch back to hostname module once we depend on ansible-2.4
+ # https://github.com/ansible/ansible/pull/25906
+ - name: Set hostname
+ command: "hostnamectl set-hostname {{ openshift.common.hostname }}"
+ when: openshift_set_hostname | default(false,true) | bool
+
- include: ../openshift-etcd/config.yml
- include: ../openshift-nfs/config.yml
@@ -34,6 +43,8 @@
- include: ../openshift-master/config.yml
+- include: ../openshift-master/additional_config.yml
+
- include: ../openshift-node/config.yml
tags:
- node
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index 50351588f..be14b06f0 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -27,9 +27,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- use_dnsmasq: True
- role: master
local_facts:
dns_port: '8053'
@@ -50,9 +47,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- use_dnsmasq: True
- role: node
local_facts:
dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c9f37109b..16a733899 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -48,6 +48,7 @@
when:
- g_etcd_hosts | default([]) | length == 0
- not openshift_master_unsupported_all_in_one | default(False)
+ - not openshift_node_bootstrap | default(False)
- name: Evaluate oo_all_hosts
add_host:
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 9eaf3bc34..0723575c2 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -126,11 +126,9 @@
openshift_facts:
role: common
local_facts:
- debug_level: "{{ openshift_debug_level | default(2) }}"
deployment_type: "{{ openshift_deployment_type }}"
deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
cli_image: "{{ osm_image | default(None) }}"
- cluster_id: "{{ openshift_cluster_id | default('default') }}"
hostname: "{{ openshift_hostname | default(None) }}"
ip: "{{ openshift_ip | default(None) }}"
is_containerized: "{{ l_is_containerized | default(None) }}"
@@ -148,8 +146,6 @@
no_proxy: "{{ openshift_no_proxy | default(None) }}"
generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
- sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
- use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
- name: initialize_facts set_fact repoquery command
set_fact:
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 7112a6084..7af6b25bc 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -12,5 +12,10 @@
hosts: oo_all_hosts:!oo_first_master
vars:
openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+ pre_tasks:
+ - set_fact:
+ openshift_pkg_version: -{{ openshift_version }}
+ when: openshift_pkg_version is not defined
+ - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
roles:
- openshift_version
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index a391b963a..75339f6df 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -29,7 +29,6 @@
- role: openshift_default_storage_class
when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
- role: openshift_hosted
- r_openshift_hosted_use_calico: "{{ openshift.common.use_calico | default(false) | bool }}"
- role: openshift_metrics
when: openshift_hosted_metrics_deploy | default(false) | bool
- role: openshift_logging
diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml
new file mode 100644
index 000000000..26716a92d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/sanity_checks.yml
@@ -0,0 +1,51 @@
+---
+- name: Verify Requirements
+ hosts: oo_all_hosts
+ tasks:
+ - fail:
+ msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
+
+ - fail:
+ msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+ msg: Nuage sdn can not be used with flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with nuage
+ when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
+
+ - fail:
+ msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
+ when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
+
+ - fail:
+ msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
+ when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+ msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
+ when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: openshift_hostname must be 63 characters or less
+ when: openshift_hostname is defined and openshift_hostname | length > 63
+
+ - fail:
+ msg: openshift_public_hostname must be 63 characters or less
+ when: openshift_public_hostname is defined and openshift_public_hostname | length > 63
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 6cc56889a..cef0072f3 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -7,6 +7,10 @@
tags:
- always
+- include: sanity_checks.yml
+ tags:
+ - always
+
- include: validate_hostnames.yml
tags:
- node
diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
index 1a6580795..eb118365a 100644
--- a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
@@ -3,7 +3,7 @@
- name: Generate etcd instance names(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: etcd_names_output
with_sequence: count={{ num_etcd }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
index 36d7b7870..783f70f50 100644
--- a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
@@ -3,7 +3,7 @@
- name: Generate master instance names(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: master_names_output
with_sequence: count={{ num_masters }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
index 278942f8b..c103e40a9 100644
--- a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
@@ -5,7 +5,7 @@
- name: Generate node instance names(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
register: node_names_output
with_sequence: count={{ number_nodes }}
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 7cc13137f..98953f72e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -4,7 +4,6 @@
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- include: ../initialize_nodes_to_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 616ba04f8..2cc6c9019 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -2,7 +2,7 @@
- name: Backup etcd
hosts: oo_etcd_hosts_to_backup
roles:
- - role: openshift_facts
+ - role: openshift_etcd_facts
- role: etcd_common
r_etcd_common_action: backup
r_etcd_common_backup_tag: etcd_backup_tag
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 0f421928b..c98065cf4 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -4,7 +4,6 @@
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- include: ../initialize_oo_option_facts.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
index 9d8b73cff..6d8503879 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
@@ -1,8 +1,10 @@
---
# Only check if docker upgrade is required if docker_upgrade is not
# already set to False.
-- include: ../docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+- include: ../../docker/upgrade_check.yml
+ when:
+ - docker_upgrade is not defined or (docker_upgrade | bool)
+ - not (openshift.common.is_atomic | bool)
# Additional checks for Atomic hosts:
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
index 9a959a959..4c345dbe8 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
@@ -9,6 +9,21 @@
deployment types
when: deployment_type not in ['origin','openshift-enterprise', 'online']
+ # osm_cluster_network_cidr, osm_host_subnet_length and openshift_portal_net are
+ # required when upgrading to avoid changes that may occur between releases
+ # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1451023
+ - assert:
+ that:
+ - "osm_cluster_network_cidr is defined"
+ - "osm_host_subnet_length is defined"
+ - "openshift_portal_net is defined"
+ msg: >
+ osm_cluster_network_cidr, osm_host_subnet_length, and openshift_portal_net are required inventory
+ variables when upgrading. These variables should match what is currently used in the cluster. If
+ you don't remember what these values are you can find them in /etc/origin/master/master-config.yaml
+ on a master with the names clusterNetworkCIDR (osm_cluster_network_cidr),
+ hostSubnetLength (osm_host_subnet_length), and serviceNetworkCIDR (openshift_portal_net).
+
# Error out in situations where the user has older versions specified in their
# inventory in any of the openshift_release, openshift_image_tag, and
# openshift_pkg_version variables. These must be removed or updated to proceed
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index c0ea93d2c..7468c78f0 100644
--- a/playbooks/common/openshift-master/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -11,13 +11,13 @@
when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- role: openshift_examples
registry_url: "{{ openshift.master.registry_url }}"
- when: openshift.common.install_examples | bool
+ when: openshift_install_examples | default(True)
- role: openshift_hosted_templates
registry_url: "{{ openshift.master.registry_url }}"
- role: openshift_manageiq
- when: openshift.common.use_manageiq | bool
+ when: openshift_use_manageiq | default(false) | bool
- role: cockpit
when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
(osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' )
- role: flannel_register
- when: openshift.common.use_flannel | bool
+ when: openshift_use_flannel | default(false) | bool
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index b29b9ef4f..c77d7bb87 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -208,18 +208,15 @@
openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
- role: nuage_master
- when: openshift.common.use_nuage | bool
+ when: openshift_use_nuage | default(false) | bool
- role: calico_master
- when: openshift.common.use_calico | bool
+ when: openshift_use_calico | default(false) | bool
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
-- include: additional_config.yml
- when: not g_openshift_master_is_scaleup
-
- name: Re-enable excluder if it was previously enabled
hosts: oo_masters_to_config
gather_facts: no
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 6fec346c3..4d73b8124 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -7,7 +7,7 @@
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
handlers:
- - include: roles/openshift_master/handlers/main.yml
+ - include: ../../../roles/openshift_master/handlers/main.yml
static: yes
roles:
- openshift_facts
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index c13417714..0801c41ff 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -71,17 +71,18 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
- when: openshift.common.use_flannel | bool
+ when: openshift_use_flannel | default(false) | bool
- role: calico
- when: openshift.common.use_calico | bool
+ when: openshift_use_calico | default(false) | bool
- role: nuage_node
- when: openshift.common.use_nuage | bool
+ when: openshift_use_nuage | default(false) | bool
- role: contiv
contiv_role: netplugin
- when: openshift.common.use_contiv | bool
+ when: openshift_use_contiv | default(false) | bool
- role: nickhammond.logrotate
- role: openshift_manage_node
openshift_master_host: "{{ groups.oo_first_master.0 }}"
+ when: not openshift_node_bootstrap | default(False)
tasks:
- name: Create group for deployment type
group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}