author    Scott Dodson <sdodson@redhat.com>    2017-09-07 16:32:56 -0400
committer GitHub <noreply@github.com>    2017-09-07 16:32:56 -0400
commit    7b1c455c01d10ab5aa804ad48a5b60ab53d6a0c8 (patch)
tree      b900625cbb8a97af4d2cf0d19d452bd643a9e0ec /roles/openshift_aws
parent    dc0e3d218ba953e1bc1525ef337f99677deee6c3 (diff)
parent    efe86b44bce679db38cca654818dc3837bb05f6a (diff)
Merge pull request #5211 from kwoodson/provisioning_fixes
Provisioning updates.
Diffstat (limited to 'roles/openshift_aws')
-rw-r--r--  roles/openshift_aws/README.md                      84
-rw-r--r--  roles/openshift_aws/defaults/main.yml             209
-rw-r--r--  roles/openshift_aws/filter_plugins/filters.py      28
-rw-r--r--  roles/openshift_aws/meta/main.yml                   3
-rw-r--r--  roles/openshift_aws/tasks/ami_copy.yml             34
-rw-r--r--  roles/openshift_aws/tasks/build_ami.yml            48
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml     34
-rw-r--r--  roles/openshift_aws/tasks/elb.yml                  68
-rw-r--r--  roles/openshift_aws/tasks/iam_cert.yml             29
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml        45
-rw-r--r--  roles/openshift_aws/tasks/provision.yml            54
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml      66
-rw-r--r--  roles/openshift_aws/tasks/s3.yml                    7
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml          32
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml             49
-rw-r--r--  roles/openshift_aws/tasks/security_group.yml       45
-rw-r--r--  roles/openshift_aws/tasks/ssh_keys.yml              8
-rw-r--r--  roles/openshift_aws/tasks/vpc.yml                  52
18 files changed, 895 insertions, 0 deletions
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
new file mode 100644
index 000000000..696efbea5
--- /dev/null
+++ b/roles/openshift_aws/README.md
@@ -0,0 +1,84 @@
+openshift_aws
+==================================
+
+Provisions the AWS infrastructure OpenShift needs: VPC, security groups, ssh keys, S3 bucket, ELBs, launch configurations, scale groups, and AMIs.
+
+Requirements
+------------
+
+* Ansible 2.3
+* Boto
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value
+|---------------------------------------------------|-----------------------
+| openshift_aws_clusterid | default
+| openshift_aws_elb_scheme | internet-facing
+| openshift_aws_launch_config_bootstrap_token | ''
+| openshift_aws_node_group_config | {'master': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_master_volumes }}', 'tags': {'host-type': 'master', 'sub-host-type': 'default'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'wait_for_instances': True, 'max_size': 3}, 'tags': '{{ openshift_aws_node_group_config_tags }}', 'compute': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'compute'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'max_size': 100}, 'infra': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'infra'}, 'min_size': 2, 'instance_type': 'm4.xlarge', 'desired_size': 2, 'max_size': 20}}
+| openshift_aws_ami_copy_wait | False
+| openshift_aws_users | []
+| openshift_aws_launch_config_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-{{ ansible_date_time.epoch }}
+| openshift_aws_create_vpc | True
+| openshift_aws_node_group_type | master
+| openshift_aws_elb_cert_arn | ''
+| openshift_aws_kubernetes_cluster_status | owned
+| openshift_aws_s3_mode | create
+| openshift_aws_vpc | {'subnets': {'us-east-1': [{'cidr': '172.31.48.0/20', 'az': 'us-east-1c'}, {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'}, {'cidr': '172.31.16.0/20', 'az': 'us-east-1a'}]}, 'cidr': '172.31.0.0/16', 'name': '{{ openshift_aws_vpc_name }}'}
+| openshift_aws_create_ssh_keys | False
+| openshift_aws_iam_kms_alias | alias/{{ openshift_aws_clusterid }}_kms
+| openshift_aws_use_custom_ami | False
+| openshift_aws_ami_copy_src_region | {{ openshift_aws_region }}
+| openshift_aws_s3_bucket_name | {{ openshift_aws_clusterid }}-docker-registry
+| openshift_aws_elb_health_check | {'response_timeout': 5, 'ping_port': 443, 'ping_protocol': 'tcp', 'interval': 30, 'healthy_threshold': 2, 'unhealthy_threshold': 2}
+| openshift_aws_node_security_groups | {'default': {'rules': [{'to_port': 22, 'from_port': 22, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 'all', 'from_port': 'all', 'proto': 'all', 'group_name': '{{ openshift_aws_clusterid }}'}], 'name': '{{ openshift_aws_clusterid }}', 'desc': '{{ openshift_aws_clusterid }} default'}, 'master': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_master', 'desc': '{{ openshift_aws_clusterid }} master instances'}, 'compute': {'name': '{{ openshift_aws_clusterid }}_compute', 'desc': '{{ openshift_aws_clusterid }} compute node instances'}, 'etcd': {'name': '{{ openshift_aws_clusterid }}_etcd', 'desc': '{{ openshift_aws_clusterid }} etcd instances'}, 'infra': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 32000, 'from_port': 30000, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_infra', 'desc': '{{ openshift_aws_clusterid }} infra node instances'}}
+| openshift_aws_elb_security_groups | ['{{ openshift_aws_clusterid }}', '{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}']
+| openshift_aws_vpc_tags | {'Name': '{{ openshift_aws_vpc_name }}'}
+| openshift_aws_create_security_groups | True
+| openshift_aws_create_iam_cert | True
+| openshift_aws_create_scale_group | True
+| openshift_aws_ami_encrypt | False
+| openshift_aws_node_group_config_node_volumes | [{'volume_size': 100, 'delete_on_termination': True, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
+| openshift_aws_elb_instance_filter | {'tag:host-type': '{{ openshift_aws_node_group_type }}', 'tag:clusterid': '{{ openshift_aws_clusterid }}', 'instance-state-name': 'running'}
+| openshift_aws_region | us-east-1
+| openshift_aws_elb_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
+| openshift_aws_elb_idle_timout | 400
+| openshift_aws_subnet_name | us-east-1c
+| openshift_aws_node_group_config_tags | {{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}
+| openshift_aws_create_launch_config | True
+| openshift_aws_ami_tags | {'bootstrap': 'true', 'clusterid': '{{ openshift_aws_clusterid }}', 'openshift-created': 'true'}
+| openshift_aws_ami_name | openshift-gi
+| openshift_aws_node_group_config_master_volumes | [{'volume_size': 100, 'delete_on_termination': False, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
+| openshift_aws_vpc_name | {{ openshift_aws_clusterid }}
+| openshift_aws_elb_listeners | {'master': {'internal': [{'instance_port': 80, 'instance_protocol': 'tcp', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'tcp', 'load_balancer_port': 443, 'protocol': 'tcp'}], 'external': [{'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 443, 'ssl_certificate_id': '{{ openshift_aws_elb_cert_arn }}', 'protocol': 'ssl'}]}}
+| openshift_aws_create_s3 | True
+| openshift_aws_wait_for_ssh | True
+| openshift_aws_ami | ''
+| openshift_aws_base_ami_name | ami_base
+| openshift_aws_scale_group_name | {{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}
+| openshift_aws_iam_cert_name | {{ openshift_aws_clusterid }}-master-external
+
+
+Dependencies
+------------
+
+* lib_utils
+
+Example Playbook
+----------------
+
+```yaml
+- include_role:
+    name: openshift_aws
+    tasks_from: vpc.yml
+  vars:
+    openshift_aws_clusterid: test
+    openshift_aws_region: us-east-1
+    openshift_aws_create_vpc: true
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
new file mode 100644
index 000000000..4e7f54f79
--- /dev/null
+++ b/roles/openshift_aws/defaults/main.yml
@@ -0,0 +1,209 @@
+---
+openshift_aws_create_vpc: True
+openshift_aws_create_s3: True
+openshift_aws_create_iam_cert: True
+openshift_aws_create_security_groups: True
+openshift_aws_create_launch_config: True
+openshift_aws_create_scale_group: True
+openshift_aws_kubernetes_cluster_status: owned  # or shared
+openshift_aws_node_group_type: master
+
+openshift_aws_wait_for_ssh: True
+
+openshift_aws_clusterid: default
+openshift_aws_region: us-east-1
+openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
+
+openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
+openshift_aws_iam_cert_path: ''
+openshift_aws_iam_cert_chain_path: ''
+openshift_aws_iam_cert_key_path: ''
+openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}"
+
+openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
+openshift_aws_ami: ''
+openshift_aws_ami_copy_wait: False
+openshift_aws_ami_encrypt: False
+openshift_aws_ami_copy_src_region: "{{ openshift_aws_region }}"
+openshift_aws_ami_name: openshift-gi
+openshift_aws_base_ami_name: ami_base
+
+openshift_aws_launch_config_bootstrap_token: ''
+openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-{{ ansible_date_time.epoch }}"
+
+openshift_aws_users: []
+
+openshift_aws_ami_tags:
+  bootstrap: "true"
+  openshift-created: "true"
+  clusterid: "{{ openshift_aws_clusterid }}"
+
+openshift_aws_s3_mode: create
+openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"
+
+openshift_aws_elb_health_check:
+  ping_protocol: tcp
+  ping_port: 443
+  response_timeout: 5
+  interval: 30
+  unhealthy_threshold: 2
+  healthy_threshold: 2
+
+openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_idle_timout: 400
+openshift_aws_elb_scheme: internet-facing
+openshift_aws_elb_cert_arn: ''
+
+openshift_aws_elb_listeners:
+  master:
+    external:
+    - protocol: tcp
+      load_balancer_port: 80
+      instance_protocol: ssl
+      instance_port: 443
+    - protocol: ssl
+      load_balancer_port: 443
+      instance_protocol: ssl
+      instance_port: 443
+      # ssl certificate required for https or ssl
+      ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
+    internal:
+    - protocol: tcp
+      load_balancer_port: 80
+      instance_protocol: tcp
+      instance_port: 80
+    - protocol: tcp
+      load_balancer_port: 443
+      instance_protocol: tcp
+      instance_port: 443
+
+openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sdb
+  volume_size: 100
+  device_type: gp2
+  delete_on_termination: False
+
+openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sdb
+  volume_size: 100
+  device_type: gp2
+  delete_on_termination: True
+
+openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+
+openshift_aws_node_group_config:
+  tags: "{{ openshift_aws_node_group_config_tags }}"
+  master:
+    instance_type: m4.xlarge
+    ami: "{{ openshift_aws_ami }}"
+    volumes: "{{ openshift_aws_node_group_config_master_volumes }}"
+    health_check:
+      period: 60
+      type: EC2
+    min_size: 3
+    max_size: 3
+    desired_size: 3
+    tags:
+      host-type: master
+      sub-host-type: default
+    wait_for_instances: True
+  compute:
+    instance_type: m4.xlarge
+    ami: "{{ openshift_aws_ami }}"
+    volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
+    health_check:
+      period: 60
+      type: EC2
+    min_size: 3
+    max_size: 100
+    desired_size: 3
+    tags:
+      host-type: node
+      sub-host-type: compute
+  infra:
+    instance_type: m4.xlarge
+    ami: "{{ openshift_aws_ami }}"
+    volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
+    health_check:
+      period: 60
+      type: EC2
+    min_size: 2
+    max_size: 20
+    desired_size: 2
+    tags:
+      host-type: node
+      sub-host-type: infra
+
+openshift_aws_elb_security_groups:
+- "{{ openshift_aws_clusterid }}"
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"
+
+openshift_aws_elb_instance_filter:
+  "tag:clusterid": "{{ openshift_aws_clusterid }}"
+  "tag:host-type": "{{ openshift_aws_node_group_type }}"
+  instance-state-name: running
+
+openshift_aws_node_security_groups:
+  default:
+    name: "{{ openshift_aws_clusterid }}"
+    desc: "{{ openshift_aws_clusterid }} default"
+    rules:
+    - proto: tcp
+      from_port: 22
+      to_port: 22
+      cidr_ip: 0.0.0.0/0
+    - proto: all
+      from_port: all
+      to_port: all
+      group_name: "{{ openshift_aws_clusterid }}"
+  master:
+    name: "{{ openshift_aws_clusterid }}_master"
+    desc: "{{ openshift_aws_clusterid }} master instances"
+    rules:
+    - proto: tcp
+      from_port: 80
+      to_port: 80
+      cidr_ip: 0.0.0.0/0
+    - proto: tcp
+      from_port: 443
+      to_port: 443
+      cidr_ip: 0.0.0.0/0
+  compute:
+    name: "{{ openshift_aws_clusterid }}_compute"
+    desc: "{{ openshift_aws_clusterid }} compute node instances"
+  infra:
+    name: "{{ openshift_aws_clusterid }}_infra"
+    desc: "{{ openshift_aws_clusterid }} infra node instances"
+    rules:
+    - proto: tcp
+      from_port: 80
+      to_port: 80
+      cidr_ip: 0.0.0.0/0
+    - proto: tcp
+      from_port: 443
+      to_port: 443
+      cidr_ip: 0.0.0.0/0
+    - proto: tcp
+      from_port: 30000
+      to_port: 32000
+      cidr_ip: 0.0.0.0/0
+  etcd:
+    name: "{{ openshift_aws_clusterid }}_etcd"
+    desc: "{{ openshift_aws_clusterid }} etcd instances"
+
+openshift_aws_vpc_tags:
+  Name: "{{ openshift_aws_vpc_name }}"
+
+openshift_aws_subnet_name: us-east-1c
+
+openshift_aws_vpc:
+  name: "{{ openshift_aws_vpc_name }}"
+  cidr: 172.31.0.0/16
+  subnets:
+    us-east-1:
+    - cidr: 172.31.48.0/20
+      az: "us-east-1c"
+    - cidr: 172.31.32.0/20
+      az: "us-east-1e"
+    - cidr: 172.31.16.0/20
+      az: "us-east-1a"
diff --git a/roles/openshift_aws/filter_plugins/filters.py b/roles/openshift_aws/filter_plugins/filters.py
new file mode 100644
index 000000000..06e1f9602
--- /dev/null
+++ b/roles/openshift_aws/filter_plugins/filters.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+Custom filters for use in openshift_aws
+'''
+
+
+class FilterModule(object):
+    ''' Custom ansible filters for use by openshift_aws role'''
+
+    @staticmethod
+    def build_instance_tags(clusterid, status='owned'):
+        ''' This function will return a dictionary of the instance tags.
+
+            The main reason to have this inside of a filter_plugin is that we
+            need to build the following dynamic dictionary key:
+
+            {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'}
+
+        '''
+        tags = {'clusterid': clusterid,
+                'kubernetes.io/cluster/{}'.format(clusterid): status}
+
+        return tags
+
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {'build_instance_tags': self.build_instance_tags}
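For context on how this filter is consumed: `defaults/main.yml` pipes the cluster id through it to build `openshift_aws_node_group_config_tags`. A minimal sketch of the call and its result, assuming a hypothetical cluster id of `mycluster`:

```yaml
# As wired up in defaults/main.yml:
#   openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
# With openshift_aws_clusterid=mycluster and the default status of 'owned',
# the filter returns:
clusterid: mycluster
kubernetes.io/cluster/mycluster: owned
```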
diff --git a/roles/openshift_aws/meta/main.yml b/roles/openshift_aws/meta/main.yml
new file mode 100644
index 000000000..875efcb8f
--- /dev/null
+++ b/roles/openshift_aws/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- lib_utils
diff --git a/roles/openshift_aws/tasks/ami_copy.yml b/roles/openshift_aws/tasks/ami_copy.yml
new file mode 100644
index 000000000..07020dd62
--- /dev/null
+++ b/roles/openshift_aws/tasks/ami_copy.yml
@@ -0,0 +1,34 @@
+---
+- fail:
+    msg: "{{ item }} needs to be defined"
+  when: item is not defined
+  with_items:
+  - openshift_aws_ami_copy_src_ami
+  - openshift_aws_ami_copy_name
+
+- name: Create IAM KMS key with alias
+  oo_iam_kms:
+    state: present
+    alias: "{{ openshift_aws_iam_kms_alias }}"
+    region: "{{ openshift_aws_region }}"
+  register: created_kms
+
+- debug: var=created_kms.results
+
+- name: "Create copied AMI image and wait: {{ openshift_aws_ami_copy_wait }}"
+  ec2_ami_copy:
+    name: "{{ openshift_aws_ami_copy_name }}"
+    region: "{{ openshift_aws_region }}"
+    source_region: "{{ openshift_aws_ami_copy_src_region }}"
+    source_image_id: "{{ openshift_aws_ami_copy_src_ami }}"
+    encrypted: "{{ openshift_aws_ami_encrypt | bool }}"
+    kms_key_id: "{{ created_kms.results.KeyArn | default(omit) }}"
+    wait: "{{ openshift_aws_ami_copy_wait | default(omit) }}"
+    tags: "{{ openshift_aws_ami_tags }}"
+  register: copy_result
+
+- debug: var=copy_result
+
+- name: return the AMI ID via set_fact
+  set_fact:
+    openshift_aws_ami_copy_custom_ami: "{{ copy_result.image_id }}"
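The `fail` guard above means callers must pass `openshift_aws_ami_copy_src_ami` and `openshift_aws_ami_copy_name`. A minimal invocation sketch (the AMI id is hypothetical):

```yaml
- name: copy and encrypt an existing AMI
  include: ami_copy.yml
  vars:
    openshift_aws_ami_copy_src_ami: ami-0123456789abcdef0  # hypothetical source image
    openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted"
    openshift_aws_ami_encrypt: True
```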
diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/build_ami.yml
new file mode 100644
index 000000000..8d4e5ac43
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_ami.yml
@@ -0,0 +1,48 @@
+---
+- when: openshift_aws_create_vpc | bool
+  name: create a vpc
+  include: vpc.yml
+
+- when: openshift_aws_users | length > 0
+  name: create aws ssh keypair
+  include: ssh_keys.yml
+
+- when: openshift_aws_create_security_groups | bool
+  name: Create compute security_groups
+  include: security_group.yml
+
+- name: query vpc
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      'tag:Name': "{{ openshift_aws_vpc_name }}"
+  register: vpcout
+
+- name: fetch the default subnet id
+  ec2_vpc_subnet_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_subnet_name }}"
+      vpc-id: "{{ vpcout.vpcs[0].id }}"
+  register: subnetout
+
+- name: create instance for ami creation
+  ec2:
+    assign_public_ip: yes
+    region: "{{ openshift_aws_region }}"
+    key_name: "{{ openshift_aws_ssh_key_name }}"
+    group: "{{ openshift_aws_clusterid }}"
+    instance_type: m4.xlarge
+    vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
+    image: "{{ openshift_aws_base_ami }}"
+    volumes:
+    - device_name: /dev/sdb
+      volume_type: gp2
+      volume_size: 100
+      delete_on_termination: true
+    wait: yes
+    exact_count: 1
+    count_tag:
+      Name: "{{ openshift_aws_base_ami_name }}"
+    instance_tags:
+      Name: "{{ openshift_aws_base_ami_name }}"
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
new file mode 100644
index 000000000..0dac1c23d
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -0,0 +1,34 @@
+---
+# When openshift_aws_ami is '' we retrieve the most recently
+# built AMI and set openshift_aws_ami to that image.
+- when: openshift_aws_ami == ''
+  block:
+  - name: fetch recently created AMI
+    ec2_ami_find:
+      region: "{{ openshift_aws_region }}"
+      sort: creationDate
+      sort_order: descending
+      name: "{{ openshift_aws_ami_name }}*"
+      ami_tags: "{{ openshift_aws_ami_tags }}"
+      no_result_action: fail
+    register: amiout
+
+  - name: Set the openshift_aws_ami
+    set_fact:
+      openshift_aws_ami: "{{ amiout.results[0].ami_id }}"
+    when:
+    - "'results' in amiout"
+    - amiout.results|length > 0
+
+- when: openshift_aws_create_security_groups | bool
+  name: "Create {{ openshift_aws_node_group_type }} security groups"
+  include: security_group.yml
+
+- when: openshift_aws_create_launch_config | bool
+  name: "Create {{ openshift_aws_node_group_type }} launch config"
+  include: launch_config.yml
+
+- when: openshift_aws_create_scale_group | bool
+  name: "Create {{ openshift_aws_node_group_type }} node group"
+  include: scale_group.yml
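A caller can bypass the `ec2_ami_find` lookup entirely by pinning `openshift_aws_ami`; a sketch with a hypothetical image id:

```yaml
- name: build the compute node group from a pinned AMI
  include: build_node_group.yml
  vars:
    openshift_aws_node_group_type: compute
    openshift_aws_ami: ami-0fedcba9876543210  # hypothetical; skips the AMI lookup block
```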
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
new file mode 100644
index 000000000..a1fdd66fc
--- /dev/null
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -0,0 +1,68 @@
+---
+- name: query vpc
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      'tag:Name': "{{ openshift_aws_vpc_name }}"
+  register: vpcout
+
+- name: debug
+  debug: var=vpcout
+
+- name: fetch the remote instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters: "{{ openshift_aws_elb_instance_filter }}"
+  register: instancesout
+
+- name: fetch the default subnet id
+  ec2_vpc_subnet_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_subnet_name }}"
+      vpc-id: "{{ vpcout.vpcs[0].id }}"
+  register: subnetout
+
+- name: debug the computed elb listeners
+  debug:
+    msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+             if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
+             else openshift_aws_elb_listeners }}"
+
+- name: "Create ELB {{ openshift_aws_elb_name }}"
+  ec2_elb_lb:
+    name: "{{ openshift_aws_elb_name }}"
+    state: present
+    security_group_names: "{{ openshift_aws_elb_security_groups }}"
+    idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
+    region: "{{ openshift_aws_region }}"
+    subnets:
+    - "{{ subnetout.subnets[0].id }}"
+    health_check: "{{ openshift_aws_elb_health_check }}"
+    listeners: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+                   if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
+                   else openshift_aws_elb_listeners }}"
+    scheme: "{{ openshift_aws_elb_scheme }}"
+    tags:
+      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+  register: new_elb
+
+# It is necessary to ignore_errors here because the instances are not in 'ready'
+# state when first added to the ELB.
+- name: "Add instances to ELB {{ openshift_aws_elb_name }}"
+  ec2_elb:
+    instance_id: "{{ item.id }}"
+    ec2_elbs: "{{ openshift_aws_elb_name }}"
+    state: present
+    region: "{{ openshift_aws_region }}"
+    wait: False
+  with_items: "{{ instancesout.instances }}"
+  ignore_errors: True
+  retries: 10
+  register: elb_call
+  until: elb_call | succeeded
+
+- debug:
+    msg: "{{ item }}"
+  with_items:
+  - "{{ new_elb }}"
diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml
new file mode 100644
index 000000000..cd9772a25
--- /dev/null
+++ b/roles/openshift_aws/tasks/iam_cert.yml
@@ -0,0 +1,29 @@
+---
+- name: upload certificates to AWS IAM
+  iam_cert23:
+    state: present
+    name: "{{ openshift_aws_iam_cert_name }}"
+    cert: "{{ openshift_aws_iam_cert_path }}"
+    key: "{{ openshift_aws_iam_cert_key_path }}"
+    cert_chain: "{{ openshift_aws_iam_cert_chain_path | default(omit) }}"
+  register: elb_cert_chain
+  failed_when:
+  - "'failed' in elb_cert_chain"
+  - elb_cert_chain.failed
+  - "'msg' in elb_cert_chain"
+  - "'already exists and has a different certificate body' in elb_cert_chain.msg"
+  - "'BotoServerError' in elb_cert_chain.msg"
+  when:
+  - openshift_aws_create_iam_cert | bool
+  - openshift_aws_iam_cert_path != ''
+  - openshift_aws_iam_cert_key_path != ''
+  - openshift_aws_elb_cert_arn == ''
+
+# guard the fact so a skipped upload doesn't fail on a missing attribute
+- name: set_fact openshift_aws_elb_cert_arn
+  set_fact:
+    openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
+  when: "'arn' in elb_cert_chain"
+
+- name: wait for cert to propagate
+  pause:
+    seconds: 5
+  when: elb_cert_chain.changed
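Per the `when` clause above, the upload only runs when both cert paths are set and no ARN was supplied. A minimal invocation sketch (paths are hypothetical):

```yaml
- name: upload the master ELB certificate to IAM
  include: iam_cert.yml
  vars:
    openshift_aws_iam_cert_path: /tmp/master.crt       # hypothetical
    openshift_aws_iam_cert_key_path: /tmp/master.key   # hypothetical
    openshift_aws_iam_cert_chain_path: /tmp/ca.crt     # hypothetical
```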
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
new file mode 100644
index 000000000..65c5a6cc0
--- /dev/null
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -0,0 +1,45 @@
+---
+- fail:
+    msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image."
+  when:
+  - openshift_aws_ami is undefined
+
+- name: fetch the security groups for launch config
+  ec2_group_facts:
+    filters:
+      group-name:
+      - "{{ openshift_aws_clusterid }}"  # default sg
+      - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"  # node type sg
+      - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s"  # node type sg k8s
+    region: "{{ openshift_aws_region }}"
+  register: ec2sgs
+
+# Create the launch config for the scale group
+- name: Create the node scale group launch config
+  ec2_lc:
+    name: "{{ openshift_aws_launch_config_name }}"
+    region: "{{ openshift_aws_region }}"
+    image_id: "{{ openshift_aws_ami }}"
+    instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}"
+    security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id') | list }}"
+    user_data: |-
+      #cloud-config
+      {% if openshift_aws_node_group_type != 'master' %}
+      write_files:
+      - path: /root/csr_kubeconfig
+        owner: root:root
+        permissions: '0640'
+        content: {{ openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
+      - path: /root/openshift_settings
+        owner: root:root
+        permissions: '0640'
+        content:
+          openshift_type: "{{ openshift_aws_node_group_type }}"
+      runcmd:
+      - [ systemctl, enable, atomic-openshift-node]
+      - [ systemctl, start, atomic-openshift-node]
+      {% endif %}
+    key_name: "{{ openshift_aws_ssh_key_name }}"
+    ebs_optimized: False
+    volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}"
+    assign_public_ip: True
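For a rough idea of what the `user_data` template above produces for a non-master group: assuming `openshift_aws_node_group_type=compute` and an empty bootstrap token, the rendered cloud-config looks approximately like this:

```yaml
#cloud-config
write_files:
- path: /root/csr_kubeconfig
  owner: root:root
  permissions: '0640'
  content: ''
- path: /root/openshift_settings
  owner: root:root
  permissions: '0640'
  content:
    openshift_type: "compute"
runcmd:
- [ systemctl, enable, atomic-openshift-node]
- [ systemctl, start, atomic-openshift-node]
```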
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
new file mode 100644
index 000000000..189caeaee
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -0,0 +1,54 @@
+---
+- when: openshift_aws_create_vpc | bool
+  name: create default vpc
+  include: vpc.yml
+
+- when: openshift_aws_create_iam_cert | bool
+  name: create the iam_cert for elb certificate
+  include: iam_cert.yml
+
+- when: openshift_aws_users | length > 0
+  name: create aws ssh keypair
+  include: ssh_keys.yml
+
+- when: openshift_aws_create_s3 | bool
+  name: create s3 bucket for registry
+  include: s3.yml
+
+- name: include scale group creation for master
+  include: build_node_group.yml
+
+- name: fetch newly created instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:clusterid": "{{ openshift_aws_clusterid }}"
+      "tag:host-type": "{{ openshift_aws_node_group_type }}"
+      instance-state-name: running
+  register: instancesout
+  retries: 20
+  delay: 3
+  until: instancesout.instances|length > 0
+
+- name: create our master internal load balancers
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: internal
+    openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-internal"
+    openshift_aws_elb_scheme: internal
+
+- name: create our master external load balancers
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: external
+    openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-external"
+    openshift_aws_elb_scheme: internet-facing
+
+- name: wait for ssh to become available
+  wait_for:
+    port: 22
+    host: "{{ item.public_ip_address }}"
+    timeout: 300
+    search_regex: OpenSSH
+  with_items: "{{ instancesout.instances }}"
+  when: openshift_aws_wait_for_ssh | bool
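A minimal driver playbook for the flow above, in the same spirit as the README example (values are illustrative):

```yaml
- hosts: localhost
  connection: local
  gather_facts: yes   # ansible_date_time is needed for the launch config name
  tasks:
  - name: provision the masters plus supporting AWS infrastructure
    include_role:
      name: openshift_aws
      tasks_from: provision.yml
    vars:
      openshift_aws_clusterid: test
      openshift_aws_region: us-east-1
```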
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
new file mode 100644
index 000000000..fc4996c68
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -0,0 +1,66 @@
+---
+# Get bootstrap config token
+# bootstrap should be created on the first master
+# need to fetch it and shove it into cloud data
+- name: fetch master instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:clusterid": "{{ openshift_aws_clusterid }}"
+      "tag:host-type": master
+      instance-state-name: running
+  register: instancesout
+  retries: 20
+  delay: 3
+  until: instancesout.instances|length > 0
+
+- name: slurp down the bootstrap.kubeconfig
+  slurp:
+    src: /etc/origin/master/bootstrap.kubeconfig
+  delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
+  remote_user: root
+  register: bootstrap
+
+- name: set_fact for kubeconfig token
+  set_fact:
+    openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}"
+
+- name: include build node group for infra
+  include: build_node_group.yml
+  vars:
+    openshift_aws_node_group_type: infra
+    openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift infra"
+    openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-infra-{{ ansible_date_time.epoch }}"
+
+- name: include build node group for compute
+  include: build_node_group.yml
+  vars:
+    openshift_aws_node_group_type: compute
+    openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift compute"
+    openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-compute-{{ ansible_date_time.epoch }}"
+
+- when: openshift_aws_wait_for_ssh | bool
+  block:
+  - name: pause and allow for instances to scale before we query them
+    pause:
+      seconds: 10
+
+  - name: fetch newly created instances
+    ec2_remote_facts:
+      region: "{{ openshift_aws_region }}"
+      filters:
+        "tag:clusterid": "{{ openshift_aws_clusterid }}"
+        "tag:host-type": node
+        instance-state-name: running
+    register: instancesout
+    retries: 20
+    delay: 3
+    until: instancesout.instances|length > 0
+
+  - name: wait for ssh to become available
+    wait_for:
+      port: 22
+      host: "{{ item.public_ip_address }}"
+      timeout: 300
+      search_regex: OpenSSH
+    with_items: "{{ instancesout.instances }}"
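A sketch of driving this file, assuming the masters from provision.yml are already running and reachable as root over SSH (required by the `slurp` above):

```yaml
- hosts: localhost
  connection: local
  gather_facts: yes
  tasks:
  - name: scale out the infra and compute node groups
    include_role:
      name: openshift_aws
      tasks_from: provision_nodes.yml
    vars:
      openshift_aws_clusterid: test
      openshift_aws_region: us-east-1
```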
diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml
new file mode 100644
index 000000000..9cf37c840
--- /dev/null
+++ b/roles/openshift_aws/tasks/s3.yml
@@ -0,0 +1,7 @@
+---
+- name: Create an s3 bucket
+  s3:
+    bucket: "{{ openshift_aws_s3_bucket_name }}"
+    mode: "{{ openshift_aws_s3_mode }}"
+    region: "{{ openshift_aws_region }}"
+  when: openshift_aws_create_s3 | bool
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
new file mode 100644
index 000000000..3e969fc43
--- /dev/null
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -0,0 +1,32 @@
+---
+- name: query vpc
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      'tag:Name': "{{ openshift_aws_vpc_name }}"
+  register: vpcout
+
+- name: fetch the subnet to use in scale group
+  ec2_vpc_subnet_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_subnet_name }}"
+      vpc-id: "{{ vpcout.vpcs[0].id }}"
+  register: subnetout
+
+- name: Create the scale group
+  ec2_asg:
+    name: "{{ openshift_aws_scale_group_name }}"
+    launch_config_name: "{{ openshift_aws_launch_config_name }}"
+    health_check_period: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.period }}"
+    health_check_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.type }}"
+    min_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].min_size }}"
+    max_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].max_size }}"
+    desired_capacity: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].desired_size }}"
+    region: "{{ openshift_aws_region }}"
+    termination_policies: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].termination_policy if 'termination_policy' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+    load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+    wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False) }}"
+    vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
+    tags:
+    - "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
new file mode 100644
index 000000000..0cb749dcc
--- /dev/null
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -0,0 +1,49 @@
+---
+- name: fetch newly created instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_base_ami_name }}"
+      instance-state-name: running
+  register: instancesout
+  retries: 20
+  delay: 3
+  until: instancesout.instances|length > 0
+
+- name: bundle ami
+  ec2_ami:
+    instance_id: "{{ instancesout.instances.0.id }}"
+    region: "{{ openshift_aws_region }}"
+    state: present
+    description: "This was provisioned {{ ansible_date_time.iso8601 }}"
+    name: "{{ openshift_aws_ami_name }}"
+    tags: "{{ openshift_aws_ami_tags }}"
+    wait: yes
+  register: amioutput
+
+- debug: var=amioutput
+
+- when: openshift_aws_ami_encrypt | bool
+  block:
+  - name: augment the encrypted ami tags with source-ami
+    set_fact:
+      source_tag:
+        source-ami: "{{ amioutput.image_id }}"
+
+  - name: copy the ami for encrypted disks
+    include: ami_copy.yml
+    vars:
+      openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted"
+      openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
+      # TODO: How does the kms alias get passed to ec2_ami_copy
+      openshift_aws_ami_copy_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
+      openshift_aws_ami_copy_tags: "{{ source_tag | combine(openshift_aws_ami_tags) }}"
+      # this option currently fails due to boto waiters
+      # when supported this needs to be reapplied
+      #openshift_aws_ami_copy_wait: True
+
+- name: terminate temporary instance
+  ec2:
+    state: absent
+    region: "{{ openshift_aws_region }}"
+    instance_ids: "{{ instancesout.instances.0.id }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
new file mode 100644
index 000000000..161e72fb4
--- /dev/null
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -0,0 +1,45 @@
+---
+- name: Fetch the VPC to look up its id
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_clusterid }}"
+  register: vpcout
+
+- name: Create default security group for cluster
+  ec2_group:
+    name: "{{ openshift_aws_node_security_groups.default.name }}"
+    description: "{{ openshift_aws_node_security_groups.default.desc }}"
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpcout.vpcs[0].id }}"
+    rules: "{{ openshift_aws_node_security_groups.default.rules | default(omit, True) }}"
+  register: sg_default_created
+
+- name: create the node group sgs
+  ec2_group:
+    name: "{{ item.name }}"
+    description: "{{ item.desc }}"
+    rules: "{{ item.rules if 'rules' in item else [] }}"
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpcout.vpcs[0].id }}"
+  register: sg_create
+  with_items:
+  - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type] }}"
+
+- name: create the k8s sgs for the node group
+  ec2_group:
+    name: "{{ item.name }}_k8s"
+    description: "{{ item.desc }} for k8s"
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpcout.vpcs[0].id }}"
+  register: k8s_sg_create
+  with_items:
+  - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type] }}"
+
+- name: tag sg groups with proper tags
+  ec2_tag:
+    tags:
+      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+    resource: "{{ item.group_id }}"
+    region: "{{ openshift_aws_region }}"
+  with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/ssh_keys.yml b/roles/openshift_aws/tasks/ssh_keys.yml
new file mode 100644
index 000000000..f439ce74e
--- /dev/null
+++ b/roles/openshift_aws/tasks/ssh_keys.yml
@@ -0,0 +1,8 @@
+---
+- name: Add the public keys for the users
+  ec2_key:
+    name: "{{ item.key_name }}"
+    key_material: "{{ item.pub_key }}"
+    region: "{{ openshift_aws_region }}"
+  with_items: "{{ openshift_aws_users }}"
+  no_log: True
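The loop above expects each entry in `openshift_aws_users` to carry a `key_name` and a `pub_key`; a hypothetical example:

```yaml
openshift_aws_users:
- key_name: openshift-admin            # hypothetical key pair name
  pub_key: "ssh-rsa AAAAB3Nza... admin@example.com"
```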
diff --git a/roles/openshift_aws/tasks/vpc.yml b/roles/openshift_aws/tasks/vpc.yml
new file mode 100644
index 000000000..ce2c8eac5
--- /dev/null
+++ b/roles/openshift_aws/tasks/vpc.yml
@@ -0,0 +1,52 @@
+---
+- name: Create AWS VPC
+  ec2_vpc_net:
+    state: present
+    cidr_block: "{{ openshift_aws_vpc.cidr }}"
+    dns_support: True
+    dns_hostnames: True
+    region: "{{ openshift_aws_region }}"
+    name: "{{ openshift_aws_clusterid }}"
+    tags: "{{ openshift_aws_vpc_tags }}"
+  register: vpc
+
+- name: Sleep to avoid a race condition when creating the vpc
+  pause:
+    seconds: 5
+  when: vpc.changed
+
+- name: assign the vpc igw
+  ec2_vpc_igw:
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpc.vpc.id }}"
+  register: igw
+
+- name: assign the vpc subnets
+  ec2_vpc_subnet:
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpc.vpc.id }}"
+    cidr: "{{ item.cidr }}"
+    az: "{{ item.az }}"
+    resource_tags:
+      Name: "{{ item.az }}"
+  with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}"
+
+- name: Grab the route tables from our VPC
+  ec2_vpc_route_table_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      vpc-id: "{{ vpc.vpc.id }}"
+  register: route_table
+
+- name: update the route table in the vpc
+  ec2_vpc_route_table:
+    lookup: id
+    route_table_id: "{{ route_table.route_tables[0].id }}"
+    vpc_id: "{{ vpc.vpc.id }}"
+    region: "{{ openshift_aws_region }}"
+    tags:
+      Name: "{{ openshift_aws_vpc_name }}"
+    routes:
+    - dest: 0.0.0.0/0
+      gateway_id: igw  # special value: route via the VPC's attached internet gateway
+  register: route_table_out
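Since `openshift_aws_vpc.subnets` is keyed by region, running the role outside us-east-1 means overriding the VPC definition; note that the subnet task above tags each subnet with its AZ name, so `openshift_aws_subnet_name` must match one of the AZs. A sketch for us-west-2 with hypothetical CIDRs:

```yaml
openshift_aws_region: us-west-2
openshift_aws_subnet_name: us-west-2a
openshift_aws_vpc:
  name: "{{ openshift_aws_vpc_name }}"
  cidr: 172.31.0.0/16
  subnets:
    us-west-2:
    - cidr: 172.31.48.0/20
      az: us-west-2a
```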