Diffstat (limited to 'roles')
38 files changed, 1724 insertions, 59 deletions
diff --git a/roles/lib_utils/library/iam_cert23.py b/roles/lib_utils/library/iam_cert23.py new file mode 100644 index 000000000..07b3d3bdf --- /dev/null +++ b/roles/lib_utils/library/iam_cert23.py @@ -0,0 +1,314 @@ +#!/usr/bin/python +# pylint: skip-file +# flake8: noqa +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: iam_cert +short_description: Manage server certificates for use on ELBs and CloudFront +description: + - Allows for the management of server certificates +version_added: "2.0" +options: + name: + description: + - Name of certificate to add, update or remove. + required: true + new_name: + description: + - When state is present, this will update the name of the cert. + - The cert, key and cert_chain parameters will be ignored if this is defined. + new_path: + description: + - When state is present, this will update the path of the cert. + - The cert, key and cert_chain parameters will be ignored if this is defined. + state: + description: + - Whether to create(or update) or delete certificate. + - If new_path or new_name is defined, specifying present will attempt to make an update these. + required: true + choices: [ "present", "absent" ] + path: + description: + - When creating or updating, specify the desired path of the certificate. + default: "/" + cert_chain: + description: + - The path to, or content of the CA certificate chain in PEM encoded format. + As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content. + cert: + description: + - The path to, or content of the certificate body in PEM encoded format. + As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content. + key: + description: + - The path to, or content of the private key in PEM encoded format. + As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content. + dup_ok: + description: + - By default the module will not upload a certificate that is already uploaded into AWS. + If set to True, it will upload the certificate as long as the name is unique. + default: False + + +requirements: [ "boto" ] +author: Jonathan I. 
Davila +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Basic server certificate upload from local file +- iam_cert: + name: very_ssl + state: present + cert: "{{ lookup('file', 'path/to/cert') }}" + key: "{{ lookup('file', 'path/to/key') }}" + cert_chain: "{{ lookup('file', 'path/to/certchain') }}" + +# Basic server certificate upload +- iam_cert: + name: very_ssl + state: present + cert: path/to/cert + key: path/to/key + cert_chain: path/to/certchain + +# Server certificate upload using key string +- iam_cert: + name: very_ssl + state: present + path: "/a/cert/path/" + cert: body_of_somecert + key: vault_body_of_privcertkey + cert_chain: body_of_myverytrustedchain + +# Basic rename of existing certificate +- iam_cert: + name: very_ssl + new_name: new_very_ssl + state: present + +''' +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, connect_to_aws +import os + +try: + import boto + import boto.iam + import boto.ec2 + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +def boto_exception(err): + '''generic error message handler''' + if hasattr(err, 'error_message'): + error = err.error_message + elif hasattr(err, 'message'): + error = err.message + else: + error = '%s: %s' % (Exception, err) + + return error + + +def cert_meta(iam, name): + certificate = iam.get_server_certificate(name).get_server_certificate_result.server_certificate + ocert = certificate.certificate_body + opath = certificate.server_certificate_metadata.path + ocert_id = certificate.server_certificate_metadata.server_certificate_id + upload_date = certificate.server_certificate_metadata.upload_date + exp = certificate.server_certificate_metadata.expiration + arn = certificate.server_certificate_metadata.arn + return opath, ocert, ocert_id, upload_date, exp, arn + + +def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok): + update = False + + # IAM cert names are case insensitive + names_lower = [n.lower() for n in [name, new_name] if n is not None] + orig_cert_names_lower = [ocn.lower() for ocn in orig_cert_names] + + if any(ct in orig_cert_names_lower for ct in names_lower): + for i_name in names_lower: + if cert is not None: + try: + c_index = orig_cert_names_lower.index(i_name) + except NameError: + continue + else: + # NOTE: remove the carriage return to strictly compare the cert bodies. + slug_cert = cert.replace('\r', '') + slug_orig_cert_bodies = orig_cert_bodies[c_index].replace('\r', '') + if slug_orig_cert_bodies == slug_cert: + update = True + break + elif slug_cert.startswith(slug_orig_cert_bodies): + update = True + break + elif slug_orig_cert_bodies != slug_cert: + module.fail_json(changed=False, msg='A cert with the name %s already exists and' + ' has a different certificate body associated' + ' with it. 
Certificates cannot have the same name' % orig_cert_names[c_index]) + else: + update = True + break + elif cert in orig_cert_bodies and not dup_ok: + for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies): + if crt_body == cert: + module.fail_json(changed=False, msg='This certificate already' + ' exists under the name %s' % crt_name) + + return update + + +def cert_action(module, iam, name, cpath, new_name, new_path, state, + cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok): + if state == 'present': + update = dup_check(module, iam, name, new_name, cert, orig_cert_names, + orig_cert_bodies, dup_ok) + if update: + opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name) + changed = True + if new_name and new_path: + iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path) + module.exit_json(changed=changed, original_name=name, new_name=new_name, + original_path=opath, new_path=new_path, cert_body=ocert, + upload_date=upload_date, expiration_date=exp, arn=arn) + elif new_name and not new_path: + iam.update_server_cert(name, new_cert_name=new_name) + module.exit_json(changed=changed, original_name=name, new_name=new_name, + cert_path=opath, cert_body=ocert, + upload_date=upload_date, expiration_date=exp, arn=arn) + elif not new_name and new_path: + iam.update_server_cert(name, new_path=new_path) + module.exit_json(changed=changed, name=new_name, + original_path=opath, new_path=new_path, cert_body=ocert, + upload_date=upload_date, expiration_date=exp, arn=arn) + else: + changed = False + module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert, + upload_date=upload_date, expiration_date=exp, arn=arn, + msg='No new path or name specified. No changes made') + else: + changed = True + iam.upload_server_cert(name, cert, key, cert_chain=cert_chain, path=cpath) + opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name) + module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert, + upload_date=upload_date, expiration_date=exp, arn=arn) + elif state == 'absent': + if name in orig_cert_names: + changed = True + iam.delete_server_cert(name) + module.exit_json(changed=changed, deleted_cert=name) + else: + changed = False + module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name) + + +def load_data(cert, key, cert_chain): + # if paths are provided rather than lookups read the files and return the contents + if cert and os.path.isfile(cert): + cert = open(cert, 'r').read().rstrip() + if key and os.path.isfile(key): + key = open(key, 'r').read().rstrip() + if cert_chain and os.path.isfile(cert_chain): + cert_chain = open(cert_chain, 'r').read() + return cert, key, cert_chain + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(), + cert=dict(), + key=dict(no_log=True), + cert_chain=dict(), + new_name=dict(), + path=dict(default='/'), + new_path=dict(), + dup_ok=dict(type='bool') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['new_path', 'key'], + ['new_path', 'cert'], + ['new_path', 'cert_chain'], + ['new_name', 'key'], + ['new_name', 'cert'], + ['new_name', 'cert_chain'], + ], + ) + + if not HAS_BOTO: + module.fail_json(msg="Boto is required for this module") + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) + + try: + if region: + iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs) + 
else: + iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg=str(e)) + + state = module.params.get('state') + name = module.params.get('name') + path = module.params.get('path') + new_name = module.params.get('new_name') + new_path = module.params.get('new_path') + dup_ok = module.params.get('dup_ok') + if state == 'present' and not new_name and not new_path: + cert, key, cert_chain = load_data(cert=module.params.get('cert'), + key=module.params.get('key'), + cert_chain=module.params.get('cert_chain')) + else: + cert = key = cert_chain = None + + orig_cert_names = [ctb['server_certificate_name'] for ctb in + iam.get_all_server_certs().list_server_certificates_result.server_certificate_metadata_list] + orig_cert_bodies = [iam.get_server_certificate(thing).get_server_certificate_result.certificate_body + for thing in orig_cert_names] + if new_name == name: + new_name = None + if new_path == path: + new_path = None + + changed = False + try: + cert_action(module, iam, name, path, new_name, new_path, state, + cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok) + except boto.exception.BotoServerError as err: + module.fail_json(changed=changed, msg=str(err), debug=[cert, key]) + + +if __name__ == '__main__': + main() diff --git a/roles/lib_utils/library/oo_iam_kms.py b/roles/lib_utils/library/oo_iam_kms.py new file mode 100644 index 000000000..c85745f01 --- /dev/null +++ b/roles/lib_utils/library/oo_iam_kms.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python +''' +ansible module for creating AWS IAM KMS keys +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# AWS IAM KMS ansible module +# +# +# Copyright 2016 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Jenkins environment doesn't have all the required libraries +# pylint: disable=import-error +import time +import boto3 +# Ansible modules need this wildcard import +# pylint: disable=unused-wildcard-import, wildcard-import, redefined-builtin +from ansible.module_utils.basic import AnsibleModule + +AWS_ALIAS_URL = "http://docs.aws.amazon.com/kms/latest/developerguide/programming-aliases.html" + + +class AwsIamKms(object): + ''' + ansible module for AWS IAM KMS + ''' + + def __init__(self): + ''' constructor ''' + self.module = None + self.kms_client = None + self.aliases = None + + @staticmethod + def valid_alias_name(user_alias): + ''' AWS KMS aliases must start with 'alias/' ''' + valid_start = 'alias/' + if user_alias.startswith(valid_start): + return True + + return False + + def get_all_kms_info(self): + '''fetch all kms info and return them + + list_keys doesn't have information regarding aliases + list_aliases doesn't have the full kms arn + + fetch both and join them on the targetKeyId + ''' + aliases = self.kms_client.list_aliases()['Aliases'] + keys = self.kms_client.list_keys()['Keys'] + + for alias in aliases: + for key in keys: + if 'TargetKeyId' in alias and 'KeyId' in key: + if alias['TargetKeyId'] == key['KeyId']: + alias.update(key) + + return aliases + + def get_kms_entry(self, user_alias, alias_list): + ''' return single alias details from list of aliases ''' + for alias in alias_list: + if user_alias == alias.get('AliasName', False): + return alias + + msg = "Did not find alias {}".format(user_alias) + self.module.exit_json(failed=True, results=msg) + + @staticmethod + def exists(user_alias, alias_list): + ''' Check if KMS alias already exists ''' + for alias in alias_list: + if user_alias == alias.get('AliasName'): + return True + + return False + + def main(self): + ''' entry point for module ''' + + self.module = AnsibleModule( + argument_spec=dict( + state=dict(default='list', choices=['list', 'present'], type='str'), + region=dict(default=None, required=True, type='str'), + alias=dict(default=None, type='str'), + # description default cannot be None + description=dict(default='', type='str'), + aws_access_key=dict(default=None, type='str'), + aws_secret_key=dict(default=None, type='str'), + ), + ) + + state = self.module.params['state'] + aws_access_key = self.module.params['aws_access_key'] + aws_secret_key = self.module.params['aws_secret_key'] + if aws_access_key and aws_secret_key: + boto3.setup_default_session(aws_access_key_id=aws_access_key, + aws_secret_access_key=aws_secret_key, + region_name=self.module.params['region']) + else: + boto3.setup_default_session(region_name=self.module.params['region']) + + self.kms_client = boto3.client('kms') + + aliases = self.get_all_kms_info() + + if state == 'list': + if self.module.params['alias'] is not None: + user_kms = self.get_kms_entry(self.module.params['alias'], + aliases) + self.module.exit_json(changed=False, results=user_kms, + state="list") + else: + self.module.exit_json(changed=False, results=aliases, + state="list") + + if state == 'present': + + # early sanity check to make sure the alias name conforms with + # AWS alias name requirements + if not self.valid_alias_name(self.module.params['alias']): + self.module.exit_json(failed=True, changed=False, + results="Alias must start with the prefix " + + "'alias/'. 
Please see " + AWS_ALIAS_URL, + state='present') + + if not self.exists(self.module.params['alias'], aliases): + # if we didn't find it, create it + response = self.kms_client.create_key(KeyUsage='ENCRYPT_DECRYPT', + Description=self.module.params['description']) + kid = response['KeyMetadata']['KeyId'] + response = self.kms_client.create_alias(AliasName=self.module.params['alias'], + TargetKeyId=kid) + # sleep for a bit so that the KMS data can be queried + time.sleep(10) + # get details for newly created KMS entry + new_alias_list = self.kms_client.list_aliases()['Aliases'] + user_kms = self.get_kms_entry(self.module.params['alias'], + new_alias_list) + + self.module.exit_json(changed=True, results=user_kms, + state='present') + + # already exists, normally we would check whether we need to update it + # but this module isn't written to allow changing the alias name + # or changing whether the key is enabled/disabled + user_kms = self.get_kms_entry(self.module.params['alias'], aliases) + self.module.exit_json(changed=False, results=user_kms, + state="present") + + self.module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + + +if __name__ == '__main__': + AwsIamKms().main() diff --git a/roles/openshift_ami_prep/defaults/main.yml b/roles/openshift_ami_prep/defaults/main.yml new file mode 100644 index 000000000..2ba6d8eae --- /dev/null +++ b/roles/openshift_ami_prep/defaults/main.yml @@ -0,0 +1,50 @@ +--- + + +r_openshift_ami_prep_packages: +- atomic-openshift-master +- atomic-openshift-node +- atomic-openshift-docker-excluder +- atomic-openshift-sdn-ovs +- openvswitch +- docker +- etcd +#- pcs +- haproxy +- dnsmasq +- ntp +- logrotate +- httpd-tools +- bind +- firewalld +- libselinux-python +- conntrack-tools +- openssl +- cloud-init +- iproute +- python-dbus +- PyYAML +- yum-utils +- python2-boto +- python2-boto3 +- cloud-utils-growpart +# gluster +- glusterfs-fuse +- heketi-client +# nfs +- nfs-utils +- flannel +- bash-completion +# cockpit +- cockpit-ws +- cockpit-system +- cockpit-bridge +- cockpit-docker +# iscsi +- iscsi-initiator-utils +# ceph +- ceph-common +# systemcontainer +# - runc +# - container-selinux +# - atomic diff --git a/roles/openshift_ami_prep/tasks/main.yml b/roles/openshift_ami_prep/tasks/main.yml new file mode 100644 index 000000000..98f7bc0e2 --- /dev/null +++ b/roles/openshift_ami_prep/tasks/main.yml @@ -0,0 +1,42 @@ +--- +- name: install repositories + include: yum_repos.yml + static: yes + +- name: install needed rpm(s) + package: + name: "{{ item }}" + state: present + with_items: "{{ r_openshift_ami_prep_packages }}" + +- name: create the directory for node + file: + state: directory + path: "/etc/systemd/system/{{ r_openshift_ami_prep_node }}.service.d" + +- name: laydown systemd override + copy: + dest: "/etc/systemd/system/{{ r_openshift_ami_prep_node }}.service.d/override.conf" + content: | + [Unit] + After=cloud-init.service + +- name: update the sysconfig to have KUBECONFIG + lineinfile: + dest: "/etc/sysconfig/{{ r_openshift_ami_prep_node }}" + line: "KUBECONFIG=/root/csr_kubeconfig" + regexp: "^KUBECONFIG=.*" + +- name: update the ExecStart to have bootstrap + lineinfile: + dest: "/usr/lib/systemd/system/{{ r_openshift_ami_prep_node }}.service" + line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}" + regexp: "^ExecStart=.*" + +- name: systemctl enable origin-node + systemd: + name: "{{ item }}" + enabled: no + with_items: + - "{{ 
r_openshift_ami_prep_node }}.service" + - "{{ r_openshift_ami_prep_master }}.service" diff --git a/roles/openshift_ami_prep/tasks/yum_repos.yml b/roles/openshift_ami_prep/tasks/yum_repos.yml new file mode 100644 index 000000000..c48c67ac2 --- /dev/null +++ b/roles/openshift_ami_prep/tasks/yum_repos.yml @@ -0,0 +1,14 @@ +--- +- name: Create our install repository + yum_repository: + description: "{{ item.description | default(omit) }}" + name: "{{ item.name }}" + baseurl: "{{ item.baseurl }}" + gpgkey: "{{ item.gpgkey | default(omit)}}" + gpgcheck: "{{ item.gpgcheck | default(1) }}" + sslverify: "{{ item.sslverify | default(1) }}" + sslclientkey: "{{ item.sslclientkey | default(omit) }}" + sslclientcert: "{{ item.sslclientcert | default(omit) }}" + file: "{{ item.file }}" + enabled: "{{ item.enabled }}" + with_items: "{{ r_openshift_ami_prep_yum_repositories }}" diff --git a/roles/openshift_aws_ami_copy/README.md b/roles/openshift_aws_ami_copy/README.md new file mode 100644 index 000000000..111818451 --- /dev/null +++ b/roles/openshift_aws_ami_copy/README.md @@ -0,0 +1,50 @@ +openshift_aws_ami_perms +========= + +Ansible role for copying an AMI + +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- + +- openshift_aws_ami_copy_src_ami: source AMI id to copy from +- openshift_aws_ami_copy_region: region where the AMI is found +- openshift_aws_ami_copy_name: name to assign to new AMI +- openshift_aws_ami_copy_kms_arn: AWS IAM KMS arn of the key to use for encryption +- openshift_aws_ami_copy_tags: dict with desired tags +- openshift_aws_ami_copy_wait: wait for the ami copy to achieve available status. This fails due to boto waiters. + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml + - name: copy the ami for encrypted disks + include_role: + name: openshift_aws_ami_copy + vars: + r_openshift_aws_ami_copy_region: us-east-1 + r_openshift_aws_ami_copy_name: myami + r_openshift_aws_ami_copy_src_ami: ami-1234 + r_openshift_aws_ami_copy_kms_arn: arn:xxxx + r_openshift_aws_ami_copy_tags: {} + r_openshift_aws_ami_copy_encrypt: False + +``` + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_ami_copy/tasks/main.yml b/roles/openshift_aws_ami_copy/tasks/main.yml new file mode 100644 index 000000000..15444c8d0 --- /dev/null +++ b/roles/openshift_aws_ami_copy/tasks/main.yml @@ -0,0 +1,26 @@ +--- +- fail: + msg: "{{ item }} needs to be defined" + when: "{{ item }} is not defined" + with_items: + - r_openshift_aws_ami_copy_src_ami + - r_openshift_aws_ami_copy_name + - r_openshift_aws_ami_copy_region + +- name: "Create copied AMI image and wait: {{ r_openshift_aws_ami_copy_wait | default(False) }}" + ec2_ami_copy: + region: "{{ r_openshift_aws_ami_copy_region }}" + source_region: "{{ r_openshift_aws_ami_copy_region }}" + name: "{{ r_openshift_aws_ami_copy_name }}" + source_image_id: "{{ r_openshift_aws_ami_copy_src_ami }}" + encrypted: "{{ r_openshift_aws_ami_copy_encrypt | default(False) }}" + kms_key_id: "{{ r_openshift_aws_ami_copy_kms_arn | default(omit) }}" + wait: "{{ r_openshift_aws_ami_copy_wait | default(omit) }}" + tags: "{{ r_openshift_aws_ami_copy_tags }}" + register: copy_result + +- debug: var=copy_result + +- name: return AMI ID with setfact - openshift_aws_ami_copy_retval_custom_ami + set_fact: + r_openshift_aws_ami_copy_retval_custom_ami: "{{ copy_result.image_id }}" diff --git a/roles/openshift_aws_elb/README.md b/roles/openshift_aws_elb/README.md new file mode 
100644 index 000000000..ecc45fa14 --- /dev/null +++ b/roles/openshift_aws_elb/README.md @@ -0,0 +1,75 @@ +openshift_aws_elb +========= + +Ansible role to provision and manage AWS ELB's for Openshift. + +Requirements +------------ + +Ansible Modules: + +- ec2_elb +- ec2_elb_lb + +python package: + +python-boto + +Role Variables +-------------- + +- r_openshift_aws_elb_instances: instances to put in ELB +- r_openshift_aws_elb_elb_name: name of elb +- r_openshift_aws_elb_security_group_names: list of SGs (by name) that the ELB will belong to +- r_openshift_aws_elb_region: AWS Region +- r_openshift_aws_elb_health_check: definition of the ELB health check. See ansible docs for ec2_elb +```yaml + ping_protocol: tcp + ping_port: 443 + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 2 +``` +- r_openshift_aws_elb_listeners: definition of the ELB listeners. See ansible docs for ec2_elb +```yaml +- protocol: tcp + load_balancer_port: 80 + instance_protocol: ssl + instance_port: 443 +- protocol: ssl + load_balancer_port: 443 + instance_protocol: ssl + instance_port: 443 + # ssl certificate required for https or ssl + ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}" +``` + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml +- include_role: + name: openshift_aws_elb + vars: + r_openshift_aws_elb_instances: aws_instances_to_put_in_elb + r_openshift_aws_elb_elb_name: elb_name + r_openshift_aws_elb_security_groups: security_group_names + r_openshift_aws_elb_region: aws_region + r_openshift_aws_elb_health_check: "{{ elb_health_check_definition }}" + r_openshift_aws_elb_listeners: "{{ elb_listeners_definition }}" +``` + + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_elb/defaults/main.yml b/roles/openshift_aws_elb/defaults/main.yml new file mode 100644 index 000000000..ed5d38079 --- /dev/null +++ b/roles/openshift_aws_elb/defaults/main.yml @@ -0,0 +1,33 @@ +--- +r_openshift_aws_elb_health_check: + ping_protocol: tcp + ping_port: 443 + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 2 + +r_openshift_aws_elb_cert_arn: '' + +r_openshift_aws_elb_listeners: + master: + external: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: ssl + instance_port: 443 + - protocol: ssl + load_balancer_port: 443 + instance_protocol: ssl + instance_port: 443 + # ssl certificate required for https or ssl + ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}" + internal: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: tcp + instance_port: 80 + - protocol: tcp + load_balancer_port: 443 + instance_protocol: tcp + instance_port: 443 diff --git a/roles/openshift_aws_elb/meta/main.yml b/roles/openshift_aws_elb/meta/main.yml new file mode 100644 index 000000000..58be652a5 --- /dev/null +++ b/roles/openshift_aws_elb/meta/main.yml @@ -0,0 +1,12 @@ +--- +galaxy_info: + author: OpenShift + description: Openshift ELB provisioning + company: Red Hat, Inc + license: ASL 2.0 + min_ansible_version: 1.2 + platforms: + - name: EL + versions: + - 7 +dependencies: [] diff --git a/roles/openshift_aws_elb/tasks/main.yml b/roles/openshift_aws_elb/tasks/main.yml new file mode 100644 index 000000000..64ec18545 --- /dev/null +++ b/roles/openshift_aws_elb/tasks/main.yml @@ -0,0 +1,57 @@ +--- +- name: fetch the default subnet id + ec2_remote_facts: + region: "{{ r_openshift_aws_elb_region }}" + filters: "{{ r_openshift_aws_elb_instance_filter }}" + 
register: instancesout + +- name: fetch the default subnet id + ec2_vpc_subnet_facts: + region: "{{ r_openshift_aws_elb_region }}" + filters: + "tag:Name": "{{ r_openshift_aws_elb_subnet_name }}" + register: subnetout + +- name: + debug: + msg: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction] + if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type + else r_openshift_aws_elb_listeners }}" + +- name: "Create ELB {{ r_openshift_aws_elb_name }}" + ec2_elb_lb: + name: "{{ r_openshift_aws_elb_name }}" + state: present + security_group_names: "{{ r_openshift_aws_elb_security_groups }}" + idle_timeout: "{{ r_openshift_aws_elb_idle_timout }}" + region: "{{ r_openshift_aws_elb_region }}" + subnets: + - "{{ subnetout.subnets[0].id }}" + health_check: "{{ r_openshift_aws_elb_health_check }}" + listeners: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction] + if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type + else r_openshift_aws_elb_listeners }}" + scheme: "{{ r_openshift_aws_elb_scheme }}" + tags: + KubernetesCluster: "{{ r_openshift_aws_elb_clusterid }}" + register: new_elb + +# It is necessary to ignore_errors here because the instances are not in 'ready' +# state when first added to ELB +- name: "Add instances to ELB {{ r_openshift_aws_elb_name }}" + ec2_elb: + instance_id: "{{ item.id }}" + ec2_elbs: "{{ r_openshift_aws_elb_name }}" + state: present + region: "{{ r_openshift_aws_elb_region }}" + wait: False + with_items: "{{ instancesout.instances }}" + ignore_errors: True + retries: 10 + register: elb_call + until: elb_call|succeeded + +- debug: + msg: "{{ item }}" + with_items: + - "{{ new_elb }}" diff --git a/roles/openshift_aws_iam_kms/README.md b/roles/openshift_aws_iam_kms/README.md new file mode 100644 index 000000000..9468e785c --- /dev/null +++ b/roles/openshift_aws_iam_kms/README.md @@ -0,0 +1,43 @@ +openshift_aws_iam_kms +========= + +Ansible role to create AWS IAM KMS keys for encryption + +Requirements +------------ + +Ansible Modules: + +oo_iam_kms + +Role Variables +-------------- + +- r_openshift_aws_iam_kms_region: AWS region to create KMS key +- r_openshift_aws_iam_kms_alias: Alias name to assign to created KMS key + +Dependencies +------------ + +lib_utils + +Example Playbook +---------------- +```yaml +- include_role: + name: openshift_aws_iam_kms + vars: + r_openshift_aws_iam_kms_region: 'us-east-1' + r_openshift_aws_iam_kms_alias: 'alias/clusterABC_kms' +``` + + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/lib_utils/tasks/main.yml b/roles/openshift_aws_iam_kms/defaults/main.yml index ed97d539c..ed97d539c 100644 --- a/roles/lib_utils/tasks/main.yml +++ b/roles/openshift_aws_iam_kms/defaults/main.yml diff --git a/roles/openshift_aws_iam_kms/meta/main.yml b/roles/openshift_aws_iam_kms/meta/main.yml new file mode 100644 index 000000000..e29aaf96b --- /dev/null +++ b/roles/openshift_aws_iam_kms/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: + author: OpenShift + description: AWS IAM KMS setup and management + company: Red Hat, Inc + license: ASL 2.0 + min_ansible_version: 1.2 + platforms: + - name: EL + versions: + - 7 +dependencies: +- lib_utils diff --git a/roles/openshift_aws_iam_kms/tasks/main.yml b/roles/openshift_aws_iam_kms/tasks/main.yml new file mode 100644 index 000000000..b541b466c --- /dev/null +++ b/roles/openshift_aws_iam_kms/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- fail: 
+ msg: "{{ item.name }} needs to be defined." + when: "{{ item.cond }}" + with_items: + - name: "{{ r_openshift_aws_iam_kms_alias }}" + cond: "{{ r_openshift_aws_iam_kms_alias is undefined }}" + - name: "{{ r_openshift_aws_iam_kms_region }}" + cond: "{{ r_openshift_aws_iam_kms_region is undefined }}" + +- name: Create IAM KMS key with alias + oo_iam_kms: + state: present + alias: "{{ r_openshift_aws_iam_kms_alias }}" + region: "{{ r_openshift_aws_iam_kms_region }}" + register: created_kms + +- debug: var=created_kms.results diff --git a/roles/openshift_aws_launch_config/README.md b/roles/openshift_aws_launch_config/README.md new file mode 100644 index 000000000..52b7e83b6 --- /dev/null +++ b/roles/openshift_aws_launch_config/README.md @@ -0,0 +1,72 @@ +openshift_aws_launch_config +========= + +Ansible role to create an AWS launch config for a scale group. + +This includes the AMI, volumes, user_data, etc. + +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- +- r_openshift_aws_launch_config_name: "{{ launch_config_name }}" +- r_openshift_aws_launch_config_clusterid: "{{ clusterid }}" +- r_openshift_aws_launch_config_region: "{{ region }}" +- r_openshift_aws_launch_config: "{{ node_group_config }}" +```yaml + master: + instance_type: m4.xlarge + ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced + volumes: + - device_name: /dev/sdb + volume_size: 100 + device_type: gp2 + delete_on_termination: False + health_check: + period: 60 + type: EC2 + min_size: 3 + max_size: 3 + desired_size: 3 + tags: + host-type: master + sub-host-type: default + wait_for_instances: True +``` +- r_openshift_aws_launch_config_type: compute +- r_openshift_aws_launch_config_custom_image: ami-xxxxx +- r_openshift_aws_launch_config_bootstrap_token: <string of kubeconfig> + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml + - name: create compute nodes config + include_role: + name: openshift_aws_launch_config + vars: + r_openshift_aws_launch_config_name: "{{ launch_config_name }}" + r_openshift_aws_launch_config_clusterid: "{{ clusterid }}" + r_openshift_aws_launch_config_region: "{{ region }}" + r_openshift_aws_launch_config: "{{ node_group_config }}" + r_openshift_aws_launch_config_type: compute + r_openshift_aws_launch_config_custom_image: ami-1234 + r_openshift_aws_launch_config_bootstrap_token: abcd +``` + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_launch_config/defaults/main.yml b/roles/openshift_aws_launch_config/defaults/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/openshift_aws_launch_config/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/openshift_aws_launch_config/meta/main.yml b/roles/openshift_aws_launch_config/meta/main.yml new file mode 100644 index 000000000..e61670cc2 --- /dev/null +++ b/roles/openshift_aws_launch_config/meta/main.yml @@ -0,0 +1,12 @@ +--- +galaxy_info: + author: OpenShift + description: Openshift AWS VPC creation + company: Red Hat, Inc + license: ASL 2.0 + min_ansible_version: 2.3 + platforms: + - name: EL + versions: + - 7 +dependencies: [] diff --git a/roles/openshift_aws_launch_config/tasks/main.yml b/roles/openshift_aws_launch_config/tasks/main.yml new file mode 100644 index 000000000..437cf1f71 --- /dev/null +++ b/roles/openshift_aws_launch_config/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: fail when params are not set + fail: + msg: Please specify the role parameters. 
+ when: + - r_openshift_aws_launch_config_cluseterid is undefined + - r_openshift_aws_launch_config_type is undefined + - r_openshift_aws_launch_config_region is undefined + - r_openshift_aws_launch_config is undefined + +- name: fetch the security groups for launch config + ec2_group_facts: + filters: + group-name: + - "{{ r_openshift_aws_launch_config_clusterid }}" # default sg + - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}" # node type sg + - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}_k8s" # node type sg k8s + region: "{{ r_openshift_aws_launch_config_region }}" + register: ec2sgs + +# Create the scale group config +- name: Create the node scale group config + ec2_lc: + name: "{{ r_openshift_aws_launch_config_name }}" + region: "{{ r_openshift_aws_launch_config_region }}" + image_id: "{{ r_openshift_aws_launch_config_custom_image if 'ami-' in r_openshift_aws_launch_config_custom_image else r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].ami }}" + instance_type: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].instance_type }}" + security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}" + user_data: |- + #cloud-config + {% if r_openshift_aws_launch_config_type != 'master' %} + write_files: + - path: /root/csr_kubeconfig + owner: root:root + permissions: '0640' + content: {{ r_openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }} + - path: /root/openshift_settings + owner: root:root + permissions: '0640' + content: + openshift_type: "{{ r_openshift_aws_launch_config_type }}" + runcmd: + - [ systemctl, enable, atomic-openshift-node] + - [ systemctl, start, atomic-openshift-node] + {% endif %} + key_name: "{{ r_openshift_aws_launch_config.ssh_key_name }}" + ebs_optimized: False + volumes: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].volumes }}" + assign_public_ip: True + register: test diff --git a/roles/openshift_aws_launch_config/templates/cloud-init.j2 b/roles/openshift_aws_launch_config/templates/cloud-init.j2 new file mode 100644 index 000000000..1a1e29550 --- /dev/null +++ b/roles/openshift_aws_launch_config/templates/cloud-init.j2 @@ -0,0 +1,9 @@ +{% if r_openshift_aws_launch_config_bootstrap_token is defined and r_openshift_aws_launch_config_bootstrap_token is not '' %} +#cloud-config +write_files: +- path: /root/csr_kubeconfig + owner: root:root + permissions: '0640' + content: |- + {{ r_openshift_aws_launch_config_bootstrap_token }} +{% endif %} diff --git a/roles/openshift_aws_node_group/README.md b/roles/openshift_aws_node_group/README.md new file mode 100644 index 000000000..c32c57bc5 --- /dev/null +++ b/roles/openshift_aws_node_group/README.md @@ -0,0 +1,77 @@ +openshift_aws_node_group +========= + +Ansible role to create an aws node group. + +This includes the security group, launch config, and scale group. 
+ +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- +```yaml +- r_openshift_aws_node_group_name: myscalegroup +- r_openshift_aws_node_group_clusterid: myclusterid +- r_openshift_aws_node_group_region: us-east-1 +- r_openshift_aws_node_group_lc_name: launch_config +- r_openshift_aws_node_group_type: master|infra|compute +- r_openshift_aws_node_group_config: "{{ node_group_config }}" +```yaml +master: + instance_type: m4.xlarge + ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced + volumes: + - device_name: /dev/sdb + volume_size: 100 + device_type: gp2 + delete_on_termination: False + health_check: + period: 60 + type: EC2 + min_size: 3 + max_size: 3 + desired_size: 3 + tags: + host-type: master + sub-host-type: default + wait_for_instances: True +``` +- r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}" + +```yaml +us-east-1a # name of subnet +``` + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml + - name: "create {{ openshift_build_node_type }} node groups" + include_role: + name: openshift_aws_node_group + vars: + r_openshift_aws_node_group_name: "{{ clusterid }} openshift compute" + r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}" + r_openshift_aws_node_group_clusterid: "{{ clusterid }}" + r_openshift_aws_node_group_region: "{{ region }}" + r_openshift_aws_node_group_config: "{{ node_group_config }}" + r_openshift_aws_node_group_type: compute + r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}" +``` + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_node_group/defaults/main.yml b/roles/openshift_aws_node_group/defaults/main.yml new file mode 100644 index 000000000..44c5116a1 --- /dev/null +++ b/roles/openshift_aws_node_group/defaults/main.yml @@ -0,0 +1,58 @@ +--- +r_openshift_aws_node_group_type: master + +r_openshift_aws_node_group_config: + tags: + clusterid: "{{ r_openshift_aws_node_group_clusterid }}" + master: + instance_type: m4.xlarge + ami: "{{ r_openshift_aws_node_group_ami }}" + volumes: + - device_name: /dev/sdb + volume_size: 100 + device_type: gp2 + delete_on_termination: False + health_check: + period: 60 + type: EC2 + min_size: 3 + max_size: 3 + desired_size: 3 + tags: + host-type: master + sub-host-type: default + wait_for_instances: True + compute: + instance_type: m4.xlarge + ami: "{{ r_openshift_aws_node_group_ami }}" + volumes: + - device_name: /dev/sdb + volume_size: 100 + device_type: gp2 + delete_on_termination: True + health_check: + period: 60 + type: EC2 + min_size: 3 + max_size: 100 + desired_size: 3 + tags: + host-type: node + sub-host-type: compute + infra: + instance_type: m4.xlarge + ami: "{{ r_openshift_aws_node_group_ami }}" + volumes: + - device_name: /dev/sdb + volume_size: 100 + device_type: gp2 + delete_on_termination: True + health_check: + period: 60 + type: EC2 + min_size: 2 + max_size: 20 + desired_size: 2 + tags: + host-type: node + sub-host-type: infra diff --git a/roles/openshift_aws_node_group/tasks/main.yml b/roles/openshift_aws_node_group/tasks/main.yml new file mode 100644 index 000000000..6f5364b03 --- /dev/null +++ b/roles/openshift_aws_node_group/tasks/main.yml @@ -0,0 +1,32 @@ +--- +- name: validate role inputs + fail: + msg: Please pass in the required role variables + when: + - r_openshift_aws_node_group_clusterid is not defined + - r_openshift_aws_node_group_region is not defined + - r_openshift_aws_node_group_subnet_name is not defined + +- name: 
fetch the subnet to use in scale group + ec2_vpc_subnet_facts: + region: "{{ r_openshift_aws_node_group_region }}" + filters: + "tag:Name": "{{ r_openshift_aws_node_group_subnet_name }}" + register: subnetout + +- name: Create the scale group + ec2_asg: + name: "{{ r_openshift_aws_node_group_name }}" + launch_config_name: "{{ r_openshift_aws_node_group_lc_name }}" + health_check_period: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.period }}" + health_check_type: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.type }}" + min_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].min_size }}" + max_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].max_size }}" + desired_capacity: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].desired_size }}" + region: "{{ r_openshift_aws_node_group_region }}" + termination_policies: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].termination_policy if 'termination_policy' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}" + load_balancers: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].elbs if 'elbs' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}" + wait_for_instances: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].wait_for_instances | default(False)}}" + vpc_zone_identifier: "{{ subnetout.subnets[0].id }}" + tags: + - "{{ r_openshift_aws_node_group_config.tags | combine(r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].tags) }}" diff --git a/roles/openshift_aws_s3/README.md b/roles/openshift_aws_s3/README.md new file mode 100644 index 000000000..afafe61cf --- /dev/null +++ b/roles/openshift_aws_s3/README.md @@ -0,0 +1,43 @@ +openshift_aws_s3 +========= + +Ansible role to create an s3 bucket + +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- + +- r_openshift_aws_s3_clusterid: myclusterid +- r_openshift_aws_s3_region: us-east-1 +- r_openshift_aws_s3_mode: create|delete + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml +- name: create an s3 bucket + include_role: + name: openshift_aws_s3 + vars: + r_openshift_aws_s3_clusterid: mycluster + r_openshift_aws_s3_region: us-east-1 + r_openshift_aws_s3_mode: create +``` + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_s3/tasks/main.yml b/roles/openshift_aws_s3/tasks/main.yml new file mode 100644 index 000000000..46bd781bd --- /dev/null +++ b/roles/openshift_aws_s3/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- name: Create an s3 bucket + s3: + bucket: "{{ r_openshift_aws_s3_clusterid }}" + mode: "{{ r_openshift_aws_s3_mode }}" + region: "{{ r_openshift_aws_s3_region }}" diff --git a/roles/openshift_aws_sg/README.md b/roles/openshift_aws_sg/README.md new file mode 100644 index 000000000..eeb76bbb6 --- /dev/null +++ b/roles/openshift_aws_sg/README.md @@ -0,0 +1,59 @@ +openshift_aws_sg +========= + +Ansible role to create an aws security groups + +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- + +- r_openshift_aws_sg_clusterid: myclusterid +- r_openshift_aws_sg_region: us-east-1 +- r_openshift_aws_sg_type: master|infra|compute +```yaml +# defaults/main.yml + default: + name: "{{ r_openshift_aws_sg_clusterid }}" + desc: "{{ r_openshift_aws_sg_clusterid }} 
default" + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + - proto: all + from_port: all + to_port: all + group_name: "{{ r_openshift_aws_sg_clusterid }}" +``` + + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml +- name: create security groups for master + include_role: + name: openshift_aws_sg + vars: + r_openshift_aws_sg_clusterid: mycluster + r_openshift_aws_sg_region: us-east-1 + r_openshift_aws_sg_type: master +``` + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_sg/defaults/main.yml b/roles/openshift_aws_sg/defaults/main.yml new file mode 100644 index 000000000..9c480d337 --- /dev/null +++ b/roles/openshift_aws_sg/defaults/main.yml @@ -0,0 +1,48 @@ +--- +r_openshift_aws_sg_sg: + default: + name: "{{ r_openshift_aws_sg_clusterid }}" + desc: "{{ r_openshift_aws_sg_clusterid }} default" + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + - proto: all + from_port: all + to_port: all + group_name: "{{ r_openshift_aws_sg_clusterid }}" + master: + name: "{{ r_openshift_aws_sg_clusterid }}_master" + desc: "{{ r_openshift_aws_sg_clusterid }} master instances" + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 443 + to_port: 443 + cidr_ip: 0.0.0.0/0 + compute: + name: "{{ r_openshift_aws_sg_clusterid }}_compute" + desc: "{{ r_openshift_aws_sg_clusterid }} compute node instances" + infra: + name: "{{ r_openshift_aws_sg_clusterid }}_infra" + desc: "{{ r_openshift_aws_sg_clusterid }} infra node instances" + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 443 + to_port: 443 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 30000 + to_port: 32000 + cidr_ip: 0.0.0.0/0 + etcd: + name: "{{ r_openshift_aws_sg_clusterid }}_etcd" + desc: "{{ r_openshift_aws_sg_clusterid }} etcd instances" diff --git a/roles/openshift_aws_sg/tasks/main.yml b/roles/openshift_aws_sg/tasks/main.yml new file mode 100644 index 000000000..2294fdcc9 --- /dev/null +++ b/roles/openshift_aws_sg/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: Validate role inputs + fail: + msg: Please ensure to pass the correct variables + when: + - r_openshift_aws_sg_region is undefined + - r_openshift_aws_sg_region is undefined + + +- name: Fetch the VPC for vpc.id + ec2_vpc_net_facts: + region: "{{ r_openshift_aws_sg_region }}" + filters: + "tag:Name": "{{ r_openshift_aws_sg_clusterid }}" + register: vpcout + +- name: Create default security group for cluster + ec2_group: + name: "{{ r_openshift_aws_sg_sg.default.name }}" + description: "{{ r_openshift_aws_sg_sg.default.desc }}" + region: "{{ r_openshift_aws_sg_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + rules: "{{ r_openshift_aws_sg_sg.default.rules | default(omit, True)}}" + register: sg_default_created + +- name: create the node group sgs + ec2_group: + name: "{{ item.name}}" + description: "{{ item.desc }}" + rules: "{{ item.rules if 'rules' in item else [] }}" + region: "{{ r_openshift_aws_sg_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + register: sg_create + with_items: + - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type]}}" + +- name: create the k8s sgs for the node group + ec2_group: + name: "{{ item.name }}_k8s" + description: "{{ item.desc }} for k8s" + region: "{{ r_openshift_aws_sg_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + register: k8s_sg_create + with_items: + - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type] 
}}" + +- name: tag sg groups with proper tags + ec2_tag: + tags: + KubernetesCluster: "{{ r_openshift_aws_sg_clusterid }}" + resource: "{{ item.group_id }}" + region: "{{ r_openshift_aws_sg_region }}" + with_items: "{{ k8s_sg_create.results }}" diff --git a/roles/openshift_aws_ssh_keys/README.md b/roles/openshift_aws_ssh_keys/README.md new file mode 100644 index 000000000..4f8667918 --- /dev/null +++ b/roles/openshift_aws_ssh_keys/README.md @@ -0,0 +1,49 @@ +openshift_aws_ssh_keys +========= + +Ansible role for sshind SSH keys + +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- + +- r_openshift_aws_ssh_keys_users: list of dicts of users +- r_openshift_aws_ssh_keys_region: ec2_region to install the keys + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml +users: +- username: user1 + pub_key: <user1 ssh public key> +- username: user2 + pub_key: <user2 ssh public key> + +region: us-east-1 + +- include_role: + name: openshift_aws_ssh_keys + vars: + r_openshift_aws_ssh_keys_users: "{{ users }}" + r_openshift_aws_ssh_keys_region: "{{ region }}" +``` + + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_ssh_keys/tasks/main.yml b/roles/openshift_aws_ssh_keys/tasks/main.yml new file mode 100644 index 000000000..232cf20ed --- /dev/null +++ b/roles/openshift_aws_ssh_keys/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: Add the public keys for the users + ec2_key: + name: "{{ item.key_name }}" + key_material: "{{ item.pub_key }}" + region: "{{ r_openshift_aws_ssh_keys_region }}" + with_items: "{{ r_openshift_aws_ssh_keys_users }}" + no_log: True diff --git a/roles/openshift_aws_vpc/README.md b/roles/openshift_aws_vpc/README.md new file mode 100644 index 000000000..d88cf0581 --- /dev/null +++ b/roles/openshift_aws_vpc/README.md @@ -0,0 +1,62 @@ +openshift_aws_vpc +========= + +Ansible role to create a default AWS VPC + +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- + +- r_openshift_aws_vpc_clusterid: "{{ clusterid }}" +- r_openshift_aws_vpc_cidr: 172.31.48.0/20 +- r_openshift_aws_vpc_subnets: "{{ subnets }}" +```yaml + subnets: + us-east-1: # These are us-east-1 region defaults. 
Ensure this matches your region + - cidr: 172.31.48.0/20 + az: "us-east-1c" + - cidr: 172.31.32.0/20 + az: "us-east-1e" + - cidr: 172.31.16.0/20 + az: "us-east-1a" +``` +- r_openshift_aws_vpc_region: "{{ region }}" +- r_openshift_aws_vpc_tags: dict of tags to apply to vpc +- r_openshift_aws_vpc_name: "{{ vpc_name | default(clusterid) }}" + +Dependencies +------------ + + +Example Playbook +---------------- + +```yaml + - name: create default vpc + include_role: + name: openshift_aws_vpc + vars: + r_openshift_aws_vpc_clusterid: mycluster + r_openshift_aws_vpc_cidr: 172.31.48.0/20 + r_openshift_aws_vpc_subnets: "{{ subnets }}" + r_openshift_aws_vpc_region: us-east-1 + r_openshift_aws_vpc_tags: {} + r_openshift_aws_vpc_name: mycluster + +``` + + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_vpc/defaults/main.yml b/roles/openshift_aws_vpc/defaults/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/openshift_aws_vpc/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/openshift_aws_vpc/tasks/main.yml b/roles/openshift_aws_vpc/tasks/main.yml new file mode 100644 index 000000000..cfe08dae5 --- /dev/null +++ b/roles/openshift_aws_vpc/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: Create AWS VPC + ec2_vpc_net: + state: present + cidr_block: "{{ r_openshift_aws_vpc_cidr }}" + dns_support: True + dns_hostnames: True + region: "{{ r_openshift_aws_vpc_region }}" + name: "{{ r_openshift_aws_vpc_clusterid }}" + tags: + Name: "{{ r_openshift_aws_vpc_clusterid }}" + register: vpc + +- name: Sleep to avoid a race condition when creating the vpc + pause: + seconds: 5 + when: vpc.changed + +- name: assign the vpc igw + ec2_vpc_igw: + region: "{{ r_openshift_aws_vpc_region }}" + vpc_id: "{{ vpc.vpc.id }}" + register: igw + +- name: assign the vpc subnets + ec2_vpc_subnet: + region: "{{ r_openshift_aws_vpc_region }}" + vpc_id: "{{ vpc.vpc.id }}" + cidr: "{{ item.cidr }}" + az: "{{ item.az }}" + resource_tags: + Name: "{{ item.az }}" + with_items: "{{ r_openshift_aws_vpc_subnets[r_openshift_aws_vpc_region] }}" + +- name: Grab the route tables from our VPC + ec2_vpc_route_table_facts: + region: "{{ r_openshift_aws_vpc_region }}" + filters: + vpc-id: "{{ vpc.vpc.id }}" + register: route_table + +- name: update the route table in the vpc + ec2_vpc_route_table: + lookup: id + route_table_id: "{{ route_table.route_tables[0].id }}" + vpc_id: "{{ vpc.vpc.id }}" + region: "{{ r_openshift_aws_vpc_region }}" + tags: + Name: "{{ r_openshift_aws_vpc_name }}" + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + register: route_table_out diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index 13cbfb14e..f0e303e43 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -5,6 +5,9 @@ r_openshift_hosted_router_use_firewalld: False r_openshift_hosted_registry_firewall_enabled: True r_openshift_hosted_registry_use_firewalld: False +openshift_hosted_router_wait: True +openshift_hosted_registry_wait: True + registry_volume_claim: 'registry-claim' openshift_hosted_router_edits: diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index dcd9c87fc..40e6262f9 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -132,34 +132,36 @@ edits: "{{ openshift_hosted_registry_edits }}" force: "{{ True|bool in 
openshift_hosted_registry_force }}" -- name: Ensure OpenShift registry correctly rolls out (best-effort today) - command: | - oc rollout status deploymentconfig {{ openshift_hosted_registry_name }} \ - --namespace {{ openshift_hosted_registry_namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig - async: 600 - poll: 15 - failed_when: false - -- name: Determine the latest version of the OpenShift registry deployment - command: | - {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \ - --namespace {{ openshift_hosted_registry_namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ - -o jsonpath='{ .status.latestVersion }' - register: openshift_hosted_registry_latest_version - -- name: Sanity-check that the OpenShift registry rolled out correctly - command: | - {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \ - --namespace {{ openshift_hosted_registry_namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ - -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' - register: openshift_hosted_registry_rc_phase - until: "'Running' not in openshift_hosted_registry_rc_phase.stdout" - delay: 15 - retries: 40 - failed_when: "'Failed' in openshift_hosted_registry_rc_phase.stdout" +- when: openshift_hosted_registry_wait + block: + - name: Ensure OpenShift registry correctly rolls out (best-effort today) + command: | + oc rollout status deploymentconfig {{ openshift_hosted_registry_name }} \ + --namespace {{ openshift_hosted_registry_namespace }} \ + --config {{ openshift.common.config_base }}/master/admin.kubeconfig + async: 600 + poll: 15 + failed_when: false + + - name: Determine the latest version of the OpenShift registry deployment + command: | + {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \ + --namespace {{ openshift_hosted_registry_namespace }} \ + --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ + -o jsonpath='{ .status.latestVersion }' + register: openshift_hosted_registry_latest_version + + - name: Sanity-check that the OpenShift registry rolled out correctly + command: | + {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \ + --namespace {{ openshift_hosted_registry_namespace }} \ + --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ + -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' + register: openshift_hosted_registry_rc_phase + until: "'Running' not in openshift_hosted_registry_rc_phase.stdout" + delay: 15 + retries: 40 + failed_when: "'Failed' in openshift_hosted_registry_rc_phase.stdout" - include: storage/glusterfs.yml when: diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index 72a1ead80..e57ed733e 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router/router.yml @@ -94,36 +94,38 @@ stats_port: "{{ item.stats_port }}" with_items: "{{ openshift_hosted_routers }}" -- name: Ensure OpenShift router correctly rolls out (best-effort today) - command: | - {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \ - --namespace {{ item.namespace | default('default') }} \ - --config {{ 
openshift.common.config_base }}/master/admin.kubeconfig - async: 600 - poll: 15 - with_items: "{{ openshift_hosted_routers }}" - failed_when: false +- when: openshift_hosted_router_wait + block: + - name: Ensure OpenShift router correctly rolls out (best-effort today) + command: | + {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \ + --namespace {{ item.namespace | default('default') }} \ + --config {{ openshift.common.config_base }}/master/admin.kubeconfig + async: 600 + poll: 15 + with_items: "{{ openshift_hosted_routers }}" + failed_when: false -- name: Determine the latest version of the OpenShift router deployment - command: | - {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \ - --namespace {{ item.namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ - -o jsonpath='{ .status.latestVersion }' - register: openshift_hosted_routers_latest_version - with_items: "{{ openshift_hosted_routers }}" + - name: Determine the latest version of the OpenShift router deployment + command: | + {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \ + --namespace {{ item.namespace }} \ + --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ + -o jsonpath='{ .status.latestVersion }' + register: openshift_hosted_routers_latest_version + with_items: "{{ openshift_hosted_routers }}" -- name: Poll for OpenShift router deployment success - command: | - {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ - --namespace {{ item.0.namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ - -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' - register: openshift_hosted_router_rc_phase - until: "'Running' not in openshift_hosted_router_rc_phase.stdout" - delay: 15 - retries: 40 - failed_when: "'Failed' in openshift_hosted_router_rc_phase.stdout" - with_together: - - "{{ openshift_hosted_routers }}" - - "{{ openshift_hosted_routers_latest_version.results }}" + - name: Poll for OpenShift router deployment success + command: | + {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ + --namespace {{ item.0.namespace }} \ + --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ + -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' + register: openshift_hosted_router_rc_phase + until: "'Running' not in openshift_hosted_router_rc_phase.stdout" + delay: 15 + retries: 40 + failed_when: "'Failed' in openshift_hosted_router_rc_phase.stdout" + with_together: + - "{{ openshift_hosted_routers }}" + - "{{ openshift_hosted_routers_latest_version.results }}" diff --git a/roles/openshift_master/tasks/bootstrap.yml b/roles/openshift_master/tasks/bootstrap.yml new file mode 100644 index 000000000..0013f5289 --- /dev/null +++ b/roles/openshift_master/tasks/bootstrap.yml @@ -0,0 +1,28 @@ +--- + +- name: ensure the node-bootstrap service account exists + oc_serviceaccount: + name: node-bootstrapper + namespace: openshift-infra + state: present + run_once: true + +- name: grant node-bootstrapper the correct permissions to bootstrap + oc_adm_policy_user: + namespace: openshift-infra + user: system:serviceaccount:openshift-infra:node-bootstrapper + resource_kind: cluster-role + resource_name: system:node-bootstrapper + state: present + run_once: true + +# TODO: create a module for this command. 
+# oc_serviceaccounts_kubeconfig +- name: create service account kubeconfig with csr rights + command: "oc serviceaccounts create-kubeconfig node-bootstrapper -n openshift-infra" + register: kubeconfig_out + +- name: put service account kubeconfig into a file on disk for bootstrap + copy: + content: "{{ kubeconfig_out.stdout }}" + dest: "{{ openshift_master_config_dir }}/bootstrap.kubeconfig" diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index a11471891..b80941b48 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -218,6 +218,20 @@ - restart master api - restart master controllers +- name: modify controller args + yedit: + src: /etc/origin/master/master-config.yaml + edits: + - key: kubernetesMasterConfig.controllerArguments.cluster-signing-cert-file + value: + - /etc/origin/master/ca.crt + - key: kubernetesMasterConfig.controllerArguments.cluster-signing-key-file + value: + - /etc/origin/master/ca.key + notify: + - restart master controllers + when: openshift_master_bootstrap_enabled | default(False) + - include: set_loopback_context.yml when: - openshift.common.version_gte_3_2_or_1_2 @@ -366,3 +380,7 @@ shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster when: - l_install_result | changed + +- name: node bootstrap settings + include: bootstrap.yml + when: openshift_master_bootstrap_enabled | default(False) |
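Taken together, the new openshift_aws_* roles are meant to be composed into a provisioning pass. Below is a minimal sketch of chaining three of them from a local play; the role names and r_openshift_aws_* variables come from the READMEs above, while the play scaffolding and the pre-defined `subnets`, `node_group_config`, and `launch_config_name` values are assumptions.
```yaml
# Sketch only: assumes subnets, node_group_config and launch_config_name
# are defined elsewhere (e.g. in inventory), as in the README examples above.
- hosts: localhost
  connection: local
  tasks:
  - name: create the cluster VPC
    include_role:
      name: openshift_aws_vpc
    vars:
      r_openshift_aws_vpc_clusterid: mycluster
      r_openshift_aws_vpc_name: mycluster
      r_openshift_aws_vpc_region: us-east-1
      r_openshift_aws_vpc_cidr: 172.31.48.0/20
      r_openshift_aws_vpc_subnets: "{{ subnets }}"
      r_openshift_aws_vpc_tags: {}

  - name: create security groups for compute nodes
    include_role:
      name: openshift_aws_sg
    vars:
      r_openshift_aws_sg_clusterid: mycluster
      r_openshift_aws_sg_region: us-east-1
      r_openshift_aws_sg_type: compute

  - name: create the compute node scale group
    include_role:
      name: openshift_aws_node_group
    vars:
      r_openshift_aws_node_group_name: mycluster openshift compute
      r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}"
      r_openshift_aws_node_group_clusterid: mycluster
      r_openshift_aws_node_group_region: us-east-1
      r_openshift_aws_node_group_config: "{{ node_group_config }}"
      r_openshift_aws_node_group_type: compute
      r_openshift_aws_node_group_subnet_name: us-east-1a
```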
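The openshift_hosted hunks above wrap the router and registry rollout checks in blocks gated by two new defaults, openshift_hosted_router_wait and openshift_hosted_registry_wait (both True by default). A minimal sketch of disabling the polling, assuming the usual inventory group_vars layout; only the two variable names are taken from the diff:
```yaml
# group_vars/OSEv3.yml (assumed location) -- skip the best-effort rollout polling
openshift_hosted_router_wait: False
openshift_hosted_registry_wait: False
```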
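The openshift_master changes are likewise gated: bootstrap.yml and the controller-argument edits only run when openshift_master_bootstrap_enabled is truthy. Reconstructed from the yedit keys and values (not copied from a live config), the resulting master-config.yaml fragment looks roughly like:
```yaml
# master-config.yaml fragment after the yedit task (reconstruction)
kubernetesMasterConfig:
  controllerArguments:
    cluster-signing-cert-file:
    - /etc/origin/master/ca.crt
    cluster-signing-key-file:
    - /etc/origin/master/ca.key
```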