Diffstat (limited to 'roles')
104 files changed, 1062 insertions, 1359 deletions
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 451b8498f..24ca0d9f8 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -97,10 +97,16 @@
         l_crio_image_prepend: "docker.io/gscrivano"
         l_crio_image_name: "crio-o-fedora"

-    - name: Use Centos based image when distribution is Red Hat or CentOS
+    - name: Use Centos based image when distribution is CentOS
       set_fact:
         l_crio_image_name: "cri-o-centos"
-      when: ansible_distribution in ['RedHat', 'CentOS']
+      when: ansible_distribution == "CentOS"
+
+    - name: Use RHEL based image when distribution is Red Hat
+      set_fact:
+        l_crio_image_prepend: "registry.access.redhat.com"
+        l_crio_image_name: "cri-o"
+      when: ansible_distribution == "RedHat"

     # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
     - name: Use a testing registry if requested
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 57a84bc2c..146e5f430 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -100,18 +100,22 @@
         l_docker_image_prepend: "registry.fedoraproject.org/f25"
       when: ansible_distribution == 'Fedora'

-    # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
-    - name: Use a testing registry if requested
-      set_fact:
-        l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}"
-      when:
-        - openshift_docker_systemcontainer_image_registry_override is defined
-        - openshift_docker_systemcontainer_image_registry_override != ""
-
     - name: Set the full image name
       set_fact:
         l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"

+    # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
+    - name: Use a specific image if requested
+      set_fact:
+        l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}"
+      when:
+        - openshift_docker_systemcontainer_image_override is defined
+        - openshift_docker_systemcontainer_image_override != ""
+
+    # Be nice and let the user see the variable result
+    - debug:
+        var: l_docker_image
+
 # NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
 - name: Pre-pull Container Engine System Container image
   command: "atomic pull --storage ostree {{ l_docker_image }}"
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
index f5bcd03ee..6ed87e6c7 100644
--- a/roles/etcd_common/tasks/main.yml
+++ b/roles/etcd_common/tasks/main.yml
@@ -6,4 +6,4 @@
 - name: Include main action task file
   include: "{{ r_etcd_common_action }}.yml"
-  when: '"noop" not in r_etcd_common_action'
+  when: r_etcd_common_action != "noop"
diff --git a/roles/etcd_common/tasks/noop.yml b/roles/etcd_common/tasks/noop.yml
new file mode 100644
index 000000000..a88d78235
--- /dev/null
+++ b/roles/etcd_common/tasks/noop.yml
@@ -0,0 +1,4 @@
+---
+# This file is here because the usage of tags, specifically `pre_upgrade`
+# breaks the functionality of this role.
+# See https://bugzilla.redhat.com/show_bug.cgi?id=1464025
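The noop file above pairs with the `when: r_etcd_common_action != "noop"` guard in roles/etcd_common/tasks/main.yml; a minimal sketch of how a caller would select the action (values illustrative):

```yaml
# Sketch: r_etcd_common_action names the task file main.yml includes;
# "noop" makes the role do nothing, which keeps tagged runs safe.
- include_role:
    name: etcd_common
  vars:
    r_etcd_common_action: noop
```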
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
new file mode 100644
index 000000000..696efbea5
--- /dev/null
+++ b/roles/openshift_aws/README.md
@@ -0,0 +1,84 @@
+openshift_aws
+==================================
+
+Provision AWS infrastructure helpers.
+
+Requirements
+------------
+
+* Ansible 2.3
+* Boto
+
+Role Variables
+--------------
+
+From this role:
+
+| Name                                              | Default value
+|---------------------------------------------------|-----------------------
+| openshift_aws_clusterid                           | default
+| openshift_aws_elb_scheme                          | internet-facing
+| openshift_aws_launch_config_bootstrap_token       | ''
+| openshift_aws_node_group_config                   | {'master': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_master_volumes }}', 'tags': {'host-type': 'master', 'sub-host-type': 'default'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'wait_for_instances': True, 'max_size': 3}, 'tags': '{{ openshift_aws_node_group_config_tags }}', 'compute': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'compute'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'max_size': 100}, 'infra': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'infra'}, 'min_size': 2, 'instance_type': 'm4.xlarge', 'desired_size': 2, 'max_size': 20}}
+| openshift_aws_ami_copy_wait                       | False
+| openshift_aws_users                               | []
+| openshift_aws_launch_config_name                  | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
+| openshift_aws_create_vpc                          | False
+| openshift_aws_node_group_type                     | master
+| openshift_aws_elb_cert_arn                        | ''
+| openshift_aws_kubernetes_cluster_status           | owned
+| openshift_aws_s3_mode                             | create
+| openshift_aws_vpc                                 | {'subnets': {'us-east-1': [{'cidr': '172.31.48.0/20', 'az': 'us-east-1c'}, {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'}, {'cidr': '172.31.16.0/20', 'az': 'us-east-1a'}]}, 'cidr': '172.31.0.0/16', 'name': '{{ openshift_aws_vpc_name }}'}
+| openshift_aws_create_ssh_keys                     | False
+| openshift_aws_iam_kms_alias                       | alias/{{ openshift_aws_clusterid }}_kms
+| openshift_aws_use_custom_ami                      | False
+| openshift_aws_ami_copy_src_region                 | {{ openshift_aws_region }}
+| openshift_aws_s3_bucket_name                      | {{ openshift_aws_clusterid }}
+| openshift_aws_elb_health_check                    | {'response_timeout': 5, 'ping_port': 443, 'ping_protocol': 'tcp', 'interval': 30, 'healthy_threshold': 2, 'unhealthy_threshold': 2}
+| openshift_aws_node_security_groups                | {'default': {'rules': [{'to_port': 22, 'from_port': 22, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 'all', 'from_port': 'all', 'proto': 'all', 'group_name': '{{ openshift_aws_clusterid }}'}], 'name': '{{ openshift_aws_clusterid }}', 'desc': '{{ openshift_aws_clusterid }} default'}, 'master': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_master', 'desc': '{{ openshift_aws_clusterid }} master instances'}, 'compute': {'name': '{{ openshift_aws_clusterid }}_compute', 'desc': '{{ openshift_aws_clusterid }} compute node instances'}, 'etcd': {'name': '{{ openshift_aws_clusterid }}_etcd', 'desc': '{{ openshift_aws_clusterid }} etcd instances'}, 'infra': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 32000, 'from_port': 30000, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_infra', 'desc': '{{ openshift_aws_clusterid }} infra node instances'}}
+| openshift_aws_elb_security_groups                 | ['{{ openshift_aws_clusterid }}', '{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}']
+| openshift_aws_vpc_tags                            | {'Name': '{{ openshift_aws_vpc_name }}'}
+| openshift_aws_create_security_groups              | False
+| openshift_aws_create_iam_cert                     | False
+| openshift_aws_create_scale_group                  | True
+| openshift_aws_ami_encrypt                         | False
+| openshift_aws_node_group_config_node_volumes      | [{'volume_size': 100, 'delete_on_termination': True, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
+| openshift_aws_elb_instance_filter                 | {'tag:host-type': '{{ openshift_aws_node_group_type }}', 'tag:clusterid': '{{ openshift_aws_clusterid }}', 'instance-state-name': 'running'}
+| openshift_aws_region                              | us-east-1
+| openshift_aws_elb_name                            | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
+| openshift_aws_elb_idle_timout                     | 400
+| openshift_aws_subnet_name                         | us-east-1c
+| openshift_aws_node_group_config_tags              | {{ openshift_aws_clusterid | openshift_aws_build_instance_tags(openshift_aws_kubernetes_cluster_status) }}
+| openshift_aws_create_launch_config                | True
+| openshift_aws_ami_tags                            | {'bootstrap': 'true', 'clusterid': '{{ openshift_aws_clusterid }}', 'openshift-created': 'true'}
+| openshift_aws_ami_name                            | openshift-gi
+| openshift_aws_node_group_config_master_volumes    | [{'volume_size': 100, 'delete_on_termination': False, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
+| openshift_aws_vpc_name                            | {{ openshift_aws_clusterid }}
+| openshift_aws_elb_listeners                       | {'master': {'internal': [{'instance_port': 80, 'instance_protocol': 'tcp', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'tcp', 'load_balancer_port': 443, 'protocol': 'tcp'}], 'external': [{'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 443, 'ssl_certificate_id': '{{ openshift_aws_elb_cert_arn }}', 'protocol': 'ssl'}]}}
+|
+
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+```yaml
+- include_role:
+    name: openshift_aws
+    tasks_from: vpc.yml
+  vars:
+    openshift_aws_clusterid: test
+    openshift_aws_region: us-east-1
+    openshift_aws_create_vpc: true
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
new file mode 100644
index 000000000..4e7f54f79
--- /dev/null
+++ b/roles/openshift_aws/defaults/main.yml
@@ -0,0 +1,209 @@
+---
+openshift_aws_create_vpc: True
+openshift_aws_create_s3: True
+openshift_aws_create_iam_cert: True
+openshift_aws_create_security_groups: True
+openshift_aws_create_launch_config: True
+openshift_aws_create_scale_group: True
+openshift_aws_kubernetes_cluster_status: owned  # or shared
+openshift_aws_node_group_type: master
+
+openshift_aws_wait_for_ssh: True
+
+openshift_aws_clusterid: default
+openshift_aws_region: us-east-1
+openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
+
+openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
+openshift_aws_iam_cert_path: ''
+openshift_aws_iam_cert_chain_path: ''
+openshift_aws_iam_cert_key_path: ''
+openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}"
+
+openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
+openshift_aws_ami: ''
+openshift_aws_ami_copy_wait: False
+openshift_aws_ami_encrypt: False
+openshift_aws_ami_copy_src_region: "{{ openshift_aws_region }}"
+openshift_aws_ami_name: openshift-gi
+openshift_aws_base_ami_name: ami_base
+
+openshift_aws_launch_config_bootstrap_token: ''
+openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-{{ ansible_date_time.epoch }}"
+
+openshift_aws_users: []
+
+openshift_aws_ami_tags:
+  bootstrap: "true"
+  openshift-created: "true"
+  clusterid: "{{ openshift_aws_clusterid }}"
+
+openshift_aws_s3_mode: create
+openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"
+
+openshift_aws_elb_health_check:
+  ping_protocol: tcp
+  ping_port: 443
+  response_timeout: 5
+  interval: 30
+  unhealthy_threshold: 2
+  healthy_threshold: 2
+
+openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_idle_timout: 400
+openshift_aws_elb_scheme: internet-facing
+openshift_aws_elb_cert_arn: ''
+
+openshift_aws_elb_listeners:
+  master:
+    external:
+    - protocol: tcp
+      load_balancer_port: 80
+      instance_protocol: ssl
+      instance_port: 443
+    - protocol: ssl
+      load_balancer_port: 443
+      instance_protocol: ssl
+      instance_port: 443
+      # ssl certificate required for https or ssl
+      ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
+    internal:
+    - protocol: tcp
+      load_balancer_port: 80
+      instance_protocol: tcp
+      instance_port: 80
+    - protocol: tcp
+      load_balancer_port: 443
+      instance_protocol: tcp
+      instance_port: 443
+
+openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sdb
+  volume_size: 100
+  device_type: gp2
+  delete_on_termination: False
+
+openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sdb
+  volume_size: 100
+  device_type: gp2
+  delete_on_termination: True
+
+openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+
+openshift_aws_node_group_config:
+  tags: "{{ openshift_aws_node_group_config_tags }}"
+  master:
+    instance_type: m4.xlarge
+    ami: "{{ openshift_aws_ami }}"
+    volumes: "{{ openshift_aws_node_group_config_master_volumes }}"
+    health_check:
+      period: 60
+      type: EC2
+    min_size: 3
+    max_size: 3
+    desired_size: 3
+    tags:
+      host-type: master
+      sub-host-type: default
+    wait_for_instances: True
+  compute:
+    instance_type: m4.xlarge
+    ami: "{{ openshift_aws_ami }}"
+    volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
+    health_check:
+      period: 60
+      type: EC2
+    min_size: 3
+    max_size: 100
+    desired_size: 3
+    tags:
+      host-type: node
+      sub-host-type: compute
+  infra:
+    instance_type: m4.xlarge
+    ami: "{{ openshift_aws_ami }}"
+    volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
+    health_check:
+      period: 60
+      type: EC2
+    min_size: 2
+    max_size: 20
+    desired_size: 2
+    tags:
+      host-type: node
+      sub-host-type: infra
+
+openshift_aws_elb_security_groups:
+- "{{ openshift_aws_clusterid }}"
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"
+
+openshift_aws_elb_instance_filter:
+  "tag:clusterid": "{{ openshift_aws_clusterid }}"
+  "tag:host-type": "{{ openshift_aws_node_group_type }}"
+  instance-state-name: running
+
+openshift_aws_node_security_groups:
+  default:
+    name: "{{ openshift_aws_clusterid }}"
+    desc: "{{ openshift_aws_clusterid }} default"
+    rules:
+    - proto: tcp
+      from_port: 22
+      to_port: 22
+      cidr_ip: 0.0.0.0/0
+    - proto: all
+      from_port: all
+      to_port: all
+      group_name: "{{ openshift_aws_clusterid }}"
+  master:
+    name: "{{ openshift_aws_clusterid }}_master"
+    desc: "{{ openshift_aws_clusterid }} master instances"
+    rules:
+    - proto: tcp
+      from_port: 80
+      to_port: 80
+      cidr_ip: 0.0.0.0/0
+    - proto: tcp
+      from_port: 443
+      to_port: 443
+      cidr_ip: 0.0.0.0/0
+  compute:
+    name: "{{ openshift_aws_clusterid }}_compute"
+    desc: "{{ openshift_aws_clusterid }} compute node instances"
+  infra:
+    name: "{{ openshift_aws_clusterid }}_infra"
+    desc: "{{ openshift_aws_clusterid }} infra node instances"
+    rules:
+    - proto: tcp
+      from_port: 80
+      to_port: 80
+      cidr_ip: 0.0.0.0/0
+    - proto: tcp
+      from_port: 443
+      to_port: 443
+      cidr_ip: 0.0.0.0/0
+    - proto: tcp
+      from_port: 30000
+      to_port: 32000
+      cidr_ip: 0.0.0.0/0
+  etcd:
+    name: "{{ openshift_aws_clusterid }}_etcd"
+    desc: "{{ openshift_aws_clusterid }} etcd instances"
+
+openshift_aws_vpc_tags:
+  Name: "{{ openshift_aws_vpc_name }}"
+
+openshift_aws_subnet_name: us-east-1c
+
+openshift_aws_vpc:
+  name: "{{ openshift_aws_vpc_name }}"
+  cidr: 172.31.0.0/16
+  subnets:
+    us-east-1:
+    - cidr: 172.31.48.0/20
+      az: "us-east-1c"
+    - cidr: 172.31.32.0/20
+      az: "us-east-1e"
+    - cidr: 172.31.16.0/20
+      az: "us-east-1a"
diff --git a/roles/openshift_aws/filter_plugins/filters.py b/roles/openshift_aws/filter_plugins/filters.py
new file mode 100644
index 000000000..06e1f9602
--- /dev/null
+++ b/roles/openshift_aws/filter_plugins/filters.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+Custom filters for use in openshift_aws
+'''
+
+
+class FilterModule(object):
+    ''' Custom ansible filters for use by openshift_aws role'''
+
+    @staticmethod
+    def build_instance_tags(clusterid, status='owned'):
+        ''' This function will return a dictionary of the instance tags.
+
+            The main desire to have this inside of a filter_plugin is that we
+            need to build the following key.
+
+            {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'}
+
+        '''
+        tags = {'clusterid': clusterid,
+                'kubernetes.io/cluster/{}'.format(clusterid): status}
+
+        return tags
+
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {'build_instance_tags': self.build_instance_tags}
diff --git a/roles/openshift_aws/meta/main.yml b/roles/openshift_aws/meta/main.yml
new file mode 100644
index 000000000..875efcb8f
--- /dev/null
+++ b/roles/openshift_aws/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- lib_utils
diff --git a/roles/openshift_aws/tasks/ami_copy.yml b/roles/openshift_aws/tasks/ami_copy.yml
new file mode 100644
index 000000000..07020dd62
--- /dev/null
+++ b/roles/openshift_aws/tasks/ami_copy.yml
@@ -0,0 +1,34 @@
+---
+- fail:
+    msg: "{{ item }} needs to be defined"
+  when: item is not defined
+  with_items:
+  - openshift_aws_ami_copy_src_ami
+  - openshift_aws_ami_copy_name
+
+- name: Create IAM KMS key with alias
+  oo_iam_kms:
+    state: present
+    alias: "{{ openshift_aws_iam_kms_alias }}"
+    region: "{{ openshift_aws_region }}"
+  register: created_kms
+
+- debug: var=created_kms.results
+
+- name: "Create copied AMI image and wait: {{ openshift_aws_ami_copy_wait }}"
+  ec2_ami_copy:
+    name: "{{ openshift_aws_ami_copy_name }}"
+    region: "{{ openshift_aws_region }}"
+    source_region: "{{ openshift_aws_ami_copy_src_region }}"
+    source_image_id: "{{ openshift_aws_ami_copy_src_ami }}"
+    encrypted: "{{ openshift_aws_ami_encrypt | bool }}"
+    kms_key_id: "{{ created_kms.results.KeyArn | default(omit) }}"
+    wait: "{{ openshift_aws_ami_copy_wait | default(omit) }}"
+    tags: "{{ openshift_aws_ami_tags }}"
+  register: copy_result
+
+- debug: var=copy_result
+
+- name: return AMI ID with setfact
+  set_fact:
+    openshift_aws_ami_copy_custom_ami: "{{ copy_result.image_id }}"
diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/build_ami.yml
new file mode 100644
index 000000000..8d4e5ac43
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_ami.yml
@@ -0,0 +1,48 @@
+---
+- when: openshift_aws_create_vpc | bool
+  name: create a vpc
+  include: vpc.yml
+
+- when: openshift_aws_users | length > 0
+  name: create aws ssh keypair
+  include: ssh_keys.yml
+
+- when: openshift_aws_create_security_groups | bool
+  name: Create compute security_groups
+  include: security_group.yml
+
+- name: query vpc
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      'tag:Name': "{{ openshift_aws_vpc_name }}"
+  register: vpcout
+
+- name: fetch the default subnet id
+  ec2_vpc_subnet_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_subnet_name }}"
+      vpc-id: "{{ vpcout.vpcs[0].id }}"
+  register: subnetout
+
+- name: create instance for ami creation
+  ec2:
+    assign_public_ip: yes
+    region: "{{ openshift_aws_region }}"
+    key_name: "{{ openshift_aws_ssh_key_name }}"
+    group: "{{ openshift_aws_clusterid }}"
+    instance_type: m4.xlarge
+    vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
+    image: "{{ openshift_aws_base_ami }}"
+    volumes:
+    - device_name: /dev/sdb
+      volume_type: gp2
+      volume_size: 100
+      delete_on_termination: true
+    wait: yes
+    exact_count: 1
+    count_tag:
+      Name: "{{ openshift_aws_base_ami_name }}"
+    instance_tags:
+      Name: "{{ openshift_aws_base_ami_name }}"
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
new file mode 100644
index 000000000..0dac1c23d
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -0,0 +1,34 @@
+---
+# When openshift_aws_ami is '' we retrieve the latest built AMI
+# and set openshift_aws_ami to it.
+- when: openshift_aws_ami == ''
+  block:
+  - name: fetch recently created AMI
+    ec2_ami_find:
+      region: "{{ openshift_aws_region }}"
+      sort: creationDate
+      sort_order: descending
+      name: "{{ openshift_aws_ami_name }}*"
+      ami_tags: "{{ openshift_aws_ami_tags }}"
+      no_result_action: fail
+    register: amiout
+
+  - name: Set the openshift_aws_ami
+    set_fact:
+      openshift_aws_ami: "{{ amiout.results[0].ami_id }}"
+    when:
+    - "'results' in amiout"
+    - amiout.results|length > 0
+
+- when: openshift_aws_create_security_groups
+  name: "Create {{ openshift_aws_node_group_type }} security groups"
+  include: security_group.yml
+
+- when: openshift_aws_create_launch_config
+  name: "Create {{ openshift_aws_node_group_type }} launch config"
+  include: launch_config.yml
+
+- when: openshift_aws_create_scale_group
+  name: "Create {{ openshift_aws_node_group_type }} node group"
+  include: scale_group.yml
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
new file mode 100644
index 000000000..a1fdd66fc
--- /dev/null
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -0,0 +1,68 @@
+---
+- name: query vpc
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      'tag:Name': "{{ openshift_aws_vpc_name }}"
+  register: vpcout
+
+- name: debug
+  debug: var=vpcout
+
+- name: fetch the remote instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters: "{{ openshift_aws_elb_instance_filter }}"
+  register: instancesout
+
+- name: fetch the default subnet id
+  ec2_vpc_subnet_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_subnet_name }}"
+      vpc-id: "{{ vpcout.vpcs[0].id }}"
+  register: subnetout
+
+- name:
+  debug:
+    msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+                   if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
+                   else openshift_aws_elb_listeners }}"
+
+- name: "Create ELB {{ openshift_aws_elb_name }}"
+  ec2_elb_lb:
+    name: "{{ openshift_aws_elb_name }}"
+    state: present
+    security_group_names: "{{ openshift_aws_elb_security_groups }}"
+    idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
+    region: "{{ openshift_aws_region }}"
+    subnets:
+    - "{{ subnetout.subnets[0].id }}"
+    health_check: "{{ openshift_aws_elb_health_check }}"
+    listeners: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+                   if 'master' in openshift_aws_node_group_type  or 'infra' in openshift_aws_node_group_type
+                   else openshift_aws_elb_listeners }}"
+    scheme: "{{ openshift_aws_elb_scheme }}"
+    tags:
+      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+  register: new_elb
+
+# It is necessary to ignore_errors here because the instances are not in 'ready'
+#  state when first added to ELB
+- name: "Add instances to ELB {{ openshift_aws_elb_name }}"
+  ec2_elb:
+    instance_id: "{{ item.id }}"
+    ec2_elbs: "{{ openshift_aws_elb_name }}"
+    state: present
+    region: "{{ openshift_aws_region }}"
+    wait: False
+  with_items: "{{ instancesout.instances }}"
+  ignore_errors: True
+  retries: 10
+  register: elb_call
+  until: elb_call|succeeded
+
+- debug:
+    msg: "{{ item }}"
+  with_items:
+  - "{{ new_elb }}"
diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml
new file mode 100644
index 000000000..cd9772a25
--- /dev/null
+++ b/roles/openshift_aws/tasks/iam_cert.yml
@@ -0,0 +1,29 @@
+---
+- name: upload certificates to AWS IAM
+  iam_cert23:
+    state: present
+    name: "{{ openshift_aws_iam_cert_name }}"
+    cert: "{{ openshift_aws_iam_cert_path }}"
+    key: "{{ openshift_aws_iam_cert_key_path }}"
+    cert_chain: "{{ openshift_aws_iam_cert_chain_path | default(omit) }}"
+  register: elb_cert_chain
+  failed_when:
+  - "'failed' in elb_cert_chain"
+  - elb_cert_chain.failed
+  - "'msg' in elb_cert_chain"
+  - "'already exists and has a different certificate body' in elb_cert_chain.msg"
+  - "'BotoServerError' in elb_cert_chain.msg"
+  when:
+  - openshift_aws_create_iam_cert | bool
+  - openshift_aws_iam_cert_path != ''
+  - openshift_aws_iam_cert_key_path != ''
+  - openshift_aws_elb_cert_arn == ''
+
+- name: set_fact openshift_aws_elb_cert_arn
+  set_fact:
+    openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
+
+- name: wait for cert to propagate
+  pause:
+    seconds: 5
+  when: elb_cert_chain.changed
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
new file mode 100644
index 000000000..65c5a6cc0
--- /dev/null
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -0,0 +1,45 @@
+---
+- fail:
+    msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image."
+  when:
+  - openshift_aws_ami is undefined
+
+- name: fetch the security groups for launch config
+  ec2_group_facts:
+    filters:
+      group-name:
+      - "{{ openshift_aws_clusterid }}"  # default sg
+      - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"  # node type sg
+      - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s"  # node type sg k8s
+    region: "{{ openshift_aws_region }}"
+  register: ec2sgs
+
+# Create the scale group config
+- name: Create the node scale group launch config
+  ec2_lc:
+    name: "{{ openshift_aws_launch_config_name }}"
+    region: "{{ openshift_aws_region }}"
+    image_id: "{{ openshift_aws_ami }}"
+    instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}"
+    security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}"
+    user_data: |-
+      #cloud-config
+      {%  if openshift_aws_node_group_type != 'master' %}
+      write_files:
+      - path: /root/csr_kubeconfig
+        owner: root:root
+        permissions: '0640'
+        content: {{ openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
+      - path: /root/openshift_settings
+        owner: root:root
+        permissions: '0640'
+        content:
+          openshift_type: "{{ openshift_aws_node_group_type }}"
+      runcmd:
+      - [ systemctl, enable, atomic-openshift-node]
+      - [ systemctl, start, atomic-openshift-node]
+      {% endif %}
+    key_name: "{{ openshift_aws_ssh_key_name }}"
+    ebs_optimized: False
+    volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}"
+    assign_public_ip: True
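For any non-master node group, the `user_data` template above renders to roughly the following cloud-config (a sketch assuming `openshift_aws_node_group_type=compute` and an empty bootstrap token; the exact `to_yaml` rendering may differ):

```yaml
#cloud-config
write_files:
- path: /root/csr_kubeconfig
  owner: root:root
  permissions: '0640'
  content: ''
- path: /root/openshift_settings
  owner: root:root
  permissions: '0640'
  content:
    openshift_type: "compute"
runcmd:
- [ systemctl, enable, atomic-openshift-node]
- [ systemctl, start, atomic-openshift-node]
```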
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
new file mode 100644
index 000000000..189caeaee
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -0,0 +1,54 @@
+---
+- when: openshift_aws_create_vpc | bool
+  name: create default vpc
+  include: vpc.yml
+
+- when: openshift_aws_create_iam_cert | bool
+  name: create the iam_cert for elb certificate
+  include: iam_cert.yml
+
+- when: openshift_aws_users | length > 0
+  name: create aws ssh keypair
+  include: ssh_keys.yml
+
+- when: openshift_aws_create_s3 | bool
+  name: create s3 bucket for registry
+  include: s3.yml
+
+- name: include scale group creation for master
+  include: build_node_group.yml
+
+- name: fetch newly created instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:clusterid": "{{ openshift_aws_clusterid }}"
+      "tag:host-type": "{{ openshift_aws_node_group_type }}"
+      instance-state-name: running
+  register: instancesout
+  retries: 20
+  delay: 3
+  until: instancesout.instances|length > 0
+
+- name: create our master internal load balancers
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: internal
+    openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-internal"
+    openshift_aws_elb_scheme: internal
+
+- name: create our master external load balancers
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: external
+    openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-external"
+    openshift_aws_elb_scheme: internet-facing
+
+- name: wait for ssh to become available
+  wait_for:
+    port: 22
+    host: "{{ item.public_ip_address }}"
+    timeout: 300
+    search_regex: OpenSSH
+  with_items: "{{ instancesout.instances }}"
+  when: openshift_aws_wait_for_ssh | bool
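Mirroring the README's `tasks_from: vpc.yml` example, the whole flow above would be driven the same way (a hypothetical invocation, not documented in the README itself):

```yaml
- include_role:
    name: openshift_aws
    tasks_from: provision.yml
  vars:
    openshift_aws_clusterid: test
    openshift_aws_region: us-east-1
```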
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
new file mode 100644
index 000000000..fc4996c68
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -0,0 +1,66 @@
+---
+# Get bootstrap config token
+# bootstrap should be created on first master
+# need to fetch it and shove it into cloud data
+- name: fetch master instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:clusterid": "{{ openshift_aws_clusterid }}"
+      "tag:host-type": master
+      instance-state-name: running
+  register: instancesout
+  retries: 20
+  delay: 3
+  until: instancesout.instances|length > 0
+
+- name: slurp down the bootstrap.kubeconfig
+  slurp:
+    src: /etc/origin/master/bootstrap.kubeconfig
+  delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
+  remote_user: root
+  register: bootstrap
+
+- name: set_fact for kubeconfig token
+  set_fact:
+    openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}"
+
+- name: include build node group for infra
+  include: build_node_group.yml
+  vars:
+    openshift_aws_node_group_type: infra
+    openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift infra"
+    openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-infra-{{ ansible_date_time.epoch }}"
+
+- name: include build node group for compute
+  include: build_node_group.yml
+  vars:
+    openshift_aws_node_group_type: compute
+    openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift compute"
+    openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-compute-{{ ansible_date_time.epoch }}"
+
+- when: openshift_aws_wait_for_ssh | bool
+  block:
+  - name: pause and allow for instances to scale before we query them
+    pause:
+      seconds: 10
+
+  - name: fetch newly created instances
+    ec2_remote_facts:
+      region: "{{ openshift_aws_region }}"
+      filters:
+        "tag:clusterid": "{{ openshift_aws_clusterid }}"
+        "tag:host-type": node
+        instance-state-name: running
+    register: instancesout
+    retries: 20
+    delay: 3
+    until: instancesout.instances|length > 0
+
+  - name: wait for ssh to become available
+    wait_for:
+      port: 22
+      host: "{{ item.public_ip_address }}"
+      timeout: 300
+      search_regex: OpenSSH
+    with_items: "{{ instancesout.instances }}"
diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml
new file mode 100644
index 000000000..9cf37c840
--- /dev/null
+++ b/roles/openshift_aws/tasks/s3.yml
@@ -0,0 +1,7 @@
+---
+- name: Create an s3 bucket
+  s3:
+    bucket: "{{ openshift_aws_s3_bucket_name }}"
+    mode: "{{ openshift_aws_s3_mode }}"
+    region: "{{ openshift_aws_region }}"
+  when: openshift_aws_create_s3 | bool
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
new file mode 100644
index 000000000..3e969fc43
--- /dev/null
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -0,0 +1,32 @@
+---
+- name: query vpc
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      'tag:Name': "{{ openshift_aws_vpc_name }}"
+  register: vpcout
+
+- name: fetch the subnet to use in scale group
+  ec2_vpc_subnet_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_subnet_name }}"
+      vpc-id: "{{ vpcout.vpcs[0].id }}"
+  register: subnetout
+
+- name: Create the scale group
+  ec2_asg:
+    name: "{{ openshift_aws_scale_group_name }}"
+    launch_config_name: "{{ openshift_aws_launch_config_name }}"
+    health_check_period: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.period }}"
+    health_check_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.type }}"
+    min_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].min_size }}"
+    max_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].max_size }}"
+    desired_capacity: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].desired_size }}"
+    region: "{{ openshift_aws_region }}"
+    termination_policies: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].termination_policy if 'termination_policy' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+    load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+    wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}"
+    vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
+    tags:
+    - "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
new file mode 100644
index 000000000..0cb749dcc
--- /dev/null
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -0,0 +1,49 @@
+---
+- name: fetch newly created instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_base_ami_name }}"
+      instance-state-name: running
+  register: instancesout
+  retries: 20
+  delay: 3
+  until: instancesout.instances|length > 0
+
+- name: bundle ami
+  ec2_ami:
+    instance_id: "{{ instancesout.instances.0.id }}"
+    region: "{{ openshift_aws_region }}"
+    state: present
+    description: "This was provisioned {{ ansible_date_time.iso8601 }}"
+    name: "{{ openshift_aws_ami_name }}"
+    tags: "{{ openshift_aws_ami_tags }}"
+    wait: yes
+  register: amioutput
+
+- debug: var=amioutput
+
+- when: openshift_aws_ami_encrypt | bool
+  block:
+  - name: augment the encrypted ami tags with source-ami
+    set_fact:
+      source_tag:
+        source-ami: "{{ amioutput.image_id }}"
+
+  - name: copy the ami for encrypted disks
+    include: ami_copy.yml
+    vars:
+      openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted"
+      openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
+      # TODO: How does the kms alias get passed to ec2_ami_copy
+      openshift_aws_ami_copy_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
+      openshift_aws_ami_copy_tags: "{{ source_tag | combine(openshift_aws_ami_tags) }}"
+      # this option currently fails due to boto waiters
+      # when supported this need to be reapplied
+      #openshift_aws_ami_copy_wait: True
+
+- name: terminate temporary instance
+  ec2:
+    state: absent
+    region: "{{ openshift_aws_region }}"
+    instance_ids: "{{ instancesout.instances.0.id }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
new file mode 100644
index 000000000..161e72fb4
--- /dev/null
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -0,0 +1,45 @@
+---
+- name: Fetch the VPC for the vpc.id
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_clusterid }}"
+  register: vpcout
+
+- name: Create default security group for cluster
+  ec2_group:
+    name: "{{ openshift_aws_node_security_groups.default.name }}"
+    description: "{{ openshift_aws_node_security_groups.default.desc }}"
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpcout.vpcs[0].id }}"
+    rules: "{{ openshift_aws_node_security_groups.default.rules | default(omit, True)}}"
+  register: sg_default_created
+
+- name: create the node group sgs
+  ec2_group:
+    name: "{{ item.name}}"
+    description: "{{ item.desc }}"
+    rules: "{{ item.rules if 'rules' in item else [] }}"
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpcout.vpcs[0].id }}"
+  register: sg_create
+  with_items:
+  - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}"
+
+- name: create the k8s sgs for the node group
+  ec2_group:
+    name: "{{ item.name }}_k8s"
+    description: "{{ item.desc }} for k8s"
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpcout.vpcs[0].id }}"
+  register: k8s_sg_create
+  with_items:
+  - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}"
+
+- name: tag sg groups with proper tags
+  ec2_tag:
+    tags:
+      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+    resource: "{{ item.group_id }}"
+    region: "{{ openshift_aws_region }}"
+  with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws_ssh_keys/tasks/main.yml b/roles/openshift_aws/tasks/ssh_keys.yml
index 232cf20ed..f439ce74e 100644
--- a/roles/openshift_aws_ssh_keys/tasks/main.yml
+++ b/roles/openshift_aws/tasks/ssh_keys.yml
@@ -3,6 +3,6 @@
   ec2_key:
     name: "{{ item.key_name }}"
     key_material: "{{ item.pub_key }}"
-    region: "{{ r_openshift_aws_ssh_keys_region }}"
-  with_items: "{{ r_openshift_aws_ssh_keys_users }}"
+    region: "{{ openshift_aws_region }}"
+  with_items: "{{ openshift_aws_users }}"
   no_log: True
diff --git a/roles/openshift_aws_vpc/tasks/main.yml b/roles/openshift_aws/tasks/vpc.yml
index cfe08dae5..ce2c8eac5 100644
--- a/roles/openshift_aws_vpc/tasks/main.yml
+++ b/roles/openshift_aws/tasks/vpc.yml
@@ -2,13 +2,12 @@
 - name: Create AWS VPC
   ec2_vpc_net:
     state: present
-    cidr_block: "{{ r_openshift_aws_vpc_cidr }}"
+    cidr_block: "{{ openshift_aws_vpc.cidr }}"
     dns_support: True
     dns_hostnames: True
-    region: "{{ r_openshift_aws_vpc_region }}"
-    name: "{{ r_openshift_aws_vpc_clusterid }}"
-    tags:
-      Name: "{{ r_openshift_aws_vpc_clusterid }}"
+    region: "{{ openshift_aws_region }}"
+    name: "{{ openshift_aws_clusterid }}"
+    tags: "{{ openshift_aws_vpc_tags }}"
   register: vpc

 - name: Sleep to avoid a race condition when creating the vpc
@@ -18,23 +17,23 @@
 - name: assign the vpc igw
   ec2_vpc_igw:
-    region: "{{ r_openshift_aws_vpc_region }}"
+    region: "{{ openshift_aws_region }}"
     vpc_id: "{{ vpc.vpc.id }}"
   register: igw

 - name: assign the vpc subnets
   ec2_vpc_subnet:
-    region: "{{ r_openshift_aws_vpc_region }}"
+    region: "{{ openshift_aws_region }}"
     vpc_id: "{{ vpc.vpc.id }}"
     cidr: "{{ item.cidr }}"
     az: "{{ item.az }}"
     resource_tags:
       Name: "{{ item.az }}"
-  with_items: "{{ r_openshift_aws_vpc_subnets[r_openshift_aws_vpc_region] }}"
+  with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}"

 - name: Grab the route tables from our VPC
   ec2_vpc_route_table_facts:
-    region: "{{ r_openshift_aws_vpc_region }}"
+    region: "{{ openshift_aws_region }}"
     filters:
       vpc-id: "{{ vpc.vpc.id }}"
   register: route_table
@@ -44,9 +43,9 @@
     lookup: id
     route_table_id: "{{ route_table.route_tables[0].id }}"
     vpc_id: "{{ vpc.vpc.id }}"
-    region: "{{ r_openshift_aws_vpc_region }}"
+    region: "{{ openshift_aws_region }}"
     tags:
-      Name: "{{ r_openshift_aws_vpc_name }}"
+      Name: "{{ openshift_aws_vpc_name }}"
     routes:
     - dest: 0.0.0.0/0
       gateway_id: igw
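The two renames above fold the standalone `openshift_aws_ssh_keys` and `openshift_aws_vpc` roles into `openshift_aws`, replacing per-role `r_openshift_aws_*` parameters with the shared `openshift_aws_*` defaults. A sketch of the before/after call pattern (variable values illustrative):

```yaml
# Before: standalone role with r_-prefixed inputs
- include_role:
    name: openshift_aws_vpc
  vars:
    r_openshift_aws_vpc_region: us-east-1

# After: consolidated role; the region comes from openshift_aws_region
- include_role:
    name: openshift_aws
    tasks_from: vpc.yml
  vars:
    openshift_aws_region: us-east-1
```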
diff --git a/roles/openshift_aws_ami_copy/README.md b/roles/openshift_aws_ami_copy/README.md
deleted file mode 100644
index 111818451..000000000
--- a/roles/openshift_aws_ami_copy/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-openshift_aws_ami_perms
-=========
-
-Ansible role for copying an AMI
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- openshift_aws_ami_copy_src_ami: source AMI id to copy from
-- openshift_aws_ami_copy_region: region where the AMI is found
-- openshift_aws_ami_copy_name: name to assign to new AMI
-- openshift_aws_ami_copy_kms_arn: AWS IAM KMS arn of the key to use for encryption
-- openshift_aws_ami_copy_tags: dict with desired tags
-- openshift_aws_ami_copy_wait: wait for the ami copy to achieve available status.  This fails due to boto waiters.
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-    - name: copy the ami for encrypted disks
-      include_role:
-        name: openshift_aws_ami_copy
-      vars:
-        r_openshift_aws_ami_copy_region: us-east-1
-        r_openshift_aws_ami_copy_name: myami
-        r_openshift_aws_ami_copy_src_ami: ami-1234
-        r_openshift_aws_ami_copy_kms_arn: arn:xxxx
-        r_openshift_aws_ami_copy_tags: {}
-        r_openshift_aws_ami_copy_encrypt: False
-
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_ami_copy/tasks/main.yml b/roles/openshift_aws_ami_copy/tasks/main.yml
deleted file mode 100644
index bcccd4042..000000000
--- a/roles/openshift_aws_ami_copy/tasks/main.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- fail:
-    msg: "{{ item }} needs to be defined"
-  when: item is not defined
-  with_items:
-  - r_openshift_aws_ami_copy_src_ami
-  - r_openshift_aws_ami_copy_name
-  - r_openshift_aws_ami_copy_region
-
-- name: "Create copied AMI image and wait: {{ r_openshift_aws_ami_copy_wait | default(False) }}"
-  ec2_ami_copy:
-    region: "{{ r_openshift_aws_ami_copy_region }}"
-    source_region: "{{ r_openshift_aws_ami_copy_region }}"
-    name: "{{ r_openshift_aws_ami_copy_name }}"
-    source_image_id: "{{ r_openshift_aws_ami_copy_src_ami }}"
-    encrypted: "{{ r_openshift_aws_ami_copy_encrypt | default(False) }}"
-    kms_key_id: "{{ r_openshift_aws_ami_copy_kms_arn | default(omit) }}"
-    wait: "{{ r_openshift_aws_ami_copy_wait | default(omit) }}"
-    tags: "{{ r_openshift_aws_ami_copy_tags }}"
-  register: copy_result
-
-- debug: var=copy_result
-
-- name: return AMI ID with setfact - openshift_aws_ami_copy_retval_custom_ami
-  set_fact:
-    r_openshift_aws_ami_copy_retval_custom_ami: "{{ copy_result.image_id }}"
diff --git a/roles/openshift_aws_elb/README.md b/roles/openshift_aws_elb/README.md
deleted file mode 100644
index ecc45fa14..000000000
--- a/roles/openshift_aws_elb/README.md
+++ /dev/null
@@ -1,75 +0,0 @@
-openshift_aws_elb
-=========
-
-Ansible role to provision and manage AWS ELB's for Openshift.
-
-Requirements
-------------
-
-Ansible Modules:
-
-- ec2_elb
-- ec2_elb_lb
-
-python package:
-
-python-boto
-
-Role Variables
---------------
-
-- r_openshift_aws_elb_instances: instances to put in ELB
-- r_openshift_aws_elb_elb_name: name of elb
-- r_openshift_aws_elb_security_group_names: list of SGs (by name) that the ELB will belong to
-- r_openshift_aws_elb_region: AWS Region
-- r_openshift_aws_elb_health_check: definition of the ELB health check. See ansible docs for ec2_elb
-```yaml
-  ping_protocol: tcp
-  ping_port: 443
-  response_timeout: 5
-  interval: 30
-  unhealthy_threshold: 2
-  healthy_threshold: 2
-```
-- r_openshift_aws_elb_listeners: definition of the ELB listeners. See ansible docs for ec2_elb
-```yaml
-- protocol: tcp
-  load_balancer_port: 80
-  instance_protocol: ssl
-  instance_port: 443
-- protocol: ssl
-  load_balancer_port: 443
-  instance_protocol: ssl
-  instance_port: 443
-  # ssl certificate required for https or ssl
-  ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}"
-```
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- include_role:
-    name: openshift_aws_elb
-  vars:
-    r_openshift_aws_elb_instances: aws_instances_to_put_in_elb
-    r_openshift_aws_elb_elb_name: elb_name
-    r_openshift_aws_elb_security_groups: security_group_names
-    r_openshift_aws_elb_region: aws_region
-    r_openshift_aws_elb_health_check: "{{ elb_health_check_definition }}"
-    r_openshift_aws_elb_listeners: "{{ elb_listeners_definition }}"
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_elb/defaults/main.yml b/roles/openshift_aws_elb/defaults/main.yml
deleted file mode 100644
index ed5d38079..000000000
--- a/roles/openshift_aws_elb/defaults/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-r_openshift_aws_elb_health_check:
-  ping_protocol: tcp
-  ping_port: 443
-  response_timeout: 5
-  interval: 30
-  unhealthy_threshold: 2
-  healthy_threshold: 2
-
-r_openshift_aws_elb_cert_arn: ''
-
-r_openshift_aws_elb_listeners:
-  master:
-    external:
-    - protocol: tcp
-      load_balancer_port: 80
-      instance_protocol: ssl
-      instance_port: 443
-    - protocol: ssl
-      load_balancer_port: 443
-      instance_protocol: ssl
-      instance_port: 443
-      # ssl certificate required for https or ssl
-      ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}"
-    internal:
-    - protocol: tcp
-      load_balancer_port: 80
-      instance_protocol: tcp
-      instance_port: 80
-    - protocol: tcp
-      load_balancer_port: 443
-      instance_protocol: tcp
-      instance_port: 443
diff --git a/roles/openshift_aws_elb/meta/main.yml b/roles/openshift_aws_elb/meta/main.yml
deleted file mode 100644
index 58be652a5..000000000
--- a/roles/openshift_aws_elb/meta/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-galaxy_info:
-  author: OpenShift
-  description: Openshift ELB provisioning
-  company: Red Hat, Inc
-  license: ASL 2.0
-  min_ansible_version: 1.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-dependencies: []
diff --git a/roles/openshift_aws_elb/tasks/main.yml b/roles/openshift_aws_elb/tasks/main.yml
deleted file mode 100644
index 64ec18545..000000000
--- a/roles/openshift_aws_elb/tasks/main.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- name: fetch the default subnet id
-  ec2_remote_facts:
-    region: "{{ r_openshift_aws_elb_region }}"
-    filters: "{{ r_openshift_aws_elb_instance_filter }}"
-  register: instancesout
-
-- name: fetch the default subnet id
-  ec2_vpc_subnet_facts:
-    region: "{{ r_openshift_aws_elb_region }}"
-    filters:
-      "tag:Name": "{{ r_openshift_aws_elb_subnet_name }}"
-  register: subnetout
-
-- name:
-  debug:
-    msg: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction]
-                   if 'master' in r_openshift_aws_elb_type  or 'infra' in r_openshift_aws_elb_type
-                   else r_openshift_aws_elb_listeners }}"
-
-- name: "Create ELB {{ r_openshift_aws_elb_name }}"
-  ec2_elb_lb:
-    name: "{{ r_openshift_aws_elb_name }}"
-    state: present
-    security_group_names: "{{ r_openshift_aws_elb_security_groups }}"
-    idle_timeout: "{{ r_openshift_aws_elb_idle_timout }}"
-    region: "{{ r_openshift_aws_elb_region }}"
-    subnets:
-    - "{{ subnetout.subnets[0].id }}"
-    health_check: "{{ r_openshift_aws_elb_health_check }}"
-    listeners: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction]
-                   if 'master' in r_openshift_aws_elb_type  or 'infra' in r_openshift_aws_elb_type
-                   else r_openshift_aws_elb_listeners }}"
-    scheme: "{{ r_openshift_aws_elb_scheme }}"
-    tags:
-      KubernetesCluster: "{{ r_openshift_aws_elb_clusterid }}"
-  register: new_elb
-
-# It is necessary to ignore_errors here because the instances are not in 'ready'
-#  state when first added to ELB
-- name: "Add instances to ELB {{ r_openshift_aws_elb_name }}"
-  ec2_elb:
-    instance_id: "{{ item.id }}"
-    ec2_elbs: "{{ r_openshift_aws_elb_name }}"
-    state: present
-    region: "{{ r_openshift_aws_elb_region }}"
-    wait: False
-  with_items: "{{ instancesout.instances }}"
-  ignore_errors: True
-  retries: 10
-  register: elb_call
-  until: elb_call|succeeded
-
-- debug:
-    msg: "{{ item }}"
-  with_items:
-  - "{{ new_elb }}"
diff --git a/roles/openshift_aws_iam_kms/README.md b/roles/openshift_aws_iam_kms/README.md
deleted file mode 100644
index 9468e785c..000000000
--- a/roles/openshift_aws_iam_kms/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-openshift_aws_iam_kms
-=========
-
-Ansible role to create AWS IAM KMS keys for encryption
-
-Requirements
-------------
-
-Ansible Modules:
-
-oo_iam_kms
-
-Role Variables
---------------
-
-- r_openshift_aws_iam_kms_region: AWS region to create KMS key
-- r_openshift_aws_iam_kms_alias: Alias name to assign to created KMS key
-
-Dependencies
-------------
-
-lib_utils
-
-Example Playbook
-----------------
-```yaml
-- include_role:
-    name: openshift_aws_iam_kms
-  vars:
-    r_openshift_aws_iam_kms_region: 'us-east-1'
-    r_openshift_aws_iam_kms_alias: 'alias/clusterABC_kms'
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_iam_kms/defaults/main.yml b/roles/openshift_aws_iam_kms/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_iam_kms/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_aws_iam_kms/meta/main.yml b/roles/openshift_aws_iam_kms/meta/main.yml
deleted file mode 100644
index e29aaf96b..000000000
--- a/roles/openshift_aws_iam_kms/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
-  author: OpenShift
-  description: AWS IAM KMS setup and management
-  company: Red Hat, Inc
-  license: ASL 2.0
-  min_ansible_version: 1.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-dependencies:
-- lib_utils
diff --git a/roles/openshift_aws_iam_kms/tasks/main.yml b/roles/openshift_aws_iam_kms/tasks/main.yml
deleted file mode 100644
index 32aac2666..000000000
--- a/roles/openshift_aws_iam_kms/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- fail:
-    msg: "{{ item.name }} needs to be defined."
-  when: item.cond | bool
-  with_items:
-  - name: "{{ r_openshift_aws_iam_kms_alias }}"
-    cond: "{{ r_openshift_aws_iam_kms_alias is undefined }}"
-  - name: "{{ r_openshift_aws_iam_kms_region }}"
-    cond: "{{ r_openshift_aws_iam_kms_region is undefined }}"
-
-- name: Create IAM KMS key with alias
-  oo_iam_kms:
-    state: present
-    alias: "{{ r_openshift_aws_iam_kms_alias }}"
-    region: "{{ r_openshift_aws_iam_kms_region }}"
-  register: created_kms
-
-- debug: var=created_kms.results
diff --git a/roles/openshift_aws_launch_config/README.md b/roles/openshift_aws_launch_config/README.md
deleted file mode 100644
index 52b7e83b6..000000000
--- a/roles/openshift_aws_launch_config/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-openshift_aws_launch_config
-=========
-
-Ansible role to create an AWS launch config for a scale group.
-
-This includes the AMI, volumes, user_data, etc.
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-- r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
-- r_openshift_aws_launch_config_clusterid: "{{ clusterid }}"
-- r_openshift_aws_launch_config_region: "{{ region }}"
-- r_openshift_aws_launch_config: "{{ node_group_config }}"
-```yaml
-    master:
-      instance_type: m4.xlarge
-      ami: ami-cdeec8b6  # if using an encrypted AMI this will be replaced
-      volumes:
-      - device_name: /dev/sdb
-        volume_size: 100
-        device_type: gp2
-        delete_on_termination: False
-      health_check:
-        period: 60
-        type: EC2
-      min_size: 3
-      max_size: 3
-      desired_size: 3
-      tags:
-        host-type: master
-        sub-host-type: default
-      wait_for_instances: True
-```
-- r_openshift_aws_launch_config_type: compute
-- r_openshift_aws_launch_config_custom_image: ami-xxxxx
-- r_openshift_aws_launch_config_bootstrap_token: <string of kubeconfig>
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-  - name: create compute nodes config
-    include_role:
-      name: openshift_aws_launch_config
-    vars:
-      r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
-      r_openshift_aws_launch_config_clusterid: "{{ clusterid }}"
-      r_openshift_aws_launch_config_region: "{{ region }}"
-      r_openshift_aws_launch_config: "{{ node_group_config }}"
-      r_openshift_aws_launch_config_type: compute
-      r_openshift_aws_launch_config_custom_image: ami-1234
-      r_openshift_aws_launch_config_bootstrap_token: abcd
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_launch_config/defaults/main.yml b/roles/openshift_aws_launch_config/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_launch_config/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_aws_launch_config/meta/main.yml b/roles/openshift_aws_launch_config/meta/main.yml
deleted file mode 100644
index e61670cc2..000000000
--- a/roles/openshift_aws_launch_config/meta/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-galaxy_info:
-  author: OpenShift
-  description: Openshift AWS VPC creation
-  company: Red Hat, Inc
-  license: ASL 2.0
-  min_ansible_version: 2.3
-  platforms:
-  - name: EL
-    versions:
-    - 7
-dependencies: []
diff --git a/roles/openshift_aws_launch_config/tasks/main.yml b/roles/openshift_aws_launch_config/tasks/main.yml
deleted file mode 100644
index 437cf1f71..000000000
--- a/roles/openshift_aws_launch_config/tasks/main.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- name: fail when params are not set
-  fail:
-    msg: Please specify the role parameters.
-  when:
-  - r_openshift_aws_launch_config_cluseterid is undefined
-  - r_openshift_aws_launch_config_type is undefined
-  - r_openshift_aws_launch_config_region is undefined
-  - r_openshift_aws_launch_config is undefined
-
-- name: fetch the security groups for launch config
-  ec2_group_facts:
-    filters:
-      group-name:
-      - "{{ r_openshift_aws_launch_config_clusterid }}"  # default sg
-      - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}"  # node type sg
-      - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}_k8s"  # node type sg k8s
-    region: "{{ r_openshift_aws_launch_config_region }}"
-  register: ec2sgs
-
-# Create the scale group config
-- name: Create the node scale group config
-  ec2_lc:
-    name: "{{ r_openshift_aws_launch_config_name }}"
-    region: "{{ r_openshift_aws_launch_config_region }}"
-    image_id: "{{ r_openshift_aws_launch_config_custom_image if 'ami-' in r_openshift_aws_launch_config_custom_image else r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].ami }}"
-    instance_type: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].instance_type }}"
-    security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}"
-    user_data: |-
-      #cloud-config
-      {%  if r_openshift_aws_launch_config_type != 'master' %}
-      write_files:
-      - path: /root/csr_kubeconfig
-        owner: root:root
-        permissions: '0640'
-        content: {{ r_openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
-      - path: /root/openshift_settings
-        owner: root:root
-        permissions: '0640'
-        content:
-          openshift_type: "{{ r_openshift_aws_launch_config_type }}"
-      runcmd:
-      - [ systemctl, enable, atomic-openshift-node]
-      - [ systemctl, start, atomic-openshift-node]
-      {% endif %}
-    key_name: "{{ r_openshift_aws_launch_config.ssh_key_name }}"
-    ebs_optimized: False
-    volumes: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].volumes }}"
-    assign_public_ip: True
-  register: test
diff --git a/roles/openshift_aws_launch_config/templates/cloud-init.j2 b/roles/openshift_aws_launch_config/templates/cloud-init.j2
deleted file mode 100644
index 1a1e29550..000000000
--- a/roles/openshift_aws_launch_config/templates/cloud-init.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-{% if r_openshift_aws_launch_config_bootstrap_token is defined and r_openshift_aws_launch_config_bootstrap_token is not '' %}
-#cloud-config
-write_files:
-- path: /root/csr_kubeconfig
-  owner: root:root
-  permissions: '0640'
-  content: |-
-  {{ r_openshift_aws_launch_config_bootstrap_token }}
-{% endif %}
diff --git a/roles/openshift_aws_node_group/README.md b/roles/openshift_aws_node_group/README.md
deleted file mode 100644
index c32c57bc5..000000000
--- a/roles/openshift_aws_node_group/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-openshift_aws_node_group
-=========
-
-Ansible role to create an aws node group.
-
-This includes the security group, launch config, and scale group.
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-```yaml
-- r_openshift_aws_node_group_name: myscalegroup
-- r_openshift_aws_node_group_clusterid: myclusterid
-- r_openshift_aws_node_group_region: us-east-1
-- r_openshift_aws_node_group_lc_name: launch_config
-- r_openshift_aws_node_group_type: master|infra|compute
-- r_openshift_aws_node_group_config: "{{ node_group_config }}"
-```yaml
-master:
-  instance_type: m4.xlarge
-  ami: ami-cdeec8b6  # if using an encrypted AMI this will be replaced
-  volumes:
-  - device_name: /dev/sdb
-    volume_size: 100
-    device_type: gp2
-    delete_on_termination: False
-  health_check:
-    period: 60
-    type: EC2
-  min_size: 3
-  max_size: 3
-  desired_size: 3
-  tags:
-    host-type: master
-    sub-host-type: default
-  wait_for_instances: True
-```
-- r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}"
-
-```yaml
-us-east-1a  # name of subnet
-```
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-  - name: "create {{ openshift_build_node_type }} node groups"
-    include_role:
-      name: openshift_aws_node_group
-    vars:
-      r_openshift_aws_node_group_name: "{{ clusterid }} openshift compute"
-      r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}"
-      r_openshift_aws_node_group_clusterid: "{{ clusterid }}"
-      r_openshift_aws_node_group_region: "{{ region }}"
-      r_openshift_aws_node_group_config: "{{ node_group_config }}"
-      r_openshift_aws_node_group_type: compute
-      r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}"
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_node_group/defaults/main.yml b/roles/openshift_aws_node_group/defaults/main.yml
deleted file mode 100644
index 44c5116a1..000000000
--- a/roles/openshift_aws_node_group/defaults/main.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-r_openshift_aws_node_group_type: master
-
-r_openshift_aws_node_group_config:
-  tags:
-    clusterid: "{{ r_openshift_aws_node_group_clusterid }}"
-  master:
-    instance_type: m4.xlarge
-    ami: "{{ r_openshift_aws_node_group_ami }}"
-    volumes:
-    - device_name: /dev/sdb
-      volume_size: 100
-      device_type: gp2
-      delete_on_termination: False
-    health_check:
-      period: 60
-      type: EC2
-    min_size: 3
-    max_size: 3
-    desired_size: 3
-    tags:
-      host-type: master
-      sub-host-type: default
-    wait_for_instances: True
-  compute:
-    instance_type: m4.xlarge
-    ami: "{{ r_openshift_aws_node_group_ami }}"
-    volumes:
-    - device_name: /dev/sdb
-      volume_size: 100
-      device_type: gp2
-      delete_on_termination: True
-    health_check:
-      period: 60
-      type: EC2
-    min_size: 3
-    max_size: 100
-    desired_size: 3
-    tags:
-      host-type: node
-      sub-host-type: compute
-  infra:
-    instance_type: m4.xlarge
-    ami: "{{ r_openshift_aws_node_group_ami }}"
-    volumes:
-    - device_name: /dev/sdb
-      volume_size: 100
-      device_type: gp2
-      delete_on_termination: True
-    health_check:
-      period: 60
-      type: EC2
-    min_size: 2
-    max_size: 20
-    desired_size: 2
-    tags:
-      host-type: node
-      sub-host-type: infra
diff --git a/roles/openshift_aws_node_group/tasks/main.yml b/roles/openshift_aws_node_group/tasks/main.yml
deleted file mode 100644
index 6f5364b03..000000000
--- a/roles/openshift_aws_node_group/tasks/main.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- name: validate role inputs
-  fail:
-    msg: Please pass in the required role variables
-  when:
-  - r_openshift_aws_node_group_clusterid is not defined
-  - r_openshift_aws_node_group_region is not defined
-  - r_openshift_aws_node_group_subnet_name is not defined
-
-- name: fetch the subnet to use in scale group
-  ec2_vpc_subnet_facts:
-    region: "{{ r_openshift_aws_node_group_region }}"
-    filters:
-      "tag:Name": "{{ r_openshift_aws_node_group_subnet_name }}"
-  register: subnetout
-
-- name: Create the scale group
-  ec2_asg:
-    name: "{{ r_openshift_aws_node_group_name }}"
-    launch_config_name: "{{ r_openshift_aws_node_group_lc_name }}"
-    health_check_period: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.period }}"
-    health_check_type: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.type }}"
-    min_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].min_size }}"
-    max_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].max_size }}"
-    desired_capacity: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].desired_size }}"
-    region: "{{ r_openshift_aws_node_group_region }}"
-    termination_policies: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].termination_policy if 'termination_policy' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}"
-    load_balancers: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].elbs if 'elbs' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}"
-    wait_for_instances: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].wait_for_instances | default(False)}}"
-    vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
-    tags:
-    - "{{ r_openshift_aws_node_group_config.tags | combine(r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws_s3/README.md b/roles/openshift_aws_s3/README.md
deleted file mode 100644
index afafe61cf..000000000
--- a/roles/openshift_aws_s3/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-openshift_aws_s3
-=========
-
-Ansible role to create an s3 bucket
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_s3_clusterid: myclusterid
-- r_openshift_aws_s3_region: us-east-1
-- r_openshift_aws_s3_mode:  create|delete
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- name: create an s3 bucket
-  include_role:
-    name: openshift_aws_s3
-  vars:
-    r_openshift_aws_s3_clusterid: mycluster
-    r_openshift_aws_s3_region: us-east-1
-    r_openshift_aws_s3_mode: create
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_s3/tasks/main.yml b/roles/openshift_aws_s3/tasks/main.yml
deleted file mode 100644
index 46bd781bd..000000000
--- a/roles/openshift_aws_s3/tasks/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Create an s3 bucket
-  s3:
-    bucket: "{{ r_openshift_aws_s3_clusterid }}"
-    mode: "{{ r_openshift_aws_s3_mode }}"
-    region: "{{ r_openshift_aws_s3_region }}"
diff --git a/roles/openshift_aws_sg/README.md b/roles/openshift_aws_sg/README.md
deleted file mode 100644
index eeb76bbb6..000000000
--- a/roles/openshift_aws_sg/README.md
+++
/dev/null @@ -1,59 +0,0 @@ -openshift_aws_sg -========= - -Ansible role to create an aws security groups - -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- - -- r_openshift_aws_sg_clusterid: myclusterid -- r_openshift_aws_sg_region: us-east-1 -- r_openshift_aws_sg_type: master|infra|compute -```yaml -# defaults/main.yml -  default: -    name: "{{ r_openshift_aws_sg_clusterid }}" -    desc: "{{ r_openshift_aws_sg_clusterid }} default" -    rules: -    - proto: tcp -      from_port: 22 -      to_port: 22 -      cidr_ip: 0.0.0.0/0 -    - proto: all -      from_port: all -      to_port: all -      group_name: "{{ r_openshift_aws_sg_clusterid }}" -``` - - -Dependencies ------------- - - -Example Playbook ----------------- -```yaml -- name: create security groups for master -  include_role: -    name: openshift_aws_sg -  vars: -    r_openshift_aws_sg_clusterid: mycluster -    r_openshift_aws_sg_region: us-east-1 -    r_openshift_aws_sg_type: master -``` - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_sg/defaults/main.yml b/roles/openshift_aws_sg/defaults/main.yml deleted file mode 100644 index 9c480d337..000000000 --- a/roles/openshift_aws_sg/defaults/main.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -r_openshift_aws_sg_sg: -  default: -    name: "{{ r_openshift_aws_sg_clusterid }}" -    desc: "{{ r_openshift_aws_sg_clusterid }} default" -    rules: -    - proto: tcp -      from_port: 22 -      to_port: 22 -      cidr_ip: 0.0.0.0/0 -    - proto: all -      from_port: all -      to_port: all -      group_name: "{{ r_openshift_aws_sg_clusterid }}" -  master: -    name: "{{ r_openshift_aws_sg_clusterid }}_master" -    desc: "{{ r_openshift_aws_sg_clusterid }} master instances" -    rules: -    - proto: tcp -      from_port: 80 -      to_port: 80 -      cidr_ip: 0.0.0.0/0 -    - proto: tcp -      from_port: 443 -      to_port: 443 -      cidr_ip: 0.0.0.0/0 -  compute: -    name: "{{ r_openshift_aws_sg_clusterid }}_compute" -    desc: "{{ r_openshift_aws_sg_clusterid }} compute node instances" -  infra: -    name: "{{ r_openshift_aws_sg_clusterid }}_infra" -    desc: "{{ r_openshift_aws_sg_clusterid }} infra node instances" -    rules: -    - proto: tcp -      from_port: 80 -      to_port: 80 -      cidr_ip: 0.0.0.0/0 -    - proto: tcp -      from_port: 443 -      to_port: 443 -      cidr_ip: 0.0.0.0/0 -    - proto: tcp -      from_port: 30000 -      to_port: 32000 -      cidr_ip: 0.0.0.0/0 -  etcd: -    name: "{{ r_openshift_aws_sg_clusterid }}_etcd" -    desc: "{{ r_openshift_aws_sg_clusterid }} etcd instances" diff --git a/roles/openshift_aws_sg/tasks/main.yml b/roles/openshift_aws_sg/tasks/main.yml deleted file mode 100644 index 2294fdcc9..000000000 --- a/roles/openshift_aws_sg/tasks/main.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -- name: Validate role inputs -  fail: -    msg: Please ensure to pass the correct variables -  when: -  - r_openshift_aws_sg_region is undefined -  - r_openshift_aws_sg_region is undefined - - -- name: Fetch the VPC for vpc.id -  ec2_vpc_net_facts: -    region: "{{ r_openshift_aws_sg_region }}" -    filters: -      "tag:Name": "{{ r_openshift_aws_sg_clusterid }}" -  register: vpcout - -- name: Create default security group for cluster -  ec2_group: -    name: "{{ r_openshift_aws_sg_sg.default.name }}" -    description: "{{ r_openshift_aws_sg_sg.default.desc }}" -    region: "{{ r_openshift_aws_sg_region }}" -    vpc_id: "{{ vpcout.vpcs[0].id }}" -    rules: "{{ 
r_openshift_aws_sg_sg.default.rules | default(omit, True)}}" -  register: sg_default_created - -- name: create the node group sgs -  ec2_group: -    name: "{{ item.name}}" -    description: "{{ item.desc }}" -    rules: "{{ item.rules if 'rules' in item else [] }}" -    region: "{{ r_openshift_aws_sg_region }}" -    vpc_id: "{{ vpcout.vpcs[0].id }}" -  register: sg_create -  with_items: -  - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type]}}" - -- name: create the k8s sgs for the node group -  ec2_group: -    name: "{{ item.name }}_k8s" -    description: "{{ item.desc }} for k8s" -    region: "{{ r_openshift_aws_sg_region }}" -    vpc_id: "{{ vpcout.vpcs[0].id }}" -  register: k8s_sg_create -  with_items: -  - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type] }}" - -- name: tag sg groups with proper tags -  ec2_tag: -    tags: -      KubernetesCluster: "{{ r_openshift_aws_sg_clusterid }}" -    resource: "{{ item.group_id }}" -    region: "{{ r_openshift_aws_sg_region }}" -  with_items: "{{ k8s_sg_create.results }}" diff --git a/roles/openshift_aws_ssh_keys/README.md b/roles/openshift_aws_ssh_keys/README.md deleted file mode 100644 index 4f8667918..000000000 --- a/roles/openshift_aws_ssh_keys/README.md +++ /dev/null @@ -1,49 +0,0 @@ -openshift_aws_ssh_keys -========= - -Ansible role for sshind SSH keys - -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- - -- r_openshift_aws_ssh_keys_users: list of dicts of users -- r_openshift_aws_ssh_keys_region: ec2_region to install the keys - -Dependencies ------------- - - -Example Playbook ----------------- -```yaml -users: -- username: user1 -  pub_key: <user1 ssh public key> -- username: user2 -  pub_key: <user2 ssh public key> - -region: us-east-1 - -- include_role: -    name: openshift_aws_ssh_keys -  vars: -    r_openshift_aws_ssh_keys_users: "{{ users }}" -    r_openshift_aws_ssh_keys_region: "{{ region }}" -``` - - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_vpc/README.md b/roles/openshift_aws_vpc/README.md deleted file mode 100644 index d88cf0581..000000000 --- a/roles/openshift_aws_vpc/README.md +++ /dev/null @@ -1,62 +0,0 @@ -openshift_aws_vpc -========= - -Ansible role to create a default AWS VPC - -Requirements ------------- - -Ansible Modules: - - -Role Variables --------------- - -- r_openshift_aws_vpc_clusterid: "{{ clusterid }}" -- r_openshift_aws_vpc_cidr: 172.31.48.0/20 -- r_openshift_aws_vpc_subnets: "{{ subnets }}" -```yaml -    subnets: -      us-east-1:  # These are us-east-1 region defaults. 
Ensure this matches your region -      - cidr: 172.31.48.0/20 -        az: "us-east-1c" -      - cidr: 172.31.32.0/20 -        az: "us-east-1e" -      - cidr: 172.31.16.0/20 -        az: "us-east-1a" -``` -- r_openshift_aws_vpc_region: "{{ region }}" -- r_openshift_aws_vpc_tags: dict of tags to apply to vpc -- r_openshift_aws_vpc_name: "{{ vpc_name | default(clusterid) }}" - -Dependencies ------------- - - -Example Playbook ----------------- - -```yaml -  - name: create default vpc -    include_role: -      name: openshift_aws_vpc -    vars: -      r_openshift_aws_vpc_clusterid: mycluster -      r_openshift_aws_vpc_cidr: 172.31.48.0/20 -      r_openshift_aws_vpc_subnets: "{{ subnets }}" -      r_openshift_aws_vpc_region: us-east-1 -      r_openshift_aws_vpc_tags: {} -      r_openshift_aws_vpc_name: mycluster - -``` - - -License -------- - -Apache 2.0 - -Author Information ------------------- - -Openshift diff --git a/roles/openshift_aws_vpc/defaults/main.yml b/roles/openshift_aws_vpc/defaults/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_aws_vpc/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_cfme/meta/main.yml b/roles/openshift_cfme/meta/main.yml index 9200f2c3c..162d817f0 100644 --- a/roles/openshift_cfme/meta/main.yml +++ b/roles/openshift_cfme/meta/main.yml @@ -16,5 +16,4 @@ galaxy_info:  dependencies:  - role: lib_openshift  - role: lib_utils -- role: openshift_common  - role: openshift_master_facts diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml index 04a1ce873..29ed82783 100644 --- a/roles/openshift_cli/meta/main.yml +++ b/roles/openshift_cli/meta/main.yml @@ -14,5 +14,4 @@ galaxy_info:  dependencies:  - role: openshift_docker    when: not skip_docker_role | default(False) | bool -- role: openshift_common  - role: openshift_facts diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md deleted file mode 100644 index 2a271854b..000000000 --- a/roles/openshift_common/README.md +++ /dev/null @@ -1,45 +0,0 @@ -OpenShift/Atomic Enterprise Common -=================================== - -OpenShift/Atomic Enterprise common installation and configuration tasks. - -Requirements ------------- - -A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms, -rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos. 
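The requirements above assume pre-configured access to three RHEL repos. As a hedged sketch only, since the README does not prescribe a mechanism, one way to prepare such a host with Ansible, assuming it is already registered with subscription-manager:

```yaml
# Hypothetical prep task; repo ids follow the README above (using the
# conventional "extras" spelling for the second repository).
- name: enable the RHEL repositories required by the installer
  command: subscription-manager repos --enable={{ item }}
  with_items:
  - rhel-7-server-rpms
  - rhel-7-server-extras-rpms
  - rhel-7-server-ose-3.0-rpms
```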
- -Role Variables --------------- - -| Name                      | Default value     |                                             | -|---------------------------|-------------------|---------------------------------------------| -| openshift_cluster_id      | default           | Cluster name if multiple OpenShift clusters | -| openshift_debug_level     | 2                 | Global openshift debug log verbosity        | -| openshift_hostname        | UNDEF             | Internal hostname to use for this host (this value will set the hostname on the system) | -| openshift_ip              | UNDEF             | Internal IP address to use for this host    | -| openshift_public_hostname | UNDEF             | Public hostname to use for this host        | -| openshift_public_ip       | UNDEF             | Public IP address to use for this host      | -| openshift_portal_net      | UNDEF             | Service IP CIDR | - -Dependencies ------------- - -os_firewall -openshift_facts -openshift_repos - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License, Version 2.0 - -Author Information ------------------- - -Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml deleted file mode 100644 index 267c03605..000000000 --- a/roles/openshift_common/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -openshift_cluster_id: 'default' -openshift_debug_level: 2 diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml deleted file mode 100644 index 7cc95d8fa..000000000 --- a/roles/openshift_common/meta/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -galaxy_info: -  author: Jason DeTiberus -  description: OpenShift Common -  company: Red Hat, Inc. 
-  license: Apache License, Version 2.0 -  min_ansible_version: 1.7 -  platforms: -  - name: EL -    versions: -    - 7 -  categories: -  - cloud -dependencies: -- role: openshift_facts diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml deleted file mode 100644 index a0bd6c860..000000000 --- a/roles/openshift_common/tasks/main.yml +++ /dev/null @@ -1,78 +0,0 @@ ---- -- fail: -    msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel -  when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool - -- fail: -    msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage -  when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool - -- fail: -    msg: Nuage sdn can not be used with flannel -  when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool - -- fail: -    msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv -  when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool - -- fail: -    msg: Contiv can not be used with flannel -  when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool - -- fail: -    msg: Contiv can not be used with nuage -  when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool - -- fail: -    msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico -  when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool - -- fail: -    msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both. -  when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool - -- fail: -    msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both -  when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool - -- fail: -    msg: Calico can not be used with Contiv in Openshift. 
Set either openshift_use_calico or openshift_use_contiv, but not both -  when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool - -- fail: -    msg: openshift_hostname must be 63 characters or less -  when: openshift_hostname is defined and openshift_hostname | length > 63 - -- name: Set common Cluster facts -  openshift_facts: -    role: common -    local_facts: -      install_examples: "{{ openshift_install_examples | default(True) }}" -      use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}" -      sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}" -      use_flannel: "{{ openshift_use_flannel | default(None) }}" -      use_calico: "{{openshift_use_calico | default(None) }}" -      use_nuage: "{{ openshift_use_nuage | default(None) }}" -      use_contiv: "{{ openshift_use_contiv | default(None) }}" -      use_manageiq: "{{ openshift_use_manageiq | default(None) }}" -      data_dir: "{{ openshift_data_dir | default(None) }}" -      use_dnsmasq: "{{ openshift_use_dnsmasq | default(None) }}" - -- name: Install the base package for versioning -  package: -    name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" -    state: present -  when: not openshift.common.is_containerized | bool - -- name: Set version facts -  openshift_facts: - -# For enterprise versions < 3.1 and origin versions < 1.1 we want to set the -# hostname by default. -- set_fact: -    set_hostname_default: "{{ not openshift.common.version_gte_3_1_or_1_1 }}" - -- name: Set hostname -  command: > -    hostnamectl set-hostname {{ openshift.common.hostname }} -  when: openshift_set_hostname | default(set_hostname_default) | bool diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml index 5cfda1c89..f3fe2dcbe 100644 --- a/roles/openshift_examples/meta/main.yml +++ b/roles/openshift_examples/meta/main.yml @@ -11,5 +11,4 @@ galaxy_info:      - 7    categories:    - cloud -dependencies: -- role: openshift_common +dependencies: [] diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index cf78b4a75..ebfa6bb8f 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -449,78 +449,6 @@ def normalize_provider_facts(provider, metadata):      return facts -def set_flannel_facts_if_unset(facts): -    """ Set flannel facts if not already present in facts dict -            dict: the facts dict updated with the flannel facts if -            missing -        Args: -            facts (dict): existing facts -        Returns: -            dict: the facts dict updated with the flannel -            facts if they were not already present - -    """ -    if 'common' in facts: -        if 'use_flannel' not in facts['common']: -            use_flannel = False -            facts['common']['use_flannel'] = use_flannel -    return facts - - -def set_calico_facts_if_unset(facts): -    """ Set calico facts if not already present in facts dict -            dict: the facts dict updated with the calico facts if -            missing -        Args: -            facts (dict): existing facts -        Returns: -            dict: the facts dict updated with the calico -            facts if they were not already present - -    """ -    if 'common' in facts: -        if 'use_calico' not in facts['common']: -            use_calico = False -   
         facts['common']['use_calico'] = use_calico -    return facts - - -def set_nuage_facts_if_unset(facts): -    """ Set nuage facts if not already present in facts dict -            dict: the facts dict updated with the nuage facts if -            missing -        Args: -            facts (dict): existing facts -        Returns: -            dict: the facts dict updated with the nuage -            facts if they were not already present - -    """ -    if 'common' in facts: -        if 'use_nuage' not in facts['common']: -            use_nuage = False -            facts['common']['use_nuage'] = use_nuage -    return facts - - -def set_contiv_facts_if_unset(facts): -    """ Set contiv facts if not already present in facts dict -            dict: the facts dict updated with the contiv facts if -            missing -        Args: -            facts (dict): existing facts -        Returns: -            dict: the facts dict updated with the contiv -            facts if they were not already present - -    """ -    if 'common' in facts: -        if 'use_contiv' not in facts['common']: -            use_contiv = False -            facts['common']['use_contiv'] = use_contiv -    return facts - -  def set_node_schedulability(facts):      """ Set schedulable facts if not already present in facts dict          Args: @@ -590,13 +518,8 @@ def set_dnsmasq_facts_if_unset(facts):      """      if 'common' in facts: -        if 'use_dnsmasq' not in facts['common']: -            facts['common']['use_dnsmasq'] = bool(safe_get_bool(facts['common']['version_gte_3_2_or_1_2']))          if 'master' in facts and 'dns_port' not in facts['master']: -            if safe_get_bool(facts['common']['use_dnsmasq']): -                facts['master']['dns_port'] = 8053 -            else: -                facts['master']['dns_port'] = 53 +            facts['master']['dns_port'] = 8053      return facts @@ -968,27 +891,6 @@ def set_version_facts_if_unset(facts):      return facts -def set_manageiq_facts_if_unset(facts): -    """ Set manageiq facts. This currently includes common.use_manageiq. - -        Args: -            facts (dict): existing facts -        Returns: -            dict: the facts dict updated with version facts. 
-        Raises: -            OpenShiftFactsInternalError: -    """ -    if 'common' not in facts: -        if 'version_gte_3_1_or_1_1' not in facts['common']: -            raise OpenShiftFactsInternalError( -                "Invalid invocation: The required facts are not set" -            ) -    if 'use_manageiq' not in facts['common']: -        facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1'] - -    return facts - -  def set_sdn_facts_if_unset(facts, system_facts):      """ Set sdn facts if not already present in facts dict @@ -999,15 +901,6 @@ def set_sdn_facts_if_unset(facts, system_facts):              dict: the facts dict updated with the generated sdn facts if they                    were not already present      """ -    # pylint: disable=too-many-branches -    if 'common' in facts: -        use_sdn = facts['common']['use_openshift_sdn'] -        if not (use_sdn == '' or isinstance(use_sdn, bool)): -            use_sdn = safe_get_bool(use_sdn) -            facts['common']['use_openshift_sdn'] = use_sdn -        if 'sdn_network_plugin_name' not in facts['common']: -            plugin = 'redhat/openshift-ovs-subnet' if use_sdn else '' -            facts['common']['sdn_network_plugin_name'] = plugin      if 'master' in facts:          # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length @@ -1996,10 +1889,6 @@ class OpenShiftFacts(object):          facts['current_config'] = get_current_config(facts)          facts = set_url_facts_if_unset(facts)          facts = set_project_cfg_facts_if_unset(facts) -        facts = set_flannel_facts_if_unset(facts) -        facts = set_calico_facts_if_unset(facts) -        facts = set_nuage_facts_if_unset(facts) -        facts = set_contiv_facts_if_unset(facts)          facts = set_node_schedulability(facts)          facts = set_selectors(facts)          facts = set_identity_providers_if_unset(facts) @@ -2011,7 +1900,6 @@ class OpenShiftFacts(object):          facts = build_api_server_args(facts)          facts = set_version_facts_if_unset(facts)          facts = set_dnsmasq_facts_if_unset(facts) -        facts = set_manageiq_facts_if_unset(facts)          facts = set_aggregate_facts(facts)          facts = set_etcd_facts_if_unset(facts)          facts = set_proxy_facts(facts) @@ -2039,7 +1927,7 @@ class OpenShiftFacts(object):                             self.system_facts['ansible_fqdn']]          hostname = choose_hostname(hostname_values, ip_addr) -        defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr, +        defaults['common'] = dict(ip=ip_addr,                                    public_ip=ip_addr,                                    deployment_type=deployment_type,                                    deployment_subtype=deployment_subtype, @@ -2048,10 +1936,8 @@ class OpenShiftFacts(object):                                    portal_net='172.30.0.0/16',                                    client_binary='oc', admin_binary='oadm',                                    dns_domain='cluster.local', -                                  install_examples=True,                                    debug_level=2, -                                  config_base='/etc/origin', -                                  data_dir='/var/lib/origin') +                                  config_base='/etc/origin')          if 'master' in roles:              defaults['master'] = dict(api_use_ssl=True, api_port='8443', diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py 
b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 857a80c74..866c74d7c 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -32,6 +32,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):      # we use python-docker-py to check local docker for images, and skopeo      # to look for images available remotely without waiting to pull them.      dependencies = ["python-docker-py", "skopeo"] +    skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false"      def is_active(self):          """Skip hosts with unsupported deployment types.""" @@ -67,8 +68,10 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):                  "failed": True,                  "msg": (                      "One or more required Docker images are not available:\n    {}\n" -                    "Configured registries: {}" -                ).format(",\n    ".join(sorted(unavailable_images)), ", ".join(registries)), +                    "Configured registries: {}\n" +                    "Checked by: {}" +                ).format(",\n    ".join(sorted(unavailable_images)), ", ".join(registries), +                         self.skopeo_img_check_command),              }          return {} @@ -169,8 +172,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):          for registry in registries:              args = { -                "_raw_params": "timeout 10 skopeo inspect --tls-verify=false " -                               "docker://{}/{}".format(registry, image) +                "_raw_params": self.skopeo_img_check_command + " docker://{}/{}".format(registry, image)              }              result = self.execute_module("command", args)              if result.get("rc", 0) == 0 and not result.get("failed"): diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md index 3e5d7f860..29ae58556 100644 --- a/roles/openshift_hosted/README.md +++ b/roles/openshift_hosted/README.md @@ -39,7 +39,6 @@ variables also control configuration behavior:  Dependencies  ------------ -* openshift_common  * openshift_hosted_facts  Example Playbook diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index c26df3afa..08c1d849e 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -47,3 +47,9 @@ r_openshift_hosted_registry_os_firewall_allow:  - service: Docker Registry Port    port: 5000/tcp    cond: "{{ r_openshift_hosted_use_calico }}" + +# NOTE +# r_openshift_hosted_use_calico_default may be defined external to this role. +# openshift_use_calico, if defined, may affect other roles or play behavior. 
+r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}" +r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}" diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index e57ed733e..68ec7233e 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router/router.yml @@ -18,6 +18,15 @@      openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}"      openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}" +- name: Get the certificate contents for router +  copy: +    backup: True +    dest: "/etc/origin/master/{{ item | basename }}" +    src: "{{ item }}" +  with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') | +                  oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" +  when: ( not openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {} +  # This is for when we desire a cluster signed cert  # The certificate is generated and placed in master_config_dir/  - block: @@ -43,15 +52,6 @@    # End Block    when: ( openshift_hosted_router_create_certificate | bool ) and openshift_hosted_router_certificate == {} -- name: Get the certificate contents for router -  copy: -    backup: True -    dest: "/etc/origin/master/{{ item | basename }}" -    src: "{{ item }}" -  with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') | -                  oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" -  when: not openshift_hosted_router_create_certificate | bool -  - name: Create the router service account(s)    oc_serviceaccount:      name: "{{ item.serviceaccount }}" diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml index 044c8043c..ab07a77c1 100644 --- a/roles/openshift_hosted_logging/meta/main.yaml +++ b/roles/openshift_hosted_logging/meta/main.yaml @@ -1,4 +1,3 @@  ---  dependencies: -  - { role: openshift_common }    - { role: openshift_master_facts } diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml index 9c12865bf..4027f524b 100644 --- a/roles/openshift_hosted_templates/meta/main.yml +++ b/roles/openshift_hosted_templates/meta/main.yml @@ -11,5 +11,4 @@ galaxy_info:      - 7    categories:    - cloud -dependencies: -- role: openshift_common +dependencies: [] diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml index 41a2b12a2..239b16427 100644 --- a/roles/openshift_loadbalancer/defaults/main.yml +++ b/roles/openshift_loadbalancer/defaults/main.yml @@ -24,4 +24,10 @@ r_openshift_loadbalancer_os_firewall_allow:    port: "{{ openshift_master_api_port | default(8443) }}/tcp"  - service: nuage mon    port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp" -  cond: "{{ openshift_use_nuage | default(false) | bool }}" +  cond: "{{ r_openshift_lb_use_nuage | bool }}" + +# NOTE +# r_openshift_lb_use_nuage_default may be defined external to this role. +# openshift_use_nuage, if defined, may affect other roles or play behavior. 
+r_openshift_lb_use_nuage_default: "{{ openshift_use_nuage | default(False) }}" +r_openshift_lb_use_nuage: "{{ r_openshift_lb_use_nuage_default }}" diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml index 97525479e..95bf462d1 100644 --- a/roles/openshift_logging_curator/vars/main.yml +++ b/roles/openshift_logging_curator/vars/main.yml @@ -1,3 +1,3 @@  --- -__latest_curator_version: "3_5" -__allowed_curator_versions: ["3_5", "3_6"] +__latest_curator_version: "3_6" +__allowed_curator_versions: ["3_5", "3_6", "3_7"] diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 index 0c06a7677..65b08d970 100644 --- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 +++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 @@ -24,7 +24,8 @@ network:  cloud:    kubernetes: -    service: ${SERVICE_DNS} +    pod_label: ${POD_LABEL} +    pod_port: 9300      namespace: ${NAMESPACE}  discovery: diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index 5f2932541..3c8f390c4 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -90,6 +90,12 @@ spec:                name: "RECOVER_AFTER_TIME"                value: "{{openshift_logging_elasticsearch_recover_after_time}}"              - +              name: "READINESS_PROBE_TIMEOUT" +              value: "30" +            - +              name: "POD_LABEL" +              value: "component={{component}}"  +            -                name: "IS_MASTER"                value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}" @@ -106,6 +112,13 @@ spec:                readOnly: true              - name: elasticsearch-storage                mountPath: /elasticsearch/persistent +          readinessProbe: +            exec: +              command: +              - "/usr/share/java/elasticsearch/probe/readiness.sh" +            initialDelaySeconds: 10 +            timeoutSeconds: 30 +            periodSeconds: 5        volumes:          - name: elasticsearch            secret: diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml index 20fa63543..09e2ee4d0 100644 --- a/roles/openshift_logging_elasticsearch/vars/main.yml +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -1,6 +1,6 @@  --- -__latest_es_version: "3_5" -__allowed_es_versions: ["3_5", "3_6"] +__latest_es_version: "3_6" +__allowed_es_versions: ["3_5", "3_6", "3_7"]  __allowed_es_types: ["data-master", "data-client", "master", "client"]  __es_log_appenders: ['file', 'console']  __kibana_index_modes: ["unique", "shared_ops"] diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml index ec8e565c3..92a426952 100644 --- a/roles/openshift_logging_fluentd/vars/main.yml +++ b/roles/openshift_logging_fluentd/vars/main.yml @@ -1,5 +1,5 @@  --- -__latest_fluentd_version: "3_5" -__allowed_fluentd_versions: ["3_5", "3_6"] +__latest_fluentd_version: "3_6" +__allowed_fluentd_versions: ["3_5", "3_6", "3_7"]  __allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"]  __allowed_mux_client_modes: ["minimal", "maximal"] diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml index 
87b281c4b..241877a02 100644 --- a/roles/openshift_logging_kibana/vars/main.yml +++ b/roles/openshift_logging_kibana/vars/main.yml @@ -1,3 +1,3 @@  --- -__latest_kibana_version: "3_5" -__allowed_kibana_versions: ["3_5", "3_6"] +__latest_kibana_version: "3_6" +__allowed_kibana_versions: ["3_5", "3_6", "3_7"] diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml index 4234b74e2..e7b57f4b5 100644 --- a/roles/openshift_logging_mux/vars/main.yml +++ b/roles/openshift_logging_mux/vars/main.yml @@ -1,3 +1,3 @@  --- -__latest_mux_version: "3_5" -__allowed_mux_versions: ["3_5", "3_6"] +__latest_mux_version: "3_6" +__allowed_mux_versions: ["3_5", "3_6", "3_7"] diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index fbf69c270..86fa57b50 100644 --- a/roles/openshift_master/README.md +++ b/roles/openshift_master/README.md @@ -17,7 +17,6 @@ From this role:  | Name                                             | Default value         |                                                                               |  |---------------------------------------------------|-----------------------|-------------------------------------------------------------------------------| -| openshift_master_debug_level                      | openshift_debug_level | Verbosity of the debug logs for master                                        |  | openshift_node_ips                                | []                    | List of the openshift node ip addresses to pre-register when master starts up |  | oreg_url                                          | UNDEF                 | Default docker registry to use                                                |  | oreg_url_master                                   | UNDEF                 | Default docker registry to use, specifically on the master                    | @@ -29,18 +28,10 @@ From this role:  | openshift_master_public_console_url               | UNDEF                 |                                                                               |  | openshift_master_saconfig_limit_secret_references | false                 |                                                                               | -From openshift_common: - -| Name                          | Default Value  |                                        | -|-------------------------------|----------------|----------------------------------------| -| openshift_debug_level         | 2              | Global openshift debug log verbosity   | -| openshift_public_ip           | UNDEF          | Public IP address to use for this host | -| openshift_hostname            | UNDEF          | hostname to use for this instance      |  Dependencies  ------------ -openshift_common  Example Playbook  ---------------- diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index d70106276..71bb09a76 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -22,5 +22,24 @@ r_openshift_master_os_firewall_allow:  oreg_url: ''  oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" -oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker" +oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"  oreg_auth_credentials_replace: False + + +# NOTE +# r_openshift_master_*_default may be defined external to this role. +# openshift_use_*, if defined, may affect other roles or play behavior. 
+r_openshift_master_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}" +r_openshift_master_use_openshift_sdn: "{{ r_openshift_master_use_openshift_sdn_default }}" + +r_openshift_master_use_nuage_default: "{{ openshift_use_nuage | default(False) }}" +r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}" + +r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}" +r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}" + +r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}" +r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}" + +r_openshift_master_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}" +r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_plugin_name_default }}" diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index bd2383f61..b0237141b 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -29,4 +29,4 @@ dependencies:  - role: nickhammond.logrotate  - role: contiv    contiv_role: netmaster -  when: openshift.common.use_contiv | bool +  when: openshift_use_contiv | default(False) | bool diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index a06defdb9..121261e94 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -47,9 +47,9 @@    when:    - not openshift.common.is_containerized | bool -- name: Create openshift.common.data_dir +- name: Create r_openshift_master_data_dir    file: -    path: "{{ openshift.common.data_dir }}" +    path: "{{ r_openshift_master_data_dir }}"      state: directory      mode: 0755      owner: root @@ -169,7 +169,7 @@      register: l_already_set    - set_fact: -      openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" +      openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"  - name: Set fact of all etcd host IPs    openshift_facts: diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index c480d8223..7a918c57e 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -10,6 +10,13 @@      openshift_master_config_dir: '/etc/origin/master'    when: openshift_master_config_dir is not defined +# This play may be consumed outside the role, we need to ensure that +# r_openshift_master_data_dir is set. 
+- name: Set r_openshift_master_data_dir if unset +  set_fact: +    r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}" +  when: r_openshift_master_data_dir is not defined +  - name: Remove the legacy master service if it exists    include: clean_systemd_units.yml diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index e8f7c47b0..f06448d71 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -12,12 +12,12 @@ Requires={{ openshift.docker.service_name }}.service  EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api  Environment=GOTRACEBACK=crash  ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS  ExecStartPost=/usr/bin/sleep 10  ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api  LimitNOFILE=131072  LimitCORE=infinity -WorkingDirectory={{ openshift.common.data_dir }} +WorkingDirectory={{ r_openshift_master_data_dir }}  SyslogIdentifier={{ openshift.common.service_type }}-master-api  Restart=always  RestartSec=5s diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index 69db62f16..b7f36491b 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -11,12 +11,12 @@ PartOf={{ openshift.docker.service_name }}.service  EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers  Environment=GOTRACEBACK=crash  ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers 
--env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS  ExecStartPost=/usr/bin/sleep 10  ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers  LimitNOFILE=131072  LimitCORE=infinity -WorkingDirectory={{ openshift.common.data_dir }} +WorkingDirectory={{ r_openshift_master_data_dir }}  SyslogIdentifier={{ openshift.common.service_type }}-master-controllers  Restart=always  RestartSec=5s diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index c14579435..d045b402b 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -106,7 +106,7 @@ etcdConfig:      clientCA: ca.crt  {% endif %}      keyFile: etcd.server.key -  storageDirectory: {{ openshift.common.data_dir }}/openshift.local.etcd +  storageDirectory: {{ r_openshift_master_data_dir }}/openshift.local.etcd  {% endif %}  etcdStorageConfig:    kubernetesStoragePrefix: kubernetes.io @@ -179,8 +179,8 @@ masterPublicURL: {{ openshift.master.public_api_url }}  networkConfig:    clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}    hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }} -{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage or openshift.common.use_contiv or openshift.common.sdn_network_plugin_name == 'cni' %} -  networkPluginName: {{ openshift.common.sdn_network_plugin_name }} +{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_sdn_network_plugin_name == 'cni' %} +  networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }}  {% endif %}  # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet    serviceNetworkCIDR: {{ openshift.common.portal_net }} diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 index 0e78d2d23..02bfd6f62 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 @@ -13,7 +13,7 @@ Environment=GOTRACEBACK=crash  ExecStart=/usr/bin/openshift 
start master api --config=${CONFIG_FILE} $OPTIONS  LimitNOFILE=131072  LimitCORE=infinity -WorkingDirectory={{ openshift.common.data_dir }} +WorkingDirectory={{ r_openshift_master_data_dir }}  SyslogIdentifier=atomic-openshift-master-api  Restart=always  RestartSec=5s diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 index 94928f88c..e284413f7 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 @@ -17,7 +17,7 @@ Environment=GOTRACEBACK=crash  ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS  LimitNOFILE=131072  LimitCORE=infinity -WorkingDirectory={{ openshift.common.data_dir }} +WorkingDirectory={{ r_openshift_master_data_dir }}  SyslogIdentifier={{ openshift.common.service_type }}-master-controllers  Restart=always  RestartSec=5s diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index fb0b494da..32670b18e 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -17,22 +17,12 @@ From this role:  | Name                       | Default value         |                                                          |  |----------------------------|-----------------------|----------------------------------------------------------| -| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node                     |  | oreg_url                   | UNDEF (Optional)      | Default docker registry to use                           |  | oreg_url_node              | UNDEF (Optional)      | Default docker registry to use, specifically on the node | -From openshift_common: - -| Name                          |  Default Value      |                     | -|-------------------------------|---------------------|---------------------| -| openshift_debug_level         | 2                   | Global openshift debug log verbosity | -| openshift_public_ip           | UNDEF (Required)    | Public IP address to use for this host | -| openshift_hostname            | UNDEF (Required)    | hostname to use for this instance | -  Dependencies  ------------ -openshift_common  Example Playbook  ---------------- diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index a7dad5b1f..f1e64f3aa 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -69,10 +69,10 @@ r_openshift_node_os_firewall_allow:    port: 443/tcp  - service: OpenShift OVS sdn    port: 4789/udp -  cond: openshift.common.use_openshift_sdn | default(true) | bool +  cond: openshift_use_openshift_sdn | bool  - service: Calico BGP Port    port: 179/tcp -  cond: "{{ openshift.common.use_calico | bool }}" +  cond: "{{ openshift_node_use_calico }}"  - service: Kubernetes service NodePort TCP    port: "{{ openshift_node_port_range | default('') }}/tcp"    cond: "{{ openshift_node_port_range is defined }}" @@ -82,5 +82,27 @@ r_openshift_node_os_firewall_allow:  oreg_url: ''  oreg_host: "{{ oreg_url.split('/')[0] if '.' 
in oreg_url.split('/')[0] else '' }}" -oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker" +oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"  oreg_auth_credentials_replace: False + + +# NOTE +# r_openshift_node_*_default may be defined external to this role. +# openshift_use_*, if defined, may affect other roles or play behavior. +openshift_node_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}" +openshift_node_use_openshift_sdn: "{{ openshift_node_use_openshift_sdn_default }}" + +openshift_node_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}" +openshift_node_sdn_network_plugin_name: "{{ openshift_node_sdn_network_plugin_name_default }}" + +openshift_node_use_calico_default: "{{ openshift_use_calico | default(False) }}" +openshift_node_use_calico: "{{ openshift_node_use_calico_default }}" + +openshift_node_use_nuage_default: "{{ openshift_use_nuage | default(False) }}" +openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}" + +openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}" +openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}" + +openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}" +openshift_node_data_dir: "{{ openshift_node_data_dir_default }}" diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 14ba48aba..855b0a8d8 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -3,7 +3,7 @@    systemd:      name: openvswitch      state: restarted -  when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | default(true) | bool +  when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift_node_use_openshift_sdn | bool    register: l_openshift_node_stop_openvswitch_result    until: not l_openshift_node_stop_openvswitch_result | failed    retries: 3 diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index 3db980514..ce5ecb9d0 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -15,11 +15,9 @@ dependencies:  - role: openshift_node_facts  - role: lib_openshift  - role: lib_os_firewall -- role: openshift_common  - role: openshift_clock  - role: openshift_docker  - role: openshift_node_certificates    when: not openshift_node_bootstrap  - role: openshift_cloud_provider  - role: openshift_node_dnsmasq -  when: openshift.common.use_dnsmasq | bool diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml index cb1440283..b83b2c452 100644 --- a/roles/openshift_node/tasks/bootstrap.yml +++ b/roles/openshift_node/tasks/bootstrap.yml @@ -42,14 +42,25 @@      path: /etc/origin/.config_managed    register: rpmgenerated_config -- name: Remove RPM generated config files if present -  file: -    path: "/etc/origin/{{ item }}" -    state: absent -  when: -  - rpmgenerated_config.stat.exists -  - openshift_deployment_type in ['openshift-enterprise', 'atomic-enterprise'] -  with_items: -  - master -  - node -  - .config_managed +- when: rpmgenerated_config.stat.exists +  block: +  - name: Remove RPM generated config files if present +    file: +      path: "/etc/origin/{{ item }}" +      state: absent +    with_items: +    
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 14ba48aba..855b0a8d8 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -3,7 +3,7 @@
   systemd:
     name: openvswitch
     state: restarted
-  when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | default(true) | bool
+  when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift_node_use_openshift_sdn | bool
   register: l_openshift_node_stop_openvswitch_result
   until: not l_openshift_node_stop_openvswitch_result | failed
   retries: 3
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 3db980514..ce5ecb9d0 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -15,11 +15,9 @@ dependencies:
 - role: openshift_node_facts
 - role: lib_openshift
 - role: lib_os_firewall
-- role: openshift_common
 - role: openshift_clock
 - role: openshift_docker
 - role: openshift_node_certificates
   when: not openshift_node_bootstrap
 - role: openshift_cloud_provider
 - role: openshift_node_dnsmasq
-  when: openshift.common.use_dnsmasq | bool
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index cb1440283..b83b2c452 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -42,14 +42,25 @@
     path: /etc/origin/.config_managed
   register: rpmgenerated_config

-- name: Remove RPM generated config files if present
-  file:
-    path: "/etc/origin/{{ item }}"
-    state: absent
-  when:
-  - rpmgenerated_config.stat.exists
-  - openshift_deployment_type in ['openshift-enterprise', 'atomic-enterprise']
-  with_items:
-  - master
-  - node
-  - .config_managed
+- when: rpmgenerated_config.stat.exists
+  block:
+  - name: Remove RPM generated config files if present
+    file:
+      path: "/etc/origin/{{ item }}"
+      state: absent
+    with_items:
+    - master
+
+  # with_fileglob doesn't work correctly due to a few issues.
+  # Could change this to fileglob when it gets fixed.
+  - name: find all files in /etc/origin/node so we can remove them
+    find:
+      path: /etc/origin/node/
+    register: find_results
+
+  - name: Remove everything except the resolv.conf required for node
+    file:
+      path: "{{ item.path }}"
+      state: absent
+    when: "'resolv.conf' not in item.path and 'node-dnsmasq.conf' not in item.path"
+    with_items: "{{ find_results.files }}"
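Note the boolean in the final task's guard: no path contains both substrings, so an `or` between the two `not in` tests would be true for every file and delete the resolv.conf the task name promises to keep; `and` removes only everything else. How the condition evaluates, with illustrative paths:

    # when: "'resolv.conf' not in item.path and 'node-dnsmasq.conf' not in item.path"
    #   /etc/origin/node/system:node:master.kubeconfig  -> True  (removed)
    #   /etc/origin/node/resolv.conf                    -> False (kept)
    #   /etc/origin/node/node-dnsmasq.conf              -> False (kept)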
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 8210fd881..7af3f54b5 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -22,7 +22,7 @@
     daemon_reload: yes
   when:
     - openshift.common.is_containerized | bool
-    - openshift.common.use_openshift_sdn | default(true) | bool
+    - openshift_node_use_openshift_sdn | default(true) | bool
   register: ovs_start_result
   until: not ovs_start_result | failed
   retries: 3
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 9bf4ed879..02b8ee67c 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -13,7 +13,7 @@
       name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
       state: present
     when:
-    - openshift.common.use_openshift_sdn | default(true) | bool
+    - openshift_node_use_openshift_sdn | bool

   - name: Install conntrack-tools package
     package:
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 60a25dcc6..22ff6dfd2 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -49,6 +49,13 @@
     state: restarted
   when: openshift_use_crio | default(false)

+- name: restart NetworkManager to ensure resolv.conf is present
+  systemd:
+    name: NetworkManager
+    enabled: yes
+    state: restarted
+  when: openshift_node_bootstrap | bool
+
 # The atomic-openshift-node service will set this parameter on
 # startup, but if the network service is restarted this setting is
 # lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
@@ -121,4 +128,4 @@
 ##### END Storage #####

 - include: config/workaround-bz1331590-ovs-oom-fix.yml
-  when: openshift.common.use_openshift_sdn | default(true) | bool
+  when: openshift_node_use_openshift_sdn | default(true) | bool
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 4687400cd..6b4490f61 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -26,7 +26,7 @@
   - name: Install OpenvSwitch system containers
     include: openvswitch_system_container.yml
     when:
-    - openshift.common.use_openshift_sdn | default(true) | bool
+    - openshift_node_use_openshift_sdn | bool
     - openshift.common.is_openvswitch_system_container | bool

 - block:
@@ -39,7 +39,7 @@
   - include: config/install-ovs-docker-service-file.yml
   when:
   - openshift.common.is_containerized | bool
-  - openshift.common.use_openshift_sdn | default(true) | bool
+  - openshift_node_use_openshift_sdn | bool
   - not openshift.common.is_openvswitch_system_container | bool

 - include: config/configure-node-settings.yml
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index f59aa6fb4..7049f7189 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -37,15 +37,15 @@ masterClientConnectionOverrides:
   qps: 100
 {% endif %}
 masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
-{% if openshift.common.use_openshift_sdn | bool %}
-networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% if openshift_node_use_openshift_sdn | bool %}
+networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
 {% endif %}
 # networkConfig struct introduced in origin 1.0.6 and OSE 3.0.2 which
 # deprecates networkPluginName above. The two should match.
 networkConfig:
    mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool or openshift.common.use_contiv | bool or openshift.common.sdn_network_plugin_name == 'cni' %}
-   networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
+   networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
 {% endif %}
 {% if openshift.node.set_node_ip | bool %}
 nodeIP: {{ openshift.common.ip }}
@@ -66,7 +66,7 @@ servingInfo:
   - {{ cipher_suite }}
 {% endfor %}
 {% endif %}
-volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes
+volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes
 proxyArguments:
   proxy-mode:
     - {{ openshift.node.proxy_mode }}
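With the role defaults above (SDN enabled, redhat/openshift-ovs-subnet, /var/lib/origin), the template renders roughly the following node-config fragment; the mtu value is an illustrative assumption, filled in from openshift.node.sdn_mtu:

    networkPluginName: redhat/openshift-ovs-subnet
    networkConfig:
       mtu: 1450
       networkPluginName: redhat/openshift-ovs-subnet
    volumeDirectory: /var/lib/origin/openshift.local.volumes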
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 639b6f6c8..57094f28e 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -4,7 +4,7 @@ After={{ openshift.docker.service_name }}.service
 After=openvswitch.service
 PartOf={{ openshift.docker.service_name }}.service
 Requires={{ openshift.docker.service_name }}.service
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift_node_use_openshift_sdn %}
 Wants=openvswitch.service
 After=ovsdb-server.service
 After=ovs-vswitchd.service
@@ -21,7 +21,7 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
 ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
 ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
 ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
 ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
 ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_node_dnsmasq/meta/main.yml b/roles/openshift_node_dnsmasq/meta/main.yml
index 84035b88c..d80ed1b72 100644
--- a/roles/openshift_node_dnsmasq/meta/main.yml
+++ b/roles/openshift_node_dnsmasq/meta/main.yml
@@ -12,5 +12,4 @@ galaxy_info:
   categories:
   - cloud
 dependencies:
-- role: openshift_common
 - role: openshift_node_facts
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
index 4e6229bfb..5ad994df9 100644
--- a/roles/openshift_node_upgrade/README.md
+++ b/roles/openshift_node_upgrade/README.md
@@ -32,14 +32,12 @@ From openshift.common:
 | Name                               |  Default Value      |                     |
 |------------------------------------|---------------------|---------------------|
 | openshift.common.config_base       |---------------------|---------------------|
-| openshift.common.data_dir          |---------------------|---------------------|
 | openshift.common.hostname          |---------------------|---------------------|
 | openshift.common.http_proxy        |---------------------|---------------------|
 | openshift.common.is_atomic         |---------------------|---------------------|
 | openshift.common.is_containerized  |---------------------|---------------------|
 | openshift.common.portal_net        |---------------------|---------------------|
 | openshift.common.service_type      |---------------------|---------------------|

 From openshift.master:
@@ -58,7 +56,7 @@ From openshift.node:
 Dependencies
 ------------

-openshift_common
+

 TODO
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
index ed97d539c..3d8704308 100644
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ b/roles/openshift_node_upgrade/defaults/main.yml
@@ -1 +1,6 @@
 ---
+openshift_use_openshift_sdn: True
+os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
+
+openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
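Under the default openshift_node_data_dir of /var/lib/origin, and assuming a Docker 1.10+ host so the rslave suffix applies, the data-dir mount in the ExecStart line above renders as:

    -v /var/lib/origin:/var/lib/origin:rslave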
diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml
index d31b899cf..90d80855e 100644
--- a/roles/openshift_node_upgrade/handlers/main.yml
+++ b/roles/openshift_node_upgrade/handlers/main.yml
@@ -6,7 +6,7 @@
   when:
   - not skip_node_svc_handlers | default(False) | bool
   - not (ovs_service_status_changed | default(false) | bool)
-  - openshift.common.use_openshift_sdn | default(true) | bool
+  - openshift_use_openshift_sdn | bool
   register: l_openshift_node_upgrade_stop_openvswitch_result
   until: not l_openshift_node_upgrade_stop_openvswitch_result | failed
   retries: 3
diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml
index 2a36d8945..a810b01dc 100644
--- a/roles/openshift_node_upgrade/meta/main.yml
+++ b/roles/openshift_node_upgrade/meta/main.yml
@@ -11,4 +11,3 @@ galaxy_info:
     - 7
 dependencies:
 - role: lib_utils
-- role: openshift_common
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index bc092c26c..e34319186 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -44,7 +44,7 @@
   changed_when: "'Downloaded newer image' in pull_result.stdout"
   when:
   - openshift.common.is_containerized | bool
-  - openshift.common.use_openshift_sdn | default(true) | bool
+  - openshift_use_openshift_sdn | bool

 - include: docker/upgrade.yml
   vars:
@@ -142,7 +142,7 @@
   # End Disable Swap Block

 - name: Reset selinux context
-  command: restorecon -RF {{ openshift.common.data_dir }}/openshift.local.volumes
+  command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
   when:
   - ansible_selinux is defined
   - ansible_selinux.status == 'enabled'
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
index 4e9550150..afff2f8ba 100644
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml
@@ -4,7 +4,7 @@
 # - openshift_image_tag
 # - openshift.common.is_containerized
 # - openshift.node.ovs_image
-# - openshift.common.use_openshift_sdn
+# - openshift_use_openshift_sdn
 # - openshift.common.service_type
 # - openshift.node.debug_level
 # - openshift.common.config_base
@@ -28,10 +28,10 @@
   when: openshift.common.is_containerized | bool

 - include: config/workaround-bz1331590-ovs-oom-fix.yml
-  when: openshift.common.use_openshift_sdn | default(true) | bool
+  when: openshift_use_openshift_sdn | bool

 - include: config/install-ovs-docker-service-file.yml
-  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | default(true) | bool
+  when: openshift.common.is_containerized | bool and openshift_use_openshift_sdn | bool

 - include: config/configure-node-settings.yml
 - include: config/configure-proxy-settings.yml
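With the new default data dir of /var/lib/origin, the SELinux reset above executes as:

    restorecon -RF /var/lib/origin/openshift.local.volumes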
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 639b6f6c8..451412ab0 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -4,7 +4,7 @@ After={{ openshift.docker.service_name }}.service
 After=openvswitch.service
 PartOf={{ openshift.docker.service_name }}.service
 Requires={{ openshift.docker.service_name }}.service
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift_use_openshift_sdn %}
 Wants=openvswitch.service
 After=ovsdb-server.service
 After=ovs-vswitchd.service
@@ -21,7 +21,7 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
 ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
 ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
 ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
 ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
 ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
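The dbus-send ExecStartPre tells the node-local dnsmasq to forward reverse lookups and the cluster DNS domain to the resolver on 127.0.0.1. Assuming the common dns_domain of cluster.local (an illustrative value, not fixed by this commit), it renders as:

    /usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/cluster.local/127.0.0.1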
diff --git a/roles/openshift_persistent_volumes/README.md b/roles/openshift_persistent_volumes/README.md
index 1489cb0bd..0407d6ef1 100644
--- a/roles/openshift_persistent_volumes/README.md
+++ b/roles/openshift_persistent_volumes/README.md
@@ -17,13 +17,6 @@ From this role:
 | persistent_volume_claims | []            | List of persistent volume claim dictionaries, keys: name, capacity, access_modes    |
-
-From openshift_common:
-
-| Name                          | Default Value  |                                        |
-|-------------------------------|----------------|----------------------------------------|
-| openshift_debug_level         | 2              | Global openshift debug log verbosity   |
-

 Dependencies
 ------------
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 25e5a38dd..8d3d010e4 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -10,5 +10,4 @@ galaxy_info:
     versions:
     - 7
 dependencies:
-- role: openshift_common
 - role: openshift_hosted_facts
diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml
index 01ee2544d..7c848cb12 100644
--- a/roles/openshift_service_catalog/defaults/main.yml
+++ b/roles/openshift_service_catalog/defaults/main.yml
@@ -1,3 +1,7 @@
 ---
 openshift_service_catalog_remove: false
 openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"}
+
+openshift_use_openshift_sdn: True
+# os_sdn_network_plugin_name: "{% if openshift_use_openshift_sdn %}redhat/openshift-ovs-subnet{% else %}{% endif %}"
+os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 64f94347b..746c73eaf 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -28,7 +28,7 @@
 - name: Make kube-service-catalog project network global
   command: >
     oc adm pod-network make-projects-global kube-service-catalog
-  when: os_sdn_network_plugin_name | default('') == 'redhat/openshift-ovs-multitenant'
+  when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant'

 - include: generate_certs.yml
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index c0ea00f34..204abe27e 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -5,6 +5,12 @@
     is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
     is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"

+- name: Install the base package for versioning
+  package:
+    name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+    state: present
+  when: not is_containerized | bool
+
 # Block attempts to install origin without specifying some kind of version information.
 # This is because the latest tags for origin are usually alpha builds, which should not
 # be used by default. Users must indicate what they want.
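The new versioning task composes the package name from openshift.common.service_type plus an optional version suffix. A sketch of the rendered name, assuming oo_image_tag_to_rpm_version strips a leading "v" and, with include_dash=True, prepends a dash (so "v3.6.0" becomes "-3.6.0"):

    # service_type "origin" (hypothetical values):
    #   openshift_pkg_version undefined   -> name: "origin"
    #   openshift_pkg_version: "v3.6.0"   -> name: "origin-3.6.0"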
