diff options
57 files changed, 613 insertions, 330 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 103569fc1..63d27ff7b 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.7.0-0.190.0 ./ +3.7.0-0.192.0 ./ diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index e49dd5fa2..5de43270e 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -123,6 +123,15 @@ openshift_release=v3.7 # use this option if you are sure you know what you are doing! #openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest" #openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest" +# NOTE: The following crio docker-gc items are tech preview and likely shouldn't be used +# unless you know what you are doing!! +# The following two variables are used when opneshift_use_crio is True +# and cleans up after builds that pass through docker. +# Enable docker garbage collection when using cri-o +#openshift_crio_enable_docker_gc=false +# Node Selectors to run the garbage collection +#openshift_crio_docker_gc_node_selector: {'runtime': 'cri-o'} + # Items added, as is, to end of /etc/sysconfig/docker OPTIONS # Default value: "--log-driver=journald" #openshift_docker_options="-l warn --ipv6=false" diff --git a/meta/main.yml b/meta/main.yml new file mode 100644 index 000000000..7f867d73b --- /dev/null +++ b/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 009399803..4558b1d67 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -10,7 +10,7 @@ Name: openshift-ansible Version: 3.7.0 -Release: 0.190.0%{?dist} +Release: 0.192.0%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 URL: https://github.com/openshift/openshift-ansible @@ -285,6 +285,32 @@ Atomic OpenShift Utilities includes %changelog +* Sat Nov 04 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 
3.7.0-0.192.0 +- Bootstrap enhancements. (kwoodson@redhat.com) +- Fix master upgrade version detect and systemd enable (mgugino@redhat.com) +- Correct groupname during upgrade_control_plane play (mgugino@redhat.com) +- openshift_hosted: Add docker-gc (smilner@redhat.com) +- Remove old /etc/yum.repos.d/openshift_additional.repo file. + (abutcher@redhat.com) +- CFME: Use cluster_hostname if cluster_public_hostname isn't available + (tbielawa@redhat.com) +- Use client binary and well defined kubeconfig (sdodson@redhat.com) +- Ensure install and remove are mutually exclusive via + openshift_sanitize_inventory (sdodson@redhat.com) +- Enable SC, ASB, TSB by default (sdodson@redhat.com) +- Using the currently attached pvc for an ES dc if available, otherwise falling + back to current logic (ewolinet@redhat.com) +- Adding elb changes to provision elbs and add to scale group. + (kwoodson@redhat.com) +- Give admin and edit roles permission to patch ServiceInstances and + ServiceBindings (staebler@redhat.com) + +* Fri Nov 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.191.0 +- Adding CONFIG_FILE option back. (kwoodson@redhat.com) +- Configurable node config location. 
(kwoodson@redhat.com) +- Add enterprise prometheus image defaults (sdodson@redhat.com) +- Adding meta/main.yml to allow for Galaxy use of this repo (bedin@redhat.com) + * Thu Nov 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.190.0 - check presence of v2 snapshot before the migration proceeds (jchaloup@redhat.com) diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml index df77fe3bc..f5eb01b14 100644 --- a/playbooks/aws/openshift-cluster/prerequisites.yml +++ b/playbooks/aws/openshift-cluster/prerequisites.yml @@ -4,5 +4,3 @@ - include: provision_ssh_keypair.yml - include: provision_sec_group.yml - vars: - openshift_aws_node_group_type: compute diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml index 039357adb..7d74a691a 100644 --- a/playbooks/aws/openshift-cluster/provision_sec_group.yml +++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml @@ -6,7 +6,7 @@ connection: local gather_facts: no tasks: - - name: create an instance and prepare for ami + - name: create security groups include_role: name: openshift_aws tasks_from: security_group.yml diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 244787985..3b4d6f9a6 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -27,7 +27,7 @@ when: openshift_logging_install_logging | default(false) | bool - include: service_catalog.yml - when: openshift_enable_service_catalog | default(false) | bool + when: openshift_enable_service_catalog | default(true) | bool - include: ../openshift-management/config.yml when: openshift_management_install_management | default(false) | bool diff --git a/playbooks/common/openshift-cluster/install_docker_gc.yml b/playbooks/common/openshift-cluster/install_docker_gc.yml new file mode 100644 index 000000000..1e3dfee07 --- 
/dev/null +++ b/playbooks/common/openshift-cluster/install_docker_gc.yml @@ -0,0 +1,7 @@ +--- +- name: Install docker gc + hosts: oo_first_master + gather_facts: false + tasks: + - include_role: + name: openshift_docker_gc diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index c1536eb36..281ccce2e 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -24,6 +24,11 @@ - include: openshift_prometheus.yml when: openshift_hosted_prometheus_deploy | default(False) | bool +- include: install_docker_gc.yml + when: + - openshift_use_crio | default(False) | bool + - openshift_crio_enable_docker_gc | default(False) | bool + - name: Hosted Install Checkpoint End hosts: oo_all_hosts gather_facts: false diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index d5a8379d7..b905d6d86 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -129,7 +129,7 @@ # All controllers must be stopped at the same time then restarted - name: Cycle all controller services to force new leader election mode - hosts: oo_etcd_to_config + hosts: oo_masters_to_config gather_facts: no tasks: - name: Stop {{ openshift.common.service_type }}-master-controllers diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index b359919ba..f32073f09 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -212,6 +212,13 @@ tasks_from: master when: openshift_use_kuryr | default(false) | bool + - name: Setup the compute and infra node config maps + include_role: + name: openshift_node_bootstrap_configmap + tasks_from: standard.yml + 
when: openshift_master_bootstrap_enabled | default(false) | bool + run_once: True + post_tasks: - name: Create group for deployment type group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml index d8695bd3a..2ed156728 100644 --- a/roles/ansible_service_broker/tasks/main.yml +++ b/roles/ansible_service_broker/tasks/main.yml @@ -2,7 +2,7 @@ # do any asserts here - include: install.yml - when: ansible_service_broker_install | default(false) | bool + when: ansible_service_broker_install | default(true) | bool - include: remove.yml when: ansible_service_broker_remove | default(false) | bool diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 5371588cf..51f7d31c2 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -4,7 +4,6 @@ openshift_aws_create_iam_cert: True openshift_aws_create_security_groups: True openshift_aws_create_launch_config: True openshift_aws_create_scale_group: True -openshift_aws_node_group_type: master openshift_aws_wait_for_ssh: True @@ -12,12 +11,11 @@ openshift_aws_clusterid: default openshift_aws_region: us-east-1 openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}" openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}" -openshift_aws_kubernetes_cluster_status: "{{ openshift_aws_clusterid }}" openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external" openshift_aws_iam_cert_path: '' openshift_aws_iam_cert_key_path: '' -openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}" +openshift_aws_scale_group_basename: "{{ openshift_aws_clusterid }} openshift" openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms" openshift_aws_ami: '' @@ -28,7 +26,7 @@ openshift_aws_ami_name: openshift-gi openshift_aws_base_ami_name: ami_base 
openshift_aws_launch_config_bootstrap_token: '' -openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-{{ ansible_date_time.epoch }}" +openshift_aws_launch_config_basename: "{{ openshift_aws_clusterid }}" openshift_aws_users: [] @@ -48,12 +46,19 @@ openshift_aws_elb_health_check: unhealthy_threshold: 2 healthy_threshold: 2 -openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}" +openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}" +openshift_aws_elb_name_dict: + master: + external: "{{ openshift_aws_elb_basename }}-master-external" + internal: "{{ openshift_aws_elb_basename }}-master-internal" + infra: + external: "{{ openshift_aws_elb_basename }}-infra" + openshift_aws_elb_idle_timout: 400 openshift_aws_elb_scheme: internet-facing openshift_aws_elb_cert_arn: '' -openshift_aws_elb_listeners: +openshift_aws_elb_dict: master: external: - protocol: tcp @@ -75,6 +80,18 @@ openshift_aws_elb_listeners: load_balancer_port: 443 instance_protocol: tcp instance_port: 443 + infra: + external: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: tcp + instance_port: 443 + proxy_protocol: True + - protocol: tcp + load_balancer_port: 443 + instance_protocol: tcp + instance_port: 443 + proxy_protocol: True openshift_aws_node_group_config_master_volumes: - device_name: /dev/sdb @@ -88,17 +105,21 @@ openshift_aws_node_group_config_node_volumes: device_type: gp2 delete_on_termination: True -openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}" +openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" openshift_aws_node_group_termination_policy: Default openshift_aws_node_group_replace_instances: [] openshift_aws_node_group_replace_all_instances: False openshift_aws_node_group_config_extra_labels: {} -openshift_aws_node_group_config: - tags: "{{ 
openshift_aws_node_group_config_tags }}" +openshift_aws_ami_map: + master: "{{ openshift_aws_ami }}" + infra: "{{ openshift_aws_ami }}" + compute: "{{ openshift_aws_ami }}" + +openshift_aws_master_group_config: + # The 'master' key is always required here. master: instance_type: m4.xlarge - ami: "{{ openshift_aws_ami }}" volumes: "{{ openshift_aws_node_group_config_master_volumes }}" health_check: period: 60 @@ -114,9 +135,12 @@ openshift_aws_node_group_config: wait_for_instances: True termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" + elbs: "{{ openshift_aws_elb_name_dict['master'].keys()| map('extract', openshift_aws_elb_name_dict['master']) | list }}" + +openshift_aws_node_group_config: + # The 'compute' key is always required here. compute: instance_type: m4.xlarge - ami: "{{ openshift_aws_ami }}" volumes: "{{ openshift_aws_node_group_config_node_volumes }}" health_check: period: 60 @@ -131,9 +155,9 @@ openshift_aws_node_group_config: type: compute termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" + # The 'infra' key is always required here. 
infra: instance_type: m4.xlarge - ami: "{{ openshift_aws_ami }}" volumes: "{{ openshift_aws_node_group_config_node_volumes }}" health_check: period: 60 @@ -148,20 +172,30 @@ openshift_aws_node_group_config: type: infra termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" + elbs: "{{ openshift_aws_elb_name_dict['infra'].keys()| map('extract', openshift_aws_elb_name_dict['infra']) | list }}" + +openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}" +openshift_aws_elb_az_load_balancing: False -openshift_aws_elb_security_groups: -- "{{ openshift_aws_clusterid }}" -- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" +openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" -openshift_aws_elb_instance_filter: - "tag:clusterid": "{{ openshift_aws_clusterid }}" - "tag:host-type": "{{ openshift_aws_node_group_type }}" - instance-state-name: running +openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}" openshift_aws_launch_config_security_groups: -- "{{ openshift_aws_clusterid }}" # default sg -- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg -- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s + compute: + - "{{ openshift_aws_clusterid }}" # default sg + - "{{ openshift_aws_clusterid }}_compute" # node type sg + - "{{ openshift_aws_clusterid }}_compute_k8s" # node type sg k8s + infra: + - "{{ openshift_aws_clusterid }}" # default sg + - "{{ openshift_aws_clusterid }}_infra" # node type sg + - "{{ openshift_aws_clusterid }}_infra_k8s" # node type sg k8s + master: + - "{{ openshift_aws_clusterid }}" # default sg + - "{{ openshift_aws_clusterid }}_master" # node type sg + - "{{ openshift_aws_clusterid }}_master_k8s" # node type sg k8s + +openshift_aws_security_groups_tags: "{{ openshift_aws_kube_tags }}" 
openshift_aws_node_security_groups: default: @@ -231,3 +265,18 @@ openshift_aws_vpc: openshift_aws_node_run_bootstrap_startup: True openshift_aws_node_user_data: '' openshift_aws_node_config_namespace: openshift-node + +# If creating extra node groups, you'll need to define all of the following + +# The format is the same as openshift_aws_node_group_config, but the top-level +# key names should be different (ie, not == master or infra). +# openshift_aws_node_group_config_extra: {} + +# This variable should look like openshift_aws_launch_config_security_groups +# and contain a one-to-one mapping of top level keys that are defined in +# openshift_aws_node_group_config_extra. +# openshift_aws_launch_config_security_groups_extra: {} + +# openshift_aws_node_security_groups_extra: {} + +# openshift_aws_ami_map_extra: {} diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py index 06e1f9602..a9893c0a7 100644 --- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py +++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py @@ -9,17 +9,17 @@ class FilterModule(object): ''' Custom ansible filters for use by openshift_aws role''' @staticmethod - def build_instance_tags(clusterid, status='owned'): + def build_instance_tags(clusterid): ''' This function will return a dictionary of the instance tags. The main desire to have this inside of a filter_plugin is that we need to build the following key. 
- {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'} + {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"} ''' tags = {'clusterid': clusterid, - 'kubernetes.io/cluster/{}'.format(clusterid): status} + 'kubernetes.io/cluster/{}'.format(clusterid): clusterid} return tags diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml index 0dac1c23d..852adc7b5 100644 --- a/roles/openshift_aws/tasks/build_node_group.yml +++ b/roles/openshift_aws/tasks/build_node_group.yml @@ -1,4 +1,6 @@ --- +# This task file expects l_nodes_to_build to be passed in. + # When openshift_aws_use_custom_ami is '' then # we retrieve the latest build AMI. # Then set openshift_aws_ami to the ami. @@ -21,14 +23,12 @@ - "'results' in amiout" - amiout.results|length > 0 -- when: openshift_aws_create_security_groups - name: "Create {{ openshift_aws_node_group_type }} security groups" - include: security_group.yml +# Need to set epoch time in one place to use for launch_config and scale_group +- set_fact: + l_epoch_time: "{{ ansible_date_time.epoch }}" - when: openshift_aws_create_launch_config - name: "Create {{ openshift_aws_node_group_type }} launch config" include: launch_config.yml - when: openshift_aws_create_scale_group - name: "Create {{ openshift_aws_node_group_type }} node group" include: scale_group.yml diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml index 7bc3184df..a543222d5 100644 --- a/roles/openshift_aws/tasks/elb.yml +++ b/roles/openshift_aws/tasks/elb.yml @@ -1,66 +1,24 @@ --- -- name: query vpc - ec2_vpc_net_facts: - region: "{{ openshift_aws_region }}" - filters: - 'tag:Name': "{{ openshift_aws_vpc_name }}" - register: vpcout - -- name: debug - debug: var=vpcout - -- name: fetch the remote instances - ec2_remote_facts: - region: "{{ openshift_aws_region }}" - filters: "{{ openshift_aws_elb_instance_filter }}" - register: instancesout - -- name: 
fetch the default subnet id - ec2_vpc_subnet_facts: - region: "{{ openshift_aws_region }}" - filters: - "tag:Name": "{{ openshift_aws_subnet_name }}" - vpc-id: "{{ vpcout.vpcs[0].id }}" - register: subnetout - -- name: +- name: "dump the elb listeners for {{ l_elb_dict_item.key }}" debug: - msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction] - if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type - else openshift_aws_elb_listeners }}" + msg: "{{ l_elb_dict_item.value }}" -- name: "Create ELB {{ l_openshift_aws_elb_name }}" +- name: "Create ELB {{ l_elb_dict_item.key }}" ec2_elb_lb: - name: "{{ l_openshift_aws_elb_name }}" + name: "{{ l_openshift_aws_elb_name_dict[l_elb_dict_item.key][item.key] }}" state: present - security_group_names: "{{ openshift_aws_elb_security_groups }}" + cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}" + security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}" idle_timeout: "{{ openshift_aws_elb_idle_timout }}" region: "{{ openshift_aws_region }}" subnets: - "{{ subnetout.subnets[0].id }}" health_check: "{{ openshift_aws_elb_health_check }}" - listeners: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction] - if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type - else openshift_aws_elb_listeners }}" + listeners: "{{ item.value }}" scheme: "{{ openshift_aws_elb_scheme }}" - tags: - KubernetesCluster: "{{ openshift_aws_clusterid }}" + tags: "{{ openshift_aws_elb_tags }}" register: new_elb - -# It is necessary to ignore_errors here because the instances are not in 'ready' -# state when first added to ELB -- name: "Add instances to ELB {{ l_openshift_aws_elb_name }}" - ec2_elb: - instance_id: "{{ item.id }}" - ec2_elbs: "{{ l_openshift_aws_elb_name }}" - state: present - region: "{{ openshift_aws_region }}" - wait: False - with_items: "{{ 
instancesout.instances }}" - ignore_errors: True - retries: 10 - register: elb_call - until: elb_call|succeeded + with_dict: "{{ l_elb_dict_item.value }}" - debug: msg: "{{ item }}" diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml index 8b7b02a0e..0dbeba5a0 100644 --- a/roles/openshift_aws/tasks/launch_config.yml +++ b/roles/openshift_aws/tasks/launch_config.yml @@ -9,31 +9,7 @@ when: - openshift_deployment_type is undefined -- name: query vpc - ec2_vpc_net_facts: - region: "{{ openshift_aws_region }}" - filters: - 'tag:Name': "{{ openshift_aws_vpc_name }}" - register: vpcout - -- name: fetch the security groups for launch config - ec2_group_facts: - filters: - group-name: "{{ openshift_aws_launch_config_security_groups }}" - vpc-id: "{{ vpcout.vpcs[0].id }}" - region: "{{ openshift_aws_region }}" - register: ec2sgs - -# Create the scale group config -- name: Create the node scale group launch config - ec2_lc: - name: "{{ openshift_aws_launch_config_name }}" - region: "{{ openshift_aws_region }}" - image_id: "{{ openshift_aws_ami }}" - instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}" - security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}" - user_data: "{{ lookup('template', 'user_data.j2') }}" - key_name: "{{ openshift_aws_ssh_key_name }}" - ebs_optimized: False - volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}" - assign_public_ip: True +- include: launch_config_create.yml + with_dict: "{{ l_nodes_to_build }}" + loop_control: + loop_var: launch_config_item diff --git a/roles/openshift_aws/tasks/launch_config_create.yml b/roles/openshift_aws/tasks/launch_config_create.yml new file mode 100644 index 000000000..8265c2179 --- /dev/null +++ b/roles/openshift_aws/tasks/launch_config_create.yml @@ -0,0 +1,22 @@ +--- +- name: fetch the security 
groups for launch config + ec2_group_facts: + filters: + group-name: "{{ l_launch_config_security_groups[launch_config_item.key] }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" + region: "{{ openshift_aws_region }}" + register: ec2sgs + +# Create the scale group config +- name: Create the node scale group launch config + ec2_lc: + name: "{{ openshift_aws_launch_config_basename }}-{{ launch_config_item.key }}-{{ l_epoch_time }}" + region: "{{ openshift_aws_region }}" + image_id: "{{ l_aws_ami_map[launch_config_item.key] | default(openshift_aws_ami) }}" + instance_type: "{{ launch_config_item.value.instance_type }}" + security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}" + user_data: "{{ lookup('template', 'user_data.j2') }}" + key_name: "{{ openshift_aws_ssh_key_name }}" + ebs_optimized: False + volumes: "{{ launch_config_item.value.volumes }}" + assign_public_ip: True diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml index 737cfc7a6..530b0134d 100644 --- a/roles/openshift_aws/tasks/master_facts.yml +++ b/roles/openshift_aws/tasks/master_facts.yml @@ -3,20 +3,18 @@ ec2_elb_facts: region: "{{ openshift_aws_region }}" names: - - "{{ item }}" - with_items: - - "{{ openshift_aws_elb_name }}-external" - - "{{ openshift_aws_elb_name }}-internal" + - "{{ openshift_aws_elb_name_dict['master']['internal'] }}" delegate_to: localhost register: elbs - debug: var=elbs + run_once: true - name: set fact set_fact: - openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}" + openshift_master_cluster_hostname: "{{ elbs.elbs[0].dns_name }}" osm_custom_cors_origins: - - "{{ elbs.results[1].elbs[0].dns_name }}" + - "{{ elbs.elbs[0].dns_name }}" - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com" - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com" with_items: "{{ groups['masters'] }}" diff --git 
a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml index a8518d43a..91538ed5c 100644 --- a/roles/openshift_aws/tasks/provision.yml +++ b/roles/openshift_aws/tasks/provision.yml @@ -7,35 +7,36 @@ name: create s3 bucket for registry include: s3.yml +- include: vpc_and_subnet_id.yml + +- name: create elbs + include: elb.yml + with_dict: "{{ openshift_aws_elb_dict }}" + vars: + l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}" + l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}" + loop_control: + loop_var: l_elb_dict_item + - name: include scale group creation for master include: build_node_group.yml + vars: + l_nodes_to_build: "{{ openshift_aws_master_group_config }}" + l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}" + l_aws_ami_map: "{{ openshift_aws_ami_map }}" - name: fetch newly created instances ec2_remote_facts: region: "{{ openshift_aws_region }}" filters: "tag:clusterid": "{{ openshift_aws_clusterid }}" - "tag:host-type": "{{ openshift_aws_node_group_type }}" + "tag:host-type": "master" instance-state-name: running register: instancesout retries: 20 delay: 3 until: instancesout.instances|length > 0 -- name: create our master internal load balancers - include: elb.yml - vars: - openshift_aws_elb_direction: internal - l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-internal" - openshift_aws_elb_scheme: internal - -- name: create our master external load balancers - include: elb.yml - vars: - openshift_aws_elb_direction: external - l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-external" - openshift_aws_elb_scheme: internet-facing - - name: wait for ssh to become available wait_for: port: 22 diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml index 25ae6ce1c..3349acb7a 100644 --- a/roles/openshift_aws/tasks/provision_instance.yml +++ b/roles/openshift_aws/tasks/provision_instance.yml @@ 
-3,20 +3,7 @@ set_fact: openshift_node_bootstrap: True -- name: query vpc - ec2_vpc_net_facts: - region: "{{ openshift_aws_region }}" - filters: - 'tag:Name': "{{ openshift_aws_vpc_name }}" - register: vpcout - -- name: fetch the default subnet id - ec2_vpc_subnet_facts: - region: "{{ openshift_aws_region }}" - filters: - "tag:Name": "{{ openshift_aws_subnet_name }}" - vpc-id: "{{ vpcout.vpcs[0].id }}" - register: subnetout +- include: vpc_and_subnet_id.yml - name: create instance for ami creation ec2: diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml index fc4996c68..1b40f24d3 100644 --- a/roles/openshift_aws/tasks/provision_nodes.yml +++ b/roles/openshift_aws/tasks/provision_nodes.yml @@ -25,19 +25,23 @@ set_fact: openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}" -- name: include build node group for infra +- include: vpc_and_subnet_id.yml + +- name: include build compute and infra node groups include: build_node_group.yml vars: - openshift_aws_node_group_type: infra - openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift infra" - openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-infra-{{ ansible_date_time.epoch }}" + l_nodes_to_build: "{{ openshift_aws_node_group_config }}" + l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}" + l_aws_ami_map: "{{ openshift_aws_ami_map }}" -- name: include build node group for compute +- name: include build node group for extra nodes include: build_node_group.yml + when: openshift_aws_node_group_config_extra is defined vars: - openshift_aws_node_group_type: compute - openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift compute" - openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-compute-{{ ansible_date_time.epoch }}" + l_nodes_to_build: "{{ openshift_aws_node_group_config_extra | default({}) }}" + l_launch_config_security_groups: "{{ 
openshift_aws_launch_config_security_groups_extra }}" + l_aws_ami_map: "{{ openshift_aws_ami_map_extra }}" + - when: openshift_aws_wait_for_ssh | bool block: diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml index eb31636e7..097859af2 100644 --- a/roles/openshift_aws/tasks/scale_group.yml +++ b/roles/openshift_aws/tasks/scale_group.yml @@ -1,11 +1,4 @@ --- -- name: query vpc - ec2_vpc_net_facts: - region: "{{ openshift_aws_region }}" - filters: - 'tag:Name': "{{ openshift_aws_vpc_name }}" - register: vpcout - - name: fetch the subnet to use in scale group ec2_vpc_subnet_facts: region: "{{ openshift_aws_region }}" @@ -16,19 +9,20 @@ - name: Create the scale group ec2_asg: - name: "{{ openshift_aws_scale_group_name }}" - launch_config_name: "{{ openshift_aws_launch_config_name }}" - health_check_period: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.period }}" - health_check_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.type }}" - min_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].min_size }}" - max_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].max_size }}" - desired_capacity: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].desired_size }}" + name: "{{ openshift_aws_scale_group_basename }} {{ item.key }}" + launch_config_name: "{{ openshift_aws_launch_config_basename }}-{{ item.key }}-{{ l_epoch_time }}" + health_check_period: "{{ item.value.health_check.period }}" + health_check_type: "{{ item.value.health_check.type }}" + min_size: "{{ item.value.min_size }}" + max_size: "{{ item.value.max_size }}" + desired_capacity: "{{ item.value.desired_size }}" region: "{{ openshift_aws_region }}" - termination_policies: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].termination_policy if 'termination_policy' in 
openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}" - load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}" - wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}" + termination_policies: "{{ item.value.termination_policy if 'termination_policy' in item.value else omit }}" + load_balancers: "{{ item.value.elbs if 'elbs' in item.value else omit }}" + wait_for_instances: "{{ item.value.wait_for_instances | default(False)}}" vpc_zone_identifier: "{{ subnetout.subnets[0].id }}" replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}" - replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (openshift_aws_node_group_config[openshift_aws_node_group_type].replace_all_instances | default(omit)) }}" + replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (item.value.replace_all_instances | default(omit)) }}" tags: - - "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}" + - "{{ openshift_aws_node_group_config_tags | combine(item.value.tags) }}" + with_dict: "{{ l_nodes_to_build }}" diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml index 161e72fb4..5cc7ae537 100644 --- a/roles/openshift_aws/tasks/security_group.yml +++ b/roles/openshift_aws/tasks/security_group.yml @@ -6,40 +6,11 @@ "tag:Name": "{{ openshift_aws_clusterid }}" register: vpcout -- name: Create default security group for cluster - ec2_group: - name: "{{ openshift_aws_node_security_groups.default.name }}" - description: "{{ openshift_aws_node_security_groups.default.desc }}" - region: "{{ openshift_aws_region }}" - vpc_id: "{{ 
vpcout.vpcs[0].id }}" - rules: "{{ openshift_aws_node_security_groups.default.rules | default(omit, True)}}" - register: sg_default_created - -- name: create the node group sgs - ec2_group: - name: "{{ item.name}}" - description: "{{ item.desc }}" - rules: "{{ item.rules if 'rules' in item else [] }}" - region: "{{ openshift_aws_region }}" - vpc_id: "{{ vpcout.vpcs[0].id }}" - register: sg_create - with_items: - - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}" +- include: security_group_create.yml + vars: + l_security_groups: "{{ openshift_aws_node_security_groups }}" -- name: create the k8s sgs for the node group - ec2_group: - name: "{{ item.name }}_k8s" - description: "{{ item.desc }} for k8s" - region: "{{ openshift_aws_region }}" - vpc_id: "{{ vpcout.vpcs[0].id }}" - register: k8s_sg_create - with_items: - - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}" - -- name: tag sg groups with proper tags - ec2_tag: - tags: - KubernetesCluster: "{{ openshift_aws_clusterid }}" - resource: "{{ item.group_id }}" - region: "{{ openshift_aws_region }}" - with_items: "{{ k8s_sg_create.results }}" +- include: security_group_create.yml + when: openshift_aws_node_security_groups_extra is defined + vars: + l_security_groups: "{{ openshift_aws_node_security_groups_extra | default({}) }}" diff --git a/roles/openshift_aws/tasks/security_group_create.yml b/roles/openshift_aws/tasks/security_group_create.yml new file mode 100644 index 000000000..ef6060555 --- /dev/null +++ b/roles/openshift_aws/tasks/security_group_create.yml @@ -0,0 +1,25 @@ +--- +- name: create the node group sgs + ec2_group: + name: "{{ item.value.name}}" + description: "{{ item.value.desc }}" + rules: "{{ item.value.rules if 'rules' in item.value else [] }}" + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + with_dict: "{{ l_security_groups }}" + +- name: create the k8s sgs for the node group + ec2_group: + name: "{{ item.value.name 
}}_k8s" + description: "{{ item.value.desc }} for k8s" + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + with_dict: "{{ l_security_groups }}" + register: k8s_sg_create + +- name: tag sg groups with proper tags + ec2_tag: + tags: "{{ openshift_aws_security_groups_tags }}" + resource: "{{ item.group_id }}" + region: "{{ openshift_aws_region }}" + with_items: "{{ k8s_sg_create.results }}" diff --git a/roles/openshift_aws/tasks/vpc_and_subnet_id.yml b/roles/openshift_aws/tasks/vpc_and_subnet_id.yml new file mode 100644 index 000000000..aaf9b300f --- /dev/null +++ b/roles/openshift_aws/tasks/vpc_and_subnet_id.yml @@ -0,0 +1,18 @@ +--- +- name: query vpc + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + 'tag:Name': "{{ openshift_aws_vpc_name }}" + register: vpcout + +- name: debug + debug: var=vpcout + +- name: fetch the default subnet id + ec2_vpc_subnet_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_subnet_name }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" + register: subnetout diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2 index 76aebdcea..a8c7f9a95 100644 --- a/roles/openshift_aws/templates/user_data.j2 +++ b/roles/openshift_aws/templates/user_data.j2 @@ -7,8 +7,8 @@ write_files: owner: 'root:root' permissions: '0640' content: | - openshift_group_type: {{ openshift_aws_node_group_type }} -{% if openshift_aws_node_group_type != 'master' %} + openshift_group_type: {{ launch_config_item.key }} +{% if launch_config_item.key != 'master' %} - path: /etc/origin/node/bootstrap.kubeconfig owner: 'root:root' permissions: '0640' @@ -19,7 +19,7 @@ runcmd: {% if openshift_aws_node_run_bootstrap_startup %} - [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml] {% endif %} -{% if openshift_aws_node_group_type != 'master' %} +{% if launch_config_item.key != 'master' %} - [ systemctl, enable, {% if openshift_deployment_type == 
'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] - [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] {% endif %} diff --git a/roles/openshift_docker_gc/defaults/main.yml b/roles/openshift_docker_gc/defaults/main.yml new file mode 100644 index 000000000..9d79de8a1 --- /dev/null +++ b/roles/openshift_docker_gc/defaults/main.yml @@ -0,0 +1,3 @@ +--- +r_enable_docker_gc: "{{ openshift_crio_enable_docker_gc | default(False) }}" +r_docker_gc_node_selectors: "{{ openshift_crio_docker_gc_node_selector | default({}) }}" diff --git a/roles/openshift_docker_gc/meta/main.yml b/roles/openshift_docker_gc/meta/main.yml new file mode 100644 index 000000000..f88a7c533 --- /dev/null +++ b/roles/openshift_docker_gc/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: + author: OpenShift + description: docker garbage collection + company: Red Hat, Inc + license: ASL 2.0 + min_ansible_version: 2.2 + platforms: + - name: EL + versions: + - 7 +dependencies: +- role: lib_openshift diff --git a/roles/openshift_docker_gc/tasks/main.yaml b/roles/openshift_docker_gc/tasks/main.yaml new file mode 100644 index 000000000..9ba551479 --- /dev/null +++ b/roles/openshift_docker_gc/tasks/main.yaml @@ -0,0 +1,27 @@ +--- +- name: Create docker-gc tempdir + command: mktemp -d + register: templates_tmpdir + +# NOTE: oc_adm_policy_user does not support -z (yet) +- name: Add dockergc as privileged + shell: oc adm policy add-scc-to-user -z dockergc privileged +# oc_adm_policy_user: +# user: dockergc +# resource_kind: scc +# resource_name: privileged +# state: present + +- name: Create dockergc DaemonSet + become: yes + template: + src: dockergc-ds.yaml.j2 + dest: "{{ templates_tmpdir.stdout }}/dockergc-ds.yaml" + +- name: Apply dockergc DaemonSet + oc_obj: + state: present + kind: DaemonSet + name: "dockergc" + files: + - "{{ templates_tmpdir.stdout }}/dockergc-ds.yaml" diff --git 
a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 new file mode 100644 index 000000000..53e8b448b --- /dev/null +++ b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 @@ -0,0 +1,58 @@ +apiVersion: v1 +kind: List +items: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: dockergc + # You must grant privileged via: oadm policy add-scc-to-user -z dockergc privileged + # in order for the dockergc to access the docker socket and root directory +- apiVersion: extensions/v1beta1 + kind: DaemonSet + metadata: + name: dockergc + labels: + app: dockergc + spec: + template: + metadata: + labels: + app: dockergc + name: dockergc + spec: +{# Only set nodeSelector if the dict is not empty #} +{% if r_docker_gc_node_selectors %} + nodeSelector: +{% for k,v in r_docker_gc_node_selectors.items() %} + {{ k }}: {{ v }}{% endfor %}{% endif %} + + serviceAccountName: dockergc + containers: + - image: openshift/origin:latest + args: + - "ex" + - "dockergc" + - "--image-gc-low-threshold=60" + - "--image-gc-high-threshold=80" + - "--minimum-ttl-duration=1h0m0s" + securityContext: + privileged: true + name: dockergc + resources: + requests: + memory: 30Mi + cpu: 50m + volumeMounts: + - name: docker-root + readOnly: true + mountPath: /var/lib/docker + - name: docker-socket + readOnly: false + mountPath: /var/run/docker.sock + volumes: + - name: docker-root + hostPath: + path: /var/lib/docker + - name: docker-socket + hostPath: + path: /var/run/docker.sock diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 33028fea4..a88945538 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1289,7 +1289,7 @@ def get_container_openshift_version(facts): If containerized, see if we can determine the installed version via the systemd environment files. 
""" - for filename in ['/etc/sysconfig/%s-master', '/etc/sysconfig/%s-node']: + for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']: env_path = filename % facts['common']['service_type'] if not os.path.exists(env_path): continue diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index b98e281a3..cec295d65 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -78,7 +78,7 @@ generated_certs_dir: "{{openshift.common.config_base}}/logging" openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}" - openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}" + openshift_logging_elasticsearch_pvc_name: "{{ outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}" openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" @@ -136,7 +136,7 @@ generated_certs_dir: "{{openshift.common.config_base}}/logging" openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}" - openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}" + openshift_logging_elasticsearch_pvc_name: "{{ outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else 
openshift_logging_es_ops_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}" openshift_logging_elasticsearch_ops_deployment: true openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml index 383e6edb5..50a5252cc 100644 --- a/roles/openshift_management/tasks/add_container_provider.yml +++ b/roles/openshift_management/tasks/add_container_provider.yml @@ -10,6 +10,18 @@ - name: Ensure OpenShift facts are loaded openshift_facts: +- name: Ensure we use openshift_master_cluster_public_hostname if it is available + set_fact: + l_cluster_hostname: "{{ openshift.master.cluster_public_hostname }}" + when: + - openshift.master.cluster_public_hostname is defined + +- name: Ensure we default to the first master if openshift_master_cluster_public_hostname is unavailable + set_fact: + l_cluster_hostname: "{{ openshift.master.cluster_hostname }}" + when: + - l_cluster_hostname is not defined + - name: Ensure the management SA Secrets are read oc_serviceaccount_secret: state: list @@ -59,7 +71,7 @@ connection_configurations: - authentication: {auth_key: "{{ management_bearer_token }}", authtype: bearer, type: AuthToken} endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} - hostname: "{{ openshift.master.cluster_public_hostname }}" + hostname: "{{ l_cluster_hostname }}" name: "{{ openshift_management_project }}" port: "{{ openshift.master.api_port }}" type: "ManageIQ::Providers::Openshift::ContainerManager" diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index fe78dea66..4acac7923 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -26,7 +26,6 @@ default_r_openshift_master_os_firewall_allow: cond: "{{ groups.oo_etcd_to_config | default([]) | length == 
0 }}" r_openshift_master_os_firewall_allow: "{{ default_r_openshift_master_os_firewall_allow | union(openshift_master_open_ports | default([])) }}" - # oreg_url is defined by user input oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}" oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker" @@ -60,7 +59,7 @@ r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_p openshift_master_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}" openshift_master_image_config_latest: "{{ openshift_master_image_config_latest_default }}" -openshift_master_config_dir_default: "{{ (openshift.common.config_base | default('/etc/origin/master')) ~ '/master' }}" +openshift_master_config_dir_default: "{{ openshift.common.config_base ~ '/master' if openshift is defined and 'common' in openshift else '/etc/origin/master' }}" openshift_master_config_dir: "{{ openshift_master_config_dir_default }}" openshift_master_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}" @@ -71,8 +70,6 @@ openshift_master_node_config_kubeletargs_mem: 512M openshift_master_bootstrap_enabled: False -openshift_master_client_binary: "{{ openshift.common.client_binary if openshift is defined else 'oc' }}" - openshift_master_config_imageconfig_format: "{{ openshift.node.registry_url }}" # these are for the default settings in a generated node-config.yaml @@ -144,3 +141,5 @@ openshift_master_node_configs: - "{{ openshift_master_node_config_compute }}" openshift_master_bootstrap_namespace: openshift-node +openshift_master_csr_sa: node-bootstrapper +openshift_master_csr_namespace: openshift-infra diff --git a/roles/openshift_master/tasks/bootstrap.yml b/roles/openshift_master/tasks/bootstrap.yml index f837a8bae..ce55e7d0c 100644 --- a/roles/openshift_master/tasks/bootstrap.yml +++ b/roles/openshift_master/tasks/bootstrap.yml @@ -2,7 +2,8 @@ # TODO: create a module for this 
command. # oc_serviceaccounts_kubeconfig - name: create service account kubeconfig with csr rights - command: "oc serviceaccounts create-kubeconfig node-bootstrapper -n openshift-infra" + command: > + oc serviceaccounts create-kubeconfig {{ openshift_master_csr_sa }} -n {{ openshift_master_csr_namespace }} register: kubeconfig_out until: kubeconfig_out.rc == 0 retries: 24 @@ -12,67 +13,3 @@ copy: content: "{{ kubeconfig_out.stdout }}" dest: "{{ openshift_master_config_dir }}/bootstrap.kubeconfig" - -- name: create a temp dir for this work - command: mktemp -d /tmp/openshift_node_config-XXXXXX - register: mktempout - run_once: true - -# This generate is so that we do not have to maintain -# our own copy of the template. This is generated by -# the product and the following settings will be -# generated by the master -- name: generate a node-config dynamically - command: > - {{ openshift_master_client_binary }} adm create-node-config - --node-dir={{ mktempout.stdout }}/ - --node=CONFIGMAP - --hostnames=test - --dns-ip=0.0.0.0 - --certificate-authority={{ openshift_master_config_dir }}/ca.crt - --signer-cert={{ openshift_master_config_dir }}/ca.crt - --signer-key={{ openshift_master_config_dir }}/ca.key - --signer-serial={{ openshift_master_config_dir }}/ca.serial.txt - --node-client-certificate-authority={{ openshift_master_config_dir }}/ca.crt - register: configgen - run_once: true - -- name: remove the default settings - yedit: - state: "{{ item.state | default('present') }}" - src: "{{ mktempout.stdout }}/node-config.yaml" - key: "{{ item.key }}" - value: "{{ item.value | default(omit) }}" - with_items: "{{ openshift_master_node_config_default_edits }}" - run_once: true - -- name: copy the generated config into each group - copy: - src: "{{ mktempout.stdout }}/node-config.yaml" - remote_src: true - dest: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml" - with_items: "{{ openshift_master_node_configs }}" - run_once: true - -- name: "specialize the 
generated configs for node-config-{{ item.type }}" - yedit: - src: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml" - edits: "{{ item.edits }}" - with_items: "{{ openshift_master_node_configs }}" - run_once: true - -- name: create node-config.yaml configmap - oc_configmap: - name: "node-config-{{ item.type }}" - namespace: "{{ openshift_master_bootstrap_namespace }}" - from_file: - node-config.yaml: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml" - with_items: "{{ openshift_master_node_configs }}" - run_once: true - -- name: remove templated files - file: - dest: "{{ mktempout.stdout }}/" - state: absent - with_items: "{{ openshift_master_node_configs }}" - run_once: true diff --git a/roles/openshift_master/tasks/bootstrap_settings.yml b/roles/openshift_master/tasks/bootstrap_settings.yml new file mode 100644 index 000000000..cbd7f587b --- /dev/null +++ b/roles/openshift_master/tasks/bootstrap_settings.yml @@ -0,0 +1,14 @@ +--- +- name: modify controller args + yedit: + src: /etc/origin/master/master-config.yaml + edits: + - key: kubernetesMasterConfig.controllerArguments.cluster-signing-cert-file + value: + - /etc/origin/master/ca.crt + - key: kubernetesMasterConfig.controllerArguments.cluster-signing-key-file + value: + - /etc/origin/master/ca.key + notify: + - restart master controllers + when: openshift_master_bootstrap_enabled | default(False) diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 48b34c578..c7c02d49b 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -218,18 +218,7 @@ - restart master api - restart master controllers -- name: modify controller args - yedit: - src: /etc/origin/master/master-config.yaml - edits: - - key: kubernetesMasterConfig.controllerArguments.cluster-signing-cert-file - value: - - /etc/origin/master/ca.crt - - key: kubernetesMasterConfig.controllerArguments.cluster-signing-key-file - value: - - 
/etc/origin/master/ca.key - notify: - - restart master controllers +- include: bootstrap_settings.yml when: openshift_master_bootstrap_enabled | default(False) - include: set_loopback_context.yml diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 5751723ab..8420dfb8c 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -58,6 +58,17 @@ - l_create_ha_unit_files | changed # end workaround for missing systemd unit files +- name: enable master services + systemd: + name: "{{ openshift.common.service_type }}-master-{{ item }}" + enabled: yes + with_items: + - api + - controllers + when: + - openshift.master.cluster_method == "native" + - not openshift.common.is_master_system_container | bool + - name: Preserve Master API Proxy Config options command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api register: l_master_api_proxy diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 37f48e724..0c6d8db38 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -110,5 +110,8 @@ openshift_node_use_kuryr: "{{ openshift_node_use_kuryr_default }}" openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}" openshift_node_data_dir: "{{ openshift_node_data_dir_default }}" +openshift_node_config_dir_default: "/etc/origin/node" +openshift_node_config_dir: "{{ openshift_node_config_dir_default }}" + openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}" openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}" diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml index cf22181a8..8cf41ab4c 100644 --- a/roles/openshift_node/tasks/bootstrap.yml +++ b/roles/openshift_node/tasks/bootstrap.yml @@ -25,11 +25,11 @@ 
state: "{{ item.state | default('present') }}" with_items: # add the kubeconfig - - line: "KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig" + - line: "KUBECONFIG={{ openshift_node_config_dir }}/bootstrap.kubeconfig" regexp: "^KUBECONFIG=.*" # remove the config file. This comes from openshift_facts - - regexp: "^CONFIG_FILE=.*" - state: absent + - line: "CONFIG_FILE={{ openshift_node_config_dir }}/node-config.yaml" + regexp: "^CONFIG_FILE=.*" - name: include aws sysconfig credentials include: aws.yml @@ -76,7 +76,7 @@ state: link force: yes with_items: - - /var/lib/origin/openshift.local.config/node/node-client-ca.crt + - "{{ openshift_node_config_dir }}/node-client-ca.crt" - when: rpmgenerated_config.stat.exists block: diff --git a/roles/openshift_node_bootstrap_configmap/defaults/main.yml b/roles/openshift_node_bootstrap_configmap/defaults/main.yml new file mode 100644 index 000000000..02c872646 --- /dev/null +++ b/roles/openshift_node_bootstrap_configmap/defaults/main.yml @@ -0,0 +1,15 @@ +--- +openshift_node_bootstrap_configmap_custom_labels: [] +openshift_node_bootstrap_configmap_edits: [] +openshift_node_bootstrap_configmap_name: node-config-compute +openshift_node_bootstrap_configmap_namespace: openshift-node +openshift_node_bootstrap_configmap_default_labels: +- type=compute + +openshift_imageconfig_format: "{{ openshift.node.registry_url if openshift is defined and 'node' in openshift else oreg_url }}" +openshift_node_bootstrap_configmap_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}" +openshift_node_bootstrap_configmap_network_plugin_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}" +openshift_node_bootstrap_configmap_network_plugin: "{{ openshift_node_bootstrap_configmap_network_plugin_default }}" +openshift_node_bootstrap_configmap_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}" +openshift_node_bootstrap_configmap_node_data_dir: "{{ 
openshift_node_bootstrap_configmap_node_data_dir_default }}" +openshift_node_bootstrap_configmap_network_mtu: "{{ openshift_node_sdn_mtu | default(8951) }}" diff --git a/roles/openshift_node_bootstrap_configmap/meta/main.yml b/roles/openshift_node_bootstrap_configmap/meta/main.yml new file mode 100644 index 000000000..14c1dd498 --- /dev/null +++ b/roles/openshift_node_bootstrap_configmap/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: +- role: lib_openshift +- role: lib_utils diff --git a/roles/openshift_node_bootstrap_configmap/tasks/create_config.yml b/roles/openshift_node_bootstrap_configmap/tasks/create_config.yml new file mode 100644 index 000000000..05080daa4 --- /dev/null +++ b/roles/openshift_node_bootstrap_configmap/tasks/create_config.yml @@ -0,0 +1,32 @@ +--- +- name: create a temp dir for this work + command: mktemp -d /tmp/openshift_node_config-XXXXXX + register: mktempout + run_once: true + +- name: create node config template + template: + src: node-config.yaml.j2 + dest: "{{ mktempout.stdout }}/node-config.yaml" + +- name: "specialize the generated configs for {{ openshift_node_bootstrap_configmap_name }}" + yedit: + content: + src: "{{ mktempout.stdout }}/node-config.yaml" + edits: "{{ openshift_node_bootstrap_configmap_edits }}" + when: openshift_node_bootstrap_configmap_edits|length > 0 + run_once: true + +- name: create node-config.yaml configmap + oc_configmap: + name: "{{ openshift_node_bootstrap_configmap_name }}" + namespace: "{{ openshift_node_bootstrap_configmap_namespace }}" + from_file: + node-config.yaml: "{{ mktempout.stdout }}/node-config.yaml" + run_once: true + +- name: remove templated files + file: + dest: "{{ mktempout.stdout }}/" + state: absent + run_once: true diff --git a/roles/openshift_node_bootstrap_configmap/tasks/standard.yml b/roles/openshift_node_bootstrap_configmap/tasks/standard.yml new file mode 100644 index 000000000..637d7c7fc --- /dev/null +++ b/roles/openshift_node_bootstrap_configmap/tasks/standard.yml @@ 
-0,0 +1,12 @@ +--- +- name: Build an infra node configmap + include: create_config.yml + vars: + openshift_node_bootstrap_configmap_name: node-config-infra + static: true + +- name: Build a compute node configmap + include: create_config.yml + vars: + openshift_node_bootstrap_configmap_name: node-config-compute + static: true diff --git a/roles/openshift_node_bootstrap_configmap/templates/node-config.yaml.j2 b/roles/openshift_node_bootstrap_configmap/templates/node-config.yaml.j2 new file mode 100644 index 000000000..d533b88fa --- /dev/null +++ b/roles/openshift_node_bootstrap_configmap/templates/node-config.yaml.j2 @@ -0,0 +1,53 @@ +allowDisabledDocker: false +apiVersion: v1 +authConfig: + authenticationCacheSize: 1000 + authenticationCacheTTL: 5m + authorizationCacheSize: 1000 + authorizationCacheTTL: 5m +dnsBindAddress: "127.0.0.1:53" +dnsDomain: cluster.local +dnsIP: 0.0.0.0 +dnsNameservers: null +dnsRecursiveResolvConf: /etc/origin/node/resolv.conf +dockerConfig: + dockerShimRootDirectory: /var/lib/dockershim + dockerShimSocket: /var/run/dockershim.sock + execHandlerName: native +enableUnidling: true +imageConfig: + format: "{{ openshift_imageconfig_format }}" + latest: false +iptablesSyncPeriod: 30s +kind: NodeConfig +kubeletArguments: + cloud-config: + - /etc/origin/cloudprovider/{{ openshift_node_bootstrap_configmap_cloud_provider }}.conf + cloud-provider: + - {{ openshift_node_bootstrap_configmap_cloud_provider }} + node-labels: {{ openshift_node_bootstrap_configmap_default_labels | union(openshift_node_bootstrap_configmap_custom_labels) | list | to_json }} +masterClientConnectionOverrides: + acceptContentTypes: application/vnd.kubernetes.protobuf,application/json + burst: 40 + contentType: application/vnd.kubernetes.protobuf + qps: 20 +masterKubeConfig: node.kubeconfig +networkConfig: + mtu: "{{ openshift_node_bootstrap_configmap_network_mtu }}" + networkPluginName: {{ openshift_node_bootstrap_configmap_network_plugin }} +nodeIP: "" +podManifestConfig: 
null +servingInfo: + bindAddress: 0.0.0.0:10250 + bindNetwork: tcp4 + certFile: server.crt + clientCA: node-client-ca.crt + keyFile: server.key + namedCertificates: null +volumeConfig: + localQuota: + perFSGroup: null +volumeDirectory: {{ openshift_node_bootstrap_configmap_node_data_dir }}/openshift.local.volumes +enable-controller-attach-detach: +- 'true' +networkPluginName: {{ openshift_node_bootstrap_configmap_network_plugin }} diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml index 00995eee6..d217b90fb 100644 --- a/roles/openshift_prometheus/defaults/main.yaml +++ b/roles/openshift_prometheus/defaults/main.yaml @@ -6,16 +6,6 @@ openshift_prometheus_namespace: prometheus openshift_prometheus_node_selector: {"region":"infra"} -# image defaults -openshift_prometheus_image_prefix: "openshift/" -openshift_prometheus_image_version: "v2.0.0-dev.3" -openshift_prometheus_proxy_image_prefix: "openshift/" -openshift_prometheus_proxy_image_version: "v1.0.0" -openshift_prometheus_alertmanager_image_prefix: "openshift/" -openshift_prometheus_alertmanager_image_version: "v0.9.1" -openshift_prometheus_alertbuffer_image_prefix: "openshift/" -openshift_prometheus_alertbuffer_image_version: "v0.0.2" - # additional prometheus rules file openshift_prometheus_additional_rules_file: null diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml index 523a64334..332104d77 100644 --- a/roles/openshift_prometheus/tasks/main.yaml +++ b/roles/openshift_prometheus/tasks/main.yaml @@ -1,4 +1,20 @@ --- +- name: Set default image variables based on deployment_type + include_vars: "{{ item }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + +- name: Set image facts + set_fact: + openshift_prometheus_image_prefix: "{{ openshift_prometheus_image_prefix | default(__openshift_prometheus_image_prefix) }}" + 
openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default(__openshift_prometheus_image_version) }}" + openshift_prometheus_proxy_image_prefix: "{{ openshift_prometheus_proxy_image_prefix | default(openshift_prometheus_image_prefix) }}" + openshift_prometheus_proxy_image_version: "{{ openshift_prometheus_proxy_image_version | default(__openshift_prometheus_proxy_image_version) }}" + openshift_prometheus_alertmanager_image_prefix: "{{ openshift_prometheus_alertmanager_image_prefix | default(openshift_prometheus_image_prefix) }}" + openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default(__openshift_prometheus_alertmanager_image_version) }}" + openshift_prometheus_alertbuffer_image_prefix: "{{ openshift_prometheus_alertbuffer_image_prefix | default(openshift_prometheus_image_prefix) }}" + openshift_prometheus_alertbuffer_image_version: "{{ openshift_prometheus_alertbuffer_image_version | default(__openshift_prometheus_alertbuffer_image_version) }}" - name: Create temp directory for doing work in on target command: mktemp -td openshift-prometheus-ansible-XXXXXX diff --git a/roles/openshift_prometheus/vars/default_images.yml b/roles/openshift_prometheus/vars/default_images.yml new file mode 100644 index 000000000..4092eb2d4 --- /dev/null +++ b/roles/openshift_prometheus/vars/default_images.yml @@ -0,0 +1,7 @@ +--- +# image defaults +__openshift_prometheus_image_prefix: "openshift/" +__openshift_prometheus_image_version: "v2.0.0-dev.3" +__openshift_prometheus_proxy_image_version: "v1.0.0" +__openshift_prometheus_alertmanager_image_version: "v0.9.1" +__openshift_prometheus_alertbuffer_image_version: "v0.0.2" diff --git a/roles/openshift_prometheus/vars/openshift-enterprise.yml b/roles/openshift_prometheus/vars/openshift-enterprise.yml new file mode 100644 index 000000000..0b45e03d3 --- /dev/null +++ b/roles/openshift_prometheus/vars/openshift-enterprise.yml @@ -0,0 +1,7 @@ +--- +# image 
defaults +__openshift_prometheus_image_prefix: "registry.access.redhat.com/openshift3/" +__openshift_prometheus_image_version: "v3.7" +__openshift_prometheus_proxy_image_version: "v3.7" +__openshift_prometheus_alertmanager_image_version: "v3.7" +__openshift_prometheus_alertbuffer_image_version: "v3.7" diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index d41245093..95ba9fe4c 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -10,6 +10,11 @@ - name: Ensure libselinux-python is installed package: name=libselinux-python state=present + - name: Remove openshift_additional.repo file + file: + dest: /etc/yum.repos.d/openshift_additional.repo + state: absent + - name: Create any additional repos that are defined yum_repository: description: "{{ item.description | default(item.name | default(item.id)) }}" diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index a6c168bc7..70b236033 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -69,3 +69,21 @@ - openshift_clusterid is not defined - openshift_cloudprovider_kind is defined - openshift_cloudprovider_kind == 'aws' + +- name: Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive + fail: + msg: > + Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive, + do not set both to true. ansible_service_broker_install defaults to true. + when: + - ansible_service_broker_remove | default(false) | bool + - ansible_service_broker_install | default(true) | bool + +- name: Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive + fail: + msg: > + Ensure that template_service_broker_remove and template_service_broker_install are mutually exclusive, + do not set both to true. 
template_service_broker_remove defaults to true. + when: + - template_service_broker_remove | default(false) | bool + - template_service_broker_install | default(true) | bool diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml index 9d55185c8..cd7bda2c6 100644 --- a/roles/openshift_service_catalog/tasks/generate_certs.yml +++ b/roles/openshift_service_catalog/tasks/generate_certs.yml @@ -60,7 +60,7 @@ register: apiserver_ca - shell: > - oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" register: get_apiservices changed_when: no diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index d17468b5c..3507330e3 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -38,7 +38,7 @@ - name: Make kube-service-catalog project network global command: > - oc adm pod-network make-projects-global kube-service-catalog + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog - include: generate_certs.yml @@ -88,14 +88,14 @@ vars: original_content: "{{ edit_yaml.results.results[0] | to_yaml }}" when: - - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) + - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 
'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) # only do this if we don't already have the updated role info - name: update edit role for service catalog and pod preset access command: > - oc replace -f {{ mktemp.stdout }}/edit_sc_patch.yml + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml when: - - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) + - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - oc_obj: name: admin @@ -111,14 +111,14 @@ vars: original_content: "{{ admin_yaml.results.results[0] | to_yaml }}" when: - - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) + - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 
'get', 'list', 'watch']) # only do this if we don't already have the updated role info - name: update admin role for service catalog and pod preset access command: > - oc replace -f {{ mktemp.stdout }}/admin_sc_patch.yml + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml when: - - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) + - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - oc_obj: name: view @@ -139,7 +139,7 @@ # only do this if we don't already have the updated role info - name: update view role for service catalog access command: > - oc replace -f {{ mktemp.stdout }}/view_sc_patch.yml + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml when: - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml index ca9844e79..a832e1f85 100644 --- a/roles/openshift_service_catalog/tasks/remove.yml +++ b/roles/openshift_service_catalog/tasks/remove.yml @@ -1,7 +1,7 @@ --- - name: Remove Service Catalog APIServer command: > - oc delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog + {{ 
openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog # TODO: this module doesn't currently remove this #- name: Remove service catalog api service @@ -48,7 +48,7 @@ - name: Remove Service Catalog kube-system Role Bindinds shell: > - oc process kube-system-service-catalog-role-bindings -n kube-system | oc delete --ignore-not-found -f - + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - - oc_obj: kind: template @@ -58,7 +58,7 @@ - name: Remove Service Catalog kube-service-catalog Role Bindinds shell: > - oc process service-catalog-role-bindings -n kube-service-catalog | oc delete --ignore-not-found -f - + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - - oc_obj: kind: template diff --git a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 index 4629d5bb3..59cceafcf 100644 --- a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 +++ b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 @@ -12,6 +12,7 @@ - get - list - watch + - patch - apiGroups: - "settings.k8s.io" attributeRestrictions: null diff --git a/roles/template_service_broker/tasks/main.yml b/roles/template_service_broker/tasks/main.yml index d7ca970c7..da8aa291b 100644 --- a/roles/template_service_broker/tasks/main.yml +++ b/roles/template_service_broker/tasks/main.yml @@ -2,7 +2,7 @@ # do any asserts here - 
include: install.yml - when: template_service_broker_install | default(false) | bool + when: template_service_broker_install | default(true) | bool - include: remove.yml when: template_service_broker_remove | default(false) | bool |