39 files changed, 338 insertions, 109 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 3fd8b4d26..3bd2751f4 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.189.0 ./
+3.7.0-0.191.0 ./
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index e49dd5fa2..5de43270e 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -123,6 +123,15 @@ openshift_release=v3.7
 # use this option if you are sure you know what you are doing!
 #openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
 #openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
+# NOTE: The following cri-o docker-gc items are tech preview and likely shouldn't be used
+# unless you know what you are doing!!
+# The following two variables are used when openshift_use_crio is True
+# and clean up after builds that pass through docker.
+# Enable docker garbage collection when using cri-o
+#openshift_crio_enable_docker_gc=false
+# Node selectors to run the garbage collection
+#openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
+
 # Items added, as is, to end of /etc/sysconfig/docker OPTIONS
 # Default value: "--log-driver=journald"
 #openshift_docker_options="-l warn --ipv6=false"
diff --git a/meta/main.yml b/meta/main.yml
new file mode 100644
index 000000000..7f867d73b
--- /dev/null
+++ b/meta/main.yml
@@ -0,0 +1,2 @@
+---
+dependencies:
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 57db94c1f..5376b83fc 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
 Name:           openshift-ansible
 Version:        3.7.0
-Release:        0.189.0%{?dist}
+Release:        0.191.0%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
 URL:            https://github.com/openshift/openshift-ansible
@@ -285,6 +285,33 @@ Atomic OpenShift Utilities includes
 %changelog
+* Fri Nov 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.191.0
+- Adding CONFIG_FILE option back. (kwoodson@redhat.com)
+- Configurable node config location. (kwoodson@redhat.com)
+- Add enterprise prometheus image defaults (sdodson@redhat.com)
+- Adding meta/main.yml to allow for Galaxy use of this repo (bedin@redhat.com)
+
+* Thu Nov 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.190.0
+- check presence of v2 snapshot before the migration proceeds
+  (jchaloup@redhat.com)
+- Remove delegate_to from openshift_facts within the openshift_ca role.
+  (abutcher@redhat.com)
+- Don't use possibly undefined variables in error messages
+  (tbielawa@redhat.com)
+- MTU for bootstrapping should default to openshift_node_sdn_mtu
+  (ccoleman@redhat.com)
+- Retry service account bootstrap kubeconfig creation (ccoleman@redhat.com)
+- Docker: make use of new etc/containers/registries.conf optional
+  (mgugino@redhat.com)
+- Add rules to the view ClusterRole for service catalog. (staebler@redhat.com)
+- Updating console OPENSHIFT_CONSTANTS flag for TSB (ewolinet@redhat.com)
+- GlusterFS: Fix registry storage documentation (jarrpa@redhat.com)
+- fix comment and make it visible to end-user (azagayno@redhat.com)
+- escape also custom_cors_origins (azagayno@redhat.com)
+- add comment on regexp specifics (azagayno@redhat.com)
+- escape corsAllowedOrigins regexp strings and anchor them
+  (azagayno@redhat.com)
+
 * Wed Nov 01 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.189.0
 - Stating that certificate is required when doing SSL on ELB.
   (kwoodson@redhat.com)
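
As a quick illustration of the tech-preview knobs documented above, a hypothetical group_vars entry that would activate the docker-gc play added further down (values are illustrative, not recommendations):

    # group_vars/OSEv3.yml -- hypothetical example
    openshift_use_crio: true
    openshift_crio_enable_docker_gc: true
    openshift_crio_docker_gc_node_selector:
      runtime: 'cri-o'
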
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 244787985..3b4d6f9a6 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -27,7 +27,7 @@
   when: openshift_logging_install_logging | default(false) | bool
 
 - include: service_catalog.yml
-  when: openshift_enable_service_catalog | default(false) | bool
+  when: openshift_enable_service_catalog | default(true) | bool
 
 - include: ../openshift-management/config.yml
   when: openshift_management_install_management | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/install_docker_gc.yml b/playbooks/common/openshift-cluster/install_docker_gc.yml
new file mode 100644
index 000000000..1e3dfee07
--- /dev/null
+++ b/playbooks/common/openshift-cluster/install_docker_gc.yml
@@ -0,0 +1,7 @@
+---
+- name: Install docker gc
+  hosts: oo_first_master
+  gather_facts: false
+  tasks:
+  - include_role:
+      name: openshift_docker_gc
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index c1536eb36..281ccce2e 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -24,6 +24,11 @@
 - include: openshift_prometheus.yml
   when: openshift_hosted_prometheus_deploy | default(False) | bool
 
+- include: install_docker_gc.yml
+  when:
+  - openshift_use_crio | default(False) | bool
+  - openshift_crio_enable_docker_gc | default(False) | bool
+
 - name: Hosted Install Checkpoint End
   hosts: oo_all_hosts
   gather_facts: false
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index d5a8379d7..b905d6d86 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -129,7 +129,7 @@
 
 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
-  hosts: oo_etcd_to_config
+  hosts: oo_masters_to_config
   gather_facts: no
   tasks:
   - name: Stop {{ openshift.common.service_type }}-master-controllers
diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml
index d8695bd3a..2ed156728 100644
--- a/roles/ansible_service_broker/tasks/main.yml
+++ b/roles/ansible_service_broker/tasks/main.yml
@@ -2,7 +2,7 @@
 # do any asserts here
 
 - include: install.yml
-  when: ansible_service_broker_install | default(false) | bool
+  when: ansible_service_broker_install | default(true) | bool
 
 - include: remove.yml
   when: ansible_service_broker_remove | default(false) | bool
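
The service catalog and both brokers now install by default, as the flipped defaults above (and the template_service_broker change at the end of this diff) show. A minimal sketch of the inventory overrides that restore the old opt-in behavior (variable names exactly as used in the conditions):

    openshift_enable_service_catalog: false
    ansible_service_broker_install: false
    template_service_broker_install: false
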
diff --git a/roles/etcd/tasks/migration/check.yml b/roles/etcd/tasks/migration/check.yml
index 0804d9e1c..5c45e5ae1 100644
--- a/roles/etcd/tasks/migration/check.yml
+++ b/roles/etcd/tasks/migration/check.yml
@@ -3,6 +3,17 @@
 # Check the cluster is healthy
 - include: check_cluster_health.yml
 
+# Check if there is at least one v2 snapshot
+- name: Check if there is at least one v2 snapshot
+  find:
+    paths: "{{ etcd_data_dir }}/member/snap"
+    patterns: '*.snap'
+  register: snapshots_result
+
+- fail:
+    msg: "Before the migration can proceed, the etcd member must have written at least one snapshot under the {{ etcd_data_dir }}/member/snap directory."
+  when: snapshots_result.matched | int == 0
+
 # Check if the member has v3 data already
 # Run the migration only if the data are v2
 - name: Check if there are any v3 data
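
For reference, the `find` result registered above looks roughly like this when no v2 snapshot exists, which is exactly the case the new fail task guards against (shape abbreviated to the fields used here):

    snapshots_result:
      matched: 0
      files: []
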
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 5371588cf..9f3c14bad 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -12,7 +12,6 @@ openshift_aws_clusterid: default
 openshift_aws_region: us-east-1
 openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
 openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
-openshift_aws_kubernetes_cluster_status: "{{ openshift_aws_clusterid }}"
 
 openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
 openshift_aws_iam_cert_path: ''
@@ -48,7 +47,14 @@ openshift_aws_elb_health_check:
   unhealthy_threshold: 2
   healthy_threshold: 2
 
-openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_name_dict:
+  master:
+    external: "{{ openshift_aws_elb_basename }}-external"
+    internal: "{{ openshift_aws_elb_basename }}-internal"
+  infra:
+    external: "{{ openshift_aws_elb_basename }}"
+
 openshift_aws_elb_idle_timout: 400
 openshift_aws_elb_scheme: internet-facing
 openshift_aws_elb_cert_arn: ''
@@ -75,6 +81,18 @@ openshift_aws_elb_listeners:
       load_balancer_port: 443
       instance_protocol: tcp
       instance_port: 443
+  infra:
+    external:
+    - protocol: tcp
+      load_balancer_port: 80
+      instance_protocol: tcp
+      instance_port: 443
+      proxy_protocol: True
+    - protocol: tcp
+      load_balancer_port: 443
+      instance_protocol: tcp
+      instance_port: 443
+      proxy_protocol: True
 
 openshift_aws_node_group_config_master_volumes:
 - device_name: /dev/sdb
@@ -88,7 +106,7 @@ openshift_aws_node_group_config_node_volumes:
   device_type: gp2
   delete_on_termination: True
 
-openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
 openshift_aws_node_group_termination_policy: Default
 openshift_aws_node_group_replace_instances: []
 openshift_aws_node_group_replace_all_instances: False
@@ -114,6 +132,7 @@ openshift_aws_node_group_config:
     wait_for_instances: True
     termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
     replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+    elbs: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type].keys() | map('extract', openshift_aws_elb_name_dict[openshift_aws_node_group_type]) | list }}"
   compute:
     instance_type: m4.xlarge
     ami: "{{ openshift_aws_ami }}"
@@ -148,21 +167,22 @@ openshift_aws_node_group_config:
     type: infra
     termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
     replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+    elbs: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type].keys() | map('extract', openshift_aws_elb_name_dict[openshift_aws_node_group_type]) | list }}"
+
+openshift_aws_elb_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
+openshift_aws_elb_az_load_balancing: False
 
 openshift_aws_elb_security_groups:
-- "{{ openshift_aws_clusterid }}"
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"
+- "{{ openshift_aws_clusterid }}"  # default sg
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"  # node type sg
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s"  # node type sg k8s
 
 openshift_aws_elb_instance_filter:
   "tag:clusterid": "{{ openshift_aws_clusterid }}"
   "tag:host-type": "{{ openshift_aws_node_group_type }}"
   instance-state-name: running
 
-openshift_aws_launch_config_security_groups:
-- "{{ openshift_aws_clusterid }}"  # default sg
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"  # node type sg
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s"  # node type sg k8s
-
+openshift_aws_security_groups_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
 openshift_aws_node_security_groups:
   default:
     name: "{{ openshift_aws_clusterid }}"
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
index 06e1f9602..a9893c0a7 100644
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
@@ -9,17 +9,17 @@ class FilterModule(object):
     ''' Custom ansible filters for use by openshift_aws role'''
 
     @staticmethod
-    def build_instance_tags(clusterid, status='owned'):
+    def build_instance_tags(clusterid):
         ''' This function will return a dictionary of the instance tags.
 
             The main desire to have this inside of a filter_plugin is that we
             need to build the following key.
 
-            {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'}
+            {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid }}"}
 
         '''
         tags = {'clusterid': clusterid,
-                'kubernetes.io/cluster/{}'.format(clusterid): status}
+                'kubernetes.io/cluster/{}'.format(clusterid): clusterid}
 
         return tags
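
To make the new ELB wiring concrete: assuming openshift_aws_clusterid=mycluster and a master node group, the defaults and the simplified filter above evaluate roughly to the following (a sketch; dict key order from keys() is not guaranteed):

    openshift_aws_elb_name_dict:
      master:
        external: mycluster-master-external
        internal: mycluster-master-internal
    elbs:
    - mycluster-master-external
    - mycluster-master-internal
    openshift_aws_node_group_config_tags:
      clusterid: mycluster
      kubernetes.io/cluster/mycluster: mycluster
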
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 0dac1c23d..0aac40ddd 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -21,10 +21,6 @@
   - "'results' in amiout"
   - amiout.results|length > 0
 
-- when: openshift_aws_create_security_groups
-  name: "Create {{ openshift_aws_node_group_type }} security groups"
-  include: security_group.yml
-
 - when: openshift_aws_create_launch_config
   name: "Create {{ openshift_aws_node_group_type }} launch config"
   include: launch_config.yml
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
index 7bc3184df..56abe9dd7 100644
--- a/roles/openshift_aws/tasks/elb.yml
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -9,12 +9,6 @@
 - name: debug
   debug: var=vpcout
 
-- name: fetch the remote instances
-  ec2_remote_facts:
-    region: "{{ openshift_aws_region }}"
-    filters: "{{ openshift_aws_elb_instance_filter }}"
-  register: instancesout
-
 - name: fetch the default subnet id
   ec2_vpc_subnet_facts:
     region: "{{ openshift_aws_region }}"
@@ -23,7 +17,7 @@
       vpc-id: "{{ vpcout.vpcs[0].id }}"
   register: subnetout
 
-- name:
+- name: dump the elb listeners
   debug:
     msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
              if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
@@ -33,6 +27,7 @@
   ec2_elb_lb:
     name: "{{ l_openshift_aws_elb_name }}"
     state: present
+    cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}"
     security_group_names: "{{ openshift_aws_elb_security_groups }}"
     idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
     region: "{{ openshift_aws_region }}"
@@ -43,25 +38,9 @@
                if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
                else openshift_aws_elb_listeners }}"
     scheme: "{{ openshift_aws_elb_scheme }}"
-    tags:
-      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+    tags: "{{ openshift_aws_elb_tags }}"
   register: new_elb
 
-# It is necessary to ignore_errors here because the instances are not in 'ready'
-# state when first added to ELB
-- name: "Add instances to ELB {{ l_openshift_aws_elb_name }}"
-  ec2_elb:
-    instance_id: "{{ item.id }}"
-    ec2_elbs: "{{ l_openshift_aws_elb_name }}"
-    state: present
-    region: "{{ openshift_aws_region }}"
-    wait: False
-  with_items: "{{ instancesout.instances }}"
-  ignore_errors: True
-  retries: 10
-  register: elb_call
-  until: elb_call|succeeded
-
 - debug:
     msg: "{{ item }}"
   with_items:
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index 8b7b02a0e..94aca5a35 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -19,7 +19,7 @@
 - name: fetch the security groups for launch config
   ec2_group_facts:
     filters:
-      group-name: "{{ openshift_aws_launch_config_security_groups }}"
+      group-name: "{{ openshift_aws_elb_security_groups }}"
       vpc-id: "{{ vpcout.vpcs[0].id }}"
     region: "{{ openshift_aws_region }}"
   register: ec2sgs
diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml
index 737cfc7a6..1c99229ff 100644
--- a/roles/openshift_aws/tasks/master_facts.yml
+++ b/roles/openshift_aws/tasks/master_facts.yml
@@ -3,20 +3,18 @@
   ec2_elb_facts:
     region: "{{ openshift_aws_region }}"
     names:
-    - "{{ item }}"
-  with_items:
-  - "{{ openshift_aws_elb_name }}-external"
-  - "{{ openshift_aws_elb_name }}-internal"
+    - "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['internal'] }}"
   delegate_to: localhost
   register: elbs
 
 - debug: var=elbs
+  run_once: true
 
 - name: set fact
   set_fact:
-    openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
+    openshift_master_cluster_hostname: "{{ elbs.elbs[0].dns_name }}"
     osm_custom_cors_origins:
-    - "{{ elbs.results[1].elbs[0].dns_name }}"
+    - "{{ elbs.elbs[0].dns_name }}"
    - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
     - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
   with_items: "{{ groups['masters'] }}"
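
Because master_facts.yml no longer loops over two ELB names with with_items, the registered `elbs` variable changes shape, which is why the lookups above change; roughly (fields abbreviated, dns_name illustrative):

    # before: one entry per looped name -> elbs.results[1].elbs[0].dns_name
    # after: a single internal-ELB query -> elbs.elbs[0].dns_name
    elbs:
      elbs:
      - dns_name: internal-mycluster-master-internal-1234.us-east-1.elb.amazonaws.com
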
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index a8518d43a..e99017b9f 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -7,6 +7,38 @@
     name: create s3 bucket for registry
     include: s3.yml
 
+- when: openshift_aws_create_security_groups
+  block:
+  - name: "Create {{ openshift_aws_node_group_type }} security groups"
+    include: security_group.yml
+
+  - name: "Create {{ openshift_aws_node_group_type }} security groups"
+    include: security_group.yml
+    vars:
+      openshift_aws_node_group_type: infra
+
+- name: create our master internal load balancer
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: internal
+    openshift_aws_elb_scheme: internal
+    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['internal'] }}"
+
+- name: create our master external load balancer
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: external
+    openshift_aws_elb_scheme: internet-facing
+    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['external'] }}"
+
+- name: create our infra node external load balancer
+  include: elb.yml
+  vars:
+    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict['infra']['external'] }}"
+    openshift_aws_elb_direction: external
+    openshift_aws_elb_scheme: internet-facing
+    openshift_aws_node_group_type: infra
+
 - name: include scale group creation for master
   include: build_node_group.yml
 
@@ -22,20 +54,6 @@
     delay: 3
   until: instancesout.instances|length > 0
 
-- name: create our master internal load balancers
-  include: elb.yml
-  vars:
-    openshift_aws_elb_direction: internal
-    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-internal"
-    openshift_aws_elb_scheme: internal
-
-- name: create our master external load balancers
-  include: elb.yml
-  vars:
-    openshift_aws_elb_direction: external
-    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-external"
-    openshift_aws_elb_scheme: internet-facing
-
 - name: wait for ssh to become available
   wait_for:
     port: 22
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
index 161e72fb4..e1fb99b02 100644
--- a/roles/openshift_aws/tasks/security_group.yml
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -38,8 +38,7 @@
 
 - name: tag sg groups with proper tags
   ec2_tag:
-    tags:
-      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+    tags: "{{ openshift_aws_security_groups_tags }}"
     resource: "{{ item.group_id }}"
     region: "{{ openshift_aws_region }}"
   with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 419679bc2..fad1ff5de 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -18,9 +18,7 @@
 
 - name: Reload generated facts
   openshift_facts:
-  when: install_result | changed
-  delegate_to: "{{ openshift_ca_host }}"
-  run_once: true
+  when: hostvars[openshift_ca_host].install_result | changed
 
 - name: Create openshift_ca_config_dir if it does not exist
   file:
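
The reordering in provision.yml is the point of the change: the ELBs now exist before build_node_group.yml runs, so the scale groups can reference them at creation through the new `elbs:` key instead of the removed ec2_elb retry loop. The openshift_ca change uses the same cross-host pattern; a minimal sketch of it (hypothetical standalone task):

    - name: act only when the CA host's registered task changed
      openshift_facts:
      when: hostvars[openshift_ca_host].install_result | changed
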
diff --git a/roles/openshift_docker_gc/defaults/main.yml b/roles/openshift_docker_gc/defaults/main.yml
new file mode 100644
index 000000000..9d79de8a1
--- /dev/null
+++ b/roles/openshift_docker_gc/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+r_enable_docker_gc: "{{ openshift_crio_enable_docker_gc | default(False) }}"
+r_docker_gc_node_selectors: "{{ openshift_crio_docker_gc_node_selector | default({}) }}"
diff --git a/roles/openshift_docker_gc/meta/main.yml b/roles/openshift_docker_gc/meta/main.yml
new file mode 100644
index 000000000..f88a7c533
--- /dev/null
+++ b/roles/openshift_docker_gc/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+  author: OpenShift
+  description: docker garbage collection
+  company: Red Hat, Inc
+  license: ASL 2.0
+  min_ansible_version: 2.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_docker_gc/tasks/main.yaml b/roles/openshift_docker_gc/tasks/main.yaml
new file mode 100644
index 000000000..9ba551479
--- /dev/null
+++ b/roles/openshift_docker_gc/tasks/main.yaml
@@ -0,0 +1,27 @@
+---
+- name: Create docker-gc tempdir
+  command: mktemp -d
+  register: templates_tmpdir
+
+# NOTE: oc_adm_policy_user does not support -z (yet)
+- name: Add dockergc as privileged
+  shell: oc adm policy add-scc-to-user -z dockergc privileged
+#  oc_adm_policy_user:
+#    user: dockergc
+#    resource_kind: scc
+#    resource_name: privileged
+#    state: present
+
+- name: Create dockergc DaemonSet
+  become: yes
+  template:
+    src: dockergc-ds.yaml.j2
+    dest: "{{ templates_tmpdir.stdout }}/dockergc-ds.yaml"
+
+- name: Apply dockergc DaemonSet
+  oc_obj:
+    state: present
+    kind: DaemonSet
+    name: "dockergc"
+    files:
+    - "{{ templates_tmpdir.stdout }}/dockergc-ds.yaml"
diff --git a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
new file mode 100644
index 000000000..53e8b448b
--- /dev/null
+++ b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
@@ -0,0 +1,58 @@
+apiVersion: v1
+kind: List
+items:
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: dockergc
+  # You must grant privileged via: oadm policy add-scc-to-user -z dockergc privileged
+  # in order for the dockergc to access the docker socket and root directory
+- apiVersion: extensions/v1beta1
+  kind: DaemonSet
+  metadata:
+    name: dockergc
+    labels:
+      app: dockergc
+  spec:
+    template:
+      metadata:
+        labels:
+          app: dockergc
+          name: dockergc
+      spec:
+{# Only set nodeSelector if the dict is not empty #}
+{% if r_docker_gc_node_selectors %}
+        nodeSelector:
+{% for k,v in r_docker_gc_node_selectors.items() %}
+          {{ k }}: {{ v }}{% endfor %}{% endif %}
+
+        serviceAccountName: dockergc
+        containers:
+        - image: openshift/origin:latest
+          args:
+          - "ex"
+          - "dockergc"
+          - "--image-gc-low-threshold=60"
+          - "--image-gc-high-threshold=80"
+          - "--minimum-ttl-duration=1h0m0s"
+          securityContext:
+            privileged: true
+          name: dockergc
+          resources:
+            requests:
+              memory: 30Mi
+              cpu: 50m
+          volumeMounts:
+          - name: docker-root
+            readOnly: true
+            mountPath: /var/lib/docker
+          - name: docker-socket
+            readOnly: false
+            mountPath: /var/run/docker.sock
+        volumes:
+        - name: docker-root
+          hostPath:
+            path: /var/lib/docker
+        - name: docker-socket
+          hostPath:
+            path: /var/run/docker.sock
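
With the example selector from hosts.example ({'runtime': 'cri-o'}), the Jinja block in the template above renders the pod spec roughly as:

    spec:
      nodeSelector:
        runtime: cri-o
      serviceAccountName: dockergc
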
diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml
index 383e6edb5..50a5252cc 100644
--- a/roles/openshift_management/tasks/add_container_provider.yml
+++ b/roles/openshift_management/tasks/add_container_provider.yml
@@ -10,6 +10,18 @@
 - name: Ensure OpenShift facts are loaded
   openshift_facts:
 
+- name: Ensure we use openshift_master_cluster_public_hostname if it is available
+  set_fact:
+    l_cluster_hostname: "{{ openshift.master.cluster_public_hostname }}"
+  when:
+  - openshift.master.cluster_public_hostname is defined
+
+- name: Ensure we default to the first master if openshift_master_cluster_public_hostname is unavailable
+  set_fact:
+    l_cluster_hostname: "{{ openshift.master.cluster_hostname }}"
+  when:
+  - l_cluster_hostname is not defined
+
 - name: Ensure the management SA Secrets are read
   oc_serviceaccount_secret:
     state: list
@@ -59,7 +71,7 @@
     connection_configurations:
     - authentication: {auth_key: "{{ management_bearer_token }}", authtype: bearer, type: AuthToken}
       endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
-      hostname: "{{ openshift.master.cluster_public_hostname }}"
+      hostname: "{{ l_cluster_hostname }}"
       name: "{{ openshift_management_project }}"
       port: "{{ openshift.master.api_port }}"
       type: "ManageIQ::Providers::Openshift::ContainerManager"
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index dafafda08..fe78dea66 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -64,7 +64,7 @@ openshift_master_config_dir_default: "{{ (openshift.common.config_base | default
 openshift_master_config_dir: "{{ openshift_master_config_dir_default }}"
 openshift_master_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}"
 
-openshift_master_node_config_networkconfig_mtu: 1450
+openshift_master_node_config_networkconfig_mtu: "{{ openshift_node_sdn_mtu | default(1450) }}"
 
 openshift_master_node_config_kubeletargs_cpu: 500m
 openshift_master_node_config_kubeletargs_mem: 512M
@@ -103,7 +103,7 @@ openshift_master_node_config_default_edits:
     value:
     - 'true'
 - key: networkConfig.mtu
-  value: 8951
+  value: "{{ openshift_master_node_config_networkconfig_mtu }}"
 - key: networkConfig.networkPluginName
   value: "{{ r_openshift_master_sdn_network_plugin_name }}"
 - key: networkPluginName
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 37f48e724..0c6d8db38 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -110,5 +110,8 @@ openshift_node_use_kuryr: "{{ openshift_node_use_kuryr_default }}"
 openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
 openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
 
+openshift_node_config_dir_default: "/etc/origin/node"
+openshift_node_config_dir: "{{ openshift_node_config_dir_default }}"
+
 openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
 openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}"
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index cf22181a8..8cf41ab4c 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -25,11 +25,11 @@
     state: "{{ item.state | default('present') }}"
   with_items:
   # add the kubeconfig
-  - line: "KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig"
+  - line: "KUBECONFIG={{ openshift_node_config_dir }}/bootstrap.kubeconfig"
     regexp: "^KUBECONFIG=.*"
   # remove the config file.  This comes from openshift_facts
-  - regexp: "^CONFIG_FILE=.*"
-    state: absent
+  - line: "CONFIG_FILE={{ openshift_node_config_dir }}/node-config.yaml"
+    regexp: "^CONFIG_FILE=.*"
 
 - name: include aws sysconfig credentials
   include: aws.yml
@@ -76,7 +76,7 @@
     state: link
     force: yes
   with_items:
-  - /var/lib/origin/openshift.local.config/node/node-client-ca.crt
+  - "{{ openshift_node_config_dir }}/node-client-ca.crt"
 
 - when: rpmgenerated_config.stat.exists
   block:
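
After the bootstrap.yml change, the node sysconfig file carries both variables pointed at the configurable directory; with the default openshift_node_config_dir the managed lines come out as:

    KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig
    CONFIG_FILE=/etc/origin/node/node-config.yaml
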
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 00995eee6..d217b90fb 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -6,16 +6,6 @@
 openshift_prometheus_namespace: prometheus
 
 openshift_prometheus_node_selector: {"region":"infra"}
 
-# image defaults
-openshift_prometheus_image_prefix: "openshift/"
-openshift_prometheus_image_version: "v2.0.0-dev.3"
-openshift_prometheus_proxy_image_prefix: "openshift/"
-openshift_prometheus_proxy_image_version: "v1.0.0"
-openshift_prometheus_alertmanager_image_prefix: "openshift/"
-openshift_prometheus_alertmanager_image_version: "v0.9.1"
-openshift_prometheus_alertbuffer_image_prefix: "openshift/"
-openshift_prometheus_alertbuffer_image_version: "v0.0.2"
-
 # additional prometheus rules file
 openshift_prometheus_additional_rules_file: null
diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml
index 523a64334..332104d77 100644
--- a/roles/openshift_prometheus/tasks/main.yaml
+++ b/roles/openshift_prometheus/tasks/main.yaml
@@ -1,4 +1,20 @@
 ---
+- name: Set default image variables based on deployment_type
+  include_vars: "{{ item }}"
+  with_first_found:
+  - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+  - "default_images.yml"
+
+- name: Set image facts
+  set_fact:
+    openshift_prometheus_image_prefix: "{{ openshift_prometheus_image_prefix | default(__openshift_prometheus_image_prefix) }}"
+    openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default(__openshift_prometheus_image_version) }}"
+    openshift_prometheus_proxy_image_prefix: "{{ openshift_prometheus_proxy_image_prefix | default(openshift_prometheus_image_prefix) }}"
+    openshift_prometheus_proxy_image_version: "{{ openshift_prometheus_proxy_image_version | default(__openshift_prometheus_proxy_image_version) }}"
+    openshift_prometheus_alertmanager_image_prefix: "{{ openshift_prometheus_alertmanager_image_prefix | default(openshift_prometheus_image_prefix) }}"
+    openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default(__openshift_prometheus_alertmanager_image_version) }}"
+    openshift_prometheus_alertbuffer_image_prefix: "{{ openshift_prometheus_alertbuffer_image_prefix | default(openshift_prometheus_image_prefix) }}"
+    openshift_prometheus_alertbuffer_image_version: "{{ openshift_prometheus_alertbuffer_image_version | default(__openshift_prometheus_alertbuffer_image_version) }}"
+
 - name: Create temp directory for doing work in on target
   command: mktemp -td openshift-prometheus-ansible-XXXXXX
diff --git a/roles/openshift_prometheus/vars/default_images.yml b/roles/openshift_prometheus/vars/default_images.yml
new file mode 100644
index 000000000..4092eb2d4
--- /dev/null
+++ b/roles/openshift_prometheus/vars/default_images.yml
@@ -0,0 +1,7 @@
+---
+# image defaults
+__openshift_prometheus_image_prefix: "openshift/"
+__openshift_prometheus_image_version: "v2.0.0-dev.3"
+__openshift_prometheus_proxy_image_version: "v1.0.0"
+__openshift_prometheus_alertmanager_image_version: "v0.9.1"
+__openshift_prometheus_alertbuffer_image_version: "v0.0.2"
diff --git a/roles/openshift_prometheus/vars/openshift-enterprise.yml b/roles/openshift_prometheus/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..0b45e03d3
--- /dev/null
+++ b/roles/openshift_prometheus/vars/openshift-enterprise.yml
@@ -0,0 +1,7 @@
+---
+# image defaults
+__openshift_prometheus_image_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_prometheus_image_version: "v3.7"
+__openshift_prometheus_proxy_image_version: "v3.7"
+__openshift_prometheus_alertmanager_image_version: "v3.7"
+__openshift_prometheus_alertbuffer_image_version: "v3.7"
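
The with_first_found lookup above prefers a vars file named after the deployment type and falls back to default_images.yml, so an install with openshift_deployment_type=openshift-enterprise resolves image facts roughly to:

    openshift_prometheus_image_prefix: "registry.access.redhat.com/openshift3/"
    openshift_prometheus_image_version: "v3.7"

while Origin installs keep the openshift/ prefix and the per-component upstream versions. User-supplied values still win because each set_fact line defaults to the double-underscore variable only when the public one is unset.
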
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index d41245093..95ba9fe4c 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -10,6 +10,11 @@
   - name: Ensure libselinux-python is installed
     package: name=libselinux-python state=present
 
+  - name: Remove openshift_additional.repo file
+    file:
+      dest: /etc/yum.repos.d/openshift_additional.repo
+      state: absent
+
   - name: Create any additional repos that are defined
     yum_repository:
       description: "{{ item.description | default(item.name | default(item.id)) }}"
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index a6c168bc7..70b236033 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -69,3 +69,21 @@
   - openshift_clusterid is not defined
   - openshift_cloudprovider_kind is defined
   - openshift_cloudprovider_kind == 'aws'
+
+- name: Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive
+  fail:
+    msg: >
+      Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive,
+      do not set both to true. ansible_service_broker_install defaults to true.
+  when:
+  - ansible_service_broker_remove | default(false) | bool
+  - ansible_service_broker_install | default(true) | bool
+
+- name: Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive
+  fail:
+    msg: >
+      Ensure that template_service_broker_remove and template_service_broker_install are mutually exclusive,
+      do not set both to true. template_service_broker_install defaults to true.
+  when:
+  - template_service_broker_remove | default(false) | bool
+  - template_service_broker_install | default(true) | bool
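
These guards matter because the install flags now default to true: an inventory that sets only the remove flag would otherwise try to remove and reinstall in the same run. A hypothetical inventory that removes the Ansible service broker cleanly:

    ansible_service_broker_remove: true
    ansible_service_broker_install: false
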
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
index 9d55185c8..cd7bda2c6 100644
--- a/roles/openshift_service_catalog/tasks/generate_certs.yml
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -60,7 +60,7 @@
   register: apiserver_ca
 
 - shell: >
-    oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
   register: get_apiservices
   changed_when: no
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index d17468b5c..3507330e3 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -38,7 +38,7 @@
 
 - name: Make kube-service-catalog project network global
   command: >
-    oc adm pod-network make-projects-global kube-service-catalog
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog
 
 - include: generate_certs.yml
 
@@ -88,14 +88,14 @@
   vars:
     original_content: "{{ edit_yaml.results.results[0] | to_yaml }}"
   when:
-  - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+  - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
 
 # only do this if we don't already have the updated role info
 - name: update edit role for service catalog and pod preset access
   command: >
-    oc replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
   when:
-  - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+  - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
 
 - oc_obj:
     name: admin
@@ -111,14 +111,14 @@
   vars:
     original_content: "{{ admin_yaml.results.results[0] | to_yaml }}"
   when:
-  - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+  - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
 
 # only do this if we don't already have the updated role info
 - name: update admin role for service catalog and pod preset access
   command: >
-    oc replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
   when:
-  - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+  - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
 
 - oc_obj:
     name: view
@@ -139,7 +139,7 @@
 # only do this if we don't already have the updated role info
 - name: update view role for service catalog access
   command: >
-    oc replace -f {{ mktemp.stdout }}/view_sc_patch.yml
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml
   when:
   - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
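
Every service-catalog task in this file swaps a bare `oc` for the fact-derived client binary plus an explicit admin kubeconfig. On an Origin master, where openshift.common.client_binary typically resolves to oc, the first task above renders roughly as:

    oc --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog
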
diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml
index ca9844e79..a832e1f85 100644
--- a/roles/openshift_service_catalog/tasks/remove.yml
+++ b/roles/openshift_service_catalog/tasks/remove.yml
@@ -1,7 +1,7 @@
 ---
 - name: Remove Service Catalog APIServer
   command: >
-    oc delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
 
 # TODO: this module doesn't currently remove this
 #- name: Remove service catalog api service
@@ -48,7 +48,7 @@
 
 - name: Remove Service Catalog kube-system Role Bindings
   shell: >
-    oc process kube-system-service-catalog-role-bindings -n kube-system | oc delete --ignore-not-found -f -
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
 
 - oc_obj:
     kind: template
@@ -58,7 +58,7 @@
 
 - name: Remove Service Catalog kube-service-catalog Role Bindings
   shell: >
-    oc process service-catalog-role-bindings -n kube-service-catalog | oc delete --ignore-not-found -f -
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
 
 - oc_obj:
     kind: template
diff --git a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2
index 4629d5bb3..59cceafcf 100644
--- a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2
+++ b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2
@@ -12,6 +12,7 @@
     - get
     - list
     - watch
+    - patch
   - apiGroups:
     - "settings.k8s.io"
     attributeRestrictions: null
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index d0bc0e028..abe411f67 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -119,13 +119,13 @@ are an exception:
 Additionally, this role's behavior responds to the following registry-specific
 variables:
 
-| Name                                          | Default value                | Description                             |
-|-----------------------------------------------|------------------------------|-----------------------------------------|
-| openshift_hosted_registry_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes
-| openshift_hosted_registry_glusterfs_path      | glusterfs-registry-volume    | The name for the GlusterFS volume that will provide registry storage
-| openshift_hosted_registry_glusterfs_readonly  | False                        | Whether the GlusterFS volume should be read-only
-| openshift_hosted_registry_glusterfs_swap      | False                        | Whether to swap an existing registry's storage volume for a GlusterFS volume
-| openshift_hosted_registry_glusterfs_swapcopy  | True                         | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume
+| Name                                                   | Default value                | Description                             |
+|--------------------------------------------------------|------------------------------|-----------------------------------------|
+| openshift_hosted_registry_storage_glusterfs_endpoints  | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes
+| openshift_hosted_registry_storage_glusterfs_path       | glusterfs-registry-volume    | The name for the GlusterFS volume that will provide registry storage
+| openshift_hosted_registry_storage_glusterfs_readonly   | False                        | Whether the GlusterFS volume should be read-only
+| openshift_hosted_registry_storage_glusterfs_swap       | False                        | Whether to swap an existing registry's storage volume for a GlusterFS volume
+| openshift_hosted_registry_storage_glusterfs_swapcopy   | True                         | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume
 
 Dependencies
 ------------
diff --git a/roles/template_service_broker/tasks/main.yml b/roles/template_service_broker/tasks/main.yml
index d7ca970c7..da8aa291b 100644
--- a/roles/template_service_broker/tasks/main.yml
+++ b/roles/template_service_broker/tasks/main.yml
@@ -2,7 +2,7 @@
 # do any asserts here
 
 - include: install.yml
-  when: template_service_broker_install | default(false) | bool
+  when: template_service_broker_install | default(true) | bool
 
 - include: remove.yml
   when: template_service_broker_remove | default(false) | bool