Diffstat (limited to 'roles')
44 files changed, 603 insertions, 120 deletions
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
index dc05b03b5..34110ca99 100644
--- a/roles/ansible_service_broker/defaults/main.yml
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -14,3 +14,4 @@ ansible_service_broker_launch_apb_on_bind: false
 ansible_service_broker_image_pull_policy: IfNotPresent
 ansible_service_broker_sandbox_role: edit
 ansible_service_broker_auto_escalate: false
+ansible_service_broker_local_registry_whitelist: []
diff --git a/roles/ansible_service_broker/tasks/generate_certs.yml b/roles/ansible_service_broker/tasks/generate_certs.yml
new file mode 100644
index 000000000..50156a35c
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/generate_certs.yml
@@ -0,0 +1,35 @@
+---
+
+- when: ansible_service_broker_certs_dir is undefined
+  block:
+    - name: Create ansible-service-broker cert directory
+      file:
+        path: "{{ openshift.common.config_base }}/ansible-service-broker"
+        state: directory
+        mode: 0755
+      check_mode: no
+
+    - set_fact:
+        ansible_service_broker_certs_dir: "{{ openshift.common.config_base }}/ansible-service-broker"
+
+    - name: Create self signing ca cert
+      command: 'openssl req -nodes -x509 -newkey rsa:4096 -keyout {{ ansible_service_broker_certs_dir }}/key.pem -out {{ ansible_service_broker_certs_dir }}/cert.pem -days 365 -subj "/CN=asb-etcd.openshift-ansible-service-broker.svc"'
+      args:
+        creates: '{{ ansible_service_broker_certs_dir }}/cert.pem'
+
+    - name: Create self signed client cert
+      command: '{{ item.cmd }}'
+      args:
+        creates: '{{ item.creates }}'
+      with_items:
+        - cmd: openssl genrsa -out {{ ansible_service_broker_certs_dir }}/client.key 2048
+          creates: '{{ ansible_service_broker_certs_dir }}/client.key'
+        - cmd: 'openssl req -new -key {{ ansible_service_broker_certs_dir }}/client.key -out {{ ansible_service_broker_certs_dir }}/client.csr -subj "/CN=client"'
+          creates: '{{ ansible_service_broker_certs_dir }}/client.csr'
+        - cmd: openssl x509 -req -in {{ ansible_service_broker_certs_dir }}/client.csr -CA {{ ansible_service_broker_certs_dir }}/cert.pem -CAkey {{ ansible_service_broker_certs_dir }}/key.pem -CAcreateserial -out {{ ansible_service_broker_certs_dir }}/client.pem -days 1024
+          creates: '{{ ansible_service_broker_certs_dir }}/client.pem'
+
+- set_fact:
+    etcd_ca_cert: "{{ lookup('file', '{{ ansible_service_broker_certs_dir }}/cert.pem') }}"
+    etcd_client_cert: "{{ lookup('file', '{{ ansible_service_broker_certs_dir }}/client.pem') }}"
+    etcd_client_key: "{{ lookup('file', '{{ ansible_service_broker_certs_dir }}/client.key') }}"
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index 89a84c4df..926ed344e 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -32,6 +32,7 @@
 
 - include: validate_facts.yml
 
+- include: generate_certs.yml
 
 # Deployment of ansible-service-broker starts here
 - name: create openshift-ansible-service-broker project
@@ -116,6 +117,24 @@
       kubernetes.io/service-account.name: asb-client
   type: kubernetes.io/service-account-token
 
+- name: Create etcd-auth secret
+  oc_secret:
+    name: etcd-auth-secret
+    namespace: openshift-ansible-service-broker
+    contents:
+      - path: ca.crt
+        data: '{{ etcd_ca_cert }}'
+
+- name: Create broker-etcd-auth secret
+  oc_secret:
+    name: broker-etcd-auth-secret
+    namespace: openshift-ansible-service-broker
+    contents:
+      - path: client.crt
+        data: '{{ etcd_client_cert }}'
+      - path: client.key
+        data: '{{ etcd_client_key }}'
+
 - oc_secret:
     state: list
     namespace: openshift-ansible-service-broker
@@ -156,6 +175,34 @@
         app: openshift-ansible-service-broker
         service: asb
 
+- name: create asb-etcd service
+  oc_obj:
+    name: asb-etcd
+    namespace: openshift-ansible-service-broker
+    state: present
+    kind: Service
+    content:
+      path: /tmp/asbetcdsvcout
+      data:
+        apiVersion: v1
+        kind: Service
+        metadata:
+          name: asb-etcd
+          labels:
+            app: etcd
+            service: asb-etcd
+          annotations:
+            service.alpha.openshift.io/serving-cert-secret-name: etcd-tls
+        spec:
+          ports:
+            - name: port-2379
+              port: 2379
+              targetPort: 2379
+              protocol: TCP
+          selector:
+            app: etcd
+            service: asb-etcd
+
 - name: create route for ansible-service-broker service
   oc_route:
     name: asb-1338
@@ -227,6 +274,8 @@
                     mountPath: /etc/ansible-service-broker
                   - name: asb-tls
                     mountPath: /etc/tls/private
+                  - name: asb-etcd-auth
+                    mountPath: /var/run/asb-etcd-auth
                 ports:
                   - containerPort: 1338
                     protocol: TCP
@@ -249,7 +298,50 @@
                     scheme: HTTPS
                   initialDelaySeconds: 15
                   timeoutSeconds: 1
+              volumes:
+                - name: config-volume
+                  configMap:
+                    name: broker-config
+                    items:
+                      - key: broker-config
+                        path: config.yaml
+                - name: asb-tls
+                  secret:
+                    secretName: asb-tls
+                - name: asb-etcd-auth
+                  secret:
+                    secretName: broker-etcd-auth-secret
+
+- name: Create asb-etcd deployment config
+  oc_obj:
+    name: etcd
+    namespace: openshift-ansible-service-broker
+    state: present
+    kind: DeploymentConfig
+    content:
+      path: /tmp/dcout
+      data:
+        apiVersion: v1
+        kind: DeploymentConfig
+        metadata:
+          name: asb-etcd
+          labels:
+            app: etcd
+            service: asb-etcd
+        spec:
+          replicas: 1
+          selector:
+            app: etcd
+          strategy:
+            type: Rolling
+          template:
+            metadata:
+              labels:
+                app: etcd
+                service: asb-etcd
+            spec:
+              serviceAccount: asb
+              containers:
               - image: "{{ ansible_service_broker_etcd_image }}"
                 name: etcd
                 imagePullPolicy: IfNotPresent
@@ -258,8 +350,12 @@
                 args:
                   - "{{ ansible_service_broker_etcd_image_etcd_path }}"
                   - "--data-dir=/data"
-                  - "--listen-client-urls=http://0.0.0.0:2379"
-                  - "--advertise-client-urls=http://0.0.0.0:2379"
+                  - "--listen-client-urls=https://0.0.0.0:2379"
+                  - "--advertise-client-urls=https://0.0.0.0:2379"
+                  - "--client-cert-auth"
+                  - "--trusted-ca-file=/var/run/etcd-auth-secret/ca.crt"
+                  - "--cert-file=/etc/tls/private/tls.crt"
+                  - "--key-file=/etc/tls/private/tls.key"
                 ports:
                   - containerPort: 2379
                     protocol: TCP
@@ -267,21 +363,22 @@
                   - name: ETCDCTL_API
                     value: "3"
                 volumeMounts:
-                  - mountPath: /data
-                    name: etcd
+                  - name: etcd
+                    mountPath: /data
+                  - name: etcd-tls
+                    mountPath: /etc/tls/private
+                  - name: etcd-auth
+                    mountPath: /var/run/etcd-auth-secret
               volumes:
                 - name: etcd
                   persistentVolumeClaim:
                     claimName: etcd
-                - name: config-volume
-                  configMap:
-                    name: broker-config
-                    items:
-                      - key: broker-config
-                        path: config.yaml
-                - name: asb-tls
+                - name: etcd-tls
                   secret:
-                    secretName: asb-tls
+                    secretName: etcd-tls
+                - name: etcd-auth
+                  secret:
+                    secretName: etcd-auth-secret
 
 
 # TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
@@ -312,11 +409,16 @@
           org: {{ ansible_service_broker_registry_organization }}
           tag: {{ ansible_service_broker_registry_tag }}
          white_list: {{ ansible_service_broker_registry_whitelist }}
+        - type: local_registry
+          namespaces: ['openshift']
+          white_list: {{ ansible_service_broker_local_registry_whitelist }}
         dao:
-          etcd_host: 0.0.0.0
+          etcd_host: asb-etcd.openshift-ansible-service-broker.svc
           etcd_port: 2379
+          etcd_ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
+          etcd_client_cert: /var/run/asb-etcd-auth/client.crt
+          etcd_client_key: /var/run/asb-etcd-auth/client.key
         log:
-          logfile: /var/log/ansible-service-broker/asb.log
           stdout: true
           level: {{ ansible_service_broker_log_level }}
           color: true
diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml
index d8695bd3a..2ed156728 100644
--- a/roles/ansible_service_broker/tasks/main.yml
+++ b/roles/ansible_service_broker/tasks/main.yml
@@ -2,7 +2,7 @@
 # do any asserts here
 
 - include: install.yml
-  when: ansible_service_broker_install | default(false) | bool
+  when: ansible_service_broker_install | default(true) | bool
 
 - include: remove.yml
   when: ansible_service_broker_remove | default(false) | bool
diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml
index 51b86fb26..28dc967a0 100644
--- a/roles/ansible_service_broker/tasks/remove.yml
+++ b/roles/ansible_service_broker/tasks/remove.yml
@@ -52,12 +52,30 @@
     name: asb-client
     namespace: openshift-ansible-service-broker
 
+- name: Remove etcd-auth secret
+  oc_secret:
+    state: absent
+    name: etcd-auth-secret
+    namespace: openshift-ansible-service-broker
+
+- name: Remove broker-etcd-auth secret
+  oc_secret:
+    state: absent
+    name: broker-etcd-auth-secret
+    namespace: openshift-ansible-service-broker
+
 - name: remove ansible-service-broker service
   oc_service:
     name: asb
     namespace: openshift-ansible-service-broker
     state: absent
 
+- name: remove asb-etcd service
+  oc_service:
+    state: absent
+    name: asb-etcd
+    namespace: openshift-ansible-service-broker
+
 - name: remove etcd service
   oc_service:
     name: etcd
@@ -83,6 +101,14 @@
     kind: DeploymentConfig
     state: absent
 
+- name: remove Ansible Service Broker etcd deployment config
+  oc_obj:
+    name: asb-etcd
+    namespace: openshift-ansible-service-broker
+    kind: DeploymentConfig
+    state: absent
+
+
 - name: remove secret for broker auth
   oc_obj:
     name: asb-client
diff --git a/roles/ansible_service_broker/tasks/validate_facts.yml b/roles/ansible_service_broker/tasks/validate_facts.yml
index 604d24e1d..a2345551b 100644
--- a/roles/ansible_service_broker/tasks/validate_facts.yml
+++ b/roles/ansible_service_broker/tasks/validate_facts.yml
@@ -1,11 +1,9 @@
 ---
 - name: validate Dockerhub registry settings
-  fail: msg="To use the dockerhub registry, you must provide the ansible_service_broker_registry_user, ansible_service_broker_registry_password, and ansible_service_broker_registry_organization parameters"
+  fail: msg="To use the dockerhub registry, you must provide the ansible_service_broker_registry_organization"
   when:
     - ansible_service_broker_registry_type == 'dockerhub'
-    - not (ansible_service_broker_registry_user and
-           ansible_service_broker_registry_password and
-           ansible_service_broker_registry_organization)
+    - not ansible_service_broker_registry_organization
 
 
 - name: validate RHCC registry settings
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
index 8438e993f..248e0363d 100644
--- a/roles/ansible_service_broker/vars/default_images.yml
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -12,6 +12,6 @@ __ansible_service_broker_registry_name: dh
 __ansible_service_broker_registry_url: null
 __ansible_service_broker_registry_user: null
 __ansible_service_broker_registry_password: null
-__ansible_service_broker_registry_organization: null
+__ansible_service_broker_registry_organization: ansibleplaybookbundle
 __ansible_service_broker_registry_tag: latest
 __ansible_service_broker_registry_whitelist: []
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
index fc58b4fd8..c203f596e 100644
--- a/roles/ansible_service_broker/vars/openshift-enterprise.yml
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -3,7 +3,7 @@
 __ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose-
 __ansible_service_broker_image_tag: v3.7
 
-__ansible_service_broker_etcd_image_prefix: rhel7/
+__ansible_service_broker_etcd_image_prefix: registry.access.redhat.com/rhel7/
 __ansible_service_broker_etcd_image_tag: latest
 __ansible_service_broker_etcd_image_etcd_path: /bin/etcd
 
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index f6f2bd77e..c086c28df 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -33,3 +33,6 @@ r_crio_os_firewall_allow:
 
 openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['oo_masters_to_config']|default([]))
                                         or inventory_hostname in (groups['oo_nodes_to_config']|default([])) else False | bool }}"
+
+docker_alt_storage_path: /var/lib/containers/docker
+docker_default_storage_path: /var/lib/docker
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 1539af53f..3c814d8d8 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -25,6 +25,15 @@
     - not l_use_system_container
     - not l_use_crio_only
 
+- name: Ensure /var/lib/containers exists
+  file:
+    path: /var/lib/containers
+    state: directory
+
+- name: Fix SELinux Permissions on /var/lib/containers
+  command: "restorecon -R /var/lib/containers/"
+  changed_when: false
+
 - name: Use System Container Docker if Requested
   include: systemcontainer_docker.yml
   when:
@@ -36,3 +45,48 @@
   when:
     - l_use_crio
     - openshift_docker_is_node_or_master | bool
+
+- name: stat the docker data dir
+  stat:
+    path: "{{ docker_default_storage_path }}"
+  register: dockerstat
+
+- when:
+    - l_use_crio
+    - dockerstat.stat.islink is defined and not (dockerstat.stat.islink | bool)
+  block:
+    - name: stop the current running docker
+      systemd:
+        state: stopped
+        name: "{{ openshift.docker.service_name }}"
+
+    - name: "Ensure {{ docker_alt_storage_path }} exists"
+      file:
+        path: "{{ docker_alt_storage_path }}"
+        state: directory
+
+    - name: "Set the selinux context on {{ docker_alt_storage_path }}"
context on {{ docker_alt_storage_path }}" + command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}" + register: results + failed_when: + - results.rc == 1 + - "'already exists' not in results.stderr" + + - name: "restorecon the {{ docker_alt_storage_path }}" + command: "restorecon -r {{ docker_alt_storage_path }}" + + - name: Remove the old docker location + file: + state: absent + path: "{{ docker_default_storage_path }}" + + - name: Setup the link + file: + state: link + src: "{{ docker_alt_storage_path }}" + path: "{{ docker_default_storage_path }}" + + - name: start docker + systemd: + state: started + name: "{{ openshift.docker.service_name }}" diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index 67ede0d21..1e2d64293 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -170,10 +170,6 @@ dest: /etc/cni/net.d/openshift-sdn.conf src: 80-openshift-sdn.conf.j2 -- name: Fix SELinux Permissions on /var/lib/containers - command: "restorecon -R /var/lib/containers/" - changed_when: false - - name: Start the CRI-O service systemd: name: "cri-o" diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py index e88f3ae8d..7e7d0fa60 100644 --- a/roles/lib_openshift/library/oc_storageclass.py +++ b/roles/lib_openshift/library/oc_storageclass.py @@ -1664,7 +1664,7 @@ def main(): name=dict(default=None, type='str'), annotations=dict(default=None, type='dict'), parameters=dict(default=None, type='dict'), - provisioner=dict(required=True, type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']), + provisioner=dict(required=True, type='str'), api_version=dict(default='v1', type='str'), default_storage_class=dict(default="false", type='str'), ), diff --git a/roles/lib_openshift/src/ansible/oc_storageclass.py b/roles/lib_openshift/src/ansible/oc_storageclass.py index e9f3ebbd3..a8f371661 100644 --- a/roles/lib_openshift/src/ansible/oc_storageclass.py +++ b/roles/lib_openshift/src/ansible/oc_storageclass.py @@ -14,7 +14,7 @@ def main(): name=dict(default=None, type='str'), annotations=dict(default=None, type='dict'), parameters=dict(default=None, type='dict'), - provisioner=dict(required=True, type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']), + provisioner=dict(required=True, type='str'), api_version=dict(default='v1', type='str'), default_storage_class=dict(default="false", type='str'), ), diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml index 21da6b953..410b739e9 100644 --- a/roles/nuage_master/handlers/main.yaml +++ b/roles/nuage_master/handlers/main.yaml @@ -7,8 +7,13 @@ openshift.master.cluster_method == 'native' # TODO: need to fix up ignore_errors here +# We retry the controllers because the API may not be 100% initialized yet. 
 - name: restart master controllers
-  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+  command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+  retries: 3
+  delay: 5
+  register: result
+  until: result.rc == 0
   when: >
     (openshift_master_ha | bool) and
     (not master_controllers_service_status_changed | default(false)) and
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 5371588cf..9f3c14bad 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -12,7 +12,6 @@ openshift_aws_clusterid: default
 openshift_aws_region: us-east-1
 openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
 openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
-openshift_aws_kubernetes_cluster_status: "{{ openshift_aws_clusterid }}"
 
 openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
 openshift_aws_iam_cert_path: ''
@@ -48,7 +47,14 @@
   unhealthy_threshold: 2
   healthy_threshold: 2
 
-openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_name_dict:
+  master:
+    external: "{{ openshift_aws_elb_basename }}-external"
+    internal: "{{ openshift_aws_elb_basename }}-internal"
+  infra:
+    external: "{{ openshift_aws_elb_basename }}"
+
 openshift_aws_elb_idle_timout: 400
 openshift_aws_elb_scheme: internet-facing
 openshift_aws_elb_cert_arn: ''
@@ -75,6 +81,18 @@
       load_balancer_port: 443
       instance_protocol: tcp
       instance_port: 443
+  infra:
+    external:
+      - protocol: tcp
+        load_balancer_port: 80
+        instance_protocol: tcp
+        instance_port: 443
+        proxy_protocol: True
+      - protocol: tcp
+        load_balancer_port: 443
+        instance_protocol: tcp
+        instance_port: 443
+        proxy_protocol: True
 
 openshift_aws_node_group_config_master_volumes:
 - device_name: /dev/sdb
@@ -88,7 +106,7 @@
     device_type: gp2
     delete_on_termination: True
 
-openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
 openshift_aws_node_group_termination_policy: Default
 openshift_aws_node_group_replace_instances: []
 openshift_aws_node_group_replace_all_instances: False
@@ -114,6 +132,7 @@
       wait_for_instances: True
       termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
       replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+      elbs: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type].keys()| map('extract', openshift_aws_elb_name_dict[openshift_aws_node_group_type]) | list }}"
   compute:
     instance_type: m4.xlarge
     ami: "{{ openshift_aws_ami }}"
@@ -148,21 +167,22 @@
       type: infra
       termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
       replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+      elbs: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type].keys()| map('extract', openshift_aws_elb_name_dict[openshift_aws_node_group_type]) | list }}"
+
+openshift_aws_elb_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
+openshift_aws_elb_az_load_balancing: False
 
 openshift_aws_elb_security_groups:
-- "{{ openshift_aws_clusterid }}"
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"
+- "{{ openshift_aws_clusterid }}"  # default sg
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"  # node type sg
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s"  # node type sg k8s
 
 openshift_aws_elb_instance_filter:
   "tag:clusterid": "{{ openshift_aws_clusterid }}"
   "tag:host-type": "{{ openshift_aws_node_group_type }}"
   instance-state-name: running
 
-openshift_aws_launch_config_security_groups:
-- "{{ openshift_aws_clusterid }}"  # default sg
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"  # node type sg
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s"  # node type sg k8s
-
+openshift_aws_security_groups_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
 
 openshift_aws_node_security_groups:
   default:
     name: "{{ openshift_aws_clusterid }}"
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
index 06e1f9602..a9893c0a7 100644
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
@@ -9,17 +9,17 @@ class FilterModule(object):
     ''' Custom ansible filters for use by openshift_aws role'''
 
     @staticmethod
-    def build_instance_tags(clusterid, status='owned'):
+    def build_instance_tags(clusterid):
         ''' This function will return a dictionary of the instance tags.
 
             The main desire to have this inside of a filter_plugin is that we
             need to build the following key.
 
-            {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'}
+            {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid }}"}
 
         '''
         tags = {'clusterid': clusterid,
-                'kubernetes.io/cluster/{}'.format(clusterid): status}
+                'kubernetes.io/cluster/{}'.format(clusterid): clusterid}
 
         return tags
 
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 0dac1c23d..0aac40ddd 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -21,10 +21,6 @@
     - "'results' in amiout"
     - amiout.results|length > 0
 
-- when: openshift_aws_create_security_groups
-  name: "Create {{ openshift_aws_node_group_type }} security groups"
-  include: security_group.yml
-
 - when: openshift_aws_create_launch_config
   name: "Create {{ openshift_aws_node_group_type }} launch config"
   include: launch_config.yml
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
index 7bc3184df..56abe9dd7 100644
--- a/roles/openshift_aws/tasks/elb.yml
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -9,12 +9,6 @@
 - name: debug
   debug: var=vpcout
 
-- name: fetch the remote instances
-  ec2_remote_facts:
-    region: "{{ openshift_aws_region }}"
-    filters: "{{ openshift_aws_elb_instance_filter }}"
-  register: instancesout
-
 - name: fetch the default subnet id
   ec2_vpc_subnet_facts:
     region: "{{ openshift_aws_region }}"
@@ -23,7 +17,7 @@
       vpc-id: "{{ vpcout.vpcs[0].id }}"
   register: subnetout
 
-- name:
+- name: dump the elb listeners
   debug:
     msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
              if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
@@ -33,6 +27,7 @@
   ec2_elb_lb:
     name: "{{ l_openshift_aws_elb_name }}"
     state: present
+    cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}"
     security_group_names: "{{ openshift_aws_elb_security_groups }}"
     idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
     region: "{{ openshift_aws_region }}"
@@ -43,25 +38,9 @@
              if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
              else openshift_aws_elb_listeners }}"
     scheme: "{{ openshift_aws_elb_scheme }}"
-    tags:
-      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+    tags: "{{ openshift_aws_elb_tags }}"
   register: new_elb
 
-# It is necessary to ignore_errors here because the instances are not in 'ready'
-# state when first added to ELB
-- name: "Add instances to ELB {{ l_openshift_aws_elb_name }}"
-  ec2_elb:
-    instance_id: "{{ item.id }}"
-    ec2_elbs: "{{ l_openshift_aws_elb_name }}"
-    state: present
-    region: "{{ openshift_aws_region }}"
-    wait: False
-  with_items: "{{ instancesout.instances }}"
-  ignore_errors: True
-  retries: 10
-  register: elb_call
-  until: elb_call|succeeded
-
 - debug:
     msg: "{{ item }}"
   with_items:
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index 8b7b02a0e..94aca5a35 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -19,7 +19,7 @@
 - name: fetch the security groups for launch config
   ec2_group_facts:
     filters:
-      group-name: "{{ openshift_aws_launch_config_security_groups }}"
+      group-name: "{{ openshift_aws_elb_security_groups }}"
      vpc-id: "{{ vpcout.vpcs[0].id }}"
       region: "{{ openshift_aws_region }}"
   register: ec2sgs
diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml
index 737cfc7a6..1c99229ff 100644
--- a/roles/openshift_aws/tasks/master_facts.yml
+++ b/roles/openshift_aws/tasks/master_facts.yml
@@ -3,20 +3,18 @@
   ec2_elb_facts:
     region: "{{ openshift_aws_region }}"
     names:
-      - "{{ item }}"
-  with_items:
-    - "{{ openshift_aws_elb_name }}-external"
-    - "{{ openshift_aws_elb_name }}-internal"
+      - "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['internal'] }}"
   delegate_to: localhost
   register: elbs
 
 - debug: var=elbs
+  run_once: true
 
 - name: set fact
   set_fact:
-    openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
+    openshift_master_cluster_hostname: "{{ elbs.elbs[0].dns_name }}"
     osm_custom_cors_origins:
-      - "{{ elbs.results[1].elbs[0].dns_name }}"
+      - "{{ elbs.elbs[0].dns_name }}"
       - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
       - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
   with_items: "{{ groups['masters'] }}"
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index a8518d43a..e99017b9f 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -7,6 +7,38 @@
     name: create s3 bucket for registry
     include: s3.yml
 
+- when: openshift_aws_create_security_groups
+  block:
+    - name: "Create {{ openshift_aws_node_group_type }} security groups"
+      include: security_group.yml
+
+    - name: "Create {{ openshift_aws_node_group_type }} security groups"
+      include: security_group.yml
+      vars:
+        openshift_aws_node_group_type: infra
+
+- name: create our master internal load balancer
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: internal
+    openshift_aws_elb_scheme: internal
+    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['internal'] }}"
+
+- name: create our master external load balancer
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: external
+    openshift_aws_elb_scheme: internet-facing
+    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['external'] }}"
+
+- name: create our infra node external load balancer
+  include: elb.yml
+  vars:
+    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict['infra']['external'] }}"
+    openshift_aws_elb_direction: external
+    openshift_aws_elb_scheme: internet-facing
+    openshift_aws_node_group_type: infra
+
 - name: include scale group creation for master
   include: build_node_group.yml
@@ -22,20 +54,6 @@
       delay: 3
     until: instancesout.instances|length > 0
 
-- name: create our master internal load balancers
-  include: elb.yml
-  vars:
-    openshift_aws_elb_direction: internal
-    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-internal"
-    openshift_aws_elb_scheme: internal
-
-- name: create our master external load balancers
-  include: elb.yml
-  vars:
-    openshift_aws_elb_direction: external
-    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-external"
-    openshift_aws_elb_scheme: internet-facing
-
 - name: wait for ssh to become available
   wait_for:
     port: 22
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
index 161e72fb4..e1fb99b02 100644
--- a/roles/openshift_aws/tasks/security_group.yml
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -38,8 +38,7 @@
 
 - name: tag sg groups with proper tags
   ec2_tag:
-    tags:
-      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+    tags: "{{ openshift_aws_security_groups_tags }}"
     resource: "{{ item.group_id }}"
     region: "{{ openshift_aws_region }}"
   with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index fad1ff5de..587526d07 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -106,6 +106,36 @@
   delegate_to: "{{ openshift_ca_host }}"
   run_once: true
 
+# Create client-ca-bundle.crt containing old and new OpenShift CA
+# certificates. This bundle will be used when rolling the OpenShift CA
+# certificate.
+- name: Create client-ca-bundle.crt
+  block:
+    - command: mktemp -d /tmp/openshift-ansible-XXXXXX
+      register: openshift_ca_clientconfig_tmpdir
+      delegate_to: "{{ openshift_ca_host }}"
+    - copy:
+        src: "{{ item }}"
+        dest: "{{ openshift_ca_clientconfig_tmpdir.stdout }}/"
+        remote_src: true
+      with_items: "{{ g_master_legacy_ca_result.files | default([]) | oo_collect('path') }}"
+      delegate_to: "{{ openshift_ca_host }}"
+      run_once: true
+    - copy:
+        src: "{{ openshift_ca_config_dir }}/ca.crt"
+        dest: "{{ openshift_ca_clientconfig_tmpdir.stdout }}/"
+        remote_src: true
+      delegate_to: "{{ openshift_ca_host }}"
+      run_once: true
+    - assemble:
+        src: "{{ openshift_ca_clientconfig_tmpdir.stdout }}"
+        dest: "{{ openshift_ca_config_dir }}/client-ca-bundle.crt"
+        mode: 0644
+        owner: root
+        group: root
+      delegate_to: "{{ openshift_ca_host }}"
+      run_once: true
+
 - name: Test local loopback context
   command: >
     {{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view
diff --git a/roles/openshift_docker_gc/defaults/main.yml b/roles/openshift_docker_gc/defaults/main.yml
new file mode 100644
index 000000000..9d79de8a1
--- /dev/null
+++ b/roles/openshift_docker_gc/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+r_enable_docker_gc: "{{ openshift_crio_enable_docker_gc | default(False) }}"
+r_docker_gc_node_selectors: "{{ openshift_crio_docker_gc_node_selector | default({}) }}"
diff --git a/roles/openshift_docker_gc/meta/main.yml b/roles/openshift_docker_gc/meta/main.yml
new file mode 100644
index 000000000..f88a7c533
--- /dev/null
+++ b/roles/openshift_docker_gc/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+  author: OpenShift
+  description: docker garbage collection
+  company: Red Hat, Inc
+  license: ASL 2.0
+  min_ansible_version: 2.2
+  platforms:
+    - name: EL
+      versions:
+        - 7
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_docker_gc/tasks/main.yaml b/roles/openshift_docker_gc/tasks/main.yaml
new file mode 100644
index 000000000..9ba551479
--- /dev/null
+++ b/roles/openshift_docker_gc/tasks/main.yaml
@@ -0,0 +1,27 @@
+---
+- name: Create docker-gc tempdir
+  command: mktemp -d
+  register: templates_tmpdir
+
+# NOTE: oc_adm_policy_user does not support -z (yet)
+- name: Add dockergc as privileged
+  shell: oc adm policy add-scc-to-user -z dockergc privileged
+#  oc_adm_policy_user:
+#    user: dockergc
+#    resource_kind: scc
+#    resource_name: privileged
+#    state: present
+
+- name: Create dockergc DaemonSet
+  become: yes
+  template:
+    src: dockergc-ds.yaml.j2
+    dest: "{{ templates_tmpdir.stdout }}/dockergc-ds.yaml"
+
+- name: Apply dockergc DaemonSet
+  oc_obj:
+    state: present
+    kind: DaemonSet
+    name: "dockergc"
+    files:
+      - "{{ templates_tmpdir.stdout }}/dockergc-ds.yaml"
diff --git a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
new file mode 100644
index 000000000..53e8b448b
--- /dev/null
+++ b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
@@ -0,0 +1,58 @@
+apiVersion: v1
+kind: List
+items:
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: dockergc
+  # You must grant privileged via: oadm policy add-scc-to-user -z dockergc privileged
+  # in order for the dockergc to access the docker socket and root directory
+- apiVersion: extensions/v1beta1
+  kind: DaemonSet
+  metadata:
+    name: dockergc
+    labels:
+      app: dockergc
+  spec:
+    template:
+      metadata:
+        labels:
+          app: dockergc
+        name: dockergc
+      spec:
+{# Only set nodeSelector if the dict is not empty #}
+{% if r_docker_gc_node_selectors %}
+        nodeSelector:
+{% for k,v in r_docker_gc_node_selectors.items() %}
+          {{ k }}: {{ v }}{% endfor %}{% endif %}
+
+        serviceAccountName: dockergc
+        containers:
+        - image: openshift/origin:latest
+          args:
+          - "ex"
+          - "dockergc"
+          - "--image-gc-low-threshold=60"
+          - "--image-gc-high-threshold=80"
+          - "--minimum-ttl-duration=1h0m0s"
+          securityContext:
+            privileged: true
+          name: dockergc
+          resources:
+            requests:
+              memory: 30Mi
+              cpu: 50m
+          volumeMounts:
+          - name: docker-root
+            readOnly: true
+            mountPath: /var/lib/docker
+          - name: docker-socket
+            readOnly: false
+            mountPath: /var/run/docker.sock
+        volumes:
+        - name: docker-root
+          hostPath:
+            path: /var/lib/docker
+        - name: docker-socket
+          hostPath:
+            path: /var/run/docker.sock
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 33028fea4..a88945538 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1289,7 +1289,7 @@ def get_container_openshift_version(facts):
     If containerized, see if we can determine the installed version via the
     systemd environment files.
     """
-    for filename in ['/etc/sysconfig/%s-master', '/etc/sysconfig/%s-node']:
+    for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']:
         env_path = filename % facts['common']['service_type']
         if not os.path.exists(env_path):
             continue
diff --git a/roles/openshift_hosted_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml
index ce7688581..88b893448 100644
--- a/roles/openshift_hosted_metrics/handlers/main.yml
+++ b/roles/openshift_hosted_metrics/handlers/main.yml
@@ -4,8 +4,13 @@
   when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
   notify: Verify API Server
 
+# We retry the controllers because the API may not be 100% initialized yet.
 - name: restart master controllers
-  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+  command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+  retries: 3
+  delay: 5
+  register: result
+  until: result.rc == 0
   when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
 
 - name: Verify API Server
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index ce7688581..acc838bd1 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -4,8 +4,13 @@
   when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
   notify: Verify API Server
 
+# We retry the controllers because the API may not be 100% initialized yet.
 - name: restart master controllers
-  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+  command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+  retries: 3
+  delay: 5
+  register: result
+  until: result.rc == 0
   when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
 
 - name: Verify API Server
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index b98e281a3..cec295d65 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -78,7 +78,7 @@
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
     openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
     openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}"
-    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
+    openshift_logging_elasticsearch_pvc_name: "{{ outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
     openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
 
     openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
@@ -136,7 +136,7 @@
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
     openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
     openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}"
-    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
+    openshift_logging_elasticsearch_pvc_name: "{{ outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else openshift_logging_es_ops_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
     openshift_logging_elasticsearch_ops_deployment: true
     openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md
index 05ca27913..96de82669 100644
--- a/roles/openshift_management/README.md
+++ b/roles/openshift_management/README.md
@@ -89,6 +89,16 @@ App is created` task, we recommend running the
 [uninstall scripts](#uninstall) first before running the installer
 again.
 
+### Beta
+
+Only required for enterprise
+(`openshift_deployment_type=openshift-enterprise`) users:
+
+* `openshift_management_install_beta` - by setting this value to
+  `true` you acknowledge that this software is currently in BETA and
+  support may be limited or nonexistent. This is required to begin the
+  installation.
+
 
 # Requirements
@@ -129,6 +139,7 @@ installer.
 | `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. |
 | `openshift_management_project_description` | **No** | *CloudForms Management Engine* | Namespace/project description. |
+| `openshift_management_install_beta` | **No** | `false` | Boolean, by setting this value to `true` you acknowledge that this software is currently in BETA and support may be limited. Only required for *openshift-enterprise* users. |
 | `openshift_management_username` | **No** | `admin` | Default management username. Changing this values **does not change the username**. Only change this value if you have changed the name already and are running integration scripts (such as the [add container provider](#container-provider) script) |
 | `openshift_management_password` | **No** | `smartvm` | Default management password. Changing this values **does not change the password**. Only change this value if you have changed the password already and are running integration scripts (such as the [add-container-provider](#container-provider) script) |
 | **PRODUCT CHOICE** | | | | |
@@ -310,7 +321,7 @@ inventory. The following keys are required:
 **NOTE:** Ensure your are running PostgreSQL 9.5 or you may not be
 able to deploy the app successfully.
 
-Your inventory would contain a line similar to this:
+Your inventory would contain lines similar to this:
 
 ```ini
 [OSEv3:vars]
@@ -328,7 +339,11 @@ At run time you may run into errors similar to this:
 TASK [openshift_management : Ensure the CFME App is created] ***********************************
 task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74
 Tuesday 03 October 2017 15:30:44 -0400 (0:00:00.056)       0:00:12.278 *******
-{"cmd": "/usr/bin/oc create -f /tmp/postgresql-ZPEWQS -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "Error from server (BadRequest): error when creating \"/tmp/postgresql-ZPEWQS\": Endpoints in version \"v1\" cannot be handled as a Endpoints: [pos 218]: json: decNum: got first char 'f'\n", "stdout": ""}
+{"cmd": "/usr/bin/oc create -f /tmp/postgresql-ZPEWQS -n openshift-management",
+ "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "Error from server
+ (BadRequest): error when creating \"/tmp/postgresql-ZPEWQS\": Endpoints in version
+ \"v1\" cannot be handled as a Endpoints: [pos 218]: json: decNum: got first char
+ 'f'\n", "stdout": ""}
 ```
 
 Or like this:
@@ -338,7 +353,10 @@ TASK [openshift_management : Ensure the CFME App is created] ***********************************
 task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74
 Tuesday 03 October 2017 16:05:36 -0400 (0:00:00.052)       0:00:18.948 *******
 fatal: [m01.example.com]: FAILED! => {"changed": true, "failed": true, "msg":
-{"cmd": "/usr/bin/oc create -f /tmp/postgresql-igS5sx -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "The Endpoints \"postgresql\" is invalid: subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP address, (e.g. 10.9.8.7)\n", "stdout": ""},
+{"cmd": "/usr/bin/oc create -f /tmp/postgresql-igS5sx -n openshift-management", "kind":
+ "Endpoints", "results": {}, "returncode": 1, "stderr": "The Endpoints \"postgresql\"
+ is invalid: subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP
+ address, (e.g. 10.9.8.7)\n", "stdout": ""},
 ```
 
 While intimidating at first, there are useful bits of information in
@@ -568,6 +586,35 @@ automatically removed.
 
 You are urged to manually erase any data from old application or
 database deployments before attempting to initialize a new deployment.
 
+Failure to erase old PostgreSQL data can result in cascading
+errors. The postgres pod may enter a `crashloopbackoff` state. This
+will block the management pod from ever starting. The cause of the
+`crashloopbackoff` is due to incorrect file permissions on the
+database NFS export created during a previous deployment.
+
+To continue, erase all data from the postgres export and delete the
+pod (**not** the deployer pod). For example, if you have pods like
+such:
+
+```
+# oc get pods
+NAME                 READY     STATUS             RESTARTS   AGE
+httpd-1-cx7fk        1/1       Running            1          21h
+manageiq-0           0/1       Running            1          21h
+memcached-1-vkc7p    1/1       Running            1          21h
+postgresql-1-deploy  1/1       Running            1          21h
+postgresql-1-6w2t4   0/1       CrashLoopBackOff   1          21h
+```
+
+Then you would:
+
+1. Erase the data from the database NFS export
+2. `oc delete postgresql-1-6w2t4`
+
+The postgres deployer pod will try to scale up a new postgres pod to
+replace the one you deleted. Once the postgres pod is running the
+manageiq pod will stop blocking and begin application initialization.
+
 
 # Additional Information
 
 The upstream project,
diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml
index 383e6edb5..50a5252cc 100644
--- a/roles/openshift_management/tasks/add_container_provider.yml
+++ b/roles/openshift_management/tasks/add_container_provider.yml
@@ -10,6 +10,18 @@
 - name: Ensure OpenShift facts are loaded
   openshift_facts:
 
+- name: Ensure we use openshift_master_cluster_public_hostname if it is available
+  set_fact:
+    l_cluster_hostname: "{{ openshift.master.cluster_public_hostname }}"
+  when:
+    - openshift.master.cluster_public_hostname is defined
+
+- name: Ensure we default to the first master if openshift_master_cluster_public_hostname is unavailable
+  set_fact:
+    l_cluster_hostname: "{{ openshift.master.cluster_hostname }}"
+  when:
+    - l_cluster_hostname is not defined
+
 - name: Ensure the management SA Secrets are read
   oc_serviceaccount_secret:
     state: list
@@ -59,7 +71,7 @@
     connection_configurations:
       - authentication: {auth_key: "{{ management_bearer_token }}", authtype: bearer, type: AuthToken}
         endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
-    hostname: "{{ openshift.master.cluster_public_hostname }}"
+    hostname: "{{ l_cluster_hostname }}"
     name: "{{ openshift_management_project }}"
     port: "{{ openshift.master.api_port }}"
     type: "ManageIQ::Providers::Openshift::ContainerManager"
diff --git a/roles/openshift_management/tasks/validate.yml b/roles/openshift_management/tasks/validate.yml
index 8b20bdc5e..b22f36a4f 100644
--- a/roles/openshift_management/tasks/validate.yml
+++ b/roles/openshift_management/tasks/validate.yml
@@ -2,12 +2,25 @@
 # Validate configuration parameters passed to the openshift_management role
 ######################################################################
 
+# BETA ACKNOWLEDGEMENT
+- name: Ensure BETA software notice has been acknowledged
+  assert:
+    that:
+      - openshift_management_install_beta | default(false) | bool
+    msg: |
+      openshift-management (CFME/MIQ) is currently BETA status. You
+      must set openshift_management_install_beta to true to
+      acknowledge that you accept this risk and understand that
+      support is limited or nonexistent.
+  when:
+    - openshift_deployment_type == 'openshift-enterprise'
+
+######################################################################
 # CORE PARAMETERS
 - name: Ensure openshift_management_app_template is valid
   assert:
     that:
       - openshift_management_app_template in __openshift_management_app_templates
-
     msg: |
       "openshift_management_app_template must be one of {{ __openshift_management_app_templates | join(', ') }}"
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index d5094c2c9..f88c4a7dc 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -9,10 +9,13 @@
   notify:
     - Verify API Server
 
+# We retry the controllers because the API may not be 100% initialized yet.
 - name: restart master controllers
-  systemd:
-    name: "{{ openshift.common.service_type }}-master-controllers"
-    state: restarted
+  command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+  retries: 3
+  delay: 5
+  register: result
+  until: result.rc == 0
   when:
     - not (master_controllers_service_status_changed | default(false) | bool)
     - openshift.master.cluster_method == 'native'
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 5751723ab..8420dfb8c 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -58,6 +58,17 @@
   - l_create_ha_unit_files | changed
 # end workaround for missing systemd unit files
 
+- name: enable master services
+  systemd:
+    name: "{{ openshift.common.service_type }}-master-{{ item }}"
+    enabled: yes
+  with_items:
+    - api
+    - controllers
+  when:
+    - openshift.master.cluster_method == "native"
+    - not openshift.common.is_master_system_container | bool
+
 - name: Preserve Master API Proxy Config options
   command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
   register: l_master_api_proxy
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index a4f410296..1bea66c26 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -326,10 +326,8 @@ class IdentityProviderOauthBase(IdentityProviderBase):
         self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]
 
     def validate(self):
-        ''' validate this idp instance '''
-        if self.challenge:
-            raise errors.AnsibleFilterError("|failed provider {0} does not "
-                                            "allow challenge authentication".format(self.__class__.__name__))
+        ''' validate an instance of this idp class '''
+        pass
 
 
 class OpenIDIdentityProvider(IdentityProviderOauthBase):
@@ -510,7 +508,7 @@
                  'master.kubelet-client.crt',
                  'master.kubelet-client.key']
         if bool(include_ca):
-            certs += ['ca.crt', 'ca.key', 'ca-bundle.crt']
+            certs += ['ca.crt', 'ca.key', 'ca-bundle.crt', 'client-ca-bundle.crt']
         if bool(include_keys):
             certs += ['serviceaccounts.private.key',
                       'serviceaccounts.public.key']
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index ce7688581..88b893448 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -4,8 +4,13 @@
   when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
   notify: Verify API Server
 
+# We retry the controllers because the API may not be 100% initialized yet.
 - name: restart master controllers
-  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+  command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+  retries: 3
+  delay: 5
+  register: result
+  until: result.rc == 0
   when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
 
 - name: Verify API Server
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index d41245093..95ba9fe4c 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -10,6 +10,11 @@
   - name: Ensure libselinux-python is installed
     package: name=libselinux-python state=present
 
+  - name: Remove openshift_additional.repo file
+    file:
+      dest: /etc/yum.repos.d/openshift_additional.repo
+      state: absent
+
   - name: Create any additional repos that are defined
     yum_repository:
       description: "{{ item.description | default(item.name | default(item.id)) }}"
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index a6c168bc7..70b236033 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -69,3 +69,21 @@
     - openshift_clusterid is not defined
     - openshift_cloudprovider_kind is defined
    - openshift_cloudprovider_kind == 'aws'
+
+- name: Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive
+  fail:
+    msg: >
+      Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive,
+      do not set both to true. ansible_service_broker_install defaults to true.
+  when:
+    - ansible_service_broker_remove | default(false) | bool
+    - ansible_service_broker_install | default(true) | bool
+
+- name: Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive
+  fail:
+    msg: >
+      Ensure that template_service_broker_remove and template_service_broker_install are mutually exclusive,
+      do not set both to true. template_service_broker_install defaults to true.
+  when:
+    - template_service_broker_remove | default(false) | bool
+    - template_service_broker_install | default(true) | bool
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
index 9d55185c8..cd7bda2c6 100644
--- a/roles/openshift_service_catalog/tasks/generate_certs.yml
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -60,7 +60,7 @@
   register: apiserver_ca
 
 - shell: >
-    oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
   register: get_apiservices
   changed_when: no
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 0641483cd..3507330e3 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -38,7 +38,7 @@
 
 - name: Make kube-service-catalog project network global
   command: >
-    oc adm pod-network make-projects-global kube-service-catalog
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog
 
 - include: generate_certs.yml
@@ -93,7 +93,7 @@
 # only do this if we don't already have the updated role info
 - name: update edit role for service catalog and pod preset access
   command: >
-    oc replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
   when:
     - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
@@ -116,7 +116,7 @@
 # only do this if we don't already have the updated role info
 - name: update admin role for service catalog and pod preset access
   command: >
-    oc replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
   when:
    - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
@@ -139,7 +139,7 @@
 # only do this if we don't already have the updated role info
 - name: update view role for service catalog access
   command: >
-    oc replace -f {{ mktemp.stdout }}/view_sc_patch.yml
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml
   when:
    - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml
index ca9844e79..a832e1f85 100644
--- a/roles/openshift_service_catalog/tasks/remove.yml
+++ b/roles/openshift_service_catalog/tasks/remove.yml
@@ -1,7 +1,7 @@
 ---
 - name: Remove Service Catalog APIServer
   command: >
-    oc delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
 
 # TODO: this module doesn't currently remove this
 #- name: Remove service catalog api service
@@ -48,7 +48,7 @@
 
 - name: Remove Service Catalog kube-system Role Bindinds
   shell: >
-    oc process kube-system-service-catalog-role-bindings -n kube-system | oc delete --ignore-not-found -f -
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
 
 - oc_obj:
     kind: template
@@ -58,7 +58,7 @@
 
 - name: Remove Service Catalog kube-service-catalog Role Bindinds
   shell: >
-    oc process service-catalog-role-bindings -n kube-service-catalog | oc delete --ignore-not-found -f -
+    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
 
 - oc_obj:
     kind: template
diff --git a/roles/template_service_broker/tasks/main.yml b/roles/template_service_broker/tasks/main.yml
index d7ca970c7..da8aa291b 100644
--- a/roles/template_service_broker/tasks/main.yml
+++ b/roles/template_service_broker/tasks/main.yml
@@ -2,7 +2,7 @@
 # do any asserts here
 
 - include: install.yml
-  when: template_service_broker_install | default(false) | bool
+  when: template_service_broker_install | default(true) | bool
 
 - include: remove.yml
   when: template_service_broker_remove | default(false) | bool