Diffstat (limited to 'roles')
109 files changed, 1414 insertions, 647 deletions
diff --git a/roles/cockpit-ui/defaults/main.yml b/roles/cockpit-ui/defaults/main.yml new file mode 100644 index 000000000..b1696f1b8 --- /dev/null +++ b/roles/cockpit-ui/defaults/main.yml @@ -0,0 +1,3 @@ +--- +openshift_config_base: "/etc/origin" +openshift_master_config_dir: "{{ openshift.common.config_base | default(openshift_config_base) }}/master" diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml index 0114498f8..244e2cc41 100644 --- a/roles/cockpit-ui/tasks/main.yml +++ b/roles/cockpit-ui/tasks/main.yml @@ -50,7 +50,9 @@ -n default register: deploy_registry_console changed_when: "'already exists' not in deploy_registry_console.stderr" - failed_when: "'already exists' not in deploy_registry_console.stderr and deploy_registry_console.rc != 0" + failed_when: + - "'already exists' not in deploy_registry_console.stderr" + - "deploy_registry_console.rc != 0" - name: Delete temp directory file: diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index 274fd8603..e36dfa7b9 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -9,6 +9,8 @@ openshift_docker_additional_registries: [] openshift_docker_blocked_registries: [] openshift_docker_insecure_registries: [] +openshift_docker_ent_reg: 'registry.access.redhat.com' + # The l2_docker_* variables convert csv strings to lists, if # necessary. These variables should be used in place of their respective # openshift_docker_* counterparts to ensure the properly formatted lists are diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml index 0c5621259..4215dc5bd 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/docker/tasks/package_docker.yml @@ -50,6 +50,14 @@ src: custom.conf.j2 when: not os_firewall_use_firewalld | default(False) | bool +- name: Add enterprise registry, if necessary + set_fact: + l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}" + when: + - openshift.common.deployment_type == 'openshift-enterprise' + - openshift_docker_ent_reg != '' + - openshift_docker_ent_reg not in l2_docker_additional_registries + - stat: path=/etc/sysconfig/docker register: docker_check diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index e6fc2db06..66ce475e1 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -1,17 +1,17 @@ --- # TODO: Much of this file is shared with container engine tasks - set_fact: - l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(openshift.docker.insecure_registries)) }}" - when: openshift.docker.insecure_registries + l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}" + when: l2_docker_insecure_registries - set_fact: - l_crio_registries: "{{ openshift.docker.additional_registries + ['docker.io'] }}" - when: openshift.docker.additional_registries + l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}" + when: l2_docker_additional_registries - set_fact: l_crio_registries: "{{ ['docker.io'] }}" - when: not openshift.docker.additional_registries + when: not l2_docker_additional_registries - set_fact: l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}" - when: openshift.docker.additional_registries + when: l2_docker_additional_registries - name: Ensure container-selinux is installed package: @@ -104,7 +104,7 @@ - name: Use RHEL 
based image when distribution is Red Hat set_fact: - l_crio_image_prepend: "registry.access.redhat.com" + l_crio_image_prepend: "registry.access.redhat.com/openshift3" l_crio_image_name: "cri-o" when: ansible_distribution == "RedHat" diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml index 146e5f430..8b43393cb 100644 --- a/roles/docker/tasks/systemcontainer_docker.yml +++ b/roles/docker/tasks/systemcontainer_docker.yml @@ -148,10 +148,10 @@ # Set local versions of facts that must be in json format for container-daemon.json # NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson - set_fact: - l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}" + l_docker_insecure_registries: "{{ l2_docker_insecure_registries | default([]) | to_json }}" l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}" - l_docker_additional_registries: "{{ docker_additional_registries | default([]) | to_json }}" - l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}" + l_docker_additional_registries: "{{ l2_docker_additional_registries | default([]) | to_json }}" + l_docker_blocked_registries: "{{ l2_docker_blocked_registries | default([]) | to_json }}" l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}" # Configure container-engine using the container-daemon.json file diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 3cc2bbb18..18164050a 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -1,6 +1,66 @@ --- -r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" -r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(Falsel) }}" +r_etcd_common_backup_tag: '' +r_etcd_common_backup_sufix_name: '' + +# runc, docker, host +r_etcd_common_etcd_runtime: "docker" +r_etcd_common_embedded_etcd: false + +# etcd run on a host => use etcdctl command directly +# etcd run as a docker container => use docker exec +# etcd run as a runc container => use runc exec +r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}" + +# etcd server vars +etcd_conf_dir: '/etc/etcd' +r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd +etcd_system_container_conf_dir: /var/lib/etcd/etc +etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf" +etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt" +etcd_cert_file: "{{ etcd_conf_dir }}/server.crt" +etcd_key_file: "{{ etcd_conf_dir }}/server.key" +etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt" +etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt" +etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key" + +# etcd ca vars +etcd_ca_dir: "{{ etcd_conf_dir}}/ca" +etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs" +etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt" +etcd_ca_key: "{{ etcd_ca_dir }}/ca.key" +etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf" +etcd_ca_name: etcd_ca +etcd_req_ext: etcd_v3_req +etcd_ca_exts_peer: etcd_v3_ca_peer +etcd_ca_exts_server: etcd_v3_ca_server +etcd_ca_exts_self: etcd_v3_ca_self +etcd_ca_exts_client: etcd_v3_ca_client +etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl" +etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs" +etcd_ca_db: "{{ etcd_ca_dir }}/index.txt" +etcd_ca_serial: "{{ etcd_ca_dir }}/serial" +etcd_ca_crl_number: "{{ 
etcd_ca_dir }}/crlnumber" +etcd_ca_default_days: 1825 + +r_etcd_common_master_peer_cert_file: /etc/origin/master/master.etcd-client.crt +r_etcd_common_master_peer_key_file: /etc/origin/master/master.etcd-client.key +r_etcd_common_master_peer_ca_file: /etc/origin/master/master.etcd-ca.crt + +# etcd server & certificate vars +etcd_hostname: "{{ inventory_hostname }}" +etcd_ip: "{{ ansible_default_ipv4.address }}" +etcd_is_atomic: False +etcd_is_containerized: False +etcd_is_thirdparty: False + +# etcd dir vars +etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}" + +# etcd ports and protocols +etcd_client_port: 2379 +etcd_peer_port: 2380 +etcd_url_scheme: http +etcd_peer_url_scheme: http etcd_initial_cluster_state: new etcd_initial_cluster_token: etcd-cluster-1 @@ -10,8 +70,15 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_ etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" -etcd_client_port: 2379 -etcd_peer_port: 2380 +etcd_peer: 127.0.0.1 +etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}" + +etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}" +# Location of the service file is fixed and not meant to be changed +etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service" + +r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(Falsel) }}" etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d" r_etcd_os_firewall_deny: [] @@ -20,3 +87,6 @@ r_etcd_os_firewall_allow: port: "{{etcd_client_port}}/tcp" - service: etcd peering port: "{{ etcd_peer_port }}/tcp" + +# set the backend quota to 4GB by default +etcd_quota_backend_bytes: 4294967296 diff --git a/roles/etcd_common/library/delegated_serial_command.py b/roles/etcd/library/delegated_serial_command.py index 0cab1ca88..0cab1ca88 100755 --- a/roles/etcd_common/library/delegated_serial_command.py +++ b/roles/etcd/library/delegated_serial_command.py diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index 537c01c0e..879ca4f4e 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -19,4 +19,3 @@ dependencies: - role: lib_openshift - role: lib_os_firewall - role: lib_utils -- role: etcd_common diff --git a/roles/etcd_common/tasks/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml index 6cb456677..11bd2310e 100644 --- a/roles/etcd_common/tasks/drop_etcdctl.yml +++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml @@ -3,7 +3,7 @@ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present when: not openshift.common.is_atomic | bool -- name: Configure etcd profile.d alises +- name: Configure etcd profile.d aliases template: dest: "/etc/profile.d/etcdctl.sh" src: etcdctl.sh.j2 diff --git a/roles/etcd/tasks/backup.yml b/roles/etcd/tasks/backup.yml new file mode 100644 index 000000000..c0538e596 --- /dev/null +++ b/roles/etcd/tasks/backup.yml @@ -0,0 +1,2 @@ +--- +- include: backup/backup.yml diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd/tasks/backup/backup.yml index 42d27c081..42d27c081 100644 --- 
a/roles/etcd_common/tasks/backup.yml +++ b/roles/etcd/tasks/backup/backup.yml diff --git a/roles/etcd/tasks/backup_ca_certificates.yml b/roles/etcd/tasks/backup_ca_certificates.yml new file mode 100644 index 000000000..a41b032f3 --- /dev/null +++ b/roles/etcd/tasks/backup_ca_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/backup_ca_certificates.yml diff --git a/roles/etcd/tasks/backup_generated_certificates.yml b/roles/etcd/tasks/backup_generated_certificates.yml new file mode 100644 index 000000000..8cf2a10cc --- /dev/null +++ b/roles/etcd/tasks/backup_generated_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/backup_generated_certificates.yml diff --git a/roles/etcd/tasks/backup_server_certificates.yml b/roles/etcd/tasks/backup_server_certificates.yml new file mode 100644 index 000000000..267ffeb4d --- /dev/null +++ b/roles/etcd/tasks/backup_server_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/backup_server_certificates.yml diff --git a/roles/etcd/tasks/ca.yml b/roles/etcd/tasks/ca.yml index 7cda49069..cca1e9ad7 100644 --- a/roles/etcd/tasks/ca.yml +++ b/roles/etcd/tasks/ca.yml @@ -1,2 +1,2 @@ --- -- include: ca/deploy.yml +- include: certificates/deploy_ca.yml diff --git a/roles/etcd/tasks/certificates/backup_ca_certificates.yml b/roles/etcd/tasks/certificates/backup_ca_certificates.yml new file mode 100644 index 000000000..f60eb82ef --- /dev/null +++ b/roles/etcd/tasks/certificates/backup_ca_certificates.yml @@ -0,0 +1,12 @@ +--- +- name: Determine if CA certificate directory exists + stat: + path: "{{ etcd_ca_dir }}" + register: etcd_ca_certs_dir_stat +- name: Backup generated etcd certificates + command: > + tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz + {{ etcd_ca_dir }} + args: + warn: no + when: etcd_ca_certs_dir_stat.stat.exists | bool diff --git a/roles/etcd/tasks/certificates/backup_generated_certificates.yml b/roles/etcd/tasks/certificates/backup_generated_certificates.yml new file mode 100644 index 000000000..6a24cfcb3 --- /dev/null +++ b/roles/etcd/tasks/certificates/backup_generated_certificates.yml @@ -0,0 +1,13 @@ +--- +- name: Determine if generated etcd certificates exist + stat: + path: "{{ etcd_conf_dir }}/generated_certs" + register: etcd_generated_certs_dir_stat + +- name: Backup generated etcd certificates + command: > + tar -czf {{ etcd_conf_dir }}/etcd-generated-certificate-backup-{{ ansible_date_time.epoch }}.tgz + {{ etcd_conf_dir }}/generated_certs + args: + warn: no + when: etcd_generated_certs_dir_stat.stat.exists | bool diff --git a/roles/etcd/tasks/certificates/backup_server_certificates.yml b/roles/etcd/tasks/certificates/backup_server_certificates.yml new file mode 100644 index 000000000..8e6cc6965 --- /dev/null +++ b/roles/etcd/tasks/certificates/backup_server_certificates.yml @@ -0,0 +1,11 @@ +--- +- name: Backup etcd certificates + command: > + tar -czvf /etc/etcd/etcd-server-certificate-backup-{{ ansible_date_time.epoch }}.tgz + {{ etcd_conf_dir }}/ca.crt + {{ etcd_conf_dir }}/server.crt + {{ etcd_conf_dir }}/server.key + {{ etcd_conf_dir }}/peer.crt + {{ etcd_conf_dir }}/peer.key + args: + warn: no diff --git a/roles/etcd/tasks/ca/deploy.yml b/roles/etcd/tasks/certificates/deploy_ca.yml index 3d32290a2..3d32290a2 100644 --- a/roles/etcd/tasks/ca/deploy.yml +++ b/roles/etcd/tasks/certificates/deploy_ca.yml diff --git a/roles/etcd/tasks/certificates/distribute_ca.yml b/roles/etcd/tasks/certificates/distribute_ca.yml new file mode 100644 index 
000000000..632ac15dd --- /dev/null +++ b/roles/etcd/tasks/certificates/distribute_ca.yml @@ -0,0 +1,47 @@ +--- +- name: Create a tarball of the etcd ca certs + command: > + tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz + -C {{ etcd_ca_dir }} . + args: + creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + warn: no + delegate_to: "{{ etcd_ca_host }}" + run_once: true + +- name: Retrieve etcd ca cert tarball + fetch: + src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + dest: "{{ etcd_sync_cert_dir }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + delegate_to: "{{ etcd_ca_host }}" + run_once: true + +- name: Ensure ca directory exists + file: + path: "{{ etcd_ca_dir }}" + state: directory + +- name: Unarchive etcd ca cert tarballs + unarchive: + src: "{{ etcd_sync_cert_dir }}/{{ etcd_ca_name }}.tgz" + dest: "{{ etcd_ca_dir }}" + +- name: Read current etcd CA + slurp: + src: "{{ etcd_conf_dir }}/ca.crt" + register: g_current_etcd_ca_output + +- name: Read new etcd CA + slurp: + src: "{{ etcd_ca_dir }}/ca.crt" + register: g_new_etcd_ca_output + +- copy: + content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}" + dest: "{{ item }}/ca.crt" + with_items: + - "{{ etcd_conf_dir }}" + - "{{ etcd_ca_dir }}" diff --git a/roles/etcd/tasks/client_certificates/fetch_from_ca.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml index 119071a72..119071a72 100644 --- a/roles/etcd/tasks/client_certificates/fetch_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml diff --git a/roles/etcd/tasks/server_certificates/fetch_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml index 064fe1952..26492fb3c 100644 --- a/roles/etcd/tasks/server_certificates/fetch_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml @@ -1,8 +1,4 @@ --- -- include: ../ca/deploy.yml - when: - - etcd_ca_setup | default(True) | bool - - name: Install etcd package: name: "etcd{{ '-' + etcd_version if etcd_version is defined else '' }}" diff --git a/roles/etcd/tasks/certificates/remove_ca_certificates.yml b/roles/etcd/tasks/certificates/remove_ca_certificates.yml new file mode 100644 index 000000000..4a86eb60d --- /dev/null +++ b/roles/etcd/tasks/certificates/remove_ca_certificates.yml @@ -0,0 +1,5 @@ +--- +- name: Remove CA certificate directory + file: + path: "{{ etcd_ca_dir }}" + state: absent diff --git a/roles/etcd/tasks/certificates/remove_generated_certificates.yml b/roles/etcd/tasks/certificates/remove_generated_certificates.yml new file mode 100644 index 000000000..993b18de2 --- /dev/null +++ b/roles/etcd/tasks/certificates/remove_generated_certificates.yml @@ -0,0 +1,5 @@ +--- +- name: Remove generated etcd certificates + file: + path: "{{ etcd_conf_dir }}/generated_certs" + state: absent diff --git a/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml b/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml new file mode 100644 index 000000000..70b5c6523 --- /dev/null +++ b/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml @@ -0,0 +1,8 @@ +--- +- name: Retrieve etcd CA certificate + fetch: + src: "{{ etcd_conf_dir }}/ca.crt" + dest: "{{ etcd_sync_cert_dir }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes diff --git a/roles/etcd/tasks/client_certificates.yml b/roles/etcd/tasks/client_certificates.yml index 2e9c078b9..2f4108a0d 100644 --- a/roles/etcd/tasks/client_certificates.yml +++ 
b/roles/etcd/tasks/client_certificates.yml @@ -1,2 +1,2 @@ --- -- include: client_certificates/fetch_from_ca.yml +- include: certificates/fetch_client_certificates_from_ca.yml diff --git a/roles/etcd/tasks/distribute_ca b/roles/etcd/tasks/distribute_ca new file mode 100644 index 000000000..040c5f7af --- /dev/null +++ b/roles/etcd/tasks/distribute_ca @@ -0,0 +1,2 @@ +--- +- include: certificates/distribute_ca.yml diff --git a/roles/etcd/tasks/drop_etcdctl.yml b/roles/etcd/tasks/drop_etcdctl.yml new file mode 100644 index 000000000..4c1f609f7 --- /dev/null +++ b/roles/etcd/tasks/drop_etcdctl.yml @@ -0,0 +1,2 @@ +--- +- include: auxiliary/drop_etcdctl.yml diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 870c11ad4..f643d292d 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -16,10 +16,7 @@ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present when: not etcd_is_containerized | bool -- include_role: - name: etcd_common - vars: - r_etcd_common_action: drop_etcdctl +- include: drop_etcdctl.yml when: - openshift_etcd_etcdctl_profile | default(true) | bool diff --git a/roles/etcd/tasks/remove_ca_certificates.yml b/roles/etcd/tasks/remove_ca_certificates.yml new file mode 100644 index 000000000..36df1a1cc --- /dev/null +++ b/roles/etcd/tasks/remove_ca_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/remove_ca_certificates.yml diff --git a/roles/etcd/tasks/remove_generated_certificates.yml b/roles/etcd/tasks/remove_generated_certificates.yml new file mode 100644 index 000000000..b10a4b32d --- /dev/null +++ b/roles/etcd/tasks/remove_generated_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/remove_generated_certificates.yml diff --git a/roles/etcd/tasks/retrieve_ca_certificates.yml b/roles/etcd/tasks/retrieve_ca_certificates.yml new file mode 100644 index 000000000..bd6c4ec85 --- /dev/null +++ b/roles/etcd/tasks/retrieve_ca_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/retrieve_ca_certificates.yml diff --git a/roles/etcd/tasks/server_certificates.yml b/roles/etcd/tasks/server_certificates.yml index f0ba58b6e..ae26079f9 100644 --- a/roles/etcd/tasks/server_certificates.yml +++ b/roles/etcd/tasks/server_certificates.yml @@ -1,2 +1,6 @@ --- -- include: server_certificates/fetch_from_ca.yml +- include: ca.yml + when: + - etcd_ca_setup | default(True) | bool + +- include: certificates/fetch_server_certificates_from_ca.yml diff --git a/roles/etcd/tasks/upgrade/upgrade_image.yml b/roles/etcd/tasks/upgrade/upgrade_image.yml index cea95a1b3..24071f9ad 100644 --- a/roles/etcd/tasks/upgrade/upgrade_image.yml +++ b/roles/etcd/tasks/upgrade/upgrade_image.yml @@ -20,6 +20,11 @@ regexp: "{{ current_image.stdout }}$" replace: "{{ new_etcd_image }}" +- lineinfile: + destfile: "{{ etcd_conf_file }}" + regexp: '^ETCD_QUOTA_BACKEND_BYTES=' + line: "ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}" + - name: Restart etcd_container systemd: name: "{{ etcd_service }}" diff --git a/roles/etcd/tasks/upgrade/upgrade_rpm.yml b/roles/etcd/tasks/upgrade/upgrade_rpm.yml index 324b69605..505e28afb 100644 --- a/roles/etcd/tasks/upgrade/upgrade_rpm.yml +++ b/roles/etcd/tasks/upgrade/upgrade_rpm.yml @@ -19,6 +19,11 @@ name: "{{ l_etcd_target_package }}" state: latest +- lineinfile: + destfile: "{{ etcd_conf_file }}" + regexp: '^ETCD_QUOTA_BACKEND_BYTES=' + line: "ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}" + - name: Restart etcd service: name: "{{ etcd_service }}" diff --git 
a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2 index 2c2803aee..8462bb4c8 100644 --- a/roles/etcd/templates/etcd.conf.j2 +++ b/roles/etcd/templates/etcd.conf.j2 @@ -45,6 +45,7 @@ ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} #ETCD_STRICT_RECONFIG_CHECK="false" #ETCD_AUTO_COMPACTION_RETENTION="0" #ETCD_ENABLE_V2="true" +ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }} #[proxy] #ETCD_PROXY=off diff --git a/roles/etcd_common/templates/etcdctl.sh.j2 b/roles/etcd/templates/etcdctl.sh.j2 index ac7d9c72f..ac7d9c72f 100644 --- a/roles/etcd_common/templates/etcdctl.sh.j2 +++ b/roles/etcd/templates/etcdctl.sh.j2 diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md deleted file mode 100644 index d1c3a6602..000000000 --- a/roles/etcd_common/README.md +++ /dev/null @@ -1,53 +0,0 @@ -etcd_common -======================== - -Common resources for dependent etcd roles. E.g. default variables for: -* config directories -* certificates -* ports -* other settings - -Or `delegated_serial_command` ansible module for executing a command on a remote node. E.g. - -```yaml -- delegated_serial_command: - command: /usr/bin/make_database.sh arg1 arg2 - creates: /path/to/database -``` - -Or etcdctl.yml playbook for installation of `etcdctl` aliases on a node (see example). - -Dependencies ------------- - -openshift-repos - -Example Playbook ----------------- - -**Drop etcdctl aliases** - -```yaml -- include_role: - name: etcd_common - tasks_from: etcdctl -``` - -**Get access to common variables** - -```yaml -# meta.yml of etcd -... -dependencies: -- { role: etcd_common } -``` - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml deleted file mode 100644 index 6705e1ac5..000000000 --- a/roles/etcd_common/defaults/main.yml +++ /dev/null @@ -1,78 +0,0 @@ ---- -# Default action when calling this role -r_etcd_common_action: noop -r_etcd_common_backup_tag: '' -r_etcd_common_backup_sufix_name: '' - -# runc, docker, host -r_etcd_common_etcd_runtime: "docker" -r_etcd_common_embedded_etcd: false - -# etcd run on a host => use etcdctl command directly -# etcd run as a docker container => use docker exec -# etcd run as a runc container => use runc exec -r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}" - -# etcd server vars -etcd_conf_dir: '/etc/etcd' -r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd -etcd_system_container_conf_dir: /var/lib/etcd/etc -etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf" -etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt" -etcd_cert_file: "{{ etcd_conf_dir }}/server.crt" -etcd_key_file: "{{ etcd_conf_dir }}/server.key" -etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt" -etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt" -etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key" - -# etcd ca vars -etcd_ca_dir: "{{ etcd_conf_dir}}/ca" -etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs" -etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt" -etcd_ca_key: "{{ etcd_ca_dir }}/ca.key" -etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf" -etcd_ca_name: etcd_ca -etcd_req_ext: etcd_v3_req -etcd_ca_exts_peer: etcd_v3_ca_peer -etcd_ca_exts_server: etcd_v3_ca_server -etcd_ca_exts_self: etcd_v3_ca_self 
-etcd_ca_exts_client: etcd_v3_ca_client -etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl" -etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs" -etcd_ca_db: "{{ etcd_ca_dir }}/index.txt" -etcd_ca_serial: "{{ etcd_ca_dir }}/serial" -etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber" -etcd_ca_default_days: 1825 - -r_etcd_common_master_peer_cert_file: /etc/origin/master/master.etcd-client.crt -r_etcd_common_master_peer_key_file: /etc/origin/master/master.etcd-client.key -r_etcd_common_master_peer_ca_file: /etc/origin/master/master.etcd-ca.crt - -# etcd server & certificate vars -etcd_hostname: "{{ inventory_hostname }}" -etcd_ip: "{{ ansible_default_ipv4.address }}" -etcd_is_atomic: False -etcd_is_containerized: False -etcd_is_thirdparty: False - -# etcd dir vars -etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}" - -# etcd ports and protocols -etcd_client_port: 2379 -etcd_peer_port: 2380 -etcd_url_scheme: http -etcd_peer_url_scheme: http - -etcd_initial_cluster_state: new -etcd_initial_cluster_token: etcd-cluster-1 - -etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}" -etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}" -etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" -etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" - -etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d" - -# etcd_peer needs to be set by a role caller -etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}" diff --git a/roles/etcd_common/meta/main.yml b/roles/etcd_common/meta/main.yml deleted file mode 100644 index dfb1c7a2c..000000000 --- a/roles/etcd_common/meta/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 1.9 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: [] diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml deleted file mode 100644 index 6ed87e6c7..000000000 --- a/roles/etcd_common/tasks/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Fail if invalid r_etcd_common_action provided - fail: - msg: "etcd_common role can only be called with 'noop' or 'backup' or 'drop_etcdctl'" - when: r_etcd_common_action not in ['noop', 'backup', 'drop_etcdctl'] - -- name: Include main action task file - include: "{{ r_etcd_common_action }}.yml" - when: r_etcd_common_action != "noop" diff --git a/roles/etcd_common/tasks/noop.yml b/roles/etcd_common/tasks/noop.yml deleted file mode 100644 index a88d78235..000000000 --- a/roles/etcd_common/tasks/noop.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -# This is file is here because the usage of tags, specifically `pre_upgrade` -# breaks the functionality of this role. 
-# See https://bugzilla.redhat.com/show_bug.cgi?id=1464025 diff --git a/roles/etcd_common/vars/main.yml b/roles/etcd_common/vars/main.yml deleted file mode 100644 index 00d697776..000000000 --- a/roles/etcd_common/vars/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}" -# Location of the service file is fixed and not meant to be changed -etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service" diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md new file mode 100644 index 000000000..321acca21 --- /dev/null +++ b/roles/installer_checkpoint/README.md @@ -0,0 +1,177 @@ +OpenShift-Ansible Installer Checkpoint +====================================== + +A complete OpenShift cluster installation is comprised of many different +components which can take 30 minutes to several hours to complete. If the +installation should fail, it could be confusing to understand at which component +the failure occurred. Additionally, it may be desired to re-run only the +component which failed instead of starting over from the beginning. Components +which came after the failed component would also need to be run individually. + +Design +------ + +The Installer Checkpoint implements an Ansible callback plugin to allow +displaying and logging of the installer status at the end of a playbook run. + +To ensure the callback plugin is loaded, regardless of ansible.cfg file +configuration, the plugin has been placed inside the installer_checkpoint role +which must be called early in playbook execution. The `std_include.yml` playbook +is run first for all entry point playbooks, therefore, the initialization of the +checkpoint plugin has been placed at the beginning of that file. + +Playbooks use the [set_stats][set_stats] Ansible module to set a custom stats +variable indicating the status of the phase being executed. + +The installer_checkpoint.py callback plugin extends the Ansible +`v2_playbook_on_stats` method, which is called at the end of a playbook run, to +display the status of each phase which was run. The INSTALLER STATUS report is +displayed immediately following the PLAY RECAP. + +Phases of cluster installation are mapped to the steps in the +[common/openshift-cluster/config.yml][openshift_cluster_config] playbook. + +To correctly display the order of the installer phases, the `installer_phases` +variable defines the phase or component order. + +```python + # Set the order of the installer phases + installer_phases = [ + 'installer_phase_initialize', + 'installer_phase_etcd', + 'installer_phase_nfs', + 'installer_phase_loadbalancer', + 'installer_phase_master', + 'installer_phase_master_additional', + 'installer_phase_node', + 'installer_phase_glusterfs', + 'installer_phase_hosted', + 'installer_phase_metrics', + 'installer_phase_logging', + 'installer_phase_servicecatalog', + ] +``` + +Additional attributes, such as display title and component playbook, of each +phase are stored in the `phase_attributes` variable. + +```python + # Define the attributes of the installer phases + phase_attributes = { + 'installer_phase_initialize': { + 'title': 'Initialization', + 'playbook': '' + }, + 'installer_phase_etcd': { + 'title': 'etcd Install', + 'playbook': 'playbooks/byo/openshift-etcd/config.yml' + }, + 'installer_phase_nfs': { + 'title': 'NFS Install', + 'playbook': 'playbooks/byo/openshift-nfs/config.yml' + }, + #... 
+ } +``` + +Usage +----- + +In order to indicate the beginning of a component installation, a play must be +added to the beginning of the main playbook for the component to set the phase +status to "In Progress". Additionally, a play must be added after the last play +for that component to set the phase status to "Complete". + +The following example shows the first play of the 'installer phase' loading the +`installer_checkpoint` role, as well as the `set_stats` task for setting +`installer_phase_initialize` to "In Progress". Various plays are run for the +phase/component and then a final play for setting `installer_hase_initialize` to +"Complete". + +```yaml +# common/openshift-cluster/std_include.yml +--- +- name: Initialization Checkpoint Start + hosts: localhost + connection: local + gather_facts: false + roles: + - installer_checkpoint + tasks: + - name: Set install initialization 'In Progress' + set_stats: + data: + installer_phase_initialize: "In Progress" + aggregate: false + +#... +# Various plays here +#... + +- name: Initialization Checkpoint End + hosts: localhost + connection: local + gather_facts: false + tasks: + - name: Set install initialization 'Complete' + set_stats: + data: + installer_phase_initialize: "Complete" + aggregate: false +``` + +Each phase or component of the installer will follow a similar pattern, with the +exception that the `installer_checkpoint` role does not need to be called since +it was already loaded by the play in `std_include.yml`. It is important to +place the 'In Progress' and 'Complete' plays as the first and last plays of the +phase or component. + +Examples +-------- + +Example display of a successful playbook run: + +``` +PLAY RECAP ********************************************************************* +master01.example.com : ok=158 changed=16 unreachable=0 failed=0 +node01.example.com : ok=469 changed=74 unreachable=0 failed=0 +node02.example.com : ok=157 changed=17 unreachable=0 failed=0 +localhost : ok=24 changed=0 unreachable=0 failed=0 + + +INSTALLER STATUS *************************************************************** +Initialization : Complete +etcd Install : Complete +NFS Install : Not Started +Load balancer Install : Not Started +Master Install : Complete +Master Additional Install : Complete +Node Install : Complete +GlusterFS Install : Not Started +Hosted Install : Complete +Metrics Install : Not Started +Logging Install : Not Started +Service Catalog Install : Not Started +``` + +Example display if a failure occurs during execution: + +``` +INSTALLER STATUS *************************************************************** +Initialization : Complete +etcd Install : Complete +NFS Install : Not Started +Load balancer Install : Not Started +Master Install : In Progress + This phase can be restarted by running: playbooks/byo/openshift-master/config.yml +Master Additional Install : Not Started +Node Install : Not Started +GlusterFS Install : Not Started +Hosted Install : Not Started +Metrics Install : Not Started +Logging Install : Not Started +Service Catalog Install : Not Started +``` + +[set_stats]: http://docs.ansible.com/ansible/latest/set_stats_module.html +[openshift_cluster_config]: https://github.com/openshift/openshift-ansible/blob/master/playbooks/common/openshift-cluster/config.yml diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py new file mode 100644 index 000000000..033240e62 --- /dev/null +++ 
b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -0,0 +1,182 @@ +"""Ansible callback plugin to print a summary completion status of installation +phases. +""" +from ansible.plugins.callback import CallbackBase +from ansible import constants as C + +DOCUMENTATION = ''' + +''' + +EXAMPLES = ''' +--------------------------------------------- +Example display of a successful playbook run: + +PLAY RECAP ********************************************************************* +master01.example.com : ok=158 changed=16 unreachable=0 failed=0 +node01.example.com : ok=469 changed=74 unreachable=0 failed=0 +node02.example.com : ok=157 changed=17 unreachable=0 failed=0 +localhost : ok=24 changed=0 unreachable=0 failed=0 + + +INSTALLER STATUS *************************************************************** +Initialization : Complete +etcd Install : Complete +NFS Install : Not Started +Load balancer Install : Not Started +Master Install : Complete +Master Additional Install : Complete +Node Install : Complete +GlusterFS Install : Not Started +Hosted Install : Complete +Metrics Install : Not Started +Logging Install : Not Started +Service Catalog Install : Not Started + +----------------------------------------------------- +Example display if a failure occurs during execution: + +INSTALLER STATUS *************************************************************** +Initialization : Complete +etcd Install : Complete +NFS Install : Not Started +Load balancer Install : Not Started +Master Install : In Progress + This phase can be restarted by running: playbooks/byo/openshift-master/config.yml +Master Additional Install : Not Started +Node Install : Not Started +GlusterFS Install : Not Started +Hosted Install : Not Started +Metrics Install : Not Started +Logging Install : Not Started +Service Catalog Install : Not Started + +''' + + +class CallbackModule(CallbackBase): + """This callback summarizes installation phase status.""" + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'installer_checkpoint' + CALLBACK_NEEDS_WHITELIST = False + + def __init__(self): + super(CallbackModule, self).__init__() + + def v2_playbook_on_stats(self, stats): + + # Set the order of the installer phases + installer_phases = [ + 'installer_phase_initialize', + 'installer_phase_etcd', + 'installer_phase_nfs', + 'installer_phase_loadbalancer', + 'installer_phase_master', + 'installer_phase_master_additional', + 'installer_phase_node', + 'installer_phase_glusterfs', + 'installer_phase_hosted', + 'installer_phase_metrics', + 'installer_phase_logging', + 'installer_phase_servicecatalog', + ] + + # Define the attributes of the installer phases + phase_attributes = { + 'installer_phase_initialize': { + 'title': 'Initialization', + 'playbook': '' + }, + 'installer_phase_etcd': { + 'title': 'etcd Install', + 'playbook': 'playbooks/byo/openshift-etcd/config.yml' + }, + 'installer_phase_nfs': { + 'title': 'NFS Install', + 'playbook': 'playbooks/byo/openshift-nfs/config.yml' + }, + 'installer_phase_loadbalancer': { + 'title': 'Load balancer Install', + 'playbook': 'playbooks/byo/openshift-loadbalancer/config.yml' + }, + 'installer_phase_master': { + 'title': 'Master Install', + 'playbook': 'playbooks/byo/openshift-master/config.yml' + }, + 'installer_phase_master_additional': { + 'title': 'Master Additional Install', + 'playbook': 'playbooks/byo/openshift-master/additional_config.yml' + }, + 'installer_phase_node': { + 'title': 'Node Install', + 'playbook': 'playbooks/byo/openshift-node/config.yml' + 
}, + 'installer_phase_glusterfs': { + 'title': 'GlusterFS Install', + 'playbook': 'playbooks/byo/openshift-glusterfs/config.yml' + }, + 'installer_phase_hosted': { + 'title': 'Hosted Install', + 'playbook': 'playbooks/byo/openshift-cluster/openshift-hosted.yml' + }, + 'installer_phase_metrics': { + 'title': 'Metrics Install', + 'playbook': 'playbooks/byo/openshift-cluster/openshift-metrics.yml' + }, + 'installer_phase_logging': { + 'title': 'Logging Install', + 'playbook': 'playbooks/byo/openshift-cluster/openshift-logging.yml' + }, + 'installer_phase_servicecatalog': { + 'title': 'Service Catalog Install', + 'playbook': 'playbooks/byo/openshift-cluster/service-catalog.yml' + }, + } + + # Find the longest phase title + max_column = 0 + for phase in phase_attributes: + max_column = max(max_column, len(phase_attributes[phase]['title'])) + + if '_run' in stats.custom: + self._display.banner('INSTALLER STATUS') + for phase in installer_phases: + phase_title = phase_attributes[phase]['title'] + padding = max_column - len(phase_title) + 2 + if phase in stats.custom['_run']: + phase_status = stats.custom['_run'][phase] + self._display.display( + '{}{}: {}'.format(phase_title, ' ' * padding, phase_status), + color=self.phase_color(phase_status)) + if phase_status == 'In Progress' and phase != 'installer_phase_initialize': + self._display.display( + '\tThis phase can be restarted by running: {}'.format( + phase_attributes[phase]['playbook'])) + else: + # Phase was not found in custom stats + self._display.display( + '{}{}: {}'.format(phase_title, ' ' * padding, 'Not Started'), + color=C.COLOR_SKIP) + + self._display.display("", screen_only=True) + + def phase_color(self, status): + """ Return color code for installer phase""" + valid_status = [ + 'In Progress', + 'Complete', + ] + + if status not in valid_status: + self._display.warning('Invalid phase status defined: {}'.format(status)) + + if status == 'Complete': + phase_color = C.COLOR_OK + elif status == 'In Progress': + phase_color = C.COLOR_ERROR + else: + phase_color = C.COLOR_WARN + + return phase_color diff --git a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py b/roles/lib_openshift/src/test/integration/filter_plugins/test_filters.py index f350bd25d..f350bd25d 100644 --- a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py +++ b/roles/lib_openshift/src/test/integration/filter_plugins/test_filters.py diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 4e7f54f79..4d88db037 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -13,6 +13,7 @@ openshift_aws_wait_for_ssh: True openshift_aws_clusterid: default openshift_aws_region: us-east-1 openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}" +openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}" openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external" openshift_aws_iam_cert_path: '' diff --git a/roles/openshift_aws/filter_plugins/filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py index 06e1f9602..06e1f9602 100644 --- a/roles/openshift_aws/filter_plugins/filters.py +++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/build_ami.yml index 8d4e5ac43..48555e5da 100644 --- a/roles/openshift_aws/tasks/build_ami.yml +++ b/roles/openshift_aws/tasks/build_ami.yml @@ -31,7 +31,7 @@ assign_public_ip: yes region: "{{ openshift_aws_region 
}}" key_name: "{{ openshift_aws_ssh_key_name }}" - group: "{{ openshift_aws_clusterid }}" + group: "{{ openshift_aws_build_ami_group }}" instance_type: m4.xlarge vpc_subnet_id: "{{ subnetout.subnets[0].id }}" image: "{{ openshift_aws_base_ami }}" diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml index 334150f63..5a3e50678 100644 --- a/roles/openshift_docker_facts/tasks/main.yml +++ b/roles/openshift_docker_facts/tasks/main.yml @@ -6,9 +6,6 @@ with_items: - role: docker local_facts: - additional_registries: "{{ openshift_docker_additional_registries | default(None) }}" - blocked_registries: "{{ openshift_docker_blocked_registries | default(None) }}" - insecure_registries: "{{ openshift_docker_insecure_registries | default(None) }}" selinux_enabled: "{{ openshift_docker_selinux_enabled | default(None) }}" log_driver: "{{ openshift_docker_log_driver | default(None) }}" log_options: "{{ openshift_docker_log_options | default(None) }}" @@ -23,12 +20,6 @@ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" - set_fact: - docker_additional_registries: "{{ openshift.docker.additional_registries - | default(omit) }}" - docker_blocked_registries: "{{ openshift.docker.blocked_registries - | default(omit) }}" - docker_insecure_registries: "{{ openshift.docker.insecure_registries - | default(omit) }}" docker_selinux_enabled: "{{ openshift.docker.selinux_enabled | default(omit) }}" docker_log_driver: "{{ openshift.docker.log_driver | default(omit) }}" docker_log_options: "{{ openshift.docker.log_options | default(omit) }}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 1c2c91a5a..11ef9fa97 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -55,9 +55,6 @@ def migrate_docker_facts(facts): """ Apply migrations for docker facts """ params = { 'common': ( - 'additional_registries', - 'insecure_registries', - 'blocked_registries', 'options' ), 'node': ( @@ -768,14 +765,6 @@ def set_deployment_facts_if_unset(facts): service_type = 'origin' facts['common']['service_type'] = service_type - if 'docker' in facts: - deployment_type = facts['common']['deployment_type'] - if deployment_type == 'openshift-enterprise': - addtl_regs = facts['docker'].get('additional_registries', []) - ent_reg = 'registry.access.redhat.com' - if ent_reg not in addtl_regs: - facts['docker']['additional_registries'] = addtl_regs + [ent_reg] - for role in ('master', 'node'): if role in facts: deployment_type = facts['common']['deployment_type'] @@ -2248,19 +2237,6 @@ class OpenShiftFacts(object): protected_facts_to_overwrite) if 'docker' in new_local_facts: - # remove duplicate and empty strings from registry lists, preserving order - for cat in ['additional', 'blocked', 'insecure']: - key = '{0}_registries'.format(cat) - if key in new_local_facts['docker']: - val = new_local_facts['docker'][key] - if isinstance(val, string_types): - val = [x.strip() for x in val.split(',')] - seen = set() - new_local_facts['docker'][key] = list() - for registry in val: - if registry not in seen and registry != '': - seen.add(registry) - new_local_facts['docker'][key].append(registry) # Convert legacy log_options comma sep string to a list if present: if 'log_options' in new_local_facts['docker'] and \ isinstance(new_local_facts['docker']['log_options'], string_types): diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py 
b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 98372d979..93a5973d4 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -153,7 +153,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): def known_docker_registries(self): """Build a list of docker registries available according to inventory vars.""" - regs = list(self.get_var("openshift.docker.additional_registries", default=[])) + regs = list(self.get_var("openshift_docker_additional_registries", default=[])) deployment_type = self.get_var("openshift_deployment_type") if deployment_type == "origin" and "docker.io" not in regs: diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py index 952fa9aa6..c523ffd5c 100644 --- a/roles/openshift_health_checker/test/docker_image_availability_test.py +++ b/roles/openshift_health_checker/test/docker_image_availability_test.py @@ -72,7 +72,7 @@ def test_all_images_available_remotely(task_vars, available_locally): return {'images': [], 'failed': available_locally} return {} - task_vars['openshift']['docker']['additional_registries'] = ["docker.io", "registry.access.redhat.com"] + task_vars['openshift_docker_additional_registries'] = ["docker.io", "registry.access.redhat.com"] task_vars['openshift_image_tag'] = 'v3.4' check = DockerImageAvailability(execute_module, task_vars) check._module_retry_interval = 0 @@ -90,7 +90,7 @@ def test_all_images_unavailable(task_vars): return {} # docker_image_facts failure - task_vars['openshift']['docker']['additional_registries'] = ["docker.io"] + task_vars['openshift_docker_additional_registries'] = ["docker.io"] task_vars['openshift_deployment_type'] = "openshift-enterprise" task_vars['openshift_image_tag'] = 'latest' check = DockerImageAvailability(execute_module, task_vars) @@ -121,9 +121,9 @@ def test_no_known_registries(): service_type='origin', is_containerized=False, is_atomic=False, - ), - docker=dict(additional_registries=["docker.io"]), + ) ), + openshift_docker_additional_registries=["docker.io"], openshift_deployment_type="openshift-enterprise", openshift_image_tag='latest', group_names=['nodes', 'masters'], @@ -154,7 +154,7 @@ def test_skopeo_update_failure(task_vars, message, extra_words): return {} - task_vars['openshift']['docker']['additional_registries'] = ["unknown.io"] + task_vars['openshift_docker_additional_registries'] = ["unknown.io"] task_vars['openshift_deployment_type'] = "openshift-enterprise" check = DockerImageAvailability(execute_module, task_vars) check._module_retry_interval = 0 diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index 712a2a591..c234c3740 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -1,14 +1,33 @@ --- +########## +# Common # +########## +openshift_hosted_infra_selector: "region=infra" +r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}" +r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}" + +openshift_default_projects: + default: + default_node_selector: '' + logging: + default_node_selector: '' + openshift-infra: + default_node_selector: '' + +# openshift_additional_projects shares the same format as openshift_default_projects +openshift_additional_projects: {} + +openshift_config_base: 
"/etc/origin" +openshift_master_config_dir: "{{ openshift.common.config_base | default(openshift_config_base) }}/master" +openshift_cluster_domain: 'cluster.local' + +########## +# Router # +########## r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" -r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" -r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" - openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}" -openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}" - -registry_volume_claim: 'registry-claim' openshift_hosted_router_edits: - key: spec.strategy.rollingParams.intervalSeconds @@ -36,20 +55,49 @@ openshift_hosted_routers: certificate: "{{ openshift_hosted_router_certificate | default({}) }}" openshift_hosted_router_certificate: {} -openshift_hosted_registry_cert_expire_days: 730 openshift_hosted_router_create_certificate: True r_openshift_hosted_router_os_firewall_deny: [] r_openshift_hosted_router_os_firewall_allow: [] +############ +# Registry # +############ + +r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" + +openshift_hosted_registry_name: docker-registry +openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}" +registry_volume_claim: 'registry-claim' +openshift_hosted_registry_cert_expire_days: 730 + r_openshift_hosted_registry_os_firewall_deny: [] r_openshift_hosted_registry_os_firewall_allow: - service: Docker Registry Port port: 5000/tcp cond: "{{ r_openshift_hosted_use_calico }}" -# NOTE -# r_openshift_hosted_use_calico_default may be defined external to this role. -# openshift_use_calico, if defined, may affect other roles or play behavior. 
-r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}" -r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}" +openshift_hosted_registry_serviceaccount: registry +openshift_hosted_registry_volumes: [] +openshift_hosted_registry_env_vars: {} + +# These edits are being specified only to prevent 'changed' on rerun +openshift_hosted_registry_edits: +- key: spec.strategy.rollingParams + value: + intervalSeconds: 1 + maxSurge: "25%" + maxUnavailable: "25%" + timeoutSeconds: 600 + updatePeriodSeconds: 1 + action: put + +openshift_hosted_registry_force: +- False + +openshift_push_via_dns: False + +# NOTE: settting openshift_docker_hosted_registry_insecure may affect other roles +openshift_hosted_docker_registry_insecure_default: "{{ openshift_docker_hosted_registry_insecure | default(False) }}" +openshift_hosted_docker_registry_insecure: "{{ openshift_hosted_docker_registry_insecure_default }}" diff --git a/roles/openshift_hosted/filter_plugins/filters.py b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py index 7f41529ac..7f41529ac 100644 --- a/roles/openshift_hosted/filter_plugins/filters.py +++ b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml index 28fd396d6..1d70ef7eb 100644 --- a/roles/openshift_hosted/meta/main.yml +++ b/roles/openshift_hosted/meta/main.yml @@ -12,7 +12,6 @@ galaxy_info: categories: - cloud dependencies: -- role: openshift_cli - role: openshift_hosted_facts - role: lib_openshift - role: lib_os_firewall diff --git a/roles/openshift_hosted/tasks/create_projects.yml b/roles/openshift_hosted/tasks/create_projects.yml new file mode 100644 index 000000000..1b25d0c64 --- /dev/null +++ b/roles/openshift_hosted/tasks/create_projects.yml @@ -0,0 +1,14 @@ +--- +- name: Create default projects + oc_project: + name: "{{ item.key }}" + node_selector: + - "{{ item.value.default_node_selector }}" + with_dict: "{{ openshift_default_projects }}" + +- name: Create additional projects + oc_project: + name: "{{ item.key }}" + node_selector: + - "{{ item.value.default_node_selector }}" + with_dict: "{{ openshift_additional_projects }}" diff --git a/roles/openshift_hosted/tasks/router/firewall.yml b/roles/openshift_hosted/tasks/firewall.yml index ff90f3372..1eb2c92c8 100644 --- a/roles/openshift_hosted/tasks/router/firewall.yml +++ b/roles/openshift_hosted/tasks/firewall.yml @@ -8,7 +8,7 @@ protocol: "{{ item.port.split('/')[1] }}" port: "{{ item.port.split('/')[0] }}" when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_router_os_firewall_allow }}" + with_items: "{{ l_openshift_hosted_fw_allow }}" - name: Remove iptables rules os_firewall_manage_iptables: @@ -17,9 +17,9 @@ protocol: "{{ item.port.split('/')[1] }}" port: "{{ item.port.split('/')[0] }}" when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_router_os_firewall_deny }}" + with_items: "{{ l_openshift_hosted_fw_deny }}" -- when: r_openshift_hosted_router_firewall_enabled | bool and r_openshift_hosted_router_use_firewalld | bool +- when: l_openshift_hosted_firewall_enabled | bool and l_openshift_hosted_use_firewalld | bool block: - name: Add firewalld allow rules firewalld: @@ -28,7 +28,7 @@ immediate: true state: enabled when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_router_os_firewall_allow }}" + with_items: "{{ l_openshift_hosted_fw_allow }}" - name: Remove firewalld allow rules firewalld: @@ -37,4 +37,4 @@ 
immediate: true state: disabled when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_router_os_firewall_deny }}" + with_items: "{{ l_openshift_hosted_fw_deny }}" diff --git a/roles/openshift_hosted/tasks/main.yml b/roles/openshift_hosted/tasks/main.yml index 6efe2f63c..d306adf42 100644 --- a/roles/openshift_hosted/tasks/main.yml +++ b/roles/openshift_hosted/tasks/main.yml @@ -1,13 +1,9 @@ --- -- name: Create projects - oc_project: - name: "{{ item.key }}" - node_selector: - - "{{ item.value.default_node_selector }}" - with_dict: "{{ openshift_projects }}" - -- include: router/router.yml - when: openshift_hosted_manage_router | default(true) | bool - -- include: registry/registry.yml - when: openshift_hosted_manage_registry | default(true) | bool +# This role is intended to be used with include_role. +# include_role: +# name: openshift_hosted +# tasks_from: "{{ item }}" +# with_items: +# - create_projects.yml +# - router.yml +# - registry.yml diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry.yml index 48f53aef8..f1aa9c5a8 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry.yml @@ -1,7 +1,11 @@ --- - name: setup firewall include: firewall.yml - static: yes + vars: + l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_registry_firewall_enabled }}" + l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_registry_use_firewalld }}" + l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_registry_os_firewall_allow }}" + l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_registry_os_firewall_deny }}" - when: openshift.hosted.registry.replicas | default(none) is none block: @@ -36,30 +40,14 @@ - name: set openshift_hosted facts set_fact: openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}" - openshift_hosted_registry_name: docker-registry - openshift_hosted_registry_serviceaccount: registry openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}" openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}')}}" - openshift_hosted_registry_volumes: [] - openshift_hosted_registry_env_vars: {} - openshift_hosted_registry_edits: - # These edits are being specified only to prevent 'changed' on rerun - - key: spec.strategy.rollingParams - value: - intervalSeconds: 1 - maxSurge: "25%" - maxUnavailable: "25%" - timeoutSeconds: 600 - updatePeriodSeconds: 1 - action: put - openshift_hosted_registry_force: - - False - name: Update registry environment variables when pushing via dns set_fact: openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}" - when: openshift_push_via_dns | default(false) | bool + when: openshift_push_via_dns | bool - name: Update registry proxy settings for dc/docker-registry set_fact: @@ -137,36 +125,17 @@ edits: "{{ openshift_hosted_registry_edits }}" force: "{{ True|bool in openshift_hosted_registry_force }}" -- when: openshift_hosted_registry_wait | bool - block: - - name: Ensure OpenShift registry correctly rolls out (best-effort today) - command: | - oc rollout status deploymentconfig {{ openshift_hosted_registry_name }} \ - --namespace {{ openshift_hosted_registry_namespace }} \ - --config {{ 
openshift.common.config_base }}/master/admin.kubeconfig - async: 600 - poll: 15 - failed_when: false - - - name: Determine the latest version of the OpenShift registry deployment - command: | - {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \ - --namespace {{ openshift_hosted_registry_namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ - -o jsonpath='{ .status.latestVersion }' - register: openshift_hosted_registry_latest_version - - - name: Sanity-check that the OpenShift registry rolled out correctly - command: | - {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \ - --namespace {{ openshift_hosted_registry_namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ - -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' - register: openshift_hosted_registry_rc_phase - until: "'Running' not in openshift_hosted_registry_rc_phase.stdout" - delay: 15 - retries: 40 - failed_when: "'Failed' in openshift_hosted_registry_rc_phase.stdout" +- name: setup registry list + set_fact: + r_openshift_hosted_registry_list: + - name: "{{ openshift_hosted_registry_name }}" + namespace: "{{ openshift_hosted_registry_namespace }}" + +- name: Wait for pod (Registry) + include: wait_for_pod.yml + vars: + l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}" + l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}" - include: storage/glusterfs.yml when: diff --git a/roles/openshift_hosted/tasks/registry/firewall.yml b/roles/openshift_hosted/tasks/registry/firewall.yml deleted file mode 100644 index 775b7d6d7..000000000 --- a/roles/openshift_hosted/tasks/registry/firewall.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -- when: r_openshift_hosted_registry_firewall_enabled | bool and not r_openshift_hosted_registry_use_firewalld | bool - block: - - name: Add iptables allow rules - os_firewall_manage_iptables: - name: "{{ item.service }}" - action: add - protocol: "{{ item.port.split('/')[1] }}" - port: "{{ item.port.split('/')[0] }}" - when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_registry_os_firewall_allow }}" - - - name: Remove iptables rules - os_firewall_manage_iptables: - name: "{{ item.service }}" - action: remove - protocol: "{{ item.port.split('/')[1] }}" - port: "{{ item.port.split('/')[0] }}" - when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_registry_os_firewall_deny }}" - -- when: r_openshift_hosted_registry_firewall_enabled | bool and r_openshift_hosted_registry_use_firewalld | bool - block: - - name: Add firewalld allow rules - firewalld: - port: "{{ item.port }}" - permanent: true - immediate: true - state: enabled - when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_registry_os_firewall_allow }}" - - - name: Remove firewalld allow rules - firewalld: - port: "{{ item.port }}" - permanent: true - immediate: true - state: disabled - when: item.cond | default(True) - with_items: "{{ r_openshift_hosted_registry_os_firewall_deny }}" diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router.yml index 2a42b5a7c..2aeecc943 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -1,7 +1,11 @@ --- - name: setup firewall include: firewall.yml - static: yes + vars: + l_openshift_hosted_firewall_enabled: "{{ 
r_openshift_hosted_router_firewall_enabled }}" + l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_router_use_firewalld }}" + l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_router_os_firewall_allow }}" + l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_router_os_firewall_deny }}" - name: Retrieve list of openshift nodes matching router selector oc_obj: @@ -82,7 +86,7 @@ replicas: "{{ item.replicas }}" namespace: "{{ item.namespace | default('default') }}" # This option is not yet implemented - # force_subdomain: "{{ openshift.hosted.router.force_subdomain | default(none) }}" + # force_subdomain: "{{ openshift_hosted_router_force_subdomain | default(none) }}" service_account: "{{ item.serviceaccount | default('router') }}" selector: "{{ item.selector | default(none) }}" images: "{{ item.images | default(omit) }}" @@ -94,38 +98,8 @@ stats_port: "{{ item.stats_port }}" with_items: "{{ openshift_hosted_routers }}" -- when: openshift_hosted_router_wait | bool - block: - - name: Ensure OpenShift router correctly rolls out (best-effort today) - command: | - {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \ - --namespace {{ item.namespace | default('default') }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig - async: 600 - poll: 15 - with_items: "{{ openshift_hosted_routers }}" - failed_when: false - - - name: Determine the latest version of the OpenShift router deployment - command: | - {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \ - --namespace {{ item.namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ - -o jsonpath='{ .status.latestVersion }' - register: openshift_hosted_routers_latest_version - with_items: "{{ openshift_hosted_routers }}" - - - name: Poll for OpenShift router deployment success - command: | - {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ - --namespace {{ item.0.namespace }} \ - --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ - -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' - register: openshift_hosted_router_rc_phase - until: "'Running' not in openshift_hosted_router_rc_phase.stdout" - delay: 15 - retries: 40 - failed_when: "'Failed' in openshift_hosted_router_rc_phase.stdout" - with_together: - - "{{ openshift_hosted_routers }}" - - "{{ openshift_hosted_routers_latest_version.results }}" +- name: Wait for pod (Routers) + include: wait_for_pod.yml + vars: + l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}" + l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}" diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/secure.yml index 434b679df..0da8ac8a7 100644 --- a/roles/openshift_hosted/tasks/registry/secure.yml +++ b/roles/openshift_hosted/tasks/secure.yml @@ -38,11 +38,11 @@ - "{{ docker_registry_service.results.clusterip }}" - "{{ docker_registry_route.results[0].spec.host }}" - "{{ openshift_hosted_registry_name }}.default.svc" - - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift.common.dns_domain }}" + - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift_cluster_domain }}" - "{{ openshift_hosted_registry_routehost }}" cert: "{{ docker_registry_cert_path }}" key: "{{ docker_registry_key_path }}" - expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | 
bool else omit }}" + expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift_deployment_type) | bool else omit }}" register: registry_self_cert when: docker_registry_self_signed diff --git a/roles/openshift_hosted/tasks/registry/secure/passthrough.yml b/roles/openshift_hosted/tasks/secure/passthrough.yml index 5b44fda10..5b44fda10 100644 --- a/roles/openshift_hosted/tasks/registry/secure/passthrough.yml +++ b/roles/openshift_hosted/tasks/secure/passthrough.yml diff --git a/roles/openshift_hosted/tasks/registry/secure/reencrypt.yml b/roles/openshift_hosted/tasks/secure/reencrypt.yml index 48e5b0fba..48e5b0fba 100644 --- a/roles/openshift_hosted/tasks/registry/secure/reencrypt.yml +++ b/roles/openshift_hosted/tasks/secure/reencrypt.yml diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml index c2954fde1..c2954fde1 100644 --- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/storage/object_storage.yml index 8553a8098..8553a8098 100644 --- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml +++ b/roles/openshift_hosted/tasks/storage/object_storage.yml diff --git a/roles/openshift_hosted/tasks/registry/storage/registry_config.j2 b/roles/openshift_hosted/tasks/storage/registry_config.j2 index f3e82ad4f..f3e82ad4f 120000 --- a/roles/openshift_hosted/tasks/registry/storage/registry_config.j2 +++ b/roles/openshift_hosted/tasks/storage/registry_config.j2 diff --git a/roles/openshift_hosted/tasks/registry/storage/s3.yml b/roles/openshift_hosted/tasks/storage/s3.yml index 318969885..8e905d905 100644 --- a/roles/openshift_hosted/tasks/registry/storage/s3.yml +++ b/roles/openshift_hosted/tasks/storage/s3.yml @@ -3,7 +3,7 @@ assert: that: - openshift.hosted.registry.storage.s3.bucket | default(none) is not none - - openshift.hosted.registry.storage.s3.region | default(none) is not none + - openshift.hosted.registry.storage.s3.bucket | default(none) is not none msg: | When using S3 storage, the following variables are required: openshift_hosted_registry_storage_s3_bucket diff --git a/roles/openshift_hosted/tasks/wait_for_pod.yml b/roles/openshift_hosted/tasks/wait_for_pod.yml new file mode 100644 index 000000000..056c79334 --- /dev/null +++ b/roles/openshift_hosted/tasks/wait_for_pod.yml @@ -0,0 +1,36 @@ +--- +- when: l_openshift_hosted_wait_for_pod | default(False) | bool + block: + - name: Ensure OpenShift pod correctly rolls out (best-effort today) + command: | + {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \ + --namespace {{ item.namespace | default('default') }} \ + --config {{ openshift_master_config_dir }}/admin.kubeconfig + async: 600 + poll: 15 + with_items: "{{ l_openshift_hosted_wfp_items }}" + failed_when: false + + - name: Determine the latest version of the OpenShift pod deployment + command: | + {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \ + --namespace {{ item.namespace }} \ + --config {{ openshift_master_config_dir }}/admin.kubeconfig \ + -o jsonpath='{ .status.latestVersion }' + register: l_openshift_hosted_wfp_latest_version + with_items: "{{ l_openshift_hosted_wfp_items }}" + + - name: Poll for OpenShift pod deployment success + command: | + {{ openshift.common.client_binary }} get 
replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ + --namespace {{ item.0.namespace }} \ + --config {{ openshift_master_config_dir }}/admin.kubeconfig \ + -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' + register: openshift_hosted_wfp_rc_phase + until: "'Running' not in openshift_hosted_wfp_rc_phase.stdout" + delay: 15 + retries: 40 + failed_when: "'Failed' in openshift_hosted_wfp_rc_phase.stdout" + with_together: + - "{{ l_openshift_hosted_wfp_items }}" + - "{{ l_openshift_hosted_wfp_latest_version.results }}" diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2 index 61da452de..eae8b328e 100644 --- a/roles/openshift_hosted/templates/registry_config.j2 +++ b/roles/openshift_hosted/templates/registry_config.j2 @@ -70,10 +70,8 @@ auth: openshift: realm: openshift middleware: -{% if openshift.common.version_gte_3_3_or_1_3 | bool %} registry: - name: openshift -{% endif %} repository: - name: openshift options: @@ -87,7 +85,7 @@ middleware: baseurl: {{ openshift_hosted_registry_storage_s3_cloudfront_baseurl }} privatekey: /etc/origin/cloudfront.pem keypairid: {{ openshift_hosted_registry_storage_s3_cloudfront_keypairid }} -{% elif openshift.common.version_gte_3_3_or_1_3 | bool %} +{% else %} storage: - name: openshift {% endif -%} diff --git a/roles/openshift_hosted/vars/main.yml b/roles/openshift_hosted/vars/main.yml index 0821d0e7e..0e756d9e1 100644 --- a/roles/openshift_hosted/vars/main.yml +++ b/roles/openshift_hosted/vars/main.yml @@ -1,13 +1,2 @@ --- -openshift_master_config_dir: "{{ openshift.common.config_base }}/master" registry_config_secret_name: registry-config - -openshift_default_projects: - default: - default_node_selector: '' - logging: - default_node_selector: '' - openshift-infra: - default_node_selector: '' - -openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts(openshift_default_projects) }}" diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index f283261c4..45477f60d 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -12,13 +12,13 @@ generation for Elasticsearch (it uses JKS) as well as openssl to sign certificat As part of the installation, it is recommended that you add the Fluentd node selector label to the list of persisted [node labels](https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-node-host-labels). -###Required vars: +### Required vars: - `openshift_logging_install_logging`: When `True` the `openshift_logging` role will install Aggregated Logging. When `openshift_logging_install_logging` is set to `False` the `openshift_logging` role will uninstall Aggregated Logging. -###Optional vars: +### Optional vars: - `openshift_logging_purge_logging`: When `openshift_logging_install_logging` is set to 'False' to trigger uninstallation and `openshift_logging_purge_logging` is set to 'True', it will completely and irreversibly remove all logging persistent data including PVC. Defaults to 'False'. - `openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'. - `openshift_logging_curator_image_prefix`: Setting the image prefix for Curator image. Defaults to `openshift_logging_image_prefix`.
@@ -62,7 +62,6 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin - `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'. - `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'. - `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'. -- `openshift_logging_fluentd_es_copy`: Whether or not to use the ES_COPY feature for Fluentd (DEPRECATED). Defaults to 'False'. - `openshift_logging_fluentd_use_journal`: *DEPRECATED - DO NOT USE* Fluentd will automatically detect whether or not Docker is using the journald log driver. - `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false. - `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all']. @@ -91,6 +90,12 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin - `openshift_logging_es_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'. - `openshift_logging_es_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'. +- `openshift_logging_install_eventrouter`: Coupled with `openshift_logging_install_logging`. When both are 'True', eventrouter will be installed. When both are 'False', eventrouter will be uninstalled. +Other combinations will keep the eventrouter untouched. + +Detailed eventrouter configuration can be found in +- `roles/openshift_logging_eventrouter/README.md` + When `openshift_logging_use_ops` is `True`, there are some additional vars. These work the same as above for their non-ops counterparts, but apply to the OPS cluster instance: - `openshift_logging_es_ops_host`: logging-es-ops @@ -194,3 +199,26 @@ Elasticsearch OPS too, if using an OPS cluster: Defaults to 'logging-mux'. - `openshift_logging_mux_file_buffer_storage_group`: The storage group used for Mux. Defaults to '65534'. + +### remote syslog forwarding +`openshift_logging_fluentd_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false` +`openshift_logging_fluentd_remote_syslog_host`: Required, hostname or IP of remote syslog server +`openshift_logging_fluentd_remote_syslog_port`: Port of remote syslog server, defaults to `514` +`openshift_logging_fluentd_remote_syslog_severity`: Syslog severity level, defaults to `debug` +`openshift_logging_fluentd_remote_syslog_facility`: Syslog facility, defaults to `local0` +`openshift_logging_fluentd_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty) +`openshift_logging_fluentd_remote_syslog_tag_key`: If string specified, use this field from the record to set the key field on the syslog message +`openshift_logging_fluentd_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false` +`openshift_logging_fluentd_remote_syslog_payload_key`: If string is specified, use this field from the record as the payload on the syslog message + +The corresponding openshift_logging_mux_ parameters are below. 
+ +`openshift_logging_mux_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false` +`openshift_logging_mux_remote_syslog_host`: Required, hostname or IP of remote syslog server +`openshift_logging_mux_remote_syslog_port`: Port of remote syslog server, defaults to `514` +`openshift_logging_mux_remote_syslog_severity`: Syslog severity level, defaults to `debug` +`openshift_logging_mux_remote_syslog_facility`: Syslog facility, defaults to `local0` +`openshift_logging_mux_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty) +`openshift_logging_mux_remote_syslog_tag_key`: If string specified, use this field from the record to set the key field on the syslog message +`openshift_logging_mux_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false` +`openshift_logging_mux_remote_syslog_payload_key`: If string is specified, use this field from the record as the payload on the syslog message diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 6699e2062..5574a1446 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -32,7 +32,7 @@ openshift_logging_kibana_cpu_limit: null openshift_logging_kibana_memory_limit: 736Mi openshift_logging_kibana_proxy_debug: false openshift_logging_kibana_proxy_cpu_limit: null -openshift_logging_kibana_proxy_memory_limit: 96Mi +openshift_logging_kibana_proxy_memory_limit: 256Mi openshift_logging_kibana_replica_count: 1 openshift_logging_kibana_edge_term_policy: Redirect @@ -56,7 +56,7 @@ openshift_logging_kibana_ops_cpu_limit: null openshift_logging_kibana_ops_memory_limit: 736Mi openshift_logging_kibana_ops_proxy_debug: false openshift_logging_kibana_ops_proxy_cpu_limit: null -openshift_logging_kibana_ops_proxy_memory_limit: 96Mi +openshift_logging_kibana_ops_proxy_memory_limit: 256Mi openshift_logging_kibana_ops_replica_count: 1 #The absolute path on the control node to the cert file to use @@ -74,7 +74,6 @@ openshift_logging_kibana_ops_ca: "" openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'} openshift_logging_fluentd_cpu_limit: 100m openshift_logging_fluentd_memory_limit: 512Mi -openshift_logging_fluentd_es_copy: false openshift_logging_fluentd_journal_source: "" openshift_logging_fluentd_journal_read_from_head: "" openshift_logging_fluentd_hosts: ['--all'] diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index 45298e345..3040d15ca 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -105,3 +105,9 @@ - logging-elasticsearch - logging-fluentd - logging-mux + +## EventRouter +- include_role: + name: openshift_logging_eventrouter + when: + not openshift_logging_install_eventrouter | default(false) | bool diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index de5e25061..2695ef030 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -270,4 +270,12 @@ openshift_logging_fluentd_master_url: "{{ openshift_logging_master_url }}" openshift_logging_fluentd_namespace: "{{ openshift_logging_namespace }}" + +## EventRouter +- include_role: + name: openshift_logging_eventrouter + when: + openshift_logging_install_eventrouter | default(false) | bool + + - include: 
update_master_config.yaml diff --git a/roles/openshift_logging_eventrouter/README.md b/roles/openshift_logging_eventrouter/README.md new file mode 100644 index 000000000..da313d68b --- /dev/null +++ b/roles/openshift_logging_eventrouter/README.md @@ -0,0 +1,20 @@ +Event router +------------ + +A pod forwarding kubernetes events to EFK aggregated logging stack. + +- **eventrouter** is deployed to the logging project, has a service account and its own role to read events +- **eventrouter** watches kubernetes events, marshalls them to JSON and outputs to its sink, currently only various formatting to STDOUT +- **fluentd** picks them up and inserts them into the elasticsearch *.operations* index + +- `openshift_logging_install_eventrouter`: When 'True', eventrouter will be installed. When 'False', eventrouter will be uninstalled. + +Configuration variables: + +- `openshift_logging_eventrouter_image_prefix`: The prefix for the eventrouter logging image. Defaults to `openshift_logging_image_prefix`. +- `openshift_logging_eventrouter_image_version`: The image version for the logging eventrouter. Defaults to 'latest'. +- `openshift_logging_eventrouter_sink`: Select a sink for eventrouter; supported sinks are 'stdout' and 'glog'. Defaults to 'stdout'. +- `openshift_logging_eventrouter_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land. +- `openshift_logging_eventrouter_cpu_limit`: The amount of CPU to allocate to eventrouter. Defaults to '100m'. +- `openshift_logging_eventrouter_memory_limit`: The memory limit for eventrouter pods. Defaults to '128Mi'. +- `openshift_logging_eventrouter_namespace`: The namespace where eventrouter is deployed. Defaults to 'default'. diff --git a/roles/openshift_logging_eventrouter/defaults/main.yaml b/roles/openshift_logging_eventrouter/defaults/main.yaml new file mode 100644 index 000000000..34e33f75f --- /dev/null +++ b/roles/openshift_logging_eventrouter/defaults/main.yaml @@ -0,0 +1,9 @@ +--- +openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version | default('latest') }}" +openshift_logging_eventrouter_replicas: 1 +openshift_logging_eventrouter_sink: stdout +openshift_logging_eventrouter_nodeselector: "" +openshift_logging_eventrouter_cpu_limit: 100m +openshift_logging_eventrouter_memory_limit: 128Mi +openshift_logging_eventrouter_namespace: default diff --git a/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml new file mode 100644 index 000000000..91708e54b --- /dev/null +++ b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml @@ -0,0 +1,103 @@ +# this openshift template should match (except nodeSelector) jinja2 template in +# ../templates/eventrouter-template.j2 +kind: Template +apiVersion: v1 +metadata: + name: eventrouter-template + annotations: + description: "A pod forwarding kubernetes events to EFK aggregated logging stack."
+ tags: "events,EFK,logging" +objects: + - kind: ServiceAccount + apiVersion: v1 + metadata: + name: aggregated-logging-eventrouter + - kind: ClusterRole + apiVersion: v1 + metadata: + name: event-reader + rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] + - kind: ConfigMap + apiVersion: v1 + metadata: + name: logging-eventrouter + data: + config.json: |- + { + "sink": "${SINK}" + } + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: logging-eventrouter + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + spec: + selector: + component: eventrouter + logging-infra: eventrouter + provider: openshift + replicas: ${REPLICAS} + template: + metadata: + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + name: logging-eventrouter + spec: + serviceAccount: aggregated-logging-eventrouter + serviceAccountName: aggregated-logging-eventrouter + containers: + - name: kube-eventrouter + image: ${IMAGE} + imagePullPolicy: Always + resources: + limits: + memory: ${MEMORY} + cpu: ${CPU} + requires: + memory: ${MEMORY} + volumeMounts: + - name: config-volume + mountPath: /etc/eventrouter + volumes: + - name: config-volume + configMap: + name: logging-eventrouter + - kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: event-reader-binding + subjects: + - kind: ServiceAccount + name: aggregated-logging-eventrouter + namespace: ${NAMESPACE} + roleRef: + kind: ClusterRole + name: event-reader + +parameters: + - name: SINK + displayName: Sink + value: stdout + - name: REPLICAS + displayName: Replicas + value: "1" + - name: IMAGE + displayName: Image + value: "docker.io/openshift/origin-logging-eventrouter:latest" + - name: MEMORY + displayName: Memory + value: "128Mi" + - name: CPU + displayName: CPU + value: "100m" + - name: NAMESPACE + displayName: Namespace + value: default diff --git a/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml new file mode 100644 index 000000000..cf0abbde9 --- /dev/null +++ b/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml @@ -0,0 +1,40 @@ +--- +# delete eventrouter +- name: Delete EventRouter service account + oc_serviceaccount: + state: absent + name: "aggregated-logging-eventrouter" + namespace: "{{ openshift_logging_eventrouter_namespace }}" + +- name: Delete event-reader cluster role + oc_clusterrole: + state: absent + name: event-reader + +- name: Unset privileged permissions for EventRouter + oc_adm_policy_user: + namespace: "{{ openshift_logging_eventrouter_namespace }}" + resource_kind: cluster-role + resource_name: event-reader + state: absent + user: "system:serviceaccount:{{ openshift_logging_eventrouter_namespace }}:aggregated-logging-eventrouter" + +- name: Delete EventRouter configmap + oc_configmap: + state: absent + name: logging-eventrouter + namespace: "{{ openshift_logging_eventrouter_namespace }}" + +- name: Delete EventRouter DC + oc_obj: + state: absent + name: logging-eventrouter + namespace: "{{ openshift_logging_eventrouter_namespace }}" + kind: dc + +- name: Delete EventRouter Template + oc_obj: + state: absent + name: eventrouter-template + namespace: "{{ openshift_logging_eventrouter_namespace }}" + kind: template diff --git a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml new file mode 100644 index 000000000..8df7435e2 --- /dev/null +++ 
b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml @@ -0,0 +1,59 @@ +--- +# initial checks +- assert: + msg: Invalid sink type "{{openshift_logging_eventrouter_sink}}", only one of "{{__eventrouter_sinks}}" allowed + that: openshift_logging_eventrouter_sink in __eventrouter_sinks + +# allow passing in a tempdir +- name: Create temp directory for doing work in + command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX + register: mktemp + changed_when: False + +- set_fact: + tempdir: "{{ mktemp.stdout }}" + +- name: Create templates subdirectory + file: + state: directory + path: "{{ tempdir }}/templates" + mode: 0755 + changed_when: False + +# create EventRouter deployment config +- name: Generate EventRouter template + template: + src: eventrouter-template.j2 + dest: "{{ tempdir }}/templates/eventrouter-template.yaml" + vars: + node_selector: "{{ openshift_logging_eventrouter_nodeselector | default({}) }}" + +- name: Create EventRouter template + oc_obj: + namespace: "{{ openshift_logging_eventrouter_namespace }}" + kind: template + name: eventrouter-template + state: present + files: + - "{{ tempdir }}/templates/eventrouter-template.yaml" + +- name: Process EventRouter template + oc_process: + state: present + template_name: eventrouter-template + namespace: "{{ openshift_logging_eventrouter_namespace }}" + params: + IMAGE: "{{openshift_logging_eventrouter_image_prefix}}logging-eventrouter:{{openshift_logging_eventrouter_image_version}}" + REPLICAS: "{{ openshift_logging_eventrouter_replicas }}" + CPU: "{{ openshift_logging_eventrouter_cpu_limit }}" + MEMORY: "{{ openshift_logging_eventrouter_memory_limit }}" + NAMESPACE: "{{ openshift_logging_eventrouter_namespace }}" + SINK: "{{ openshift_logging_eventrouter_sink }}" + +## Placeholder for migration when necessary ## + +- name: Delete temp directory + file: + name: "{{ tempdir }}" + state: absent + changed_when: False diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml new file mode 100644 index 000000000..58e5a559f --- /dev/null +++ b/roles/openshift_logging_eventrouter/tasks/main.yaml @@ -0,0 +1,6 @@ +--- +- include: "{{ role_path }}/tasks/install_eventrouter.yaml" + when: openshift_logging_install_eventrouter | default(false) | bool + +- include: "{{ role_path }}/tasks/delete_eventrouter.yaml" + when: not openshift_logging_install_eventrouter | default(false) | bool diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 new file mode 100644 index 000000000..9ff4c7e80 --- /dev/null +++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 @@ -0,0 +1,109 @@ +# this jinja2 template should always match (except nodeSelector) openshift template in +# ../files/eventrouter-template.yaml +kind: Template +apiVersion: v1 +metadata: + name: eventrouter-template + annotations: + description: "A pod forwarding kubernetes events to EFK aggregated logging stack." 
+ tags: "events,EFK,logging" +objects: + - kind: ServiceAccount + apiVersion: v1 + metadata: + name: aggregated-logging-eventrouter + - kind: ClusterRole + apiVersion: v1 + metadata: + name: event-reader + rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] + - kind: ConfigMap + apiVersion: v1 + metadata: + name: logging-eventrouter + data: + config.json: |- + { + "sink": "${SINK}" + } + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: logging-eventrouter + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + spec: + selector: + component: eventrouter + logging-infra: eventrouter + provider: openshift + replicas: ${REPLICAS} + template: + metadata: + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + name: logging-eventrouter + spec: + serviceAccount: aggregated-logging-eventrouter + serviceAccountName: aggregated-logging-eventrouter +{% if node_selector is iterable and node_selector | length > 0 %} + nodeSelector: +{% for key, value in node_selector.iteritems() %} + {{ key }}: "{{ value }}" +{% endfor %} +{% endif %} + containers: + - name: kube-eventrouter + image: ${IMAGE} + imagePullPolicy: Always + resources: + limits: + memory: ${MEMORY} + cpu: ${CPU} + requires: + memory: ${MEMORY} + volumeMounts: + - name: config-volume + mountPath: /etc/eventrouter + volumes: + - name: config-volume + configMap: + name: logging-eventrouter + - kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: event-reader-binding + subjects: + - kind: ServiceAccount + name: aggregated-logging-eventrouter + namespace: ${NAMESPACE} + roleRef: + kind: ClusterRole + name: event-reader + +parameters: + - name: SINK + displayName: Sink + value: stdout + - name: REPLICAS + displayName: Replicas + value: "1" + - name: IMAGE + displayName: Image + value: "docker.io/openshift/origin-logging-eventrouter:latest" + - name: MEMORY + displayName: Memory + value: "128Mi" + - name: CPU + displayName: CPU + value: "100m" + - name: NAMESPACE + displayName: Namespace + value: default diff --git a/roles/openshift_logging_eventrouter/vars/main.yaml b/roles/openshift_logging_eventrouter/vars/main.yaml new file mode 100644 index 000000000..bdf561fe3 --- /dev/null +++ b/roles/openshift_logging_eventrouter/vars/main.yaml @@ -0,0 +1,2 @@ +--- +__eventrouter_sinks: ["glog", "stdout"] diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml index 30d3d854a..82326bdd1 100644 --- a/roles/openshift_logging_fluentd/defaults/main.yml +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -50,8 +50,6 @@ openshift_logging_fluentd_aggregating_key_path: none openshift_logging_fluentd_aggregating_passphrase: none ### Deprecating in 3.6 -openshift_logging_fluentd_es_copy: false - # following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly #fluentd_config_contents: #fluentd_throttle_contents: diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 74b4d7db4..37960afd1 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -1,5 +1,8 @@ --- - fail: + msg: The ES_COPY feature is no longer supported. 
Please remove the variable from your inventory + when: openshift_logging_fluentd_es_copy is defined +- fail: msg: Only one Fluentd nodeselector key pair should be provided when: openshift_logging_fluentd_nodeselector.keys() | count > 1 diff --git a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/fluent.conf.j2 index 46de94d60..6e07b403a 100644 --- a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 +++ b/roles/openshift_logging_fluentd/templates/fluent.conf.j2 @@ -49,7 +49,9 @@ @include configs.d/openshift/filter-viaq-data-model.conf @include configs.d/openshift/filter-post-*.conf ## +</label> +<label @OUTPUT> ## matches @include configs.d/openshift/output-pre-*.conf @include configs.d/openshift/output-operations.conf diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 index a4afb6618..b5f27b60d 100644 --- a/roles/openshift_logging_fluentd/templates/fluentd.j2 +++ b/roles/openshift_logging_fluentd/templates/fluentd.j2 @@ -94,8 +94,6 @@ spec: value: "{{ openshift_logging_fluentd_ops_client_key }}" - name: "OPS_CA" value: "{{ openshift_logging_fluentd_ops_ca }}" - - name: "ES_COPY" - value: "false" - name: "JOURNAL_SOURCE" value: "{{ openshift_logging_fluentd_journal_source | default('') }}" - name: "JOURNAL_READ_FROM_HEAD" @@ -120,6 +118,56 @@ spec: - name: "MUX_CLIENT_MODE" value: "{{ openshift_logging_mux_client_mode }}" {% endif %} +{% if openshift_logging_install_eventrouter is defined and openshift_logging_install_eventrouter %} + - name: "TRANSFORM_EVENTS" + value: "true" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog is defined and openshift_logging_fluentd_remote_syslog %} + - name: USE_REMOTE_SYSLOG + value: "true" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_host is defined %} + - name: REMOTE_SYSLOG_HOST + value: "{{ openshift_logging_fluentd_remote_syslog_host }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_port is defined %} + - name: REMOTE_SYSLOG_PORT + value: "{{ openshift_logging_fluentd_remote_syslog_port }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_severity is defined %} + - name: REMOTE_SYSLOG_SEVERITY + value: "{{ openshift_logging_fluentd_remote_syslog_severity }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_facility is defined %} + - name: REMOTE_SYSLOG_FACILITY + value: "{{ openshift_logging_fluentd_remote_syslog_facility }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_remove_tag_prefix is defined %} + - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX + value: "{{ openshift_logging_fluentd_remote_syslog_remove_tag_prefix }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_tag_key is defined %} + - name: REMOTE_SYSLOG_TAG_KEY + value: "{{ openshift_logging_fluentd_remote_syslog_tag_key }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_use_record is defined %} + - name: REMOTE_SYSLOG_USE_RECORD + value: "{{ openshift_logging_fluentd_remote_syslog_use_record }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_payload_key is defined %} + - name: REMOTE_SYSLOG_PAYLOAD_KEY + value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}" +{% endif %} + volumes: - name: runlogjournal hostPath: diff --git a/roles/openshift_logging_mux/files/fluent.conf b/roles/openshift_logging_mux/files/fluent.conf index aeaa705ee..bf61c9811 100644 --- a/roles/openshift_logging_mux/files/fluent.conf +++ 
b/roles/openshift_logging_mux/files/fluent.conf @@ -25,7 +25,9 @@ @include configs.d/openshift/filter-viaq-data-model.conf @include configs.d/openshift/filter-post-*.conf ## +</label> +<label @OUTPUT> ## matches @include configs.d/openshift/output-pre-*.conf @include configs.d/openshift/output-operations.conf diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2 index ff18d3270..4cc48139f 100644 --- a/roles/openshift_logging_mux/templates/mux.j2 +++ b/roles/openshift_logging_mux/templates/mux.j2 @@ -119,6 +119,52 @@ spec: resource: limits.memory - name: "FILE_BUFFER_LIMIT" value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}" + +{% if openshift_logging_mux_remote_syslog is defined and openshift_logging_mux_remote_syslog %} + - name: USE_REMOTE_SYSLOG + value: "true" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_host is defined %} + - name: REMOTE_SYSLOG_HOST + value: "{{ openshift_logging_mux_remote_syslog_host }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_port is defined %} + - name: REMOTE_SYSLOG_PORT + value: "{{ openshift_logging_mux_remote_syslog_port }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_severity is defined %} + - name: REMOTE_SYSLOG_SEVERITY + value: "{{ openshift_logging_mux_remote_syslog_severity }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_facility is defined %} + - name: REMOTE_SYSLOG_FACILITY + value: "{{ openshift_logging_mux_remote_syslog_facility }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_remove_tag_prefix is defined %} + - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX + value: "{{ openshift_logging_mux_remote_syslog_remove_tag_prefix }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_tag_key is defined %} + - name: REMOTE_SYSLOG_TAG_KEY + value: "{{ openshift_logging_mux_remote_syslog_tag_key }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_use_record is defined %} + - name: REMOTE_SYSLOG_USE_RECORD + value: "{{ openshift_logging_mux_remote_syslog_use_record }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_payload_key is defined %} + - name: REMOTE_SYSLOG_PAYLOAD_KEY + value: "{{ openshift_logging_mux_remote_syslog_payload_key }}" +{% endif %} + volumes: - name: config configMap: diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index 0461039fc..c92458c50 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -45,11 +45,11 @@ - include: install_metrics.yaml when: - - openshift_metrics_install_metrics | default(false) | bool + - openshift_metrics_install_metrics | bool - include: uninstall_metrics.yaml when: - - openshift_metrics_uninstall_metrics | default(false) | bool + - openshift_metrics_uninstall_metrics | bool - include: uninstall_hosa.yaml when: not openshift_metrics_install_hawkular_agent | bool diff --git a/roles/openshift_node_dnsmasq/handlers/main.yml b/roles/openshift_node_dnsmasq/handlers/main.yml index b4a0c3583..9f98126a0 100644 --- a/roles/openshift_node_dnsmasq/handlers/main.yml +++ b/roles/openshift_node_dnsmasq/handlers/main.yml @@ -3,6 +3,7 @@ systemd: name: NetworkManager state: restarted + enabled: True - name: restart dnsmasq systemd: diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml index d5fda7bd0..8a7da66c2 100644 --- a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml +++ 
b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml @@ -1,2 +1,11 @@ --- - fail: msg="Currently, NetworkManager must be installed and enabled prior to installation." + when: not openshift_node_bootstrap | bool + +- name: Install NetworkManager during node_bootstrap provisioning + package: + name: NetworkManager + state: present + notify: restart NetworkManager + +- include: ./network-manager.yml diff --git a/roles/openshift_node_facts/filter_plugins/filters.py b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py index 69069f2dc..69069f2dc 100644 --- a/roles/openshift_node_facts/filter_plugins/filters.py +++ b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py diff --git a/roles/openshift_repos/tasks/centos_repos.yml b/roles/openshift_repos/tasks/centos_repos.yml new file mode 100644 index 000000000..7dc15af2a --- /dev/null +++ b/roles/openshift_repos/tasks/centos_repos.yml @@ -0,0 +1,25 @@ +--- +# Note: OpenShift repositories under CentOS may be shipped through the +# "centos-release-openshift-origin" package which configures the repository. +# This task matches the file names provided by the package so that they are +# not installed twice in different files and remains idempotent. + +- name: Configure origin gpg keys + copy: + src: "origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS" + dest: "/etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS" + notify: refresh cache + +# openshift_release is formatted to a standard string in openshift_version role. +# openshift_release is expected to be in format 'x.y.z...' here. +# Here, we drop the '.' characters and try to match the correct repo template +# for our corresponding openshift_release. +- name: Configure correct origin release repository + template: + src: "{{ item }}" + dest: "/etc/yum.repos.d/{{ (item | basename | splitext)[0] }}" + with_first_found: + - "CentOS-OpenShift-Origin{{ (openshift_release | default('')).split('.') | join('') }}.repo.j2" + - "CentOS-OpenShift-Origin{{ ((openshift_release | default('')).split('.') | join(''))[0:2] }}.repo.j2" + - "CentOS-OpenShift-Origin.repo.j2" + notify: refresh cache diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index f972c0fd9..d41245093 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -30,30 +30,13 @@ - when: r_openshift_repos_has_run is not defined block: - # Note: OpenShift repositories under CentOS may be shipped through the - # "centos-release-openshift-origin" package which configures the repository. - # This task matches the file names provided by the package so that they are - # not installed twice in different files and remains idempotent. 
- - name: Configure origin repositories and gpg keys if needed - copy: - src: "{{ item.src }}" - dest: "{{ item.dest }}" - with_items: - - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS - dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS - - src: origin/repos/openshift-ansible-centos-paas-sig.repo - dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo - notify: refresh cache + - include: centos_repos.yml when: - ansible_os_family == "RedHat" - ansible_distribution != "Fedora" - openshift_deployment_type == 'origin' - openshift_enable_origin_repo | default(true) | bool - - name: Enable centos-openshift-origin-testing repository - command: yum-config-manager --enable centos-openshift-origin-testing - when: openshift_repos_enable_testing | bool - - name: Ensure clean repo cache in the event repos have been changed manually debug: msg: "First run of openshift_repos" diff --git a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo b/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2 index 09364c26f..0e2d57cb6 100644 --- a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2 @@ -8,7 +8,7 @@ gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS [centos-openshift-origin-testing] name=CentOS OpenShift Origin Testing baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin/ -enabled=0 +enabled={% if openshift_repos_enable_testing %}1{% else %}0{% endif %} gpgcheck=0 gpgkey=file:///etc/pki/rpm-gpg/openshift-ansible-CentOS-SIG-PaaS diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 new file mode 100644 index 000000000..2470931e1 --- /dev/null +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 @@ -0,0 +1,27 @@ +[centos-openshift-origin14] +name=CentOS OpenShift Origin +baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin14/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin14-testing] +name=CentOS OpenShift Origin Testing +baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin14/ +enabled={% if openshift_repos_enable_testing %}1{% else %}0{% endif %} +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin14-debuginfo] +name=CentOS OpenShift Origin DebugInfo +baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin14-source] +name=CentOS OpenShift Origin Source +baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin14/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 new file mode 100644 index 000000000..901f02cf4 --- /dev/null +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 @@ -0,0 +1,27 @@ +[centos-openshift-origin15] +name=CentOS OpenShift Origin +baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin15/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin15-testing] +name=CentOS OpenShift Origin Testing +baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin15/ 
+enabled={% if openshift_repos_enable_testing %}1{% else %}0{% endif %} +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin15-debuginfo] +name=CentOS OpenShift Origin DebugInfo +baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin15-source] +name=CentOS OpenShift Origin Source +baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin15/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 new file mode 100644 index 000000000..abc4ad1b5 --- /dev/null +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 @@ -0,0 +1,27 @@ +[centos-openshift-origin36] +name=CentOS OpenShift Origin +baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin36/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin36-testing] +name=CentOS OpenShift Origin Testing +baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin36/ +enabled={% if openshift_repos_enable_testing %}1{% else %}0{% endif %} +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin36-debuginfo] +name=CentOS OpenShift Origin DebugInfo +baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin36-source] +name=CentOS OpenShift Origin Source +baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin36/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml index e52ab5f6d..e534e0cca 100644 --- a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml +++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml @@ -35,10 +35,10 @@ - set_fact: openshift_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}" - openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_storage_volume_size if openshift_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" + openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_storage_volume_size | default('10Gi') if openshift_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" openshift_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}" openshift_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}" - openshift_logging_elasticsearch_ops_pvc_size: "{{ openshift_loggingops_storage_volume_size if openshift_loggingops_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" + openshift_logging_elasticsearch_ops_pvc_size: "{{ openshift_loggingops_storage_volume_size | default('10Gi') if openshift_loggingops_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" openshift_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}" 
openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}" openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}" diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js deleted file mode 100644 index d0a9f11dc..000000000 --- a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js +++ /dev/null @@ -1,2 +0,0 @@ -// empty file so that the master-config can still point to a file that exists -// this file will be replaced by the template service broker role if enabled diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index faf1aea97..e202ae173 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -23,10 +23,22 @@ name: "kube-service-catalog" node_selector: "" -- name: Make kube-service-catalog project network global - command: > - oc adm pod-network make-projects-global kube-service-catalog - when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant' +- when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant' + block: + - name: Waiting for netnamespace kube-service-catalog to be ready + oc_obj: + kind: netnamespace + name: kube-service-catalog + state: list + register: get_output + until: not get_output.results.stderr is defined + retries: 30 + delay: 1 + changed_when: false + + - name: Make kube-service-catalog project network global + command: > + oc adm pod-network make-projects-global kube-service-catalog - include: generate_certs.yml diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml deleted file mode 100644 index 6431c6d3f..000000000 --- a/roles/openshift_service_catalog/tasks/wire_aggregator.yml +++ /dev/null @@ -1,197 +0,0 @@ ---- -- name: Make temp cert dir - command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX - register: certtemp - changed_when: False - -- name: Check for First Master Aggregator Signer cert - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: first_proxy_ca_crt - changed_when: false - delegate_to: "{{ first_master }}" - -- name: Check for First Master Aggregator Signer key - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: first_proxy_ca_key - changed_when: false - delegate_to: "{{ first_master }}" - -# TODO: this currently has a bug where hostnames are required -- name: Creating First Master Aggregator signer certs - command: > - {{ hostvars[first_master].openshift.common.client_binary }} adm ca create-signer-cert - --cert=/etc/origin/master/front-proxy-ca.crt - --key=/etc/origin/master/front-proxy-ca.key - --serial=/etc/origin/master/ca.serial.txt - delegate_to: "{{ first_master }}" - when: - - not first_proxy_ca_crt.stat.exists - - not first_proxy_ca_key.stat.exists - -- name: Check for Aggregator Signer cert - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: proxy_ca_crt - changed_when: false - -- name: Check for Aggregator Signer key - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: proxy_ca_key - changed_when: false - -- name: Copy Aggregator Signer certs from first master - fetch: - src: "/etc/origin/master/{{ item }}" - dest: "{{ certtemp.stdout }}/{{ item }}" - flat: yes - with_items: 
- - front-proxy-ca.crt - - front-proxy-ca.key - delegate_to: "{{ first_master }}" - when: - - not proxy_ca_key.stat.exists - - not proxy_ca_crt.stat.exists - -- name: Copy Aggregator Signer certs to host - copy: - src: "{{ certtemp.stdout }}/{{ item }}" - dest: "/etc/origin/master/{{ item }}" - with_items: - - front-proxy-ca.crt - - front-proxy-ca.key - when: - - not proxy_ca_key.stat.exists - - not proxy_ca_crt.stat.exists - -# oc_adm_ca_server_cert: -# cert: /etc/origin/master/front-proxy-ca.crt -# key: /etc/origin/master/front-proxy-ca.key - -- name: Check for first master api-client config - stat: - path: /etc/origin/master/aggregator-front-proxy.kubeconfig - register: first_front_proxy_kubeconfig - delegate_to: "{{ first_master }}" - -- name: Create first master api-client config for Aggregator - command: > - {{ hostvars[first_master].openshift.common.client_binary }} adm create-api-client-config - --certificate-authority=/etc/origin/master/front-proxy-ca.crt - --signer-cert=/etc/origin/master/front-proxy-ca.crt - --signer-key=/etc/origin/master/front-proxy-ca.key - --user aggregator-front-proxy - --client-dir=/etc/origin/master - --signer-serial=/etc/origin/master/ca.serial.txt - delegate_to: "{{ first_master }}" - when: - - not first_front_proxy_kubeconfig.stat.exists - -- name: Check for api-client config - stat: - path: /etc/origin/master/aggregator-front-proxy.kubeconfig - register: front_proxy_kubeconfig - -- name: Copy api-client config from first master - fetch: - src: "/etc/origin/master/{{ item }}" - dest: "{{ certtemp.stdout }}/{{ item }}" - flat: yes - delegate_to: "{{ first_master }}" - with_items: - - aggregator-front-proxy.crt - - aggregator-front-proxy.key - - aggregator-front-proxy.kubeconfig - when: - - not front_proxy_kubeconfig.stat.exists - -- name: Copy api-client config to host - copy: - src: "{{ certtemp.stdout }}/{{ item }}" - dest: "/etc/origin/master/{{ item }}" - with_items: - - aggregator-front-proxy.crt - - aggregator-front-proxy.key - - aggregator-front-proxy.kubeconfig - when: - - not front_proxy_kubeconfig.stat.exists - -- name: copy tech preview extension file for service console UI - copy: - src: openshift-ansible-catalog-console.js - dest: /etc/origin/master/openshift-ansible-catalog-console.js - -- name: Update master config - yedit: - state: present - src: /etc/origin/master/master-config.yaml - edits: - - key: aggregatorConfig.proxyClientInfo.certFile - value: aggregator-front-proxy.crt - - key: aggregatorConfig.proxyClientInfo.keyFile - value: aggregator-front-proxy.key - - key: authConfig.requestHeader.clientCA - value: front-proxy-ca.crt - - key: authConfig.requestHeader.clientCommonNames - value: [aggregator-front-proxy] - - key: authConfig.requestHeader.usernameHeaders - value: [X-Remote-User] - - key: authConfig.requestHeader.groupHeaders - value: [X-Remote-Group] - - key: authConfig.requestHeader.extraHeaderPrefixes - value: [X-Remote-Extra-] - - key: assetConfig.extensionScripts - value: [/etc/origin/master/openshift-ansible-catalog-console.js] - - key: kubernetesMasterConfig.apiServerArguments.runtime-config - value: [apis/settings.k8s.io/v1alpha1=true] - - key: admissionConfig.pluginConfig.PodPreset.configuration.kind - value: DefaultAdmissionConfig - - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion - value: v1 - - key: admissionConfig.pluginConfig.PodPreset.configuration.disable - value: false - register: yedit_output - -#restart master serially here -- name: restart master api - systemd: name={{ 
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index a2a579e9d..b727eb74d 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -1,6 +1,6 @@
 ---
 - set_fact:
-    l_use_crio: "{{ openshift_use_crio | default(false) }}"
+    l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}"
 
 - name: Set containerized version to configure if openshift_image_tag specified
   set_fact:
@@ -22,7 +22,9 @@
   command: >
     docker run --rm {{ openshift.common.cli_image }}:latest version
   register: cli_image_version
-  when: openshift_version is not defined
+  when:
+  - openshift_version is not defined
+  - not l_use_crio_only
 
 # Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
 - set_fact:
@@ -31,6 +33,7 @@
   - openshift_version is not defined
   - openshift.common.deployment_type == 'origin'
   - cli_image_version.stdout_lines[0].split('-') | length > 1
+  - not l_use_crio_only
 
 - set_fact:
     openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
@@ -45,14 +48,14 @@
   when:
   - openshift_version is defined
  - openshift_version.split('.') | length == 2
-  - not l_use_crio
+  - not l_use_crio_only
 
 - set_fact:
     openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
   when:
   - openshift_version is defined
   - openshift_version.split('.') | length == 2
-  - not l_use_crio
+  - not l_use_crio_only
 
 # TODO: figure out a way to check for the openshift_version when using CRI-O.
 # We should do that using the images in the ostree storage so we don't have
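The set_fact expressions above carve the version out of the first line of `docker run … version` output with plain string methods. A standalone illustration of the simpler of the two expressions, using an invented sample line (real output will differ):

---
# Illustrative only: shows how split/slice turns "v3.7.0-..." into "3.7.0"
- hosts: localhost
  gather_facts: false
  vars:
    sample_line: "openshift v3.7.0-alpha.1+abc123-123-gdeadbeef"
  tasks:
  - debug:
      # split(' ')[1] -> "v3.7.0-alpha.1+abc123-123-gdeadbeef"
      # split('-')[0] -> "v3.7.0"; [1:] strips the leading "v" -> "3.7.0"
      msg: "{{ sample_line.split(' ')[1].split('-')[0][1:] }}"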
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 199df83c2..a9d22aa06 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -8,7 +8,9 @@
 
 - name: set ansible_service_broker facts
   set_fact:
-    template_service_broker_image: "{{ template_service_broker_image | default(__template_service_broker_image) }}"
+    template_service_broker_prefix: "{{ template_service_broker_prefix | default(__template_service_broker_prefix) }}"
+    template_service_broker_version: "{{ template_service_broker_version | default(__template_service_broker_version) }}"
+    template_service_broker_image_name: "{{ template_service_broker_image_name | default(__template_service_broker_image_name) }}"
 
 - oc_project:
     name: openshift-template-service-broker
@@ -28,7 +30,7 @@
 
 - name: Apply template file
   shell: >
-    oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ lookup('file', __tsb_files_location ~ '/' ~ __tsb_config_file) }}" | kubectl apply -f -
+    oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ lookup('file', __tsb_files_location ~ '/' ~ __tsb_config_file) }}" --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}" | kubectl apply -f -
 
 # reconcile with rbac
 - name: Reconcile with RBAC file
diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml
index 807f2822c..77afe1f43 100644
--- a/roles/template_service_broker/vars/default_images.yml
+++ b/roles/template_service_broker/vars/default_images.yml
@@ -1,2 +1,4 @@
 ---
-__template_service_broker_image: ""
+__template_service_broker_prefix: "docker.io/openshift/"
+__template_service_broker_version: "latest"
+__template_service_broker_image_name: "origin"
diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml
index 807f2822c..dfab1e01b 100644
--- a/roles/template_service_broker/vars/openshift-enterprise.yml
+++ b/roles/template_service_broker/vars/openshift-enterprise.yml
@@ -1,2 +1,4 @@
 ---
-__template_service_broker_image: ""
+__template_service_broker_prefix: "registry.access.redhat.com/openshift3/"
+__template_service_broker_version: "v3.7"
+__template_service_broker_image_name: "ose"
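With the image reference split into prefix, name and version, the IMAGE parameter passed to oc process is simply their concatenation. A small self-contained sketch using the origin defaults from default_images.yml above (the openshift-enterprise vars would instead yield registry.access.redhat.com/openshift3/ose:v3.7); the playbook is for illustration only:

---
- hosts: localhost
  gather_facts: false
  vars:
    template_service_broker_prefix: "docker.io/openshift/"
    template_service_broker_image_name: "origin"
    template_service_broker_version: "latest"
  tasks:
  - debug:
      # prints docker.io/openshift/origin:latest
      msg: "{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"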