Diffstat (limited to 'roles')
136 files changed, 1927 insertions, 953 deletions
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml index 248e0363d..0ed1d9674 100644 --- a/roles/ansible_service_broker/vars/default_images.yml +++ b/roles/ansible_service_broker/vars/default_images.yml @@ -1,6 +1,6 @@ --- -__ansible_service_broker_image_prefix: ansibleplaybookbundle/ +__ansible_service_broker_image_prefix: ansibleplaybookbundle/origin- __ansible_service_broker_image_tag: latest __ansible_service_broker_etcd_image_prefix: quay.io/coreos/ diff --git a/roles/container_runtime/tasks/common/post.yml b/roles/container_runtime/tasks/common/post.yml index b90190ebf..23fd8528a 100644 --- a/roles/container_runtime/tasks/common/post.yml +++ b/roles/container_runtime/tasks/common/post.yml @@ -22,5 +22,5 @@ - include_tasks: setup_docker_symlink.yml when: - - openshift_use_crio + - openshift_use_crio | bool - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool) diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml index eedb18604..d588f2618 100644 --- a/roles/container_runtime/tasks/systemcontainer_crio.yml +++ b/roles/container_runtime/tasks/systemcontainer_crio.yml @@ -104,4 +104,4 @@ # 'docker login' - include_tasks: common/post.yml vars: - openshift_docker_alternative_creds: "{{ openshift_use_crio_only }}" + openshift_docker_alternative_creds: "{{ openshift_use_crio_only | bool }}" diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml index dc0452553..5f715cd21 100644 --- a/roles/container_runtime/tasks/systemcontainer_docker.yml +++ b/roles/container_runtime/tasks/systemcontainer_docker.yml @@ -42,6 +42,12 @@ - debug: var: l_docker_image +# Do the authentication before pulling the container engine system container +# as the pull might be from an authenticated registry. +- include_tasks: registry_auth.yml + vars: + openshift_docker_alternative_creds: True + # NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released - name: Pre-pull Container Engine System Container image command: "atomic pull --storage ostree {{ l_docker_image }}" diff --git a/roles/contiv/README.md b/roles/contiv/README.md index fa36039d9..ce414f9fb 100644 --- a/roles/contiv/README.md +++ b/roles/contiv/README.md @@ -19,8 +19,8 @@ Install Contiv components (netmaster, netplugin, contiv_etcd) on Master and Mini * ``openshift_use_contiv=True`` * ``openshift_use_openshift_sdn=False`` * ``os_sdn_network_plugin_name='cni'`` -* ``netmaster_interface=eth0`` -* ``netplugin_interface=eth1`` +* ``contiv_netmaster_interface=eth0`` +* ``contiv_netplugin_interface=eth1`` * ref. Openshift docs Contiv section for more details ## Example bare metal deployment of Openshift + Contiv diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml index 0825af8a5..4869abc61 100644 --- a/roles/contiv/defaults/main.yml +++ b/roles/contiv/defaults/main.yml @@ -1,51 +1,63 @@ --- # The version of Contiv binaries to use -contiv_version: 1.1.1 +contiv_version: 1.2.0 # The version of cni binaries -cni_version: v0.4.0 +contiv_cni_version: v0.4.0 + +# If the node we are deploying to is to be a contiv master. 
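# (This boolean replaces the per-host "contiv_role: netmaster" selector that
# is removed from tasks/main.yml below. A hedged inventory sketch, with
# hypothetical host and group names, of how a deployer would flag masters:
#
#   [masters]
#   master1.example.com contiv_master=true
#
# Hosts left at the default of false skip netmaster.yml and run only the
# netplugin tasks.)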
+contiv_master: false contiv_default_subnet: "10.128.0.0/16" contiv_default_gw: "10.128.254.254" -# TCP port that Netmaster listens for network connections -netmaster_port: 9999 -# Default for contiv_role -contiv_role: netmaster +# Ports netmaster listens on +contiv_netmaster_port: 9999 +contiv_netmaster_port_proto: tcp +contiv_ofnet_master_port: 9001 +contiv_ofnet_master_port_proto: tcp +# Ports netplugin listens on +contiv_netplugin_port: 6640 +contiv_netplugin_port_proto: tcp +contiv_ofnet_vxlan_port: 9002 +contiv_ofnet_vxlan_port_proto: tcp +contiv_ovs_port: 9003 +contiv_ovs_port_proto: tcp -# TCP port that Netplugin listens for network connections -netplugin_port: 6640 -contiv_rpc_port1: 9001 -contiv_rpc_port2: 9002 -contiv_rpc_port3: 9003 +contiv_vxlan_port: 4789 +contiv_vxlan_port_proto: udp # Interface used by Netplugin for inter-host traffic when encap_mode is vlan. # The interface must support 802.1Q trunking. -netplugin_interface: "eno16780032" +contiv_netplugin_interface: "eno16780032" # IP address of the interface used for control communication within the cluster # It needs to be reachable from all nodes in the cluster. -netplugin_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + netplugin_interface].ipv4.address }}" +contiv_netplugin_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netplugin_interface].ipv4.address }}" # IP used to terminate vxlan tunnels -netplugin_vtep_ip: "{{ hostvars[inventory_hostname]['ansible_' + netplugin_interface].ipv4.address }}" +contiv_netplugin_vtep_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netplugin_interface].ipv4.address }}" # Interface used to bind Netmaster service -netmaster_interface: "{{ netplugin_interface }}" +contiv_netmaster_interface: "{{ contiv_netplugin_interface }}" + +# IP address of the interface used for control communication within the cluster +# It needs to be reachable from all nodes in the cluster. 
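# (With contiv_netmaster_interface=eth0, for example, the default below
# resolves to the gathered fact ansible_eth0.ipv4.address, so fact gathering
# must run before this role. A deployer can also pin it to a literal address,
# e.g. contiv_netmaster_ctrl_ip: 192.0.2.10, an illustrative value.)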
+contiv_netmaster_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address }}" # Path to the contiv binaries -bin_dir: /usr/bin +contiv_bin_dir: /usr/bin # Path to the contivk8s cni binary -cni_bin_dir: /opt/cni/bin +contiv_cni_bin_dir: /opt/cni/bin # Path to cni archive download directory -cni_download_dir: /tmp +contiv_cni_download_dir: /tmp # URL for cni binaries -cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/" -cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tbz2" +contiv_cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/" +contiv_cni_bin_url: "{{ contiv_cni_bin_url_base }}/{{ contiv_cni_version }}/cni-{{ contiv_cni_version }}.tbz2" # Contiv config directory @@ -60,11 +72,11 @@ contiv_download_url_base: "https://github.com/contiv/netplugin/releases/download contiv_download_url: "{{ contiv_download_url_base }}/{{ contiv_version }}/netplugin-{{ contiv_version }}.tar.bz2" # This is where kubelet looks for plugin files -kube_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/net/exec +contiv_kube_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/net/exec # Specifies routed mode vs bridged mode for networking (bridge | routing) # if you are using an external router for all routing, you should select bridge here -netplugin_fwd_mode: bridge +contiv_netplugin_fwd_mode: routing # Contiv fabric mode aci|default contiv_fabric_mode: default @@ -73,10 +85,10 @@ contiv_fabric_mode: default contiv_vlan_range: "2900-3000" # Encapsulation type vlan|vxlan to use for instantiating container networks -contiv_encap_mode: vlan +contiv_encap_mode: vxlan # Backend used by Netplugin for instantiating container networks -netplugin_driver: ovs +contiv_netplugin_driver: ovs # Create a default Contiv network for use by pods contiv_default_network: true @@ -85,38 +97,80 @@ contiv_default_network: true contiv_default_network_tag: "" #SRFIXME (use the openshift variables) -https_proxy: "" -http_proxy: "" -no_proxy: "" +contiv_https_proxy: "" +contiv_http_proxy: "" +contiv_no_proxy: "" # The following are aci specific parameters when contiv_fabric_mode: aci is set. # Otherwise, you can ignore these. 
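# An illustrative ACI override block; the contiv_apic_* names are the new
# ones introduced below, the values are placeholders only:
#
#   contiv_fabric_mode: aci
#   contiv_apic_url: "https://apic.example.com"
#   contiv_apic_username: "admin"
#   contiv_apic_leaf_nodes: "topology/pod-1/node-101"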
-apic_url: "" -apic_username: "" -apic_password: "" -apic_leaf_nodes: "" -apic_phys_dom: "" -apic_contracts_unrestricted_mode: no -apic_epg_bridge_domain: not_specified +contiv_apic_url: "" +contiv_apic_username: "" +contiv_apic_password: "" +contiv_apic_leaf_nodes: "" +contiv_apic_phys_dom: "" +contiv_apic_contracts_unrestricted_mode: no +contiv_apic_epg_bridge_domain: not_specified apic_configure_default_policy: false -apic_default_external_contract: "uni/tn-common/brc-default" -apic_default_app_profile: "contiv-infra-app-profile" -kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master" -master_name: "{{ groups['masters'][0] }}" -contiv_etcd_port: 22379 -etcd_url: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ contiv_etcd_port }}" -kube_ca_cert: "{{ kube_cert_dir }}/ca.crt" -kube_key: "{{ kube_cert_dir }}/admin.key" -kube_cert: "{{ kube_cert_dir }}/admin.crt" -kube_master_api_port: 8443 +contiv_apic_default_external_contract: "uni/tn-common/brc-default" +contiv_apic_default_app_profile: "contiv-infra-app-profile" +contiv_kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master" +contiv_kube_ca_cert: "{{ contiv_kube_cert_dir }}/ca.crt" +contiv_kube_key: "{{ contiv_kube_cert_dir }}/admin.key" +contiv_kube_cert: "{{ contiv_kube_cert_dir }}/admin.crt" +contiv_kube_master_api_port: 8443 +contiv_kube_master_api_port_proto: tcp # contivh1 default subnet and gateway -#contiv_h1_subnet_default: "132.1.1.0/24" -#contiv_h1_gw_default: "132.1.1.1" contiv_h1_subnet_default: "10.129.0.0/16" contiv_h1_gw_default: "10.129.0.1" # contiv default private subnet for ext access contiv_private_ext_subnet: "10.130.0.0/16" -openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" +contiv_openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" + +contiv_api_proxy_port: 10000 +contiv_api_proxy_port_proto: tcp +contiv_api_proxy_image_repo: contiv/auth_proxy +contiv_api_proxy_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address }}" + +contiv_etcd_system_user: contivetcd +contiv_etcd_system_uid: 823 +contiv_etcd_system_group: contivetcd +contiv_etcd_system_gid: 823 +contiv_etcd_port: 22379 +contiv_etcd_port_proto: tcp +contiv_etcd_peer_port: 22380 +contiv_etcd_peer_port_proto: tcp +contiv_etcd_url: "http://127.0.0.1:{{ contiv_etcd_port }}" +contiv_etcd_init_image_repo: ferest/etcd-initer +contiv_etcd_init_image_tag: latest +contiv_etcd_image_repo: quay.io/coreos/etcd +contiv_etcd_image_tag: v3.2.4 +contiv_etcd_conf_dir: /etc/contiv-etcd +contiv_etcd_data_dir: /var/lib/contiv-etcd +contiv_etcd_peers: |- + {% for host in groups.oo_masters_to_config -%} + {{ host }}=http://{{ hostvars[host]['ip'] | default(hostvars[host].ansible_default_ipv4['address']) }}:{{ contiv_etcd_peer_port }}{% if not loop.last %},{% endif %} + {%- endfor %} + +# List of port/protocol pairs to allow inbound access to on every host +# netplugin runs on, from all host IPs in the cluster. +contiv_netplugin_internal: [ "{{ contiv_ofnet_vxlan_port }}/{{ contiv_ofnet_vxlan_port_proto }}", + "{{ contiv_ovs_port }}/{{ contiv_ovs_port_proto }}", + "{{ contiv_vxlan_port }}/{{ contiv_vxlan_port_proto }}" ] +# Allow all forwarded traffic in and out of these interfaces. 
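# For reference: with the port defaults above, contiv_netplugin_internal
# renders to [ "9002/tcp", "9003/tcp", "4789/udp" ], and for two hypothetical
# masters at 10.0.0.1 and 10.0.0.2, contiv_etcd_peers renders to
# "master1=http://10.0.0.1:22380,master2=http://10.0.0.2:22380".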
+contiv_netplugin_forward_interfaces: [ contivh0, contivh1 ] + +# List of port/protocol pairs to allow inbound access to on every host +# netmaster runs on, from all host IPs in the cluster. Note that every host +# that runs netmaster also runs netplugin, so the above netplugin rules will +# apply as well. +contiv_netmaster_internal: [ "{{ contiv_ofnet_master_port }}/{{ contiv_ofnet_master_port_proto }}", + "{{ contiv_netmaster_port }}/{{ contiv_netmaster_port_proto }}", + "{{ contiv_etcd_port }}/{{ contiv_etcd_port_proto }}", + "{{ contiv_etcd_peer_port }}/{{ contiv_etcd_peer_port_proto }}", + "{{ contiv_kube_master_api_port }}/{{ contiv_kube_master_api_port_proto }}" ] +# List of port/protocol pairs to allow inbound access to on every host +# netmaster runs on, from any host anywhere. +contiv_netmaster_external: [ "{{ contiv_api_proxy_port }}/{{ contiv_api_proxy_port_proto }}" ] diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml index 67fb23db8..e8607cc90 100644 --- a/roles/contiv/meta/main.yml +++ b/roles/contiv/meta/main.yml @@ -15,17 +15,3 @@ galaxy_info: dependencies: - role: lib_utils - role: contiv_facts -- role: etcd - etcd_service: contiv-etcd - etcd_is_thirdparty: True - etcd_peer_port: 22380 - etcd_client_port: 22379 - etcd_conf_dir: /etc/contiv-etcd/ - etcd_data_dir: /var/lib/contiv-etcd/ - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_cert_config_dir: /etc/contiv-etcd/ - etcd_url_scheme: http - etcd_peer_url_scheme: http - when: contiv_role == "netmaster" -- role: contiv_auth_proxy - when: contiv_role == "netmaster" diff --git a/roles/contiv/tasks/aci.yml b/roles/contiv/tasks/aci.yml index 30d2eb339..8a56b3590 100644 --- a/roles/contiv/tasks/aci.yml +++ b/roles/contiv/tasks/aci.yml @@ -11,7 +11,7 @@ - name: ACI | Copy shell script used by aci-gw service template: src: aci_gw.j2 - dest: "{{ bin_dir }}/aci_gw.sh" + dest: "{{ contiv_bin_dir }}/aci_gw.sh" mode: u=rwx,g=rx,o=rx - name: ACI | Copy systemd units for aci-gw diff --git a/roles/contiv/tasks/api_proxy.yml b/roles/contiv/tasks/api_proxy.yml new file mode 100644 index 000000000..8b524dd6e --- /dev/null +++ b/roles/contiv/tasks/api_proxy.yml @@ -0,0 +1,120 @@ +--- +- name: API proxy | Create contiv-api-proxy openshift user + oc_serviceaccount: + state: present + name: contiv-api-proxy + namespace: kube-system + run_once: true + +- name: API proxy | Set contiv-api-proxy openshift user permissions + oc_adm_policy_user: + user: system:serviceaccount:kube-system:contiv-api-proxy + resource_kind: scc + resource_name: hostnetwork + state: present + run_once: true + +- name: API proxy | Create temp directory for doing work + command: mktemp -d /tmp/openshift-contiv-XXXXXX + register: mktemp + changed_when: False + # For things that pass temp files between steps, we want to make sure they + # run on the same node. 
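  # The same delegate_to + run_once pair recurs on every task below that
  # reads or writes "{{ mktemp.stdout }}", so the mktemp, the template
  # renders, and the oc_obj imports all land on the first master.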
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: API proxy | Check for existing api proxy secret volume + oc_obj: + namespace: kube-system + kind: secret + state: list + selector: "name=contiv-api-proxy-secret" + register: existing_secret_volume + run_once: true + +- name: API proxy | Generate a self signed certificate for api proxy + command: openssl req -new -nodes -x509 -subj "/C=US/ST=/L=/O=/CN=localhost" -days 3650 -keyout "{{ mktemp.stdout }}/key.pem" -out "{{ mktemp.stdout }}/cert.pem" -extensions v3_ca + when: (contiv_api_proxy_cert is not defined or contiv_api_proxy_key is not defined) + and not existing_secret_volume.results.results[0]['items'] + register: created_self_signed_cert + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: API proxy | Read self signed certificate file + command: cat "{{ mktemp.stdout }}/cert.pem" + register: generated_cert + when: created_self_signed_cert.changed + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: API proxy | Read self signed key file + command: cat "{{ mktemp.stdout }}/key.pem" + register: generated_key + when: created_self_signed_cert.changed + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: API proxy | Create api-proxy-secrets.yml from template using generated cert + template: + src: api-proxy-secrets.yml.j2 + dest: "{{ mktemp.stdout }}/api-proxy-secrets.yml" + vars: + key: "{{ generated_key.stdout }}" + cert: "{{ generated_cert.stdout }}" + when: created_self_signed_cert.changed + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: API proxy | Create api-proxy-secrets.yml from template using user defined cert + template: + src: api-proxy-secrets.yml.j2 + dest: "{{ mktemp.stdout }}/api-proxy-secrets.yml" + vars: + key: "{{ lookup('file', contiv_api_proxy_key) }}" + cert: "{{ lookup('file', contiv_api_proxy_cert) }}" + when: contiv_api_proxy_cert is defined and contiv_api_proxy_key is defined + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: API proxy | Create secret certificate volume + oc_obj: + state: present + namespace: "kube-system" + kind: secret + name: contiv-api-proxy-secret + files: + - "{{ mktemp.stdout }}/api-proxy-secrets.yml" + when: (contiv_api_proxy_cert is defined and contiv_api_proxy_key is defined) + or created_self_signed_cert.changed + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: API proxy | Create api-proxy-daemonset.yml from template + template: + src: api-proxy-daemonset.yml.j2 + dest: "{{ mktemp.stdout }}/api-proxy-daemonset.yml" + vars: + etcd_host: "etcd://{{ groups.oo_etcd_to_config.0 }}:{{ contiv_etcd_port }}" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +# Always "import" this file, k8s won't do anything if it matches exactly what +# is already in the cluster. 
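# This is roughly the declarative equivalent of
# "oc apply -f api-proxy-daemonset.yml -n kube-system": re-running the play
# against an unchanged daemonset is a no-op.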
+- name: API proxy | Add API proxy daemonset + oc_obj: + state: present + namespace: "kube-system" + kind: daemonset + name: contiv-api-proxy + files: + - "{{ mktemp.stdout }}/api-proxy-daemonset.yml" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: API proxy | Delete temp directory + file: + name: "{{ mktemp.stdout }}" + state: absent + changed_when: False + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml index 8a928ea54..e9763d34a 100644 --- a/roles/contiv/tasks/default_network.yml +++ b/roles/contiv/tasks/default_network.yml @@ -1,71 +1,71 @@ --- -- name: Contiv | Wait for netmaster - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" tenant ls' +- name: Default network | Wait for netmaster + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" tenant ls' register: tenant_result until: tenant_result.stdout.find("default") != -1 retries: 9 delay: 10 -- name: Contiv | Set globals - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}' +- name: Default network | Set globals + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ contiv_netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}' run_once: true -- name: Contiv | Set arp mode to flood if ACI - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --arp-mode flood' +- name: Default network | Set arp mode to flood if ACI + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" global set --arp-mode flood' when: contiv_fabric_mode == "aci" run_once: true -- name: Contiv | Check if default-net exists - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls' +- name: Default network | Check if default-net exists + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net ls' register: net_result run_once: true -- name: Contiv | Create default-net - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net' +- name: Default network | Create default-net + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net' when: net_result.stdout.find("default-net") == -1 run_once: true -- name: Contiv | Create host access infra network for VxLan routing case - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1' - when: (contiv_encap_mode == "vxlan") and (netplugin_fwd_mode == "routing") +- name: Default network | Create host access infra network for VxLan routing case + command: 'netctl --netmaster "http://{{ 
inventory_hostname }}:{{ contiv_netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1' + when: (contiv_encap_mode == "vxlan") and (contiv_netplugin_fwd_mode == "routing") run_once: true -#- name: Contiv | Create an allow-all policy for the default-group -# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy' +#- name: Default network | Create an allow-all policy for the default-group +# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy create ose-allow-all-policy' # when: contiv_fabric_mode == "aci" # run_once: true -- name: Contiv | Set up aci external contract to consume default external contract - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume' +- name: Default network | Set up aci external contract to consume default external contract + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" external-contracts create -c -a {{ contiv_apic_default_external_contract }} oseExtToConsume' when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true) run_once: true -- name: Contiv | Set up aci external contract to provide default external contract - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide' +- name: Default network | Set up aci external contract to provide default external contract + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" external-contracts create -p -a {{ contiv_apic_default_external_contract }} oseExtToProvide' when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true) run_once: true -- name: Contiv | Create aci default-group - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group' +- name: Default network | Create aci default-group + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" group create default-net default-group' when: contiv_fabric_mode == "aci" run_once: true -- name: Contiv | Add external contracts to the default-group - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group' +- name: Default network | Add external contracts to the default-group + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group' when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true) run_once: true -#- name: Contiv | Add policy rule 1 for allow-all policy -# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1' +#- name: Default network | Add policy rule 1 for allow-all policy +# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1' # when: contiv_fabric_mode == "aci" # run_once: true -#- name: Contiv | Add policy rule 2 for allow-all policy -# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ 
netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2' +#- name: Default network | Add policy rule 2 for allow-all policy +# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2' # when: contiv_fabric_mode == "aci" # run_once: true -- name: Contiv | Create default aci app profile - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}' +- name: Default network | Create default aci app profile + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" app-profile create -g default-group {{ contiv_apic_default_app_profile }}' when: contiv_fabric_mode == "aci" run_once: true diff --git a/roles/contiv/tasks/download_bins.yml b/roles/contiv/tasks/download_bins.yml index 831fd360a..47d74da9c 100644 --- a/roles/contiv/tasks/download_bins.yml +++ b/roles/contiv/tasks/download_bins.yml @@ -4,7 +4,7 @@ path: "{{ contiv_current_release_directory }}" state: directory -- name: Install bzip2 +- name: Download Bins | Install bzip2 yum: name: bzip2 state: installed @@ -18,9 +18,9 @@ mode: 0755 validate_certs: False environment: - http_proxy: "{{ http_proxy|default('') }}" - https_proxy: "{{ https_proxy|default('') }}" - no_proxy: "{{ no_proxy|default('') }}" + http_proxy: "{{ contiv_http_proxy|default('') }}" + https_proxy: "{{ contiv_https_proxy|default('') }}" + no_proxy: "{{ contiv_no_proxy|default('') }}" - name: Download Bins | Extract Contiv tar file unarchive: @@ -30,19 +30,19 @@ - name: Download Bins | Download cni tar file get_url: - url: "{{ cni_bin_url }}" - dest: "{{ cni_download_dir }}" + url: "{{ contiv_cni_bin_url }}" + dest: "{{ contiv_cni_download_dir }}" mode: 0755 validate_certs: False environment: - http_proxy: "{{ http_proxy|default('') }}" - https_proxy: "{{ https_proxy|default('') }}" - no_proxy: "{{ no_proxy|default('') }}" + http_proxy: "{{ contiv_http_proxy|default('') }}" + https_proxy: "{{ contiv_https_proxy|default('') }}" + no_proxy: "{{ contiv_no_proxy|default('') }}" register: download_file - name: Download Bins | Extract cni tar file unarchive: src: "{{ download_file.dest }}" - dest: "{{ cni_download_dir }}" + dest: "{{ contiv_cni_download_dir }}" copy: no when: download_file.changed diff --git a/roles/contiv/tasks/etcd.yml b/roles/contiv/tasks/etcd.yml new file mode 100644 index 000000000..b08ead982 --- /dev/null +++ b/roles/contiv/tasks/etcd.yml @@ -0,0 +1,114 @@ +--- +# To run contiv-etcd in a container as non-root, we need to match the uid/gid +# with the filesystem permissions on the host. 
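# The fixed uid/gid (823 by default) matches the runAsUser/fsGroup values set
# in the daemonset and SCC templates below, so the container can write the
# host directories created here without running as root.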
+- name: Contiv etcd | Create local unix group + group: + name: "{{ contiv_etcd_system_group }}" + gid: "{{ contiv_etcd_system_gid }}" + system: yes + +- name: Contiv etcd | Create local unix user + user: + name: "{{ contiv_etcd_system_user }}" + createhome: no + uid: "{{ contiv_etcd_system_uid }}" + group: "{{ contiv_etcd_system_group }}" + home: "{{ contiv_etcd_data_dir }}" + shell: /bin/false + system: yes + +- name: Contiv etcd | Create directories + file: + path: "{{ item }}" + state: directory + mode: g-rwx,o-rwx + owner: "{{ contiv_etcd_system_user }}" + group: "{{ contiv_etcd_system_group }}" + setype: svirt_sandbox_file_t + seuser: system_u + serole: object_r + selevel: s0 + recurse: yes + with_items: + - "{{ contiv_etcd_data_dir }}" + - "{{ contiv_etcd_conf_dir }}" + +- name: Contiv etcd | Create contiv-etcd openshift user + oc_serviceaccount: + state: present + name: contiv-etcd + namespace: kube-system + run_once: true + +- name: Contiv etcd | Create temp directory for doing work + command: mktemp -d /tmp/openshift-contiv-XXXXXX + register: mktemp + changed_when: False + # For things that pass temp files between steps, we want to make sure they + # run on the same node. + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: Contiv etcd | Create etcd-scc.yml from template + template: + src: etcd-scc.yml.j2 + dest: "{{ mktemp.stdout }}/etcd-scc.yml" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: Contiv etcd | Create etcd.yml from template + template: + src: etcd-daemonset.yml.j2 + dest: "{{ mktemp.stdout }}/etcd-daemonset.yml" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: Contiv etcd | Create etcd-proxy.yml from template + template: + src: etcd-proxy-daemonset.yml.j2 + dest: "{{ mktemp.stdout }}/etcd-proxy-daemonset.yml" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: Contiv etcd | Add etcd scc + oc_obj: + state: present + namespace: "kube-system" + kind: SecurityContextConstraints + name: contiv-etcd + files: + - "{{ mktemp.stdout }}/etcd-scc.yml" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +# Always "import" this file, k8s won't do anything if it matches exactly what +# is already in the cluster. 
+- name: Contiv etcd | Add etcd daemonset + oc_obj: + state: present + namespace: "kube-system" + kind: daemonset + name: contiv-etcd + files: + - "{{ mktemp.stdout }}/etcd-daemonset.yml" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: Contiv etcd | Add etcd-proxy daemonset + oc_obj: + state: present + namespace: "kube-system" + kind: daemonset + name: contiv-etcd-proxy + files: + - "{{ mktemp.stdout }}/etcd-proxy-daemonset.yml" + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true + +- name: Contiv etcd | Delete temp directory + file: + name: "{{ mktemp.stdout }}" + state: absent + changed_when: False + delegate_to: "{{ groups.oo_masters_to_config.0 }}" + run_once: true diff --git a/roles/contiv/tasks/main.yml b/roles/contiv/tasks/main.yml index cb9196a71..4d530ae90 100644 --- a/roles/contiv/tasks/main.yml +++ b/roles/contiv/tasks/main.yml @@ -1,14 +1,15 @@ --- -- name: Ensure bin_dir exists +- include_tasks: old_version_cleanup.yml + +- name: Ensure contiv_bin_dir exists file: - path: "{{ bin_dir }}" + path: "{{ contiv_bin_dir }}" recurse: yes state: directory - include_tasks: download_bins.yml - include_tasks: netmaster.yml - when: contiv_role == "netmaster" + when: contiv_master - include_tasks: netplugin.yml - when: contiv_role == "netplugin" diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml index 6f15af8c2..bb22fb801 100644 --- a/roles/contiv/tasks/netmaster.yml +++ b/roles/contiv/tasks/netmaster.yml @@ -1,34 +1,16 @@ --- - include_tasks: netmaster_firewalld.yml - when: has_firewalld + when: contiv_has_firewalld - include_tasks: netmaster_iptables.yml - when: not has_firewalld and has_iptables + when: not contiv_has_firewalld and contiv_has_iptables -- name: Netmaster | Check is /etc/hosts file exists - stat: - path: /etc/hosts - register: hosts - -- name: Netmaster | Create hosts file if it is not present - file: - path: /etc/hosts - state: touch - when: not hosts.stat.exists - -- name: Netmaster | Build hosts file - lineinfile: - dest: /etc/hosts - regexp: .*netmaster$ - line: "{{ hostvars[item]['ansible_' + netmaster_interface].ipv4.address }} netmaster" - state: present - when: hostvars[item]['ansible_' + netmaster_interface].ipv4.address is defined - with_items: "{{ groups['masters'] }}" +- include_tasks: etcd.yml - name: Netmaster | Create netmaster symlinks file: src: "{{ contiv_current_release_directory }}/{{ item }}" - dest: "{{ bin_dir }}/{{ item }}" + dest: "{{ contiv_bin_dir }}/{{ item }}" state: link with_items: - netmaster @@ -36,7 +18,7 @@ - name: Netmaster | Copy environment file for netmaster template: - src: netmaster.env.j2 + src: netmaster.j2 dest: /etc/default/netmaster mode: 0644 notify: restart netmaster @@ -75,3 +57,5 @@ - include_tasks: default_network.yml when: contiv_default_network == true + +- include_tasks: api_proxy.yml diff --git a/roles/contiv/tasks/netmaster_firewalld.yml b/roles/contiv/tasks/netmaster_firewalld.yml index 2975351ac..0d52f821d 100644 --- a/roles/contiv/tasks/netmaster_firewalld.yml +++ b/roles/contiv/tasks/netmaster_firewalld.yml @@ -1,16 +1,17 @@ --- -- name: Netmaster Firewalld | Open Netmaster port +- name: Netmaster Firewalld | Add internal rules firewalld: - port: "{{ netmaster_port }}/tcp" - permanent: false - state: enabled - # in case this is also a node where firewalld turned off - ignore_errors: yes + immediate: true + permanent: true + port: "{{ item[0] }}" + source: "{{ item[1] }}" + with_nested: + - "{{ contiv_netmaster_internal }}" + - "{{ 
groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}" -- name: Netmaster Firewalld | Save Netmaster port +- name: Netmaster Firewalld | Add external rules firewalld: - port: "{{ netmaster_port }}/tcp" + immediate: true permanent: true - state: enabled - # in case this is also a node where firewalld turned off - ignore_errors: yes + port: "{{ item }}" + with_items: "{{ contiv_netmaster_external }}" diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml index c98e7b6a5..3b68ea0c3 100644 --- a/roles/contiv/tasks/netmaster_iptables.yml +++ b/roles/contiv/tasks/netmaster_iptables.yml @@ -1,27 +1,32 @@ --- -- name: Netmaster IPtables | Get iptables rules - command: iptables -L --wait - register: iptablesrules - check_mode: no - -- name: Netmaster IPtables | Enable iptables at boot - service: - name: iptables - enabled: yes - state: started - -- name: Netmaster IPtables | Open Netmaster with iptables - command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv" - with_items: - - "{{ contiv_rpc_port1 }}" - - "{{ contiv_rpc_port2 }}" - - "{{ contiv_rpc_port3 }}" - when: iptablesrules.stdout.find("contiv") == -1 +- name: Netmaster IPtables | Add internal rules + iptables: + action: insert + chain: INPUT + # Parsed from the contiv_netmaster_internal list, this will be tcp or udp. + protocol: "{{ item[0].split('/')[1] }}" + match: "{{ item[0].split('/')[1] }}" + # Parsed from the contiv_netmaster_internal list, this will be a port number. + destination_port: "{{ item[0].split('/')[0] }}" + # This is an IP address from a node in the cluster. + source: "{{ item[1] }}" + jump: ACCEPT + comment: contiv + with_nested: + - "{{ contiv_netmaster_internal }}" + - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}" notify: Save iptables rules -- name: Netmaster IPtables | Open netmaster main port - command: /sbin/iptables -I INPUT 1 -p tcp -s {{ item }} --dport {{ netmaster_port }} -j ACCEPT -m comment --comment "contiv" - with_items: - - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + netmaster_interface].ipv4.address)|list }}" - when: iptablesrules.stdout.find("contiv") == -1 +- name: Netmaster IPtables | Add external rules + iptables: + action: insert + chain: INPUT + # Parsed from the contiv_netmaster_external list, this will be tcp or udp. + protocol: "{{ item.split('/')[1] }}" + match: "{{ item.split('/')[1] }}" + # Parsed from the contiv_netmaster_external list, this will be a port number. 
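  # For example, the default external entry "10000/tcp" splits into
  # protocol/match "tcp" above and destination_port "10000" below.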
+ destination_port: "{{ item.split('/')[0] }}" + jump: ACCEPT + comment: contiv + with_items: "{{ contiv_netmaster_external }}" notify: Save iptables rules diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml index 540f6e4bc..60f432202 100644 --- a/roles/contiv/tasks/netplugin.yml +++ b/roles/contiv/tasks/netplugin.yml @@ -1,9 +1,9 @@ --- - include_tasks: netplugin_firewalld.yml - when: has_firewalld + when: contiv_has_firewalld - include_tasks: netplugin_iptables.yml - when: has_iptables + when: not contiv_has_firewalld and contiv_has_iptables - name: Netplugin | Ensure localhost entry correct in /etc/hosts lineinfile: @@ -20,41 +20,40 @@ state: absent - include_tasks: ovs.yml - when: netplugin_driver == "ovs" + when: contiv_netplugin_driver == "ovs" - name: Netplugin | Create Netplugin bin symlink file: src: "{{ contiv_current_release_directory }}/netplugin" - dest: "{{ bin_dir }}/netplugin" + dest: "{{ contiv_bin_dir }}/netplugin" state: link - -- name: Netplugin | Ensure cni_bin_dir exists +- name: Netplugin | Ensure contiv_cni_bin_dir exists file: - path: "{{ cni_bin_dir }}" + path: "{{ contiv_cni_bin_dir }}" recurse: yes state: directory - name: Netplugin | Create CNI bin symlink file: src: "{{ contiv_current_release_directory }}/contivk8s" - dest: "{{ cni_bin_dir }}/contivk8s" + dest: "{{ contiv_cni_bin_dir }}/contivk8s" state: link - name: Netplugin | Copy CNI loopback bin copy: - src: "{{ cni_download_dir }}/loopback" - dest: "{{ cni_bin_dir }}/loopback" + src: "{{ contiv_cni_download_dir }}/loopback" + dest: "{{ contiv_cni_bin_dir }}/loopback" remote_src: True mode: 0755 -- name: Netplugin | Ensure kube_plugin_dir and cni/net.d directories exist +- name: Netplugin | Ensure contiv_kube_plugin_dir and cni/net.d directories exist file: path: "{{ item }}" recurse: yes state: directory with_items: - - "{{ kube_plugin_dir }}" + - "{{ contiv_kube_plugin_dir }}" - "/etc/cni/net.d" - name: Netplugin | Ensure contiv_config_dir exists @@ -68,7 +67,7 @@ src: contiv_cni.conf dest: "{{ item }}" with_items: - - "{{ kube_plugin_dir }}/contiv_cni.conf" + - "{{ contiv_kube_plugin_dir }}/contiv_cni.conf" - "/etc/cni/net.d" # notify: restart kubelet @@ -85,11 +84,11 @@ mode: 0644 notify: restart netplugin -- name: Docker | Make sure proxy setting exists +- name: Netplugin | Make sure docker proxy setting exists lineinfile: dest: /etc/sysconfig/docker-network regexp: '^https_proxy.*' - line: 'https_proxy={{ https_proxy }}' + line: 'https_proxy={{ contiv_https_proxy }}' state: present register: docker_updated @@ -103,9 +102,9 @@ command: systemctl daemon-reload when: docker_updated is changed -- name: Docker | Restart docker +- name: Netplugin | Restart docker service: - name: "{{ openshift_docker_service_name }}" + name: "{{ contiv_openshift_docker_service_name }}" state: restarted when: docker_updated is changed register: l_docker_restart_docker_in_contiv_result diff --git a/roles/contiv/tasks/netplugin_firewalld.yml b/roles/contiv/tasks/netplugin_firewalld.yml index 3aeffae56..5ac531ec6 100644 --- a/roles/contiv/tasks/netplugin_firewalld.yml +++ b/roles/contiv/tasks/netplugin_firewalld.yml @@ -1,34 +1,17 @@ --- -- name: Netplugin Firewalld | Open Netplugin port +- name: Netplugin Firewalld | Add internal rules firewalld: - port: "{{ netplugin_port }}/tcp" - permanent: false - state: enabled - # in case this is also a node where firewalld turned off - ignore_errors: yes - -- name: Netplugin Firewalld | Save Netplugin port - firewalld: - port: "{{ netplugin_port }}/tcp" + 
immediate: true permanent: true - state: enabled - # in case this is also a node where firewalld turned off - ignore_errors: yes - -- name: Netplugin Firewalld | Open vxlan port - firewalld: - port: "8472/udp" - permanent: false - state: enabled - # in case this is also a node where firewalld turned off - ignore_errors: yes - when: contiv_encap_mode == "vxlan" + port: "{{ item[0] }}" + source: "{{ item[1] }}" + with_nested: + - "{{ contiv_netplugin_internal }}" + - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}" -- name: Netplugin Firewalld | Save firewalld vxlan port for flanneld +- name: Netplugin Firewalld | Add dns rule firewalld: - port: "8472/udp" + immediate: true permanent: true - state: enabled - # in case this is also a node where firewalld turned off - ignore_errors: yes - when: contiv_encap_mode == "vxlan" + port: "53/udp" + interface: contivh0 diff --git a/roles/contiv/tasks/netplugin_iptables.yml b/roles/contiv/tasks/netplugin_iptables.yml index 3ea34645d..9d376f4e5 100644 --- a/roles/contiv/tasks/netplugin_iptables.yml +++ b/roles/contiv/tasks/netplugin_iptables.yml @@ -1,58 +1,52 @@ --- -- name: Netplugin IPtables | Get iptables rules - command: iptables -L --wait - register: iptablesrules - check_mode: no +- name: Netplugin IPtables | Add internal rules + iptables: + action: insert + chain: INPUT + protocol: "{{ item[0].split('/')[1] }}" + match: "{{ item[0].split('/')[1] }}" + destination_port: "{{ item[0].split('/')[0] }}" + source: "{{ item[1] }}" + jump: ACCEPT + comment: contiv + with_nested: + - "{{ contiv_netplugin_internal }}" + - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}" + notify: Save iptables rules + +- name: Netplugin IPtables | Add [in] forward rules + iptables: + action: insert + chain: FORWARD + in_interface: "{{ item }}" + jump: ACCEPT + comment: contiv + with_items: "{{ contiv_netplugin_forward_interfaces }}" + notify: Save iptables rules + +- name: Netplugin IPtables | Add [out] forward rules + iptables: + action: insert + chain: FORWARD + out_interface: "{{ item }}" + jump: ACCEPT + comment: contiv + with_items: "{{ contiv_netplugin_forward_interfaces }}" + notify: Save iptables rules + +- name: Netplugin IPtables | Add dns rule + iptables: + action: insert + chain: INPUT + protocol: udp + match: udp + destination_port: 53 + in_interface: contivh0 + jump: ACCEPT + comment: contiv + notify: Save iptables rules - name: Netplugin IPtables | Enable iptables at boot service: name: iptables enabled: yes - state: started - -- name: Netplugin IPtables | Open Netmaster with iptables - command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv" - with_items: - - "{{ netmaster_port }}" - - "{{ contiv_rpc_port1 }}" - - "{{ contiv_rpc_port2 }}" - - "{{ contiv_rpc_port3 }}" - - "{{ contiv_etcd_port }}" - - "{{ kube_master_api_port }}" - when: iptablesrules.stdout.find("contiv") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Open vxlan port with iptables - command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "netplugin vxlan 8472" - when: iptablesrules.stdout.find("netplugin vxlan 8472") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Open vxlan port with iptables - command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "netplugin vxlan 4789" - when: 
iptablesrules.stdout.find("netplugin vxlan 4789") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow from contivh0 - command: /sbin/iptables -I FORWARD 1 -i contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD input" - when: iptablesrules.stdout.find("contivh0 FORWARD input") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow to contivh0 - command: /sbin/iptables -I FORWARD 1 -o contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD output" - when: iptablesrules.stdout.find("contivh0 FORWARD output") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow from contivh1 - command: /sbin/iptables -I FORWARD 1 -i contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD input" - when: iptablesrules.stdout.find("contivh1 FORWARD input") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow to contivh1 - command: /sbin/iptables -I FORWARD 1 -o contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD output" - when: iptablesrules.stdout.find("contivh1 FORWARD output") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow dns - command: /sbin/iptables -I INPUT 1 -p udp --dport 53 -j ACCEPT -m comment --comment "contiv dns" - when: iptablesrules.stdout.find("contiv dns") == -1 - notify: Save iptables rules diff --git a/roles/contiv/tasks/old_version_cleanup.yml b/roles/contiv/tasks/old_version_cleanup.yml new file mode 100644 index 000000000..8b3d88096 --- /dev/null +++ b/roles/contiv/tasks/old_version_cleanup.yml @@ -0,0 +1,43 @@ +--- +- name: Old version cleanup | Check if old auth proxy service exists + stat: + path: /etc/systemd/system/auth-proxy.service + register: auth_proxy_stat + +- name: Old version cleanup | Stop old auth proxy + service: + name: auth-proxy + enabled: no + state: stopped + when: auth_proxy_stat.stat.exists + +# Note(NB): The new containerized contiv-etcd service uses the same data +# directory on the host, so etcd data is not lost. 
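# (contiv_etcd_data_dir defaults to /var/lib/contiv-etcd, the same path the
# old systemd-managed etcd used, which is why stopping the old unit here
# loses no data.)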
+- name: Old version cleanup | Check if old contiv-etcd service exists + stat: + path: /etc/systemd/system/contiv-etcd.service + register: contiv_etcd_stat + +- name: Old version cleanup | Stop old contiv-etcd + service: + name: contiv-etcd + enabled: no + state: stopped + when: contiv_etcd_stat.stat.exists + +- name: Old version cleanup | Delete old files + file: + state: absent + path: "{{ item }}" + with_items: + - /etc/systemd/system/auth-proxy.service + - /var/contiv/certs + - /usr/bin/auth_proxy.sh + - /etc/systemd/system/contiv-etcd.service + - /etc/systemd/system/contiv-etcd.service.d + +- include_tasks: old_version_cleanup_iptables.yml + when: not contiv_has_firewalld and contiv_has_iptables + +- include_tasks: old_version_cleanup_firewalld.yml + when: contiv_has_firewalld diff --git a/roles/contiv/tasks/old_version_cleanup_firewalld.yml b/roles/contiv/tasks/old_version_cleanup_firewalld.yml new file mode 100644 index 000000000..675a6358a --- /dev/null +++ b/roles/contiv/tasks/old_version_cleanup_firewalld.yml @@ -0,0 +1,11 @@ +--- +- name: Old version cleanup | Delete old firewalld rules + firewalld: + state: absent + immediate: true + permanent: true + port: "{{ item }}" + with_items: + - "9999/tcp" + - "6640/tcp" + - "8472/udp" diff --git a/roles/contiv/tasks/old_version_cleanup_iptables.yml b/roles/contiv/tasks/old_version_cleanup_iptables.yml new file mode 100644 index 000000000..513357606 --- /dev/null +++ b/roles/contiv/tasks/old_version_cleanup_iptables.yml @@ -0,0 +1,44 @@ +--- +- name: Old version cleanup | Delete old forward [in] iptables rules + iptables: + state: absent + chain: FORWARD + in_interface: "{{ item }}" + jump: ACCEPT + comment: "{{ item }} FORWARD input" + with_items: + - contivh0 + - contivh1 + notify: Save iptables rules + +- name: Old version cleanup | Delete old forward [out] iptables rules + iptables: + state: absent + chain: FORWARD + out_interface: "{{ item }}" + jump: ACCEPT + comment: "{{ item }} FORWARD output" + with_items: + - contivh0 + - contivh1 + notify: Save iptables rules + +- name: Old version cleanup | Delete old input iptables rules + iptables: + state: absent + chain: INPUT + protocol: "{{ item.split('/')[1] }}" + match: "{{ item.split('/')[1] }}" + destination_port: "{{ item.split('/')[0] }}" + comment: "{{ item.split('/')[2] }}" + jump: ACCEPT + with_items: + - "53/udp/contiv dns" + - "4789/udp/netplugin vxlan 4789" + - "8472/udp/netplugin vxlan 8472" + - "9003/tcp/contiv" + - "9002/tcp/contiv" + - "9001/tcp/contiv" + - "9999/tcp/contiv" + - "10000/tcp/Contiv auth proxy service (10000)" + notify: Save iptables rules diff --git a/roles/contiv/tasks/ovs.yml b/roles/contiv/tasks/ovs.yml index 5c92e90e9..21ba6ead4 100644 --- a/roles/contiv/tasks/ovs.yml +++ b/roles/contiv/tasks/ovs.yml @@ -1,6 +1,6 @@ --- - include_tasks: packageManagerInstall.yml - when: source_type == "packageManager" + when: contiv_source_type == "packageManager" tags: - binary-update diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml index 3367844a8..8c8e7a7bd 100644 --- a/roles/contiv/tasks/packageManagerInstall.yml +++ b/roles/contiv/tasks/packageManagerInstall.yml @@ -4,10 +4,9 @@ did_install: false - include_tasks: pkgMgrInstallers/centos-install.yml - when: (ansible_os_family == "RedHat") and - not openshift_is_atomic + when: ansible_os_family == "RedHat" and not openshift_is_atomic | bool - name: Package Manager | Set fact saying we did CentOS package install set_fact: did_install: true - when: 
(ansible_os_family == "RedHat") + when: ansible_os_family == "RedHat" diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml index 53c5b4099..2c82973d6 100644 --- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml +++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml @@ -12,9 +12,9 @@ dest: /tmp/rdo-release-ocata-2.noarch.rpm validate_certs: False environment: - http_proxy: "{{ http_proxy|default('') }}" - https_proxy: "{{ https_proxy|default('') }}" - no_proxy: "{{ no_proxy|default('') }}" + http_proxy: "{{ contiv_http_proxy|default('') }}" + https_proxy: "{{ contiv_https_proxy|default('') }}" + no_proxy: "{{ contiv_no_proxy|default('') }}" tags: - ovs_install @@ -30,9 +30,9 @@ pkg=openvswitch state=present environment: - http_proxy: "{{ http_proxy|default('') }}" - https_proxy: "{{ https_proxy|default('') }}" - no_proxy: "{{ no_proxy|default('') }}" + http_proxy: "{{ contiv_http_proxy|default('') }}" + https_proxy: "{{ contiv_https_proxy|default('') }}" + no_proxy: "{{ contiv_no_proxy|default('') }}" tags: - ovs_install register: result diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service index 9b3f12567..e2813c99d 100644 --- a/roles/contiv/templates/aci-gw.service +++ b/roles/contiv/templates/aci-gw.service @@ -1,10 +1,10 @@ [Unit] Description=Contiv ACI gw -After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift_docker_service_name }}.service +After=auditd.service systemd-user-sessions.service time-sync.target {{ contiv_openshift_docker_service_name }}.service [Service] -ExecStart={{ bin_dir }}/aci_gw.sh start -ExecStop={{ bin_dir }}/aci_gw.sh stop +ExecStart={{ contiv_bin_dir }}/aci_gw.sh start +ExecStop={{ contiv_bin_dir }}/aci_gw.sh stop KillMode=control-group Restart=always RestartSec=10 diff --git a/roles/contiv/templates/aci_gw.j2 b/roles/contiv/templates/aci_gw.j2 index ab4ad46a6..5ff349945 100644 --- a/roles/contiv/templates/aci_gw.j2 +++ b/roles/contiv/templates/aci_gw.j2 @@ -11,13 +11,13 @@ start) set -e docker run --net=host \ - -e "APIC_URL={{ apic_url }}" \ - -e "APIC_USERNAME={{ apic_username }}" \ - -e "APIC_PASSWORD={{ apic_password }}" \ - -e "APIC_LEAF_NODE={{ apic_leaf_nodes }}" \ - -e "APIC_PHYS_DOMAIN={{ apic_phys_dom }}" \ - -e "APIC_EPG_BRIDGE_DOMAIN={{ apic_epg_bridge_domain }}" \ - -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ apic_contracts_unrestricted_mode }}" \ + -e "APIC_URL={{ contiv_apic_url }}" \ + -e "APIC_USERNAME={{ contiv_apic_username }}" \ + -e "APIC_PASSWORD={{ contiv_apic_password }}" \ + -e "APIC_LEAF_NODE={{ contiv_apic_leaf_nodes }}" \ + -e "APIC_PHYS_DOMAIN={{ contiv_apic_phys_dom }}" \ + -e "APIC_EPG_BRIDGE_DOMAIN={{ contiv_apic_epg_bridge_domain }}" \ + -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ contiv_apic_contracts_unrestricted_mode }}" \ --name=contiv-aci-gw \ contiv/aci-gw ;; diff --git a/roles/contiv/templates/api-proxy-daemonset.yml.j2 b/roles/contiv/templates/api-proxy-daemonset.yml.j2 new file mode 100644 index 000000000..a15073580 --- /dev/null +++ b/roles/contiv/templates/api-proxy-daemonset.yml.j2 @@ -0,0 +1,57 @@ +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: contiv-api-proxy + namespace: kube-system +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + name: contiv-api-proxy + template: + metadata: + namespace: kube-system + labels: + name: contiv-api-proxy + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + spec: + 
serviceAccountName: contiv-api-proxy + hostNetwork: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: +{% for node in groups.oo_masters_to_config %} + - "{{ node }}" +{% endfor %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: contiv-api-proxy + image: "{{ contiv_api_proxy_image_repo }}:{{ contiv_version }}" + args: + - "--listen-address=0.0.0.0:{{ contiv_api_proxy_port }}" + - --tls-key-file=/var/contiv/api_proxy_key.pem + - --tls-certificate=/var/contiv/api_proxy_cert.pem + - "--data-store-address={{ etcd_host }}" + - --data-store-driver=etcd + - "--netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}" + ports: + - containerPort: "{{ contiv_api_proxy_port }}" + hostPort: "{{ contiv_api_proxy_port }}" + volumeMounts: + - name: secret-volume + mountPath: /var/contiv + readOnly: true + volumes: + - name: secret-volume + secret: + secretName: contiv-api-proxy-secret diff --git a/roles/contiv/templates/api-proxy-secrets.yml.j2 b/roles/contiv/templates/api-proxy-secrets.yml.j2 new file mode 100644 index 000000000..cd800c97d --- /dev/null +++ b/roles/contiv/templates/api-proxy-secrets.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: contiv-api-proxy-secret + namespace: kube-system + labels: + name: contiv-api-proxy-secret +# Use data+b64encode, because stringData doesn't preserve newlines. +data: + api_proxy_key.pem: "{{ key | b64encode }}" + api_proxy_cert.pem: "{{ cert | b64encode }}" diff --git a/roles/contiv/templates/contiv.cfg.j2 b/roles/contiv/templates/contiv.cfg.j2 index f0e99c556..1dce9fcc2 100644 --- a/roles/contiv/templates/contiv.cfg.j2 +++ b/roles/contiv/templates/contiv.cfg.j2 @@ -1,5 +1,5 @@ { - "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}", + "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + contiv_netmaster_interface].ipv4.address }}:{{ contiv_kube_master_api_port }}", "K8S_CA": "{{ openshift.common.config_base }}/node/ca.crt", "K8S_KEY": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.key", "K8S_CERT": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.crt", diff --git a/roles/contiv/templates/contiv.cfg.master.j2 b/roles/contiv/templates/contiv.cfg.master.j2 index fac8e3c4c..ca29b8001 100644 --- a/roles/contiv/templates/contiv.cfg.master.j2 +++ b/roles/contiv/templates/contiv.cfg.master.j2 @@ -1,5 +1,5 @@ { - "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}", + "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + contiv_netmaster_interface].ipv4.address }}:{{ contiv_kube_master_api_port }}", "K8S_CA": "{{ openshift.common.config_base }}/master/ca.crt", "K8S_KEY": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.key", "K8S_CERT": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.crt", diff --git a/roles/contiv/templates/etcd-daemonset.yml.j2 b/roles/contiv/templates/etcd-daemonset.yml.j2 new file mode 100644 index 000000000..76937e670 --- /dev/null +++ b/roles/contiv/templates/etcd-daemonset.yml.j2 @@ -0,0 +1,83 @@ +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: 
contiv-etcd + namespace: kube-system +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + name: contiv-etcd + template: + metadata: + namespace: kube-system + labels: + name: contiv-etcd + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + spec: + serviceAccountName: contiv-etcd + hostNetwork: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: +{% for node in groups.oo_masters_to_config %} + - "{{ node }}" +{% endfor %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + initContainers: + - name: contiv-etcd-init + image: "{{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }}" + env: + - name: ETCD_INIT_ARGSFILE + value: "{{ contiv_etcd_conf_dir }}/contiv-etcd-args" + - name: ETCD_INIT_LISTEN_PORT + value: "{{ contiv_etcd_port }}" + - name: ETCD_INIT_PEER_PORT + value: "{{ contiv_etcd_peer_port }}" + - name: ETCD_INIT_CLUSTER + value: "{{ contiv_etcd_peers }}" + - name: ETCD_INIT_DATA_DIR + value: "{{ contiv_etcd_data_dir }}" + volumeMounts: + - name: contiv-etcd-conf-dir + mountPath: "{{ contiv_etcd_conf_dir }}" + securityContext: + runAsUser: "{{ contiv_etcd_system_uid }}" + fsGroup: "{{ contiv_etcd_system_gid }}" + containers: + - name: contiv-etcd + image: "{{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}" + command: + - sh + - -c + - 'exec etcd $(cat "$ETCD_INIT_ARGSFILE")' + env: + - name: ETCD_INIT_ARGSFILE + value: "{{ contiv_etcd_conf_dir }}/contiv-etcd-args" + volumeMounts: + - name: contiv-etcd-conf-dir + mountPath: "{{ contiv_etcd_conf_dir }}" + - name: contiv-etcd-data-dir + mountPath: "{{ contiv_etcd_data_dir }}" + securityContext: + runAsUser: "{{ contiv_etcd_system_uid }}" + fsGroup: "{{ contiv_etcd_system_gid }}" + volumes: + - name: contiv-etcd-data-dir + hostPath: + type: DirectoryOrCreate + path: "{{ contiv_etcd_data_dir }}" + - name: contiv-etcd-conf-dir + hostPath: + type: DirectoryOrCreate + path: "{{ contiv_etcd_conf_dir }}" diff --git a/roles/contiv/templates/etcd-proxy-daemonset.yml.j2 b/roles/contiv/templates/etcd-proxy-daemonset.yml.j2 new file mode 100644 index 000000000..4ec6cfd76 --- /dev/null +++ b/roles/contiv/templates/etcd-proxy-daemonset.yml.j2 @@ -0,0 +1,55 @@ +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: contiv-etcd-proxy + namespace: kube-system +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + name: contiv-etcd-proxy + template: + metadata: + namespace: kube-system + labels: + name: contiv-etcd-proxy + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + spec: + serviceAccountName: contiv-etcd + hostNetwork: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: NotIn + values: +{% for node in groups.oo_masters_to_config %} + - "{{ node }}" +{% endfor %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: contiv-etcd-proxy + image: "{{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}" + command: + - etcd + - "--proxy=on" + - "--listen-client-urls=http://127.0.0.1:{{ contiv_etcd_port }}" + - "--advertise-client-urls=http://127.0.0.1:{{ contiv_etcd_port }}" + - "--initial-cluster={{ contiv_etcd_peers }}" + - "--data-dir={{ contiv_etcd_data_dir }}" + volumeMounts: + - name: contiv-etcd-data-dir + mountPath: "{{ 
contiv_etcd_data_dir }}" + securityContext: + runAsUser: "{{ contiv_etcd_system_uid }}" + fsGroup: "{{ contiv_etcd_system_gid }}" + volumes: + - name: contiv-etcd-data-dir + emptyDir: {} diff --git a/roles/contiv/templates/etcd-scc.yml.j2 b/roles/contiv/templates/etcd-scc.yml.j2 new file mode 100644 index 000000000..6c4bb1d1e --- /dev/null +++ b/roles/contiv/templates/etcd-scc.yml.j2 @@ -0,0 +1,42 @@ +allowHostDirVolumePlugin: true +allowHostIPC: false +allowHostNetwork: true +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +allowedCapabilities: [] +allowedFlexVolumes: [] +apiVersion: v1 +defaultAddCapabilities: [] +fsGroup: + ranges: + - max: "{{ contiv_etcd_system_gid }}" + min: "{{ contiv_etcd_system_gid }}" + type: MustRunAs +groups: [] +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: 'For contiv-etcd only.' + creationTimestamp: null + name: contiv-etcd +priority: null +readOnlyRootFilesystem: true +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +runAsUser: + type: MustRunAs + uid: "{{ contiv_etcd_system_uid }}" +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: MustRunAs +users: +- system:serviceaccount:kube-system:contiv-etcd +volumes: +- emptyDir +- hostPath +- secret diff --git a/roles/contiv/templates/netmaster.env.j2 b/roles/contiv/templates/netmaster.env.j2 deleted file mode 100644 index 5b5c84a2e..000000000 --- a/roles/contiv/templates/netmaster.env.j2 +++ /dev/null @@ -1,2 +0,0 @@ -NETMASTER_ARGS='--cluster-store etcd://{{ etcd_url }} --cluster-mode=kubernetes' - diff --git a/roles/contiv/templates/netmaster.j2 b/roles/contiv/templates/netmaster.j2 new file mode 100644 index 000000000..c9db122b5 --- /dev/null +++ b/roles/contiv/templates/netmaster.j2 @@ -0,0 +1 @@ +NETMASTER_ARGS='--etcd={{ contiv_etcd_url }} --listen-url=127.0.0.1:{{ contiv_netmaster_port }} --fwdmode={{ contiv_netplugin_fwd_mode }} --infra={{ contiv_fabric_mode }} --control-url={{ contiv_netmaster_ctrl_ip }}:{{ contiv_netmaster_port }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}' diff --git a/roles/contiv/templates/netmaster.service b/roles/contiv/templates/netmaster.service index ce7d0c75e..b7289bc38 100644 --- a/roles/contiv/templates/netmaster.service +++ b/roles/contiv/templates/netmaster.service @@ -4,7 +4,7 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service [Service] EnvironmentFile=/etc/default/netmaster -ExecStart={{ bin_dir }}/netmaster $NETMASTER_ARGS +ExecStart={{ contiv_bin_dir }}/netmaster $NETMASTER_ARGS KillMode=control-group Restart=always RestartSec=10 diff --git a/roles/contiv/templates/netplugin.j2 b/roles/contiv/templates/netplugin.j2 index a4928cc3d..0fd727401 100644 --- a/roles/contiv/templates/netplugin.j2 +++ b/roles/contiv/templates/netplugin.j2 @@ -1,7 +1,6 @@ {% if contiv_encap_mode == "vlan" %} -NETPLUGIN_ARGS='-vlan-if {{ netplugin_interface }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}' +NETPLUGIN_ARGS='--vlan-if={{ contiv_netplugin_interface }} --ctrl-ip={{ contiv_netplugin_ctrl_ip }} --etcd={{ contiv_etcd_url }} --fwdmode={{ contiv_netplugin_fwd_mode }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}' {% endif %} {% if contiv_encap_mode == "vxlan" %} -NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}' +NETPLUGIN_ARGS='--vtep-ip={{ contiv_netplugin_ctrl_ip }} --vxlan-port={{ 
contiv_vxlan_port }} --ctrl-ip={{ contiv_netplugin_ctrl_ip }} --etcd={{ contiv_etcd_url }} --fwdmode={{ contiv_netplugin_fwd_mode }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}' {% endif %} - diff --git a/roles/contiv/templates/netplugin.service b/roles/contiv/templates/netplugin.service index 6358d89ec..2e1ca1bdf 100644 --- a/roles/contiv/templates/netplugin.service +++ b/roles/contiv/templates/netplugin.service @@ -4,7 +4,7 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service [Service] EnvironmentFile=/etc/default/netplugin -ExecStart={{ bin_dir }}/netplugin $NETPLUGIN_ARGS +ExecStart={{ contiv_bin_dir }}/netplugin $NETPLUGIN_ARGS KillMode=control-group Restart=always RestartSec=10 diff --git a/roles/contiv_auth_proxy/README.md b/roles/contiv_auth_proxy/README.md deleted file mode 100644 index 287b6c148..000000000 --- a/roles/contiv_auth_proxy/README.md +++ /dev/null @@ -1,29 +0,0 @@ -Role Name -========= - -Role to install Contiv API Proxy and UI - -Requirements ------------- - -Docker needs to be installed to run the auth proxy container. - -Role Variables --------------- - -auth_proxy_image specifies the image with version tag to be used to spin up the auth proxy container. -auth_proxy_cert, auth_proxy_key specify files to use for the proxy server certificates. -auth_proxy_port is the host port and auth_proxy_datastore the cluster data store address. - -Dependencies ------------- - -docker - -Example Playbook ----------------- - -- hosts: netplugin-node - become: true - roles: - - { role: auth_proxy, auth_proxy_port: 10000, auth_proxy_datastore: etcd://netmaster:22379 } diff --git a/roles/contiv_auth_proxy/defaults/main.yml b/roles/contiv_auth_proxy/defaults/main.yml deleted file mode 100644 index e1d904c6a..000000000 --- a/roles/contiv_auth_proxy/defaults/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -auth_proxy_image: "contiv/auth_proxy:1.1.1" -auth_proxy_port: 10000 -contiv_certs: "/var/contiv/certs" -cluster_store: "etcd://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:22379" -auth_proxy_cert: "{{ contiv_certs }}/auth_proxy_cert.pem" -auth_proxy_key: "{{ contiv_certs }}/auth_proxy_key.pem" -auth_proxy_datastore: "{{ cluster_store }}" -auth_proxy_binaries: "/var/contiv_cache" -auth_proxy_local_install: False -auth_proxy_rule_comment: "Contiv auth proxy service" -service_vip: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}" diff --git a/roles/contiv_auth_proxy/files/auth-proxy.service b/roles/contiv_auth_proxy/files/auth-proxy.service deleted file mode 100644 index 7cd2edff1..000000000 --- a/roles/contiv_auth_proxy/files/auth-proxy.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=Contiv Proxy and UI -After=auditd.service systemd-user-sessions.service time-sync.target docker.service - -[Service] -ExecStart=/usr/bin/auth_proxy.sh start -ExecStop=/usr/bin/auth_proxy.sh stop -KillMode=control-group -Restart=on-failure -RestartSec=10 - -[Install] -WantedBy=multi-user.target diff --git a/roles/contiv_auth_proxy/files/cert.pem b/roles/contiv_auth_proxy/files/cert.pem deleted file mode 100644 index 63df4603f..000000000 --- a/roles/contiv_auth_proxy/files/cert.pem +++ /dev/null @@ -1,33 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFuTCCA6GgAwIBAgIJAOFyylO2zW2EMA0GCSqGSIb3DQEBCwUAMHMxCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UEBwwIU2FuIEpvc2UxDTALBgNVBAoM -BENQU0cxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxHTAbBgNVBAMMFGF1dGgtbG9j 
-YWwuY2lzY28uY29tMB4XDTE3MDcxMzE5NDYwMVoXDTI3MDcxMTE5NDYwMVowczEL -MAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMREwDwYDVQQHDAhTYW4gSm9zZTENMAsG -A1UECgwEQ1BTRzEWMBQGA1UECwwNSVQgRGVwYXJ0bWVudDEdMBsGA1UEAwwUYXV0 -aC1sb2NhbC5jaXNjby5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC -AQDKCg26dvsD1u3f1lCaLlVptyTyGyanaJ73mlHiUnAMcu0A/p3kzluTeQLZJxtl -MToM7rT/lun6fbhQC+7TQep9mufBzLhssyzRnT9rnGSeGwN66mO/rlYPZc5C1D7p -7QZh1uLznzgOA2zMkgnI+n6LB2TZWg+XLhZZIr5SVYE18lj0tnwq3R1uznVv9t06 -grUYK2K7x0Y3Pt2e6yV0e1w2FOGH+7v3mm0c8r1+7U+4EZ2SM3fdG7nyTL/187gl -yE8X4HOnAyYGbAnULJC02LR/DTQpv/RpLN/YJEpHZWApHZCKh+fbFdIhRRwEnT4L -DLy3GJVFDEsmFaC91wf24+HAeUl9/hRIbxo9x/7kXmrhMlK38x2oo3cPh0XZxHje -XmJUGG1OByAuIZaGFwS9lUuGTNvpN8P/v3HN/nORc0RE3fvoXIv4nuhaEfuo32q4 -dvO4aNjmxjz1JcUEx6DiMQe4ECaReYdvI+j9ZkUJj/e89iLsQ8gz5t3FTM+tmBi1 -hrRBAgWyRY5DKECVv2SNFiX55JQGA5vQDGw51qTTuhntfBhkHvhKL7V1FRZazx6N -wqFyynig/jplb1ZNdKZ9ZxngZr6qHIx4RcGaJ9HdVhik7NyUCiHjWeGagzun2Omq -FFXAD9Hmfctac5bGxx0FBi95kO8bd8b0GSIh2CWanETjawIDAQABo1AwTjAdBgNV -HQ4EFgQU5P1g5gFZot//iwEV98MwW2YXzEMwHwYDVR0jBBgwFoAU5P1g5gFZot// -iwEV98MwW2YXzEMwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAbWgN -BkFzzG5sbG7vUb23Ggv/0TCCuMtuKBGOBR0EW5Ssw6Aml7j3AGiy/1+2sdrQMsx2 -nVpexyQW5XS/X+8JjH7H7ifvwl3bVJ8xiR/9ioIJovrQojxQO0cUB2Lljj3bPd/R -/tddAhPj0uN9N7UAejA12kXGa0Rrzb2U1rIpO9jnTbQYJiTOSzFiiGRMZWx3hfsW -SDTpPmsV2Mh+jcmuxvPITl0s+vtqsm7SYoUZHwJ80LvrPbmk/5hTZGRsI3W5jipB -PpOxvBnAWnQH3miMhty2TDaQ9JjYUwnxjFFZvNIYtp8+eH4nlbSldbgZoUeAe8It -X6SsP8gT/uQh3TPvzNIfYROA7qTwoOQ8ZW8ssai/EttHAztFxketgNEfjwUTz8EJ -yKeyAJ7qk3zD5k7p33ZNLWjmN0Awx3fCE9OQmNUyNX7PpYb4i+tHWu3h6Clw0RUf -0gb1I+iyB3PXmpiYtxdMxGSi9CQIyWHzC4bsTQZkrzzIHWFSwewhUWOQ2Wko0hrv -DnkS5k0cMPn5aNxw56H6OI+6hb+y/GGkTxNY9Gbxypx6lgZson0EY80EPZOJAORM -XggJtTjiMpzvKh18DZY/Phmdh0C2tt8KYFdG83qLEhya9WZujbLAm38vIziFHbdX -jOitXBSPyVrV3JvsCVksp+YC8Lnv3FsM494R4kA= ------END CERTIFICATE----- diff --git a/roles/contiv_auth_proxy/files/key.pem b/roles/contiv_auth_proxy/files/key.pem deleted file mode 100644 index 7224e569c..000000000 --- a/roles/contiv_auth_proxy/files/key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAygoNunb7A9bt39ZQmi5Vabck8hsmp2ie95pR4lJwDHLtAP6d -5M5bk3kC2ScbZTE6DO60/5bp+n24UAvu00HqfZrnwcy4bLMs0Z0/a5xknhsDeupj -v65WD2XOQtQ+6e0GYdbi8584DgNszJIJyPp+iwdk2VoPly4WWSK+UlWBNfJY9LZ8 -Kt0dbs51b/bdOoK1GCtiu8dGNz7dnusldHtcNhThh/u795ptHPK9fu1PuBGdkjN3 -3Ru58ky/9fO4JchPF+BzpwMmBmwJ1CyQtNi0fw00Kb/0aSzf2CRKR2VgKR2Qiofn -2xXSIUUcBJ0+Cwy8txiVRQxLJhWgvdcH9uPhwHlJff4USG8aPcf+5F5q4TJSt/Md -qKN3D4dF2cR43l5iVBhtTgcgLiGWhhcEvZVLhkzb6TfD/79xzf5zkXNERN376FyL -+J7oWhH7qN9quHbzuGjY5sY89SXFBMeg4jEHuBAmkXmHbyPo/WZFCY/3vPYi7EPI -M+bdxUzPrZgYtYa0QQIFskWOQyhAlb9kjRYl+eSUBgOb0AxsOdak07oZ7XwYZB74 -Si+1dRUWWs8ejcKhcsp4oP46ZW9WTXSmfWcZ4Ga+qhyMeEXBmifR3VYYpOzclAoh -41nhmoM7p9jpqhRVwA/R5n3LWnOWxscdBQYveZDvG3fG9BkiIdglmpxE42sCAwEA -AQKCAgANVU6EoLd+EGAQZo9ZLXebi2eXxqztXV0oT/nZasFUQP1dFHCNGgU3HURP -2mHXcsE2+0XcnDQCwOs59R+kt3PnKCLlSkJdghGSH8OAsYh+WqAHK5K7oqCxUXGk -PWeNfoPuTwUZOMe1PQqgEX8t0UIqoKlKIsRmoLb+2Okge94UFlNCiwx0s7TujBd5 -9Ruycc/LsYlJhSQgHzj29OO65S03sHcVx0onU/yhbW+OAdFB/3+bl2PwppTF5cTB -UX00mRyHIdvgCLgoslaPtwUxuh9nRxLLMozJqBl5pSN1xL3s2LOiQMfPUIhWg74O -m+XtSsDlgGzRardG4ySBgsBWzcEnGWi5/xyc/6dtERzR382+CLUfOEoucGJHk6kj -RdbVx5FCawpAzjs9Wo49Vr+WQceSiBfb2+ndNUTiD0wu7xLEVPcYC6CMk71qZv5H -0qGlLhtkHF0nSQytbwqwfMz2SGDfkwIHgQ0gTKMpEMWK79E24ewE1BnMiaKC1bgk -evB6WM1YZFMKS5L7fshJcbeMe9dhSF3s+Y0MYVv5MCL1VMZyIzAcj8mkPYZyBRUk -MC87GnaebeTvHNtimvqCuWDGVI1SOoc1xtopkxinTqtIYGuQacrSmfyf9D3Rg4+l -kB0ibtJV+HLP94q266aef/PdpXszs7zo0h6skpLItW/jAuSNuQKCAQEA/VdXpMi8 
-nfOtXwOZlGA2+jShYyHyCl2TKgbpfDGl1yKNkbBrIu2/PEl1DpmzSeG1tdNCzN68 -4vEjpF/jBsdSJj4BDiRY6HEcURXpw4yTZ7oCnUCbzadLIo3wX/gFDEVZz+0nQQ29 -5x0XGuQnJXC2fe/CyrkfltKhFSYoTSjtMbma4Pm3Q3HP3wGOvoUKtKNDO5rF26Qh -YtqJgJSKBAms0wKiy9VVTa6DaXrtSnXTR+Ltud3xnWBrX1Z+idwxYt/Be5W2woHf -M5zPIqMUgry5ujtRxhLmleFXDAYbaIQR9AZXlSS3w+9Gcl5EDRkFXqlaoCfppwTR -wakj2lNjbAidPwKCAQEAzCjgko4/Yss/0dCs8ySKd2IaRF93OwC/E2SHVqe5bATh -rVmDn/KIH4J2fI4FiaIHELT1CU5vmganYbK2k7CoJztjJltM1B7rkpHiVSL+qMqn -yBZFg3LFq9eiBPZHyQEc+HMJUhFRexjdeqLH78HCoPz1QnKo2xRoGHhSQ/Rh6lXo -20tldL9HrSxPRmwxnyLgWGcWopv/92JNxu6FgnZcnsVjkpO2mriLD7+Ty5qfvkwc -RFDBYnq2JjBcvqngrzDIGDzC7hTA5BRuuQdNMZggJwO6nKdZDUrq5NIo9B07FLj1 -IRMVm7D1vJYzYI6HW7Wj4vNRXMY8jG1fwvNG0+xy1QKCAQEA7m14R9bAZWuDnGt3 -7APNWheUWAcHk6fTq/cLYV4cdWfIkvfVLO9STrvXliEjcoIhkPk94jAy1ucZo0a3 -FJccgm9ScOvWXRSvEMUt12ODC1ktwq+esqMi/GdXdgqnPZA7YYwRqJD1TAC90Qou -qXb12Xp/+mjWCQ08mvnpbgz5hxXmZJvAVZJUj84YeMgfdjg9O2iDlB5ZaX7BcCjb -58bvRzww2ONzQAPhG7Gch7pyWTKCh64RCgtHold2CesY87QglV4mvdKarSmEbFXN -JOnXZiUT5fW93AtS8DcDLo81klMxtGT1KksUIukC5MzKl/eNGjPWG+FWRAwaeQyI -ApHs4wKCAQAI10RSVGKeTprm5Rh4Nv7gCJmGmHO7VF7x4gqSUBURfmyfax7uEDyg -0K982VGYEjIoIQ3zZzgh/WPGMU0CvEWr3UB/6rg6/1PINxUMBsXsXUpCueQsuw2g -UWgsutWE+M1eXOzsZt+Waw88PkxWL5fUDOA6DmkNg6a2WI+Hbc/HrAy3Yl50Xcwm -zaJpNEo5z/LTITOzuvmsps8jbDTP33xHS9jyAf+IV7F97xfhW0LLpNQciTq2nwXA -RZvejdCzBXPEyOzQDooD1natAInxOds6lUjBe+W5U6M0YX1whMuILDJBSmhHI7Sg -hAiZh9KIwCbmrw6468S3eA0LjillB/o5AoIBAQCg93syT50nYF2UWWP/rEa7qf6h -+YpBPpJskIl3NDMJtie9OcdsoFpjblpFbsMqsSag9KhGl7wn4f8qXO0HERSb8oYd -1Zu6BgUCuRXuAKNI4f508IooNpXx9y7xxl4giFBnDPa6W3KWqZ2LMDt92htMd/Zm -qvoyYZhFhMSyKFzPDAFdsZijJgahqJRKhHeW9BsPqho5i7Ys+PhE8e/vUZs2zUeS -QEHWhVisDTNKOoJIdz7JXFgEXCPTLAxXIIhYSkIfQxHxsWjt0vs79tzUkV8NlpKt -d7s0iyHnD6kDvoxYOSI9YmSEnnFBFdgeiD+/VD+7enOdqb5MHsjuw+by09ft ------END RSA PRIVATE KEY----- diff --git a/roles/contiv_auth_proxy/handlers/main.yml b/roles/contiv_auth_proxy/handlers/main.yml deleted file mode 100644 index 9cb9bea49..000000000 --- a/roles/contiv_auth_proxy/handlers/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# handlers file for auth_proxy diff --git a/roles/contiv_auth_proxy/tasks/cleanup.yml b/roles/contiv_auth_proxy/tasks/cleanup.yml deleted file mode 100644 index a29659cc9..000000000 --- a/roles/contiv_auth_proxy/tasks/cleanup.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -- name: stop auth-proxy container - service: name=auth-proxy state=stopped - -- name: cleanup iptables for auth proxy - shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})" - become: true - with_items: - - "{{ auth_proxy_port }}" diff --git a/roles/contiv_auth_proxy/tasks/main.yml b/roles/contiv_auth_proxy/tasks/main.yml deleted file mode 100644 index 74e7bf794..000000000 --- a/roles/contiv_auth_proxy/tasks/main.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -# tasks file for auth_proxy -- name: setup iptables for auth proxy - shell: > - ( iptables -L INPUT | grep "{{ auth_proxy_rule_comment }} ({{ item }})" ) || \ - iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})" - become: true - with_items: - - "{{ auth_proxy_port }}" - -# Load the auth-proxy-image from local tar. 
Ignore any errors to handle the -# case where the image is not built in -- name: copy auth-proxy image - copy: src={{ auth_proxy_binaries }}/auth-proxy-image.tar dest=/tmp/auth-proxy-image.tar - when: auth_proxy_local_install == True - -- name: load auth-proxy image - shell: docker load -i /tmp/auth-proxy-image.tar - when: auth_proxy_local_install == True - -- name: create cert folder for proxy - file: path=/var/contiv/certs state=directory - -- name: copy shell script for starting auth-proxy - template: src=auth_proxy.j2 dest=/usr/bin/auth_proxy.sh mode=u=rwx,g=rx,o=rx - -- name: copy cert for starting auth-proxy - copy: src=cert.pem dest=/var/contiv/certs/auth_proxy_cert.pem mode=u=rw,g=r,o=r - -- name: copy key for starting auth-proxy - copy: src=key.pem dest=/var/contiv/certs/auth_proxy_key.pem mode=u=rw,g=r,o=r - -- name: copy systemd units for auth-proxy - copy: src=auth-proxy.service dest=/etc/systemd/system/auth-proxy.service - -- name: start auth-proxy container - systemd: name=auth-proxy daemon_reload=yes state=started enabled=yes diff --git a/roles/contiv_auth_proxy/templates/auth_proxy.j2 b/roles/contiv_auth_proxy/templates/auth_proxy.j2 deleted file mode 100644 index 0ab8c831b..000000000 --- a/roles/contiv_auth_proxy/templates/auth_proxy.j2 +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -usage="$0 start/stop" -if [ $# -ne 1 ]; then - echo USAGE: $usage - exit 1 -fi - -case $1 in -start) - set -e - - /usr/bin/docker run --rm \ - -p 10000:{{ auth_proxy_port }} \ - --net=host --name=auth-proxy \ - -e NO_NETMASTER_STARTUP_CHECK=1 \ - -v /var/contiv:/var/contiv:z \ - {{ auth_proxy_image }} \ - --tls-key-file={{ auth_proxy_key }} \ - --tls-certificate={{ auth_proxy_cert }} \ - --data-store-address={{ auth_proxy_datastore }} \ - --netmaster-address={{ service_vip }}:9999 \ - --listen-address=:10000 - ;; - -stop) - # don't stop on error - /usr/bin/docker stop auth-proxy - /usr/bin/docker rm -f -v auth-proxy - ;; - -*) - echo USAGE: $usage - exit 1 - ;; -esac diff --git a/roles/contiv_auth_proxy/tests/inventory b/roles/contiv_auth_proxy/tests/inventory deleted file mode 100644 index d18580b3c..000000000 --- a/roles/contiv_auth_proxy/tests/inventory +++ /dev/null @@ -1 +0,0 @@ -localhost
\ No newline at end of file diff --git a/roles/contiv_auth_proxy/tests/test.yml b/roles/contiv_auth_proxy/tests/test.yml deleted file mode 100644 index 2af3250cd..000000000 --- a/roles/contiv_auth_proxy/tests/test.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- hosts: localhost - remote_user: root - roles: - - auth_proxy diff --git a/roles/contiv_auth_proxy/vars/main.yml b/roles/contiv_auth_proxy/vars/main.yml deleted file mode 100644 index 9032766c4..000000000 --- a/roles/contiv_auth_proxy/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# vars file for auth_proxy diff --git a/roles/contiv_facts/defaults/main.yaml b/roles/contiv_facts/defaults/main.yaml index 7b8150954..c1622c56a 100644 --- a/roles/contiv_facts/defaults/main.yaml +++ b/roles/contiv_facts/defaults/main.yaml @@ -1,13 +1,10 @@ --- # The directory where binaries are stored on Ansible # managed systems. -bin_dir: /usr/bin +contiv_bin_dir: /usr/bin # The directory used by Ansible to temporarily store # files on Ansible managed systems. -ansible_temp_dir: /tmp/.ansible/files +contiv_ansible_temp_dir: /tmp/.ansible/files -source_type: packageManager - -# Whether or not to also install and enable the Contiv auth_proxy -contiv_enable_auth_proxy: false +contiv_source_type: packageManager diff --git a/roles/contiv_facts/tasks/fedora-install.yml b/roles/contiv_facts/tasks/fedora-install.yml index 932ff091a..b8239a636 100644 --- a/roles/contiv_facts/tasks/fedora-install.yml +++ b/roles/contiv_facts/tasks/fedora-install.yml @@ -11,9 +11,9 @@ retries: 5 delay: 10 environment: - https_proxy: "{{ https_proxy }}" - http_proxy: "{{ http_proxy }}" - no_proxy: "{{ no_proxy }}" + https_proxy: "{{ contiv_https_proxy }}" + http_proxy: "{{ contiv_http_proxy }}" + no_proxy: "{{ contiv_no_proxy }}" - name: Install libselinux-python command: dnf install {{ item }} -y @@ -21,6 +21,6 @@ - python-dnf - libselinux-python environment: - https_proxy: "{{ https_proxy }}" - http_proxy: "{{ http_proxy }}" - no_proxy: "{{ no_proxy }}" + https_proxy: "{{ contiv_https_proxy }}" + http_proxy: "{{ contiv_http_proxy }}" + no_proxy: "{{ contiv_no_proxy }}" diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml index ced04759d..11f1e1369 100644 --- a/roles/contiv_facts/tasks/main.yml +++ b/roles/contiv_facts/tasks/main.yml @@ -4,42 +4,28 @@ register: distro check_mode: no -- name: Init the is_coreos fact +- name: Init the contiv_is_coreos fact set_fact: - is_coreos: false + contiv_is_coreos: false -- name: Set the is_coreos fact +- name: Set the contiv_is_coreos fact set_fact: - is_coreos: true + contiv_is_coreos: true when: "'CoreOS' in distro.stdout" -- name: Set docker config file directory - set_fact: - docker_config_dir: "/etc/sysconfig" - -- name: Override docker config file directory for Debian - set_fact: - docker_config_dir: "/etc/default" - when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu" - -- name: Create config file directory - file: - path: "{{ docker_config_dir }}" - state: directory - - name: Set the bin directory path for CoreOS set_fact: - bin_dir: "/opt/bin" - when: is_coreos + contiv_bin_dir: "/opt/bin" + when: contiv_is_coreos - name: Create the directory used to store binaries file: - path: "{{ bin_dir }}" + path: "{{ contiv_bin_dir }}" state: directory - name: Create Ansible temp directory file: - path: "{{ ansible_temp_dir }}" + path: "{{ contiv_ansible_temp_dir }}" state: directory - name: Determine if has rpm @@ -48,26 +34,26 @@ changed_when: false check_mode: no -- name: Init the has_rpm fact 
+- name: Init the contiv_has_rpm fact set_fact: - has_rpm: false + contiv_has_rpm: false -- name: Set the has_rpm fact +- name: Set the contiv_has_rpm fact set_fact: - has_rpm: true + contiv_has_rpm: true when: s.stat.exists -- name: Init the has_firewalld fact +- name: Init the contiv_has_firewalld fact set_fact: - has_firewalld: false + contiv_has_firewalld: false -- name: Init the has_iptables fact +- name: Init the contiv_has_iptables fact set_fact: - has_iptables: false + contiv_has_iptables: false # collect information about what packages are installed - include_tasks: rpm.yml - when: has_rpm + when: contiv_has_rpm - include_tasks: fedora-install.yml when: not openshift_is_atomic and ansible_distribution == "Fedora" diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml index d12436f96..dc6c5d3b7 100644 --- a/roles/contiv_facts/tasks/rpm.yml +++ b/roles/contiv_facts/tasks/rpm.yml @@ -13,9 +13,9 @@ failed_when: false check_mode: no -- name: Set the has_firewalld fact +- name: Set the contiv_has_firewalld fact set_fact: - has_firewalld: true + contiv_has_firewalld: true when: s.rc == 0 and ss.rc == 0 - name: Determine if iptables-services installed @@ -25,7 +25,7 @@ failed_when: false check_mode: no -- name: Set the has_iptables fact +- name: Set the contiv_has_iptables fact set_fact: - has_iptables: true + contiv_has_iptables: true when: s.rc == 0 diff --git a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml index d4518554c..78578a055 100644 --- a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml @@ -79,13 +79,6 @@ when: etcd_client_certs_missing | bool delegate_to: "{{ etcd_ca_host }}" -- name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/etcd_certificates-XXXXXXX - register: g_etcd_client_mktemp - changed_when: False - when: etcd_client_certs_missing | bool - become: no - - name: Create a tarball of the etcd certs command: > tar -czvf {{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz @@ -101,8 +94,7 @@ - name: Retrieve the etcd cert tarballs fetch: src: "{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz" - dest: "{{ g_etcd_client_mktemp.stdout }}/" - flat: yes + dest: "/tmp" fail_on_missing: yes validate_checksum: yes when: etcd_client_certs_missing | bool @@ -116,10 +108,15 @@ - name: Unarchive etcd cert tarballs unarchive: - src: "{{ g_etcd_client_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz" + src: "/tmp/{{ inventory_hostname }}/{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz" dest: "{{ etcd_cert_config_dir }}" when: etcd_client_certs_missing | bool +- name: Delete temporary directory + local_action: file path="/tmp/{{ inventory_hostname }}" state=absent + changed_when: False + when: etcd_client_certs_missing | bool + - file: path: "{{ etcd_cert_config_dir }}/{{ item }}" owner: root @@ -130,9 +127,3 @@ - "{{ etcd_cert_prefix }}client.key" - "{{ etcd_cert_prefix }}ca.crt" when: etcd_client_certs_missing | bool - -- name: Delete temporary directory - local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent - changed_when: False - when: etcd_client_certs_missing | bool - become: no diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml index 59a6b6590..987380d0c 100644 --- 
a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml @@ -105,13 +105,6 @@ when: etcd_server_certs_missing | bool delegate_to: "{{ etcd_ca_host }}" -- name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/etcd_certificates-XXXXXXX - become: no - register: g_etcd_server_mktemp - changed_when: False - when: etcd_server_certs_missing | bool - - name: Create a tarball of the etcd certs command: > tar -czvf {{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz @@ -127,8 +120,7 @@ - name: Retrieve etcd cert tarball fetch: src: "{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz" - dest: "{{ g_etcd_server_mktemp.stdout }}/" - flat: yes + dest: "/tmp" fail_on_missing: yes validate_checksum: yes when: etcd_server_certs_missing | bool @@ -144,7 +136,7 @@ - name: Unarchive cert tarball unarchive: - src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz" + src: "/tmp/{{ inventory_hostname }}/{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz" dest: "{{ etcd_cert_config_dir }}" when: etcd_server_certs_missing | bool @@ -161,8 +153,7 @@ - name: Retrieve etcd ca cert tarball fetch: src: "{{ etcd_generated_certs_dir }}/{{ etcd_ca_name }}.tgz" - dest: "{{ g_etcd_server_mktemp.stdout }}/" - flat: yes + dest: "/tmp" fail_on_missing: yes validate_checksum: yes when: etcd_server_certs_missing | bool @@ -177,8 +168,7 @@ when: etcd_server_certs_missing | bool - name: Delete temporary directory - local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent - become: no + local_action: file path="/tmp/{{ inventory_hostname }}" state=absent changed_when: False when: etcd_server_certs_missing | bool diff --git a/roles/etcd/tasks/migration/migrate.yml b/roles/etcd/tasks/migration/migrate.yml index 847b1d722..630640ab1 100644 --- a/roles/etcd/tasks/migration/migrate.yml +++ b/roles/etcd/tasks/migration/migrate.yml @@ -1,7 +1,7 @@ --- # Should this be run in a serial manner? 
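The fetch/unarchive changes in the two certificate task files above rely on the default behavior of Ansible's fetch module: without flat: yes, each retrieved file is stored under the destination as dest/<inventory_hostname>/<full remote source path>, which is exactly what the new unarchive src paths reconstruct. A minimal sketch of that layout (hostname and paths here are illustrative, not taken from the diff):

    import os

    def fetched_path(dest, inventory_hostname, src):
        # fetch without flat=yes saves to <dest>/<inventory_hostname>/<remote src path>
        return os.path.join(dest, inventory_hostname, src.lstrip('/'))

    print(fetched_path('/tmp', 'master1.example.com',
                       '/etc/etcd/generated_certs/etcd-master1/master1.tgz'))
    # -> /tmp/master1.example.com/etc/etcd/generated_certs/etcd-master1/master1.tgz

This is why the separate mktemp/cleanup bookkeeping could be dropped: the per-host nesting already prevents hosts from overwriting each other's tarballs.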
- set_fact: - l_etcd_service: "{{ 'etcd_container' if openshift_is_containerized else 'etcd' }}" + l_etcd_service: "{{ 'etcd_container' if (openshift_is_containerized | bool) else 'etcd' }}" - name: Migrate etcd data command: > diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml index 7d79bd3d4..f94399fab 100644 --- a/roles/flannel/handlers/main.yml +++ b/roles/flannel/handlers/main.yml @@ -21,3 +21,7 @@ until: not (l_restart_node_result is failed) retries: 3 delay: 30 + +- name: save iptable rules + become: yes + command: 'iptables-save' diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml index 4627bf69c..11981fb80 100644 --- a/roles/flannel/tasks/main.yml +++ b/roles/flannel/tasks/main.yml @@ -41,3 +41,13 @@ notify: - restart docker - restart node + +- name: Enable Pod to Pod communication + command: /sbin/iptables --wait -I FORWARD -d {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -i {{ flannel_interface }} -j ACCEPT -m comment --comment "Pod to Pod communication" + notify: + - save iptable rules + +- name: Allow external network access + command: /sbin/iptables -t nat -A POSTROUTING -o {{ flannel_interface }} -j MASQUERADE -m comment --comment "Allow external network access" + notify: + - save iptable rules diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py index 83ca83350..da7e7b1da 100644 --- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py +++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -31,6 +31,7 @@ class CallbackModule(CallbackBase): 'installer_phase_node', 'installer_phase_glusterfs', 'installer_phase_hosted', + 'installer_phase_web_console', 'installer_phase_metrics', 'installer_phase_logging', 'installer_phase_prometheus', @@ -80,6 +81,10 @@ class CallbackModule(CallbackBase): 'title': 'Hosted Install', 'playbook': 'playbooks/openshift-hosted/config.yml' }, + 'installer_phase_web_console': { + 'title': 'Web Console Install', + 'playbook': 'playbooks/openshift-web-console/config.yml' + }, 'installer_phase_metrics': { 'title': 'Metrics Install', 'playbook': 'playbooks/openshift-metrics/config.yml' diff --git a/roles/lib_openshift/src/test/unit/test_oc_scale.py b/roles/lib_openshift/src/test/unit/test_oc_scale.py index d810735f2..9d10c84f3 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_scale.py +++ b/roles/lib_openshift/src/test/unit/test_oc_scale.py @@ -27,7 +27,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('oc_scale.Utils.create_tmpfile_copy') @mock.patch('oc_scale.OCScale.openshift_cmd') def test_state_list(self, mock_openshift_cmd, mock_tmpfile_copy): - ''' Testing a get ''' + ''' Testing a list ''' params = {'name': 'router', 'namespace': 'default', 'replicas': 2, @@ -71,8 +71,296 @@ class OCScaleTest(unittest.TestCase): @mock.patch('oc_scale.Utils.create_tmpfile_copy') @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_state_present(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing a state present ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 2, + 'state': 'present', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": 
"a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertFalse(results['changed']) + self.assertEqual(results['state'], 'present') + self.assertEqual(results['result'][0], 2) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_scale_up(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing a scale up ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 3, + 'state': 'present', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + dc_updated = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6559", + "generation": 9, + "creationTimestamp": "2017-01-24T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 3, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc replace', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc_updated, + 'returncode': 0}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertTrue(results['changed']) + self.assertEqual(results['state'], 'present') + self.assertEqual(results['result'][0], 3) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_scale_down(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing a scale down ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 1, + 'state': 'present', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + dc_updated = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6560", + 
"generation": 9, + "creationTimestamp": "2017-01-24T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 1, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc replace', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc_updated, + 'returncode': 0}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertTrue(results['changed']) + self.assertEqual(results['state'], 'present') + self.assertEqual(results['result'][0], 1) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_scale_failed(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing a scale failure ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 1, + 'state': 'present', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + error_message = "foo" + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc replace', + 'results': error_message, + 'returncode': 1}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertTrue(results['failed']) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_state_unknown(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing an unknown state ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 2, + 'state': 'unknown-state', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertFalse('changed' in results) + self.assertEqual(results['failed'], True) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') def test_scale(self, mock_openshift_cmd, mock_tmpfile_copy): - ''' Testing a get ''' + ''' Testing scale ''' params = {'name': 'router', 'namespace': 'default', 'replicas': 3, @@ -120,8 +408,57 @@ class 
OCScaleTest(unittest.TestCase): @mock.patch('oc_scale.Utils.create_tmpfile_copy') @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_scale_rc(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing scale for replication controllers ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 3, + 'state': 'list', + 'kind': 'rc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + rc = '''{"kind": "ReplicationController", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 3, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get rc router -n default', + 'results': rc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc create -f /tmp/router -n default', + 'results': '', + 'returncode': 0} + ] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertFalse(results['changed']) + self.assertEqual(results['result'][0], 3) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') def test_no_dc_scale(self, mock_openshift_cmd, mock_tmpfile_copy): - ''' Testing a get ''' + ''' Testing scale for nonexistent dc ''' params = {'name': 'not_there', 'namespace': 'default', 'replicas': 3, @@ -205,7 +542,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which): - ''' Testing binary lookup fallback ''' + ''' Testing binary lookup fallback in py3 ''' mock_env_get.side_effect = lambda _v, _d: '' @@ -217,7 +554,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which): - ''' Testing binary lookup in path ''' + ''' Testing binary lookup in path in py3 ''' oc_bin = '/usr/bin/oc' @@ -231,7 +568,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which): - ''' Testing binary lookup in /usr/local/bin ''' + ''' Testing binary lookup in /usr/local/bin in py3 ''' oc_bin = '/usr/local/bin/oc' @@ -245,7 +582,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which): - ''' Testing binary lookup in ~/bin ''' + ''' Testing binary lookup in ~/bin in py3 ''' oc_bin = os.path.expanduser('~/bin/oc') diff --git a/roles/lib_utils/action_plugins/sanity_checks.py b/roles/lib_utils/action_plugins/sanity_checks.py index 1bf332678..09ce55e8f 100644 --- a/roles/lib_utils/action_plugins/sanity_checks.py +++ b/roles/lib_utils/action_plugins/sanity_checks.py @@ -2,6 +2,8 @@ Ansible action plugin to ensure inventory variables are set appropriately and no conflicting options have been provided.
""" +import re + from ansible.plugins.action import ActionBase from ansible import errors @@ -15,6 +17,27 @@ NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True), ('openshift_use_contiv', False), ('openshift_use_calico', False)) +ENTERPRISE_TAG_REGEX_ERROR = """openshift_image_tag must be in the format +v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, +v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6 +You specified openshift_image_tag={}""" + +ORIGIN_TAG_REGEX_ERROR = """openshift_image_tag must be in the format +v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1 +You specified openshift_image_tag={}""" + +ORIGIN_TAG_REGEX = {'re': '(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)', + 'error_msg': ORIGIN_TAG_REGEX_ERROR} +ENTERPRISE_TAG_REGEX = {'re': '(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)', + 'error_msg': ENTERPRISE_TAG_REGEX_ERROR} +IMAGE_TAG_REGEX = {'origin': ORIGIN_TAG_REGEX, + 'openshift-enterprise': ENTERPRISE_TAG_REGEX} + +CONTAINERIZED_NO_TAG_ERROR_MSG = """To install a containerized Origin release, +you must set openshift_release or openshift_image_tag in your inventory to +specify which version of the OpenShift component images to use. +(Suggestion: add openshift_release="x.y" to inventory.)""" + def to_bool(var_to_check): """Determine a boolean value given the multiple @@ -44,6 +67,7 @@ class ActionModule(ActionBase): type_strings = ", ".join(VALID_DEPLOYMENT_TYPES) msg = "openshift_deployment_type must be defined and one of {}".format(type_strings) raise errors.AnsibleModuleError(msg) + return openshift_deployment_type def check_python_version(self, hostvars, host, distro): """Ensure python version is 3 for Fedora and python 2 for others""" @@ -58,6 +82,35 @@ class ActionModule(ActionBase): if ansible_python['version']['major'] != 2: msg = "openshift-ansible requires Python 2 for {};".format(distro) + def check_image_tag_format(self, hostvars, host, openshift_deployment_type): + """Ensure openshift_image_tag is formatted correctly""" + openshift_image_tag = self.template_var(hostvars, host, 'openshift_image_tag') + if not openshift_image_tag or openshift_image_tag == 'latest': + return None + regex_to_match = IMAGE_TAG_REGEX[openshift_deployment_type]['re'] + res = re.match(regex_to_match, str(openshift_image_tag)) + if res is None: + msg = IMAGE_TAG_REGEX[openshift_deployment_type]['error_msg'] + msg = msg.format(str(openshift_image_tag)) + raise errors.AnsibleModuleError(msg) + + def no_origin_image_version(self, hostvars, host, openshift_deployment_type): + """Ensure we can determine what image version to use with origin + fail when: + - openshift_is_containerized + - openshift_deployment_type == 'origin' + - openshift_release is not defined + - openshift_image_tag is not defined""" + if not openshift_deployment_type == 'origin': + return None + oic = self.template_var(hostvars, host, 'openshift_is_containerized') + if not to_bool(oic): + return None + orelease = self.template_var(hostvars, host, 'openshift_release') + oitag = self.template_var(hostvars, host, 'openshift_image_tag') + if not orelease and not oitag: + raise errors.AnsibleModuleError(CONTAINERIZED_NO_TAG_ERROR_MSG) + def network_plugin_check(self, hostvars, host): """Ensure only one type of network plugin is enabled""" res = [] @@ -88,8 +141,10 @@ class ActionModule(ActionBase): def run_checks(self, hostvars, host): """Execute the hostvars validations against host""" distro = self.template_var(hostvars, host, 'ansible_distribution') - self.check_openshift_deployment_type(hostvars, host) + odt = 
self.check_openshift_deployment_type(hostvars, host) self.check_python_version(hostvars, host, distro) + self.check_image_tag_format(hostvars, host, odt) + self.no_origin_image_version(hostvars, host, odt) self.network_plugin_check(hostvars, host) self.check_hostname_vars(hostvars, host) diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml index 696b323c0..786db1570 100644 --- a/roles/openshift_aws/tasks/provision_instance.yml +++ b/roles/openshift_aws/tasks/provision_instance.yml @@ -14,11 +14,7 @@ instance_type: m4.xlarge vpc_subnet_id: "{{ openshift_aws_subnet_id | default(subnetout.subnets[0].id) }}" image: "{{ openshift_aws_base_ami }}" - volumes: - - device_name: /dev/sdb - volume_type: gp2 - volume_size: 100 - delete_on_termination: true + volumes: "{{ openshift_aws_node_group_config_node_volumes }}" wait: yes exact_count: 1 count_tag: @@ -46,5 +42,5 @@ - name: add host to nodes add_host: - groups: nodes + groups: nodes,g_new_node_hosts name: "{{ instancesout.instances[0].public_dns_name }}" diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py index a2bc9ecdb..58b228fee 100644 --- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py +++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py @@ -31,7 +31,6 @@ certificates Example playbook usage: - name: Generate expiration results JSON - become: no run_once: yes delegate_to: localhost when: openshift_certificate_expiry_save_json_results|bool diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml index b5234bd1e..8dea2c07f 100644 --- a/roles/openshift_certificate_expiry/tasks/main.yml +++ b/roles/openshift_certificate_expiry/tasks/main.yml @@ -7,7 +7,6 @@ register: check_results - name: Generate expiration report HTML - become: no run_once: yes template: src: cert-expiry-table.html.j2 @@ -21,7 +20,6 @@ when: openshift_certificate_expiry_save_json_results|bool - name: Generate results JSON file - become: no run_once: yes template: src: save_json_results.j2 diff --git a/roles/openshift_cli/defaults/main.yml b/roles/openshift_cli/defaults/main.yml index 631a0455e..9faec639f 100644 --- a/roles/openshift_cli/defaults/main.yml +++ b/roles/openshift_cli/defaults/main.yml @@ -8,4 +8,4 @@ system_images_registry: "{{ system_images_registry_dict[openshift_deployment_typ openshift_use_crio_only: False l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(False)) | bool }}" -l_use_cli_atomic_image: "{{ openshift_use_crio_only or l_is_system_container_image }}" +l_use_cli_atomic_image: "{{ (openshift_use_crio_only | bool) or (l_is_system_container_image | bool) }}" diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml index dff492a69..3513577fa 100644 --- a/roles/openshift_cloud_provider/tasks/main.yml +++ b/roles/openshift_cloud_provider/tasks/main.yml @@ -19,3 +19,6 @@ - include_tasks: gce.yml when: cloudprovider_is_gce | bool + +- include_tasks: vsphere.yml + when: cloudprovider_is_vsphere | bool diff --git a/roles/openshift_cloud_provider/tasks/vsphere.yml b/roles/openshift_cloud_provider/tasks/vsphere.yml new file mode 100644 index 000000000..3a33df241 --- /dev/null +++ b/roles/openshift_cloud_provider/tasks/vsphere.yml @@ -0,0 +1,6 @@ +--- +- name: Create cloud config + 
template: + dest: "{{ openshift.common.config_base }}/cloudprovider/vsphere.conf" + src: vsphere.conf.j2 + when: openshift_cloudprovider_vsphere_username is defined and openshift_cloudprovider_vsphere_password is defined and openshift_cloudprovider_vsphere_host is defined and openshift_cloudprovider_vsphere_datacenter is defined and openshift_cloudprovider_vsphere_datastore is defined diff --git a/roles/openshift_cloud_provider/templates/vsphere.conf.j2 b/roles/openshift_cloud_provider/templates/vsphere.conf.j2 new file mode 100644 index 000000000..84e5e371c --- /dev/null +++ b/roles/openshift_cloud_provider/templates/vsphere.conf.j2 @@ -0,0 +1,15 @@ +[Global] +user = "{{ openshift_cloudprovider_vsphere_username }}" +password = "{{ openshift_cloudprovider_vsphere_password }}" +server = "{{ openshift_cloudprovider_vsphere_host }}" +port = 443 +insecure-flag = 1 +datacenter = {{ openshift_cloudprovider_vsphere_datacenter }} +datastore = {{ openshift_cloudprovider_vsphere_datastore }} +{% if openshift_cloudprovider_vsphere_folder is defined %} +working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}/ +{% else %} +working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/ +{% endif %} +[Disk] +scsicontrollertype = pvscsi diff --git a/roles/openshift_cloud_provider/vars/main.yml b/roles/openshift_cloud_provider/vars/main.yml index c9d953f58..e71db80b9 100644 --- a/roles/openshift_cloud_provider/vars/main.yml +++ b/roles/openshift_cloud_provider/vars/main.yml @@ -3,3 +3,4 @@ has_cloudprovider: "{{ openshift_cloudprovider_kind | default(None) != None }}" cloudprovider_is_aws: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'aws' }}" cloudprovider_is_openstack: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'openstack' }}" cloudprovider_is_gce: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'gce' }}" +cloudprovider_is_vsphere: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'vsphere' }}" diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml index 9e635b34f..d716c9505 100644 --- a/roles/openshift_etcd_facts/vars/main.yml +++ b/roles/openshift_etcd_facts/vars/main.yml @@ -1,5 +1,5 @@ --- -etcd_is_containerized: "{{ openshift_is_containerized }}" +etcd_is_containerized: "{{ openshift_is_containerized | bool }}" etcd_is_atomic: "{{ openshift_is_atomic }}" etcd_hostname: "{{ openshift.common.hostname }}" etcd_ip: "{{ openshift.common.ip }}" diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml index a09a598bd..7787da4f0 100644 --- a/roles/openshift_examples/tasks/main.yml +++ b/roles/openshift_examples/tasks/main.yml @@ -13,18 +13,23 @@ # use it either due to changes introduced in Ansible 2.x. - name: Create local temp dir for OpenShift examples copy local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - become: False register: copy_examples_mktemp run_once: True +- name: Chmod local temp dir for OpenShift examples copy + local_action: command chmod 777 "{{ copy_examples_mktemp.stdout }}" + run_once: True + - name: Create tar of OpenShift examples local_action: command tar -C "{{ role_path }}/files/examples/{{ content_version }}/" -cvf "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar" . 
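The mktemp/chmod sequence above replaces the earlier become: False on these local actions; loosening the permissions keeps the temp dir and the tarball usable no matter which local user the remaining tasks run as (this reading of the intent is an assumption, not stated in the diff). A rough Python equivalent of the sequence, with the prefix and filename mirroring the tasks above:

    import os
    import tempfile

    tmpdir = tempfile.mkdtemp(prefix='openshift-ansible-')   # mktemp -d /tmp/openshift-ansible-XXXXXXX
    os.chmod(tmpdir, 0o777)                                  # chmod 777 "<tmpdir>"
    tarball = os.path.join(tmpdir, 'openshift-examples.tar')
    open(tarball, 'wb').close()                              # stand-in for the tar step
    os.chmod(tarball, 0o744)                                 # chmod 744 "<tarball>"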
args: # Disables the following warning: # Consider using unarchive module rather than running tar warn: no - become: False - register: copy_examples_tar + +- name: Chmod local temp dir for OpenShift examples copy + local_action: command chmod 744 "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar" + run_once: True - name: Create the remote OpenShift examples directory file: @@ -38,7 +43,6 @@ dest: "{{ examples_base }}/" - name: Cleanup the OpenShift Examples temp dir - become: False local_action: file dest="{{ copy_examples_mktemp.stdout }}" state=absent # Done copying examples diff --git a/roles/openshift_expand_partition/README.md b/roles/openshift_expand_partition/README.md index c9c7b378c..402c3dc3e 100644 --- a/roles/openshift_expand_partition/README.md +++ b/roles/openshift_expand_partition/README.md @@ -45,7 +45,6 @@ space on /dev/xvda, and the file system will be expanded to fill the new partition space. - hosts: mynodes - become: no remote_user: root gather_facts: no roles: @@ -68,7 +67,6 @@ partition space. * Create an ansible playbook, say `expandvar.yaml`: ``` - hosts: mynodes - become: no remote_user: root gather_facts: no roles: diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml index 980350d14..a223ffba6 100644 --- a/roles/openshift_facts/defaults/main.yml +++ b/roles/openshift_facts/defaults/main.yml @@ -1,5 +1,5 @@ --- -openshift_client_binary: "{{ openshift_is_containerized | ternary('/usr/local/bin/oc', 'oc') }}" +openshift_client_binary: "{{ (openshift_is_containerized | bool) | ternary('/usr/local/bin/oc', 'oc') }}" openshift_cli_image_dict: origin: 'openshift/origin' diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py index dcaf87eca..c83adb26d 100644 --- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py +++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py @@ -175,6 +175,8 @@ def format_failure(failure): play = failure['play'] task = failure['task'] msg = failure['msg'] + if not isinstance(msg, string_types): + msg = str(msg) checks = failure['checks'] fields = ( (u'Hosts', host), diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py index b7b16e0ea..83e551b5d 100644 --- a/roles/openshift_health_checker/openshift_checks/__init__.py +++ b/roles/openshift_health_checker/openshift_checks/__init__.py @@ -95,6 +95,13 @@ class OpenShiftCheck(object): # These are intended to be a sequential record of what the check observed and determined. 
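On the zz_failure_summary.py change above: a failure's msg is not guaranteed to be a plain string (a module can return a dict or list there), and the summary table expects text, hence the isinstance guard. A small sketch of the coercion (the sample dict is illustrative):

    from ansible.module_utils.six import string_types

    def normalize_msg(msg):
        # leave real strings alone; stringify anything else before formatting
        if not isinstance(msg, string_types):
            msg = str(msg)
        return msg

    print(normalize_msg({'failed': True, 'rc': 1}))   # "{'failed': True, 'rc': 1}"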
self.logs = [] + def template_var(self, var_to_template): + """Return a templated variable if self._templar is not None, else + just return the variable as-is""" + if self._templar is not None: + return self._templar.template(var_to_template) + return var_to_template + @abstractproperty def name(self): """The name of this check, usually derived from the class name.""" diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 744b79c1a..7afb8f730 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -64,7 +64,9 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): self.registries["configured"] = regs # for the oreg_url registry there may be credentials specified - components = self.get_var("oreg_url", default="").split('/') + oreg_url = self.get_var("oreg_url", default="") + oreg_url = self.template_var(oreg_url) + components = oreg_url.split('/') self.registries["oreg"] = "" if len(components) < 3 else components[0] # Retrieve and template registry credentials, if provided @@ -72,9 +74,8 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): oreg_auth_user = self.get_var('oreg_auth_user', default='') oreg_auth_password = self.get_var('oreg_auth_password', default='') if oreg_auth_user != '' and oreg_auth_password != '': - if self._templar is not None: - oreg_auth_user = self._templar.template(oreg_auth_user) - oreg_auth_password = self._templar.template(oreg_auth_password) + oreg_auth_user = self.template_var(oreg_auth_user) + oreg_auth_password = self.template_var(oreg_auth_password) self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password)) # record whether we could reach a registry or not (and remember results) @@ -153,6 +154,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # template for images that run on top of OpenShift image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}") image_url = self.get_var("oreg_url", default="") or image_url + image_url = self.template_var(image_url) if 'oo_nodes_to_config' in host_groups: for suffix in NODE_IMAGE_SUFFIXES: required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag)) diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index b6501d288..f40085976 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -69,7 +69,7 @@ r_openshift_hosted_router_os_firewall_allow: [] ############ openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}" -penshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}" +openshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}" openshift_hosted_registry_routecertificates: {} openshift_hosted_registry_routetermination: "passthrough" diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml index fca3485fd..d7cc1e288 100644 --- a/roles/openshift_hosted_templates/meta/main.yml +++ b/roles/openshift_hosted_templates/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_utils +- role: openshift_facts diff --git 
a/roles/openshift_hosted_templates/tasks/main.yml b/roles/openshift_hosted_templates/tasks/main.yml index b2313c297..34d39f3a5 100644 --- a/roles/openshift_hosted_templates/tasks/main.yml +++ b/roles/openshift_hosted_templates/tasks/main.yml @@ -1,20 +1,25 @@ --- - name: Create local temp dir for OpenShift hosted templates copy local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - become: False register: copy_hosted_templates_mktemp run_once: True # AUDIT:changed_when: not set here because this task actually # creates something +- name: Chmod local temp dir for OpenShift hosted templates copy + local_action: command chmod 777 "{{ copy_hosted_templates_mktemp.stdout }}" + run_once: True + - name: Create tar of OpenShift examples local_action: command tar -C "{{ role_path }}/files/{{ content_version }}/{{ hosted_deployment_type }}" -cvf "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" . args: # Disables the following warning: # Consider using unarchive module rather than running tar warn: no - become: False - register: copy_hosted_templates_tar + +- name: Chmod local tar of OpenShift hosted templates + local_action: command chmod 744 "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" + run_once: True - name: Create remote OpenShift hosted templates directory file: @@ -28,7 +33,6 @@ dest: "{{ hosted_base }}/" - name: Cleanup the OpenShift hosted templates temp dir - become: False local_action: file dest="{{ copy_hosted_templates_mktemp.stdout }}" state=absent - name: Modify registry paths if registry_url is not registry.access.redhat.com diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 27cfc17d6..a192bd67e 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -177,6 +177,9 @@ Elasticsearch OPS too, if using an OPS cluster: clients will use to connect to mux, and will be used in the TLS server cert subject. - `openshift_logging_mux_port`: 24284 +- `openshift_logging_mux_external_address`: The IP address that mux will listen + on for connections from *external* clients. Defaults to the address of the + default ipv4 interface as reported by the `ansible_default_ipv4` fact. - `openshift_logging_mux_cpu_request`: 100m - `openshift_logging_mux_memory_limit`: 512Mi - `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py index 302a9b4c9..37ffb0204 100644 --- a/roles/openshift_logging/library/openshift_logging_facts.py +++ b/roles/openshift_logging/library/openshift_logging_facts.py @@ -276,7 +276,7 @@ class OpenshiftLoggingFacts(OCBaseCommand): return for item in role["subjects"]: comp = self.comp(item["name"]) - if comp is not None and namespace == item["namespace"]: + if comp is not None and namespace == item.get("namespace"): self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict()) # this needs to end up nested under the service account...
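The switch to item.get("namespace") in these openshift_logging_facts.py hunks guards against subjects that carry no namespace at all: role bindings can name User or Group subjects, which omit the key, so the old item["namespace"] lookup could raise KeyError. A short sketch with illustrative subjects:

    subjects = [
        {"kind": "User", "name": "system:admin"},   # no "namespace" key
        {"kind": "ServiceAccount", "name": "aggregated-logging-fluentd",
         "namespace": "logging"},
    ]

    namespace = "logging"
    # .get() turns the missing key into a failed comparison instead of a KeyError
    matches = [s["name"] for s in subjects if namespace == s.get("namespace")]
    print(matches)   # ['aggregated-logging-fluentd']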
@@ -288,7 +288,7 @@ class OpenshiftLoggingFacts(OCBaseCommand): return for item in role["subjects"]: comp = self.comp(item["name"]) - if comp is not None and namespace == item["namespace"]: + if comp is not None and namespace == item.get("namespace"): self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict()) # pylint: disable=no-self-use, too-many-return-statements diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index b1ceade88..fbc3e3fd1 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -130,3 +130,14 @@ name: openshift_logging_eventrouter when: not openshift_logging_install_eventrouter | default(false) | bool + +# Update asset config in openshift-web-console namespace +- name: Remove Kibana route information from web console asset config + include_role: + name: openshift_web_console + tasks_from: update_asset_config.yml + vars: + asset_config_edits: + - key: loggingPublicURL + value: "" + when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml index d6ac88dcc..6e3204589 100644 --- a/roles/openshift_logging/tasks/generate_jks.yaml +++ b/roles/openshift_logging/tasks/generate_jks.yaml @@ -24,25 +24,21 @@ local_action: file path="{{local_tmp.stdout}}/elasticsearch.jks" state=touch mode="u=rw,g=r,o=r" when: elasticsearch_jks.stat.exists changed_when: False - become: no - name: Create placeholder for previously created JKS certs to prevent recreating... local_action: file path="{{local_tmp.stdout}}/logging-es.jks" state=touch mode="u=rw,g=r,o=r" when: logging_es_jks.stat.exists changed_when: False - become: no - name: Create placeholder for previously created JKS certs to prevent recreating... local_action: file path="{{local_tmp.stdout}}/system.admin.jks" state=touch mode="u=rw,g=r,o=r" when: system_admin_jks.stat.exists changed_when: False - become: no - name: Create placeholder for previously created JKS certs to prevent recreating... local_action: file path="{{local_tmp.stdout}}/truststore.jks" state=touch mode="u=rw,g=r,o=r" when: truststore_jks.stat.exists changed_when: False - become: no - name: pulling down signing items from host fetch: @@ -61,12 +57,10 @@ vars: - top_dir: "{{local_tmp.stdout}}" when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - become: no - name: Run JKS generation script local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}} check_mode: no - become: no when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - name: Pushing locally generated JKS certs to remote host... 
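A recurring cleanup in this change set: `become: no` / `become: False` is dropped from controller-side (`local_action`) tasks, and the locally created temp directory is instead opened up with an explicit chmod so subsequent unprivileged local tasks can write into it. A minimal sketch of the resulting pattern, assembled from the hunks above and below:

```
- name: Create local temp dir
  local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
  register: local_tmp
  changed_when: False

- name: Chmod local temp dir so unprivileged local tasks can write to it
  local_action: command chmod 777 "{{ local_tmp.stdout }}"
  changed_when: False

# ... stage certs/templates under {{ local_tmp.stdout }} ...

- name: Cleanup local temp dir
  local_action: file path="{{ local_tmp.stdout }}" state=absent
  changed_when: False
```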
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 6aae251c1..67904a9d3 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -75,7 +75,7 @@ elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir') }}" # We don't allow scaling down of ES nodes currently -- import_role: +- include_role: name: openshift_logging_elasticsearch vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -103,7 +103,7 @@ - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0 # Create any new DC that may be required -- import_role: +- include_role: name: openshift_logging_elasticsearch vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -137,7 +137,7 @@ when: - openshift_logging_use_ops | bool -- import_role: +- include_role: name: openshift_logging_elasticsearch vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -180,7 +180,7 @@ - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0 # Create any new DC that may be required -- import_role: +- include_role: name: openshift_logging_elasticsearch vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -314,4 +314,16 @@ openshift_logging_install_eventrouter | default(false) | bool +# TODO: Remove when asset config is removed from master-config.yaml - include_tasks: update_master_config.yaml + +# Update asset config in openshift-web-console namespace +- name: Add Kibana route information to web console asset config + include_role: + name: openshift_web_console + tasks_from: update_asset_config.yml + vars: + asset_config_edits: + - key: loggingPublicURL + value: "https://{{ openshift_logging_kibana_hostname }}" + when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 9949bb95d..60cc399fa 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -17,7 +17,11 @@ register: local_tmp changed_when: False check_mode: no - become: no + +- name: Chmod local temp directory for doing work in + local_action: command chmod 777 "{{ local_tmp.stdout }}" + changed_when: False + check_mode: no - include_tasks: install_logging.yaml when: @@ -31,4 +35,3 @@ local_action: file path="{{local_tmp.stdout}}" state=absent tags: logging_cleanup changed_when: False - become: no diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml index b96b8e29d..c0f42ba97 100644 --- a/roles/openshift_logging/tasks/update_master_config.yaml +++ b/roles/openshift_logging/tasks/update_master_config.yaml @@ -1,4 +1,5 @@ --- +# TODO: Remove when asset config is removed from master-config.yaml - name: Adding Kibana route information to loggingPublicURL modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml index c53a06019..c55e7c5ea 100644 --- a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml +++ 
b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml @@ -15,3 +15,5 @@ - fail: msg: Invalid version specified for Elasticsearch when: es_version not in __allowed_es_versions + +- include_tasks: get_es_version.yml diff --git a/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml new file mode 100644 index 000000000..9182bddb2 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml @@ -0,0 +1,42 @@ +--- +- command: > + oc get pod -l component=es,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name} + register: _cluster_pods + +- name: "Getting ES version for logging-es cluster" + command: > + oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/' + register: _curl_output + when: _cluster_pods.stdout_lines | count > 0 + +- command: > + oc get pod -l component=es-ops,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name} + register: _ops_cluster_pods + +- name: "Getting ES version for logging-es-ops cluster" + command: > + oc exec {{ _ops_cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/' + register: _ops_curl_output + when: _ops_cluster_pods.stdout_lines | count > 0 + +- set_fact: + _es_output: "{{ _curl_output.stdout | from_json }}" + when: _curl_output.stdout is defined + +- set_fact: + _es_ops_output: "{{ _ops_curl_output.stdout | from_json }}" + when: _ops_curl_output.stdout is defined + +- set_fact: + _es_installed_version: "{{ _es_output.version.number }}" + when: + - _es_output is defined + - _es_output.version is defined + - _es_output.version.number is defined + +- set_fact: + _es_ops_installed_version: "{{ _es_ops_output.version.number }}" + when: + - _es_ops_output is defined + - _es_ops_output.version is defined + - _es_ops_output.version.number is defined diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 9bd37f33c..ff5ad1045 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -32,6 +32,18 @@ - include_tasks: determine_version.yaml +- set_fact: + full_restart_cluster: True + when: + - _es_installed_version is defined + - _es_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int + +- set_fact: + full_restart_cluster: True + when: + - _es_ops_installed_version is defined + - _es_ops_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int + # allow passing in a tempdir - name: Create temp directory for doing work in command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX @@ -181,7 +193,9 @@ changed_when: no # create diff between current configmap files and our current files -- import_role: +# NOTE: include_role must be used instead of import_role because +# this task file is looped over from another role. 
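+# (import_role is processed statically when the play is parsed, while
+# include_role is evaluated dynamically at runtime, so only include_role
+# picks up the per-iteration variables of the calling loop.)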
+- include_role: name: openshift_logging tasks_from: patch_configmap_files.yaml vars: diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml index 4a32453e3..d55beec86 100644 --- a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml +++ b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml @@ -1,4 +1,22 @@ --- +# Disable external communication for {{ _cluster_component }} +- name: Disable external communication for logging-{{ _cluster_component }} + oc_service: + state: present + name: "logging-{{ _cluster_component }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + selector: + component: "{{ _cluster_component }}" + provider: openshift + connection: blocked + labels: + logging-infra: 'support' + ports: + - port: 9200 + targetPort: "restapi" + when: + - full_restart_cluster | bool + ## get all pods for the cluster - command: > oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name} @@ -11,17 +29,38 @@ changed_when: "'\"acknowledged\":true' in _disable_output.stdout" when: _cluster_pods.stdout_lines | count > 0 +# Flush ES +- name: "Flushing for logging-{{ _cluster_component }} cluster" + command: > + oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_flush/synced' + register: _flush_output + changed_when: "'\"acknowledged\":true' in _flush_output.stdout" + when: + - _cluster_pods.stdout_lines | count > 0 + - full_restart_cluster | bool + - command: > oc get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name} register: _cluster_dcs +## restart all dcs for full restart +- name: "Restart ES node {{ _es_node }}" + include_tasks: restart_es_node.yml + with_items: "{{ _cluster_dcs.stdout.split(' ') }}" + loop_control: + loop_var: _es_node + when: + - full_restart_cluster | bool + ## restart the node if its dc is in the list of nodes to restart 
- name: "Restart ES node {{ _es_node }}" include_tasks: restart_es_node.yml with_items: "{{ _restart_logging_nodes }}" loop_control: loop_var: _es_node - when: _es_node in _cluster_dcs.stdout + when: + - not full_restart_cluster | bool + - _es_node in _cluster_dcs.stdout ## we may need a new first pod to run against -- fetch them all again - command: > @@ -33,3 +72,20 @@ oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }' register: _enable_output changed_when: "'\"acknowledged\":true' in _enable_output.stdout" + +# Reenable external communication for {{ _cluster_component }} +- name: Reenable external communication for logging-{{ _cluster_component }} + oc_service: + state: present + name: "logging-{{ _cluster_component }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + selector: + component: "{{ _cluster_component }}" + provider: openshift + labels: + logging-infra: 'support' + ports: + - port: 9200 + targetPort: "restapi" + when: + - full_restart_cluster | bool diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml index b07b232ce..6d0df40c8 100644 --- a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml +++ b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml @@ -14,6 +14,8 @@ - _dc_output.results.results[0].status is defined - _dc_output.results.results[0].status.readyReplicas is defined - _dc_output.results.results[0].status.readyReplicas > 0 + - _dc_output.results.results[0].status.updatedReplicas is defined + - _dc_output.results.results[0].status.updatedReplicas > 0 retries: 60 delay: 30 diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml index 0e56a6eac..ef259cd3a 100644 --- a/roles/openshift_logging_elasticsearch/vars/main.yml +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -4,6 +4,7 @@ __allowed_es_versions: ["3_5", "3_6", "3_7", "3_8"] __allowed_es_types: ["data-master", "data-client", "master", "client"] __es_log_appenders: ['file', 'console'] __kibana_index_modes: ["unique", "shared_ops"] +__es_version: "2.4.4" __es_local_curl: "curl -s --cacert /etc/elasticsearch/secret/admin-ca --cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key" @@ -14,3 +15,4 @@ es_min_masters_default: "{{ (openshift_logging_elasticsearch_replica_count | int es_min_masters: "{{ (openshift_logging_elasticsearch_replica_count == 1) | ternary(1, es_min_masters_default) }}" es_recover_after_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}" es_recover_expected_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}" +full_restart_cluster: False diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml index 1cef6c25e..2721438f0 100644 --- a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml +++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml @@ -8,4 +8,3 @@ # wait half a second between labels - local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }} - become: no diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index db6f23126..dbf4549c4 100644 --- 
a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -30,6 +30,7 @@ openshift_logging_mux_allow_external: False openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}" openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}" openshift_logging_mux_port: 24284 +openshift_logging_mux_external_address: "{{ ansible_default_ipv4.address }}" # the namespace to use for undefined projects should come first, followed by any # additional namespaces to create by default - users will typically not need to set this openshift_logging_mux_default_namespaces: ["mux-undefined"] diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 34bdb891c..7eba3cda4 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -148,7 +148,7 @@ port: "{{ openshift_logging_mux_port }}" targetPort: "mux-forward" external_ips: - - "{{ ansible_eth0.ipv4.address }}" + - "{{ openshift_logging_mux_external_address }}" when: openshift_logging_mux_allow_external | bool - name: Set logging-mux service for internal communication diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml index f72710832..7870f43e2 100644 --- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml +++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml @@ -12,11 +12,11 @@ package: name={{ master_pkgs | join(',') }} state=present vars: master_pkgs: - - "{{ openshift_service_type }}{{ openshift_pkg_version }}" - - "{{ openshift_service_type }}-master{{ openshift_pkg_version }}" - - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}" - - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}{{ openshift_pkg_version | default('') }}" + - "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') }}" + - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}" + - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version | default('') }}" + - "{{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}" + - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}" register: result until: result is succeeded diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 00cabe574..649a4bc5d 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -120,7 +120,11 @@ register: g_master_certs_mktemp changed_when: False when: master_certs_missing | bool - become: no + +- name: Chmod local temp directory for syncing certs + local_action: command chmod 777 "{{ g_master_certs_mktemp.stdout }}" + changed_when: False + when: master_certs_missing | bool - name: Create a tarball of the master certs command: > @@ -157,7 +161,6 @@ local_action: file path="{{ g_master_certs_mktemp.stdout }}" state=absent changed_when: False when: master_certs_missing | bool - become: no - name: Lookup default group for ansible_ssh_user command: "/usr/bin/id -g {{ ansible_ssh_user | quote }}" diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml 
b/roles/openshift_metrics/tasks/install_metrics.yaml index 106909941..0866fe0d2 100644 --- a/roles/openshift_metrics/tasks/install_metrics.yaml +++ b/roles/openshift_metrics/tasks/install_metrics.yaml @@ -67,8 +67,20 @@ with_items: "{{ hawkular_agent_object_defs.results }}" when: openshift_metrics_install_hawkular_agent | bool +# TODO: Remove when asset config is removed from master-config.yaml - include_tasks: update_master_config.yaml +# Update asset config in openshift-web-console namespace +- name: Add metrics route information to web console asset config + include_role: + name: openshift_web_console + tasks_from: update_asset_config.yml + vars: + asset_config_edits: + - key: metricsPublicURL + value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics" + when: openshift_web_console_install | default(true) | bool + - command: > {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml index 0ab0eec4b..610c7b4e5 100644 --- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml +++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml @@ -18,3 +18,14 @@ clusterrolebinding/heapster-cluster-reader clusterrolebinding/hawkular-metrics changed_when: delete_metrics.stdout != 'No resources found' + +# Update asset config in openshift-web-console namespace +- name: Remove metrics route information from web console asset config + include_role: + name: openshift_web_console + tasks_from: update_asset_config.yml + vars: + asset_config_edits: + - key: metricsPublicURL + value: "" + when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml index 5059d8d94..6567fcb4f 100644 --- a/roles/openshift_metrics/tasks/update_master_config.yaml +++ b/roles/openshift_metrics/tasks/update_master_config.yaml @@ -1,4 +1,5 @@ --- +# TODO: Remove when asset config is removed from master-config.yaml - name: Adding metrics route information to metricsPublicURL modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" diff --git a/roles/openshift_named_certificates/tasks/main.yml b/roles/openshift_named_certificates/tasks/main.yml index ad5472445..021fa8385 100644 --- a/roles/openshift_named_certificates/tasks/main.yml +++ b/roles/openshift_named_certificates/tasks/main.yml @@ -3,7 +3,6 @@ parsed_named_certificates: "{{ named_certificates | lib_utils_oo_parse_named_certificates(named_certs_dir, internal_hostnames) }}" when: named_certificates | length > 0 delegate_to: localhost - become: no run_once: true - openshift_facts: diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 27fe2f5c0..c1fab4382 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -169,7 +169,7 @@ oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker" oreg_auth_credentials_replace: False l_bind_docker_reg_auth: False openshift_use_crio: False -openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False)) }}" +openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False) | bool) }}" openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | 
default(False) | bool) else 'docker' }}" diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index 86a2ca16f..59e743dce 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -13,6 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift -- role: openshift_cloud_provider - when: not (openshift_node_upgrade_in_progress | default(False)) - role: lib_utils diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml index 55738d759..a4a9c1237 100644 --- a/roles/openshift_node/tasks/install.yml +++ b/roles/openshift_node/tasks/install.yml @@ -1,28 +1,18 @@ --- -- when: not openshift_is_containerized | bool - block: - - name: Install Node package - package: - name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}" - state: present - register: result - until: result is succeeded - - - name: Install sdn-ovs package - package: - name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}" - state: present - when: - - openshift_node_use_openshift_sdn | bool - register: result - until: result is succeeded - - - name: Install conntrack-tools package - package: - name: "conntrack-tools" - state: present - register: result - until: result is succeeded +- name: Install Node package, sdn-ovs, conntrack packages + package: + name: "{{ item.name }}" + state: present + register: result + until: result is succeeded + with_items: + - name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}" + - name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}" + install: "{{ openshift_node_use_openshift_sdn | bool }}" + - name: "conntrack-tools" + when: + - not openshift_is_containerized | bool + - item['install'] | default(True) | bool - when: - openshift_is_containerized | bool diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 103572291..754ecacaf 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -4,7 +4,7 @@ when: - (not ansible_selinux or ansible_selinux.status != 'enabled') - openshift_deployment_type == 'openshift-enterprise' - - not openshift_use_crio + - not openshift_use_crio | bool - include_tasks: dnsmasq_install.yml - include_tasks: dnsmasq.yml @@ -50,7 +50,7 @@ name: cri-o enabled: yes state: restarted - when: openshift_use_crio + when: openshift_use_crio | bool register: task_result failed_when: - task_result is failed diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml index 30ef9ef44..d7dce6969 100644 --- a/roles/openshift_node/tasks/openvswitch_system_container.yml +++ b/roles/openshift_node/tasks/openvswitch_system_container.yml @@ -1,11 +1,11 @@ --- - set_fact: l_service_name: "cri-o" - when: openshift_use_crio + when: openshift_use_crio | bool - set_fact: l_service_name: "{{ openshift_docker_service_name }}" - when: not openshift_use_crio + when: not openshift_use_crio | bool - name: Pre-pull OpenVSwitch system container image command: > diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml index 
50044eb3e..721656117 100644 --- a/roles/openshift_node/tasks/upgrade/config_changes.yml +++ b/roles/openshift_node/tasks/upgrade/config_changes.yml @@ -1,7 +1,7 @@ --- - name: Update systemd units include_tasks: ../systemd_units.yml - when: openshift_is_containerized + when: openshift_is_containerized | bool - name: Update oreg value yedit: @@ -60,6 +60,7 @@ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service" src: "node.service.j2" register: l_node_unit + when: not openshift_is_containerized | bool - name: Reset selinux context command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes @@ -74,4 +75,3 @@ # require a service to be part of the call. - name: Reload systemd units command: systemctl daemon-reload - when: l_node_unit is changed diff --git a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml index 0a14e5174..e5477f389 100644 --- a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml +++ b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml @@ -10,6 +10,6 @@ docker pull {{ osn_ovs_image }}:{{ openshift_image_tag }} register: pull_result changed_when: "'Downloaded newer image' in pull_result.stdout" - when: openshift_use_openshift_sdn | bool + when: openshift_node_use_openshift_sdn | bool - include_tasks: ../container_images.yml diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml index 91a358095..d4b47bb9e 100644 --- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml +++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml @@ -12,7 +12,7 @@ until: result is succeeded vars: openshift_node_upgrade_rpm_list: - - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}" - "PyYAML" - "dnsmasq" diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml index c9094e05a..ef5d8d662 100644 --- a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml +++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml @@ -14,6 +14,6 @@ until: result is succeeded vars: openshift_node_upgrade_rpm_list: - - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}" - "PyYAML" - "openvswitch" diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2 index da751bd65..777f4a449 100644 --- a/roles/openshift_node/templates/node.service.j2 +++ b/roles/openshift_node/templates/node.service.j2 @@ -8,7 +8,7 @@ Wants={{ openshift_docker_service_name }}.service Documentation=https://github.com/openshift/origin Requires=dnsmasq.service After=dnsmasq.service -{% if openshift_use_crio %}Wants=cri-o.service{% endif %} +{% if openshift_use_crio | bool %}Wants=cri-o.service{% endif %} [Service] Type=notify diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index f091263f5..5f2a94ea2 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -14,7 +14,7 @@ imageConfig: latest: {{ openshift_node_image_config_latest }} kind: NodeConfig kubeletArguments: {{ l2_openshift_node_kubelet_args | default(None) | lib_utils_to_padded_yaml(level=1) }} -{% if openshift_use_crio %} +{% if openshift_use_crio | bool 
%} container-runtime: - remote container-runtime-endpoint: diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service index 873744f34..9fe779057 100644 --- a/roles/openshift_node/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node/templates/openshift.docker.node.dep.service @@ -3,7 +3,7 @@ Requires={{ openshift_docker_service_name }}.service After={{ openshift_docker_service_name }}.service PartOf={{ openshift_service_type }}-node.service Before={{ openshift_service_type }}-node.service -{% if openshift_use_crio %}Wants=cri-o.service{% endif %} +{% if openshift_use_crio | bool %}Wants=cri-o.service{% endif %} [Service] ExecStart=/bin/bash -c 'if [[ -f /usr/bin/docker-current ]]; \ diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml index e95e38fdf..5f73f3bdc 100644 --- a/roles/openshift_node_certificates/tasks/main.yml +++ b/roles/openshift_node_certificates/tasks/main.yml @@ -94,13 +94,6 @@ delegate_to: "{{ openshift_ca_host }}" run_once: true -- name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - register: node_cert_mktemp - changed_when: False - when: node_certs_missing | bool - become: no - - name: Create a tarball of the node config directories command: > tar -czvf {{ openshift_node_generated_config_dir }}.tgz @@ -117,8 +110,7 @@ - name: Retrieve the node config tarballs from the master fetch: src: "{{ openshift_node_generated_config_dir }}.tgz" - dest: "{{ node_cert_mktemp.stdout }}/" - flat: yes + dest: "/tmp" fail_on_missing: yes validate_checksum: yes when: node_certs_missing | bool @@ -132,15 +124,14 @@ - name: Unarchive the tarball on the node unarchive: - src: "{{ node_cert_mktemp.stdout }}/{{ openshift_node_cert_subdir }}.tgz" + src: "/tmp/{{ inventory_hostname }}/{{ openshift_node_generated_config_dir }}.tgz" dest: "{{ openshift_node_cert_dir }}" when: node_certs_missing | bool - name: Delete local temp directory - local_action: file path="{{ node_cert_mktemp.stdout }}" state=absent + local_action: file path="/tmp/{{ inventory_hostname }}" state=absent changed_when: False when: node_certs_missing | bool - become: no - name: Copy OpenShift CA to system CA trust copy: diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md index cc674d3fd..a11219f6d 100644 --- a/roles/openshift_storage_nfs_lvm/README.md +++ b/roles/openshift_storage_nfs_lvm/README.md @@ -1,7 +1,7 @@ # openshift_storage_nfs_lvm This role is useful to create and export nfs disks for openshift persistent volumes. -It does so by creating lvm partitions on an already setup pv/vg, creating xfs +It does so by creating lvm partitions on an already setup pv/vg, creating xfs filesystem on each partition, mounting the partitions, exporting the mounts via NFS and creating a json file for each mount that an openshift master can use to create persistent volumes. @@ -20,7 +20,7 @@ create persistent volumes. osnl_nfs_export_options: "*(rw,sync,all_squash)" # Directory, where the created partitions should be mounted. They will be -# mounted as <osnl_mount_dir>/<lvm volume name> +# mounted as <osnl_mount_dir>/<lvm volume name> osnl_mount_dir: /exports/openshift # Volume Group to use. 
@@ -64,11 +64,10 @@ None ## Example Playbook With this playbook, 2 5Gig lvm partitions are created, named stg5g0003 and stg5g0004 -Both of them are mounted into `/exports/openshift` directory. Both directories are +Both of them are mounted into `/exports/openshift` directory. Both directories are exported via NFS. json files are created in /root. - hosts: nfsservers - become: no remote_user: root gather_facts: no roles: @@ -94,7 +93,6 @@ exported via NFS. json files are created in /root. * Create an ansible playbook, say `setupnfs.yaml`: ``` - hosts: nfsservers - become: no remote_user: root gather_facts: no roles: diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml index 354699637..e2e6538c9 100644 --- a/roles/openshift_version/defaults/main.yml +++ b/roles/openshift_version/defaults/main.yml @@ -8,3 +8,5 @@ openshift_service_type_dict: openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}" openshift_use_crio_only: False + +l_first_master_version_task_file: "{{ openshift_is_containerized | ternary('first_master_containerized_version.yml', 'first_master_rpm_version.yml') }}" diff --git a/roles/openshift_version/tasks/check_available_rpms.yml b/roles/openshift_version/tasks/check_available_rpms.yml new file mode 100644 index 000000000..bdbc63d27 --- /dev/null +++ b/roles/openshift_version/tasks/check_available_rpms.yml @@ -0,0 +1,10 @@ +--- +- name: Get available {{ openshift_service_type}} version + repoquery: + name: "{{ openshift_service_type}}" + ignore_excluders: true + register: rpm_results + +- fail: + msg: "Package {{ openshift_service_type}} not found" + when: not rpm_results.results.package_found diff --git a/roles/openshift_version/tasks/first_master.yml b/roles/openshift_version/tasks/first_master.yml new file mode 100644 index 000000000..374725086 --- /dev/null +++ b/roles/openshift_version/tasks/first_master.yml @@ -0,0 +1,30 @@ +--- +# Determine the openshift_version to configure if none has been specified or set previously. + +# Protect the installed version by default unless explicitly told not to, or given an +# openshift_version already. +- name: Use openshift.common.version fact as version to configure if already installed + set_fact: + openshift_version: "{{ openshift.common.version }}" + when: + - openshift.common.version is defined + - openshift_version is not defined or openshift_version == "" + - openshift_protect_installed_version | bool + +- include_tasks: "{{ l_first_master_version_task_file }}" + +- block: + - debug: + msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}" + - set_fact: + openshift_pkg_version: -{{ openshift_version }} + when: + - openshift_pkg_version is not defined + - openshift_upgrade_target is not defined + +- block: + - debug: + msg: "openshift_image_tag was not defined. 
Falling back to v{{ openshift_version }}" + - set_fact: + openshift_image_tag: v{{ openshift_version }} + when: openshift_image_tag is not defined diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/first_master_containerized_version.yml index e02a75eab..e02a75eab 100644 --- a/roles/openshift_version/tasks/set_version_containerized.yml +++ b/roles/openshift_version/tasks/first_master_containerized_version.yml diff --git a/roles/openshift_version/tasks/first_master_rpm_version.yml b/roles/openshift_version/tasks/first_master_rpm_version.yml new file mode 100644 index 000000000..264baca65 --- /dev/null +++ b/roles/openshift_version/tasks/first_master_rpm_version.yml @@ -0,0 +1,16 @@ +--- +- name: Set rpm version to configure if openshift_pkg_version specified + set_fact: + # Expects a leading "-" in inventory, strip it off here, and remove trailing release, + openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}" + when: + - openshift_pkg_version is defined + - openshift_version is not defined + +# These tasks should only be run against masters and nodes +- name: Set openshift_version for rpm installation + include_tasks: check_available_rpms.yml + +- set_fact: + openshift_version: "{{ rpm_results.results.versions.available_versions.0 }}" + when: openshift_version is not defined diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index 97e58ffac..b42794858 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -1,206 +1,2 @@ --- -# Determine the openshift_version to configure if none has been specified or set previously. - -# Block attempts to install origin without specifying some kind of version information. -# This is because the latest tags for origin are usually alpha builds, which should not -# be used by default. Users must indicate what they want. -- name: Abort when we cannot safely guess what Origin image version the user wanted - fail: - msg: |- - To install a containerized Origin release, you must set openshift_release or - openshift_image_tag in your inventory to specify which version of the OpenShift - component images to use. You may want the latest (usually alpha) releases or - a more stable release. (Suggestion: add openshift_release="x.y" to inventory.) - when: - - openshift_is_containerized | bool - - openshift.common.deployment_type == 'origin' - - openshift_release is not defined - - openshift_image_tag is not defined - -# Normalize some values that we need in a certain format that might be confusing: -- set_fact: - openshift_release: "{{ openshift_release[1:] }}" - when: - - openshift_release is defined - - openshift_release[0] == 'v' - -- set_fact: - openshift_release: "{{ openshift_release | string }}" - when: - - openshift_release is defined - -# Verify that the image tag is in a valid format -- when: - - openshift_image_tag is defined - - openshift_image_tag != "latest" - block: - - # Verifies that when the deployment type is origin the version: - # - starts with a v - # - Has 3 integers seperated by dots - # It also allows for optional trailing data which: - # - must start with a dash - # - may contain numbers, letters, dashes and dots. - - name: (Origin) Verify openshift_image_tag is valid - when: openshift.common.deployment_type == 'origin' - assert: - that: - - "{{ openshift_image_tag is match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}" - msg: |- - openshift_image_tag must be in the format v#.#.#[-optional.#]. 
Examples: v1.2.3, v3.5.1-alpha.1 - You specified openshift_image_tag={{ openshift_image_tag }} - - # Verifies that when the deployment type is openshift-enterprise the version: - # - starts with a v - # - Has at least 2 integers seperated by dots - # It also allows for optional trailing data which: - # - must start with a dash - # - may contain numbers - # - may containe dots (https://github.com/openshift/openshift-ansible/issues/5192) - # - - name: (Enterprise) Verify openshift_image_tag is valid - when: openshift.common.deployment_type == 'openshift-enterprise' - assert: - that: - - "{{ openshift_image_tag is match('(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)') }}" - msg: |- - openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, - v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6 - You specified openshift_image_tag={{ openshift_image_tag }} - -# Make sure we copy this to a fact if given a var: -- set_fact: - openshift_version: "{{ openshift_version | string }}" - when: openshift_version is defined - -# Protect the installed version by default unless explicitly told not to, or given an -# openshift_version already. -- name: Use openshift.common.version fact as version to configure if already installed - set_fact: - openshift_version: "{{ openshift.common.version }}" - when: - - openshift.common.version is defined - - openshift_version is not defined or openshift_version == "" - - openshift_protect_installed_version | bool - -# The rest of these tasks should only execute on -# masters and nodes as we can verify they have subscriptions -- when: - - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config'] - block: - - name: Set openshift_version for rpm installation - include_tasks: set_version_rpm.yml - when: not openshift_is_containerized | bool - - - name: Set openshift_version for containerized installation - include_tasks: set_version_containerized.yml - when: openshift_is_containerized | bool - - - block: - - name: Get available {{ openshift_service_type}} version - repoquery: - name: "{{ openshift_service_type}}" - ignore_excluders: true - register: rpm_results - - fail: - msg: "Package {{ openshift_service_type}} not found" - when: not rpm_results.results.package_found - - set_fact: - openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}" - - name: Fail if rpm version and docker image version are different - fail: - msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}" - # Both versions have the same string representation - when: - - openshift_rpm_version != openshift_version - # if openshift_pkg_version or openshift_image_tag is defined, user gives a permission the rpm and docker image versions can differ - - openshift_pkg_version is not defined - - openshift_image_tag is not defined - when: - - openshift_is_containerized | bool - - not openshift_is_atomic | bool - - # Warn if the user has provided an openshift_image_tag but is not doing a containerized install - # NOTE: This will need to be modified/removed for future container + rpm installations work. - - name: Warn if openshift_image_tag is defined when not doing a containerized install - debug: - msg: > - openshift_image_tag is used for containerized installs. If you are trying to - specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node. 
- when: - - not openshift_is_containerized | bool - - openshift_image_tag is defined - - # At this point we know openshift_version is set appropriately. Now we set - # openshift_image_tag and openshift_pkg_version, so all roles can always assume - # each of this variables *will* be set correctly and can use them per their - # intended purpose. - - - block: - - debug: - msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}" - - - set_fact: - openshift_image_tag: v{{ openshift_version }} - - when: openshift_image_tag is not defined - - - block: - - debug: - msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}" - - - set_fact: - openshift_pkg_version: -{{ openshift_version }} - - when: - - openshift_pkg_version is not defined - - openshift_upgrade_target is not defined - - - fail: - msg: openshift_version role was unable to set openshift_version - name: Abort if openshift_version was not set - when: openshift_version is not defined - - - fail: - msg: openshift_version role was unable to set openshift_image_tag - name: Abort if openshift_image_tag was not set - when: openshift_image_tag is not defined - - - fail: - msg: openshift_version role was unable to set openshift_pkg_version - name: Abort if openshift_pkg_version was not set - when: - - openshift_pkg_version is not defined - - openshift_upgrade_target is not defined - - - - fail: - msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories." - name: Abort if openshift_pkg_version was not set - when: - - not openshift_is_containerized | bool - - openshift_version == '0.0' - - # We can't map an openshift_release to full rpm version like we can with containers; make sure - # the rpm version we looked up matches the release requested and error out if not. - - name: For an RPM install, abort when the release requested does not match the available version. - when: - - not openshift_is_containerized | bool - - openshift_release is defined - assert: - that: - - openshift_version.startswith(openshift_release) | bool - msg: |- - You requested openshift_release {{ openshift_release }}, which is not matched by - the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ openshift_version }} - on host {{ inventory_hostname }}. - We will only install the latest RPMs, so please ensure you are getting the release - you expect. You may need to adjust your Ansible inventory, modify the repositories - available on the host, or run the appropriate OpenShift upgrade playbook. - - # The end result of these three variables is quite important so make sure they are displayed and logged: - - debug: var=openshift_release - - - debug: var=openshift_image_tag - - - debug: var=openshift_pkg_version +# This role is meant to be used with include_role. 
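With main.yml reduced to a stub, callers now pull in the specific entry points via `include_role` with `tasks_from`. A minimal sketch of a consuming play, assuming illustrative play and group names (the `first_master.yml` and `masters_and_nodes.yml` task files are the ones added in this change):

```
- name: Determine openshift_version on the first master
  hosts: oo_first_master
  tasks:
  - include_role:
      name: openshift_version
      tasks_from: first_master.yml

- name: Verify the requested version is available on masters and nodes
  hosts: oo_masters_to_config:oo_nodes_to_config
  tasks:
  - include_role:
      name: openshift_version
      tasks_from: masters_and_nodes.yml
```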
diff --git a/roles/openshift_version/tasks/masters_and_nodes.yml b/roles/openshift_version/tasks/masters_and_nodes.yml new file mode 100644 index 000000000..fbeb22d8b --- /dev/null +++ b/roles/openshift_version/tasks/masters_and_nodes.yml @@ -0,0 +1,39 @@ +--- +# These tasks should only be run against masters and nodes + +- block: + - name: Check openshift_version for rpm installation + include_tasks: check_available_rpms.yml + - name: Fail if rpm version and docker image version are different + fail: + msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}" + # Both versions have the same string representation + when: rpm_results.results.versions.available_versions.0 != openshift_version + # block when + when: not openshift_is_atomic | bool + +# We can't map an openshift_release to full rpm version like we can with containers; make sure +# the rpm version we looked up matches the release requested and error out if not. +- name: For an RPM install, abort when the release requested does not match the available version. + when: + - not openshift_is_containerized | bool + - openshift_release is defined + assert: + that: + - l_rpm_version.startswith(openshift_release) | bool + msg: |- + You requested openshift_release {{ openshift_release }}, which is not matched by + the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ l_rpm_version }} + on host {{ inventory_hostname }}. + We will only install the latest RPMs, so please ensure you are getting the release + you expect. You may need to adjust your Ansible inventory, modify the repositories + available on the host, or run the appropriate OpenShift upgrade playbook. + vars: + l_rpm_version: "{{ rpm_results.results.versions.available_versions.0 }}" + +# The end result of these three variables is quite important so make sure they are displayed and logged: +- debug: var=openshift_release + +- debug: var=openshift_image_tag + +- debug: var=openshift_pkg_version diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml deleted file mode 100644 index c7ca5ceae..000000000 --- a/roles/openshift_version/tasks/set_version_rpm.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: Set rpm version to configure if openshift_pkg_version specified - set_fact: - # Expects a leading "-" in inventory, strip it off here, and remove trailing release, - openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}" - when: - - openshift_pkg_version is defined - - openshift_version is not defined - -- block: - - name: Get available {{ openshift_service_type}} version - repoquery: - name: "{{ openshift_service_type}}" - ignore_excluders: true - register: rpm_results - - - fail: - msg: "Package {{ openshift_service_type}} not found" - when: not rpm_results.results.package_found - - - set_fact: - openshift_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}" - when: - - openshift_version is not defined diff --git a/roles/openshift_web_console/defaults/main.yml b/roles/openshift_web_console/defaults/main.yml new file mode 100644 index 000000000..4f395398c --- /dev/null +++ b/roles/openshift_web_console/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# TODO: This is temporary and will be updated to use taints and tolerations so that the console runs on the masters +openshift_web_console_nodeselector: {"region":"infra"} diff --git a/roles/openshift_web_console/meta/main.yaml b/roles/openshift_web_console/meta/main.yaml 
new file mode 100644 index 000000000..033c1e3a3 --- /dev/null +++ b/roles/openshift_web_console/meta/main.yaml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: OpenShift Development <dev@lists.openshift.redhat.com> + description: Deploy OpenShift web console + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 2.4 + platforms: + - name: EL + versions: + - 7 + - name: Fedora + versions: + - all + categories: + - openshift +dependencies: +- role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml new file mode 100644 index 000000000..8ee95e36b --- /dev/null +++ b/roles/openshift_web_console/tasks/install.yml @@ -0,0 +1,77 @@ +--- +# Fact setting +- name: Set default image variables based on deployment type + include_vars: "{{ item }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + +- name: Set openshift_web_console facts + set_fact: + openshift_web_console_prefix: "{{ openshift_web_console_prefix | default(__openshift_web_console_prefix) }}" + openshift_web_console_version: "{{ openshift_web_console_version | default(__openshift_web_console_version) }}" + openshift_web_console_image_name: "{{ openshift_web_console_image_name | default(__openshift_web_console_image_name) }}" + # Default the replica count to the number of masters. + openshift_web_console_replica_count: "{{ openshift_web_console_replica_count | default(groups.oo_masters_to_config | length) }}" + +- name: Ensure openshift-web-console project exists + oc_project: + name: openshift-web-console + state: present + +- name: Make temp directory for asset config files + command: mktemp -d /tmp/console-ansible-XXXXXX + register: mktemp + changed_when: False + +- name: Copy asset config template to temp directory + copy: + src: "{{ __console_files_location }}/{{ item }}" + dest: "{{ mktemp.stdout }}/{{ item }}" + with_items: + - "{{ __console_template_file }}" + - "{{ __console_config_file }}" + +- name: Update asset config properties + yedit: + src: "{{ mktemp.stdout }}/{{ __console_config_file }}" + edits: + - key: logoutURL + value: "{{ openshift.master.logout_url | default('') }}" + - key: publicURL + # Must have a trailing slash + value: "{{ openshift.master.public_console_url }}/" + - key: masterPublicURL + value: "{{ openshift.master.public_api_url }}" + +- slurp: + src: "{{ mktemp.stdout }}/{{ __console_config_file }}" + register: config + +- name: Apply template file + shell: > + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_template_file }}" + --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}" + --param IMAGE="{{ openshift_web_console_prefix }}{{ openshift_web_console_image_name }}:{{ openshift_web_console_version }}" + --param NODE_SELECTOR={{ openshift_web_console_nodeselector | to_json | quote }} + --param REPLICA_COUNT="{{ openshift_web_console_replica_count }}" + | {{ openshift_client_binary }} apply -f - + +- name: Verify that the web console is running + command: > + curl -k https://webconsole.openshift-web-console.svc/healthz + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: console_health + until: console_health.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false + +- name: Remove temp directory + file: + state: absent + name: "{{ mktemp.stdout }}" + changed_when: False diff --git 
a/roles/openshift_web_console/tasks/main.yml b/roles/openshift_web_console/tasks/main.yml new file mode 100644 index 000000000..937bebf25 --- /dev/null +++ b/roles/openshift_web_console/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# do any asserts here + +- include_tasks: install.yml + when: openshift_web_console_install | default(true) | bool + +- include_tasks: remove.yml + when: not openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_web_console/tasks/remove.yml b/roles/openshift_web_console/tasks/remove.yml new file mode 100644 index 000000000..f0712a993 --- /dev/null +++ b/roles/openshift_web_console/tasks/remove.yml @@ -0,0 +1,5 @@ +--- +- name: Remove openshift-web-console project + oc_project: + name: openshift-web-console + state: absent diff --git a/roles/openshift_web_console/tasks/update_asset_config.yml b/roles/openshift_web_console/tasks/update_asset_config.yml new file mode 100644 index 000000000..0992b32e1 --- /dev/null +++ b/roles/openshift_web_console/tasks/update_asset_config.yml @@ -0,0 +1,68 @@ +--- +# This task updates asset config values in the webconsole-config config map in +# the openshift-web-console namespace. The values to set are passed in the +# variable `asset_config_edits`, which is an array of objects with `key` and +# `value` properties in the same format as `yedit` module `edits`. Only +# properties passed are updated. +# +# Note that this triggers a redeployment of the console and a brief downtime +# since it uses a `Recreate` strategy. +# +# Example usage: +# +# - include_role: +# name: openshift_web_console +# tasks_from: update_asset_config.yml +# vars: +# asset_config_edits: +# - key: loggingPublicURL +# value: "https://{{ openshift_logging_kibana_hostname }}" +# when: openshift_web_console_install | default(true) | bool + +- name: Read web console config map + oc_configmap: + namespace: openshift-web-console + name: webconsole-config + state: list + register: webconsole_config + +- name: Make temp directory + command: mktemp -d /tmp/console-ansible-XXXXXX + register: mktemp + changed_when: False + +- name: Copy asset config to temp file + copy: + content: "{{webconsole_config.results.results[0].data['webconsole-config.yaml']}}" + dest: "{{ mktemp.stdout }}/webconsole-config.yaml" + +- name: Change asset config properties + yedit: + src: "{{ mktemp.stdout }}/webconsole-config.yaml" + edits: "{{asset_config_edits}}" + +- name: Update web console config map + oc_configmap: + namespace: openshift-web-console + name: webconsole-config + state: present + from_file: + webconsole-config.yaml: "{{ mktemp.stdout }}/webconsole-config.yaml" + +- name: Remove temp directory + file: + state: absent + name: "{{ mktemp.stdout }}" + changed_when: False + +# There's currently no command to trigger a rollout for a k8s deployment +# without changing the pod spec. Add an annotation to force a rollout after +# the config map has been edited. 
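+# (Later kubectl/oc releases add a dedicated `rollout restart` command for
+# this; bumping a pod-template annotation is the portable workaround here.)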
+- name: Rollout updated web console deployment + oc_edit: + kind: deployments + name: webconsole + namespace: openshift-web-console + separator: '#' + content: + spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}" diff --git a/roles/openshift_web_console/vars/default_images.yml b/roles/openshift_web_console/vars/default_images.yml new file mode 100644 index 000000000..7adb8a0d0 --- /dev/null +++ b/roles/openshift_web_console/vars/default_images.yml @@ -0,0 +1,4 @@ +--- +__openshift_web_console_prefix: "docker.io/openshift/" +__openshift_web_console_version: "latest" +__openshift_web_console_image_name: "origin-web-console" diff --git a/roles/openshift_web_console/vars/main.yml b/roles/openshift_web_console/vars/main.yml new file mode 100644 index 000000000..80bc56a17 --- /dev/null +++ b/roles/openshift_web_console/vars/main.yml @@ -0,0 +1,5 @@ +--- +__console_files_location: "../../../files/origin-components/" + +__console_template_file: "console-template.yaml" +__console_config_file: "console-config.yaml" diff --git a/roles/openshift_web_console/vars/openshift-enterprise.yml b/roles/openshift_web_console/vars/openshift-enterprise.yml new file mode 100644 index 000000000..721ac1d27 --- /dev/null +++ b/roles/openshift_web_console/vars/openshift-enterprise.yml @@ -0,0 +1,4 @@ +--- +__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/" +__openshift_web_console_version: "v3.9" +__openshift_web_console_image_name: "ose-web-console" diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml index 765263db5..604e94602 100644 --- a/roles/template_service_broker/tasks/install.yml +++ b/roles/template_service_broker/tasks/install.yml @@ -21,7 +21,6 @@ - command: mktemp -d /tmp/tsb-ansible-XXXXXX register: mktemp changed_when: False - become: no - copy: src: "{{ __tsb_files_location }}/{{ item }}" @@ -86,4 +85,3 @@ state: absent name: "{{ mktemp.stdout }}" changed_when: False - become: no diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml index 8b4d798db..db1b558e4 100644 --- a/roles/template_service_broker/tasks/remove.yml +++ b/roles/template_service_broker/tasks/remove.yml @@ -2,7 +2,6 @@ - command: mktemp -d /tmp/tsb-ansible-XXXXXX register: mktemp changed_when: False - become: no - copy: src: "{{ __tsb_files_location }}/{{ item }}" @@ -32,4 +31,3 @@ state: absent name: "{{ mktemp.stdout }}" changed_when: False - become: no
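One more pattern worth calling out, since it runs through nearly every hunk above: appending `| bool` wherever inventory-supplied flags are evaluated. A minimal sketch of the failure mode this guards against, using a hypothetical standalone play:

```
# ini-style inventories deliver openshift_use_crio=false as the STRING "false",
# and any non-empty string is truthy in a bare Jinja2 conditional.
- hosts: localhost
  gather_facts: no
  vars:
    openshift_use_crio: "false"
  tasks:
  - debug:
      msg: "this runs -- the string 'false' is truthy"
    when: openshift_use_crio
  - debug:
      msg: "correctly skipped"
    when: openshift_use_crio | bool
```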