115 files changed, 1790 insertions, 683 deletions
diff --git a/files/origin-components/console-config.yaml b/files/origin-components/console-config.yaml
new file mode 100644
index 000000000..8f3f87c0b
--- /dev/null
+++ b/files/origin-components/console-config.yaml
@@ -0,0 +1,21 @@
+kind: AssetConfig
+apiVersion: v1
+extensionDevelopment: false
+extensionProperties: null
+extensionScripts: null
+extensionStylesheets: null
+extensions: null
+loggingPublicURL: ""
+logoutURL: ""
+masterPublicURL: https://127.0.0.1:8443
+metricsPublicURL: ""
+publicURL: https://127.0.0.1:8443/console/
+servingInfo:
+  bindAddress: 0.0.0.0:8443
+  bindNetwork: tcp4
+  certFile: /var/serving-cert/tls.crt
+  clientCA: ""
+  keyFile: /var/serving-cert/tls.key
+  maxRequestsInFlight: 0
+  namedCertificates: null
+  requestTimeoutSeconds: 0
\ No newline at end of file
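This asset config is not applied on its own: its contents are fed to the template below through the API_SERVER_CONFIG parameter, which lands in the webconsole-config ConfigMap. A minimal sketch of that flow, assuming a configured oc client and hypothetical file paths (neither is taken from this changeset):

```yaml
# Sketch only, not the playbook's actual implementation: read the asset
# config and pass it to the template's API_SERVER_CONFIG parameter.
- hosts: oo_first_master
  tasks:
    - name: Read the console asset config
      slurp:
        src: files/origin-components/console-config.yaml
      register: console_config

    - name: Process the template and apply the resulting objects
      shell: >
        oc process -f files/origin-components/console-template.yaml
        -p API_SERVER_CONFIG='{{ console_config.content | b64decode }}'
        | oc apply -f -
```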
diff --git a/files/origin-components/console-template.yaml b/files/origin-components/console-template.yaml
new file mode 100644
index 000000000..b2a6569fd
--- /dev/null
+++ b/files/origin-components/console-template.yaml
@@ -0,0 +1,114 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+  name: openshift-web-console
+  annotations:
+    openshift.io/display-name: OpenShift Web Console
+    description: The server for the OpenShift web console.
+    iconClass: icon-openshift
+    tags: openshift,infra
+    openshift.io/documentation-url: https://github.com/openshift/origin-web-console-server
+    openshift.io/support-url: https://access.redhat.com
+    openshift.io/provider-display-name: Red Hat, Inc.
+parameters:
+- name: IMAGE
+  value: openshift/origin-web-console:latest
+- name: NAMESPACE
+  value: openshift-web-console
+- name: LOGLEVEL
+  value: "0"
+- name: API_SERVER_CONFIG
+- name: NODE_SELECTOR
+  value: "{}"
+- name: REPLICA_COUNT
+  value: "1"
+objects:
+
+# to create the web console server
+- apiVersion: apps/v1beta1
+  kind: Deployment
+  metadata:
+    namespace: ${NAMESPACE}
+    name: webconsole
+    labels:
+      app: openshift-web-console
+      webconsole: "true"
+  spec:
+    replicas: "${{REPLICA_COUNT}}"
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: webconsole
+        labels:
+          webconsole: "true"
+      spec:
+        serviceAccountName: webconsole
+        containers:
+        - name: webconsole
+          image: ${IMAGE}
+          imagePullPolicy: IfNotPresent
+          command:
+          - "/usr/bin/origin-web-console"
+          - "--audit-log-path=-"
+          - "--config=/var/webconsole-config/webconsole-config.yaml"
+          ports:
+          - containerPort: 8443
+          volumeMounts:
+          - mountPath: /var/serving-cert
+            name: serving-cert
+          - mountPath: /var/webconsole-config
+            name: webconsole-config
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: 8443
+              scheme: HTTPS
+        nodeSelector: "${{NODE_SELECTOR}}"
+        volumes:
+        - name: serving-cert
+          secret:
+            defaultMode: 420
+            secretName: webconsole-serving-cert
+        - name: webconsole-config
+          configMap:
+            defaultMode: 420
+            name: webconsole-config
+
+# to create the config for the web console
+- apiVersion: v1
+  kind: ConfigMap
+  metadata:
+    namespace: ${NAMESPACE}
+    name: webconsole-config
+    labels:
+      app: openshift-web-console
+  data:
+    webconsole-config.yaml: ${API_SERVER_CONFIG}
+
+# to be able to assign powers to the process
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    namespace: ${NAMESPACE}
+    name: webconsole
+    labels:
+      app: openshift-web-console
+
+# to be able to expose web console inside the cluster
+- apiVersion: v1
+  kind: Service
+  metadata:
+    namespace: ${NAMESPACE}
+    name: webconsole
+    labels:
+      app: openshift-web-console
+    annotations:
+      service.alpha.openshift.io/serving-cert-secret-name: webconsole-serving-cert
+  spec:
+    selector:
+      webconsole: "true"
+    ports:
+    - name: https
+      port: 443
+      targetPort: 8443
diff --git a/inventory/hosts.example b/inventory/hosts.example
index bc85d1020..8c2590078 100644
--- a/inventory/hosts.example
+++ b/inventory/hosts.example
@@ -128,7 +128,7 @@ openshift_release=v3.7
 #openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
 # NOTE: The following crio docker-gc items are tech preview and likely shouldn't be used
 # unless you know what you are doing!!
-# The following two variables are used when opneshift_use_crio is True
+# The following two variables are used when openshift_use_crio is True
 # and cleans up after builds that pass through docker.
 # Enable docker garbage collection when using cri-o
 #openshift_crio_enable_docker_gc=false
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
index 2eb7d23d4..f6b1a6b5d 100644
--- a/playbooks/aws/provisioning_vars.yml.example
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -93,6 +93,11 @@ openshift_aws_ssh_key_name: # myuser_key
 # ---------
 #
 # Variables in this section apply to building a node AMI for use in your
 # openshift cluster.
+# openshift-ansible will perform the container runtime storage setup when specified
+# The current storage setup will require a drive if using a separate storage device
+# for the container runtime.
+container_runtime_docker_storage_type: overlay2
+container_runtime_docker_storage_setup_device: /dev/xvdb
 # must specify a base_ami when building an AMI
 openshift_aws_base_ami: # ami-12345678
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 91d496ff4..50be0dee0 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -304,8 +304,6 @@
   - import_role:
       name: openshift_node
       tasks_from: upgrade.yml
-    vars:
-      openshift_node_upgrade_in_progress: True
   - name: Set node schedulability
     oc_adm_manage_node:
       node: "{{ openshift.node.nodename | lower }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index aba179c2b..464af3ae6 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -7,8 +7,6 @@
   - import_role:
       name: openshift_node
       tasks_from: upgrade_pre.yml
-    vars:
-      openshift_node_upgrade_in_progress: True

 - name: Drain and upgrade nodes
   hosts: oo_nodes_to_upgrade:!oo_masters_to_config
@@ -46,8 +44,6 @@
   - import_role:
       name: openshift_node
       tasks_from: upgrade.yml
-    vars:
-      openshift_node_upgrade_in_progress: True
   - name: Set node schedulability
     oc_adm_manage_node:
       node: "{{ openshift.node.nodename | lower }}"
diff --git a/playbooks/container-runtime/private/build_container_groups.yml b/playbooks/container-runtime/private/build_container_groups.yml
new file mode 100644
index 000000000..7fd60743c
--- /dev/null
+++ b/playbooks/container-runtime/private/build_container_groups.yml
@@ -0,0 +1,6 @@
+---
+- name: create oo_hosts_containerized_managed_true host group
+  hosts: oo_all_hosts:!oo_nodes_to_config
+  tasks:
+  - group_by:
+      key: oo_hosts_containerized_managed_{{ (containerized | default(False)) | ternary('true','false') }}
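The new build_container_groups.yml sorts hosts into runtime groups with group_by plus a ternary filter. A minimal sketch of the same idiom; the group names and the debug play here are hypothetical, only the `containerized` inventory variable comes from the change above:

```yaml
# Sketch of the group_by + ternary idiom. A host with containerized=true
# joins managed_true; a host where the variable is unset defaults to
# False and joins managed_false.
- hosts: all
  tasks:
    - group_by:
        key: "managed_{{ (containerized | default(False)) | ternary('true', 'false') }}"

- hosts: managed_true
  tasks:
    - debug:
        msg: "{{ inventory_hostname }} is containerized-managed"
```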
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index dd13fa4a2..7a49adcf0 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -1,10 +1,7 @@
 ---
-- hosts: "{{ l_containerized_host_groups }}"
-  vars:
-    l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
-    l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
-  # role: container_runtime is necessary here to bring role default variables
-  # into the play scope.
+- import_playbook: build_container_groups.yml
+
+- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
   roles:
   - role: container_runtime
   tasks:
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml
index 357f67f0c..a6d396270 100644
--- a/playbooks/container-runtime/private/setup_storage.yml
+++ b/playbooks/container-runtime/private/setup_storage.yml
@@ -1,5 +1,7 @@
 ---
-- hosts: "{{ l_containerized_host_groups }}"
+- import_playbook: build_container_groups.yml
+
+- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
   vars:
     l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
     l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
index 0e6bde09a..5efdc486a 100644
--- a/playbooks/deploy_cluster.yml
+++ b/playbooks/deploy_cluster.yml
@@ -22,6 +22,9 @@
 - import_playbook: openshift-hosted/private/config.yml

+- import_playbook: openshift-web-console/private/config.yml
+  when: openshift_web_console_install | default(true) | bool
+
 - import_playbook: openshift-metrics/private/config.yml
   when: openshift_metrics_install_metrics | default(false) | bool
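Note the gating pattern in deploy_cluster.yml: the web console phase defaults to installed (`default(true)`) while metrics defaults to off, and the trailing `| bool` cast matters because an inventory string such as "false" would otherwise be truthy. A hypothetical inventory snippet flipping both defaults:

```yaml
# Hypothetical group_vars entry: disable the default-on web console
# phase and opt in to metrics instead.
openshift_web_console_install: false
openshift_metrics_install_metrics: true
```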
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index f7007087c..15b3dd492 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -1,6 +1,6 @@
 ---
-- name: Ensure that all non-node hosts are accessible
-  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config
+- name: Install packages necessary for installer
+  hosts: oo_all_hosts
   any_errors_fatal: true
   tasks:
   - when:
diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml
index d5256f55c..d6b26647c 100644
--- a/playbooks/openshift-logging/private/config.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -16,6 +16,7 @@
   roles:
   - openshift_logging

+# TODO: Remove when master config property is removed
 - name: Update Master configs
   hosts: oo_masters:!oo_first_master
   tasks:
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index 4752ba78e..153ea9993 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -185,9 +185,6 @@
   - role: openshift_builddefaults
   - role: openshift_buildoverrides
   - role: nickhammond.logrotate
-  - role: contiv
-    contiv_role: netmaster
-    when: openshift_use_contiv | default(False) | bool
   - role: openshift_master
     openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
     r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
diff --git a/playbooks/openshift-metrics/private/config.yml b/playbooks/openshift-metrics/private/config.yml
index 327f034d3..1e237e3f0 100644
--- a/playbooks/openshift-metrics/private/config.yml
+++ b/playbooks/openshift-metrics/private/config.yml
@@ -16,6 +16,7 @@
   roles:
   - role: openshift_metrics

+# TODO: Remove when master config property is removed
 - name: OpenShift Metrics
   hosts: oo_masters:!oo_first_master
   serial: 1
diff --git a/playbooks/openshift-node/private/additional_config.yml b/playbooks/openshift-node/private/additional_config.yml
index 54ed1927d..0881121c9 100644
--- a/playbooks/openshift-node/private/additional_config.yml
+++ b/playbooks/openshift-node/private/additional_config.yml
@@ -47,11 +47,17 @@
   - role: nuage_node
     when: openshift_use_nuage | default(false) | bool

-- name: Additional node config
-  hosts: oo_nodes_use_contiv
+- name: Configure Contiv masters
+  hosts: oo_masters_to_config
+  roles:
+  - role: contiv
+    contiv_master: true
+    when: openshift_use_contiv | default(false) | bool
+
+- name: Configure rest of Contiv nodes
+  hosts: "{{ groups.oo_nodes_use_contiv | default([]) | difference(groups.oo_masters_to_config) }}"
   roles:
   - role: contiv
-    contiv_role: netplugin
     when: openshift_use_contiv | default(false) | bool

 - name: Configure Kuryr node
diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml
index 548ff7c4f..a13173e63 100644
--- a/playbooks/openshift-node/private/configure_nodes.yml
+++ b/playbooks/openshift-node/private/configure_nodes.yml
@@ -11,6 +11,7 @@
     }}"
   roles:
   - role: openshift_clock
+  - role: openshift_cloud_provider
   - role: openshift_node
   - role: tuned
   - role: nickhammond.logrotate
diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml
index dc68d7585..644e6a69c 100644
--- a/playbooks/openshift-node/private/containerized_nodes.yml
+++ b/playbooks/openshift-node/private/containerized_nodes.yml
@@ -13,6 +13,7 @@
   roles:
   - role: openshift_clock
+  - role: openshift_cloud_provider
   - role: openshift_node
     openshift_ca_host: "{{ groups.oo_first_master.0 }}"
   - role: nickhammond.logrotate
diff --git a/playbooks/openshift-web-console/config.yml b/playbooks/openshift-web-console/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-web-console/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-web-console/private/config.yml b/playbooks/openshift-web-console/private/config.yml
new file mode 100644
index 000000000..ffd702d20
--- /dev/null
+++ b/playbooks/openshift-web-console/private/config.yml
@@ -0,0 +1,31 @@
+---
+- name: Web Console Install Checkpoint Start
+  hosts: all
+  gather_facts: false
+  tasks:
+  - name: Set Web Console install 'In Progress'
+    run_once: true
+    set_stats:
+      data:
+        installer_phase_web_console:
+          status: "In Progress"
+          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- name: Web Console
+  hosts: oo_first_master
+  roles:
+  - openshift_web_console
+  vars:
+    first_master: "{{ groups.oo_first_master[0] }}"
+
+- name: Web Console Install Checkpoint End
+  hosts: all
+  gather_facts: false
+  tasks:
+  - name: Set Web Console install 'Complete'
+    run_once: true
+    set_stats:
+      data:
+        installer_phase_web_console:
+          status: "Complete"
+          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/openshift-web-console/private/roles b/playbooks/openshift-web-console/private/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/openshift-web-console/private/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
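The Contiv change in additional_config.yml above splits one play into two by computing a host list with the difference filter, so nodes that are also masters are configured exactly once. A minimal sketch of that targeting idiom; the group names `masters` and `contiv_nodes` are hypothetical stand-ins:

```yaml
# Sketch of the difference() host-targeting idiom from the Contiv plays.
- name: Configure masters first
  hosts: masters
  tasks:
    - debug:
        msg: "master setup runs here"

- name: Configure the remaining nodes exactly once
  hosts: "{{ groups.contiv_nodes | default([]) | difference(groups.masters) }}"
  tasks:
    - debug:
        msg: "node-only setup runs here"
```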
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
index 3211f619a..2ab7d14a0 100644
--- a/playbooks/openstack/openshift-cluster/install.yml
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -9,4 +9,7 @@
   # some logic here?

 - name: run the cluster deploy
+  import_playbook: ../../prerequisites.yml
+
+- name: run the cluster deploy
   import_playbook: ../../deploy_cluster.yml
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index 933117127..481807dc9 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -14,12 +14,12 @@ openshift_hosted_router_wait: True
 openshift_hosted_registry_wait: True

 ## Openstack credentials
-#openshift_cloudprovider_kind=openstack
+#openshift_cloudprovider_kind: openstack
 #openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
 #openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
 #openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
 #openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
-#openshift_cloudprovider_openstack_region="{{ lookup('env', 'OS_REGION_NAME') }}"
+#openshift_cloudprovider_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"

 ## Use Cinder volume for Openshift registry:
diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/sample-inventory/inventory.py
index 084b5c0a0..45cc4e15a 100755
--- a/playbooks/openstack/sample-inventory/inventory.py
+++ b/playbooks/openstack/sample-inventory/inventory.py
@@ -9,6 +9,7 @@ environment.
 from __future__ import print_function

+from collections import Mapping
 import json

 import shade
@@ -101,6 +102,10 @@ def build_inventory():
             hostvars['glusterfs_devices'] = ['/dev/nvme0n1']

         node_labels = server.metadata.get('node_labels')
+        # NOTE(shadower): the node_labels value must be a dict not string
+        if not isinstance(node_labels, Mapping):
+            node_labels = json.loads(node_labels)
+
         if node_labels:
             hostvars['openshift_node_labels'] = node_labels
diff --git a/roles/container_runtime/tasks/common/post.yml b/roles/container_runtime/tasks/common/post.yml
index b90190ebf..23fd8528a 100644
--- a/roles/container_runtime/tasks/common/post.yml
+++ b/roles/container_runtime/tasks/common/post.yml
@@ -22,5 +22,5 @@
 - include_tasks: setup_docker_symlink.yml
   when:
-  - openshift_use_crio
+  - openshift_use_crio | bool
   - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml
index eedb18604..d588f2618 100644
--- a/roles/container_runtime/tasks/systemcontainer_crio.yml
+++ b/roles/container_runtime/tasks/systemcontainer_crio.yml
@@ -104,4 +104,4 @@
 # 'docker login'
 - include_tasks: common/post.yml
   vars:
-    openshift_docker_alternative_creds: "{{ openshift_use_crio_only }}"
+    openshift_docker_alternative_creds: "{{ openshift_use_crio_only | bool }}"
diff --git a/roles/contiv/README.md b/roles/contiv/README.md
index fa36039d9..ce414f9fb 100644
--- a/roles/contiv/README.md
+++ b/roles/contiv/README.md
@@ -19,8 +19,8 @@ Install Contiv components (netmaster, netplugin, contiv_etcd) on Master and Mini
 * ``openshift_use_contiv=True``
 * ``openshift_use_openshift_sdn=False``
 * ``os_sdn_network_plugin_name='cni'``
-* ``netmaster_interface=eth0``
-* ``netplugin_interface=eth1``
+* ``contiv_netmaster_interface=eth0``
+* ``contiv_netplugin_interface=eth1``
 * ref. Openshift docs Contiv section for more details

 ## Example bare metal deployment of Openshift + Contiv
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml
index 0825af8a5..4869abc61 100644
--- a/roles/contiv/defaults/main.yml
+++ b/roles/contiv/defaults/main.yml
@@ -1,51 +1,63 @@
 ---
 # The version of Contiv binaries to use
-contiv_version: 1.1.1
+contiv_version: 1.2.0

 # The version of cni binaries
-cni_version: v0.4.0
+contiv_cni_version: v0.4.0
+
+# If the node we are deploying to is to be a contiv master.
+contiv_master: false

 contiv_default_subnet: "10.128.0.0/16"
 contiv_default_gw: "10.128.254.254"

-# TCP port that Netmaster listens for network connections
-netmaster_port: 9999
-# Default for contiv_role
-contiv_role: netmaster
+# Ports netmaster listens on
+contiv_netmaster_port: 9999
+contiv_netmaster_port_proto: tcp
+contiv_ofnet_master_port: 9001
+contiv_ofnet_master_port_proto: tcp

-# TCP port that Netplugin listens for network connections
-netplugin_port: 6640
-contiv_rpc_port1: 9001
-contiv_rpc_port2: 9002
-contiv_rpc_port3: 9003
+# Ports netplugin listens on
+contiv_netplugin_port: 6640
+contiv_netplugin_port_proto: tcp
+contiv_ofnet_vxlan_port: 9002
+contiv_ofnet_vxlan_port_proto: tcp
+contiv_ovs_port: 9003
+contiv_ovs_port_proto: tcp
+
+contiv_vxlan_port: 4789
+contiv_vxlan_port_proto: udp

 # Interface used by Netplugin for inter-host traffic when encap_mode is vlan.
 # The interface must support 802.1Q trunking.
-netplugin_interface: "eno16780032"
+contiv_netplugin_interface: "eno16780032"

 # IP address of the interface used for control communication within the cluster
 # It needs to be reachable from all nodes in the cluster.
-netplugin_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + netplugin_interface].ipv4.address }}"
+contiv_netplugin_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netplugin_interface].ipv4.address }}"

 # IP used to terminate vxlan tunnels
-netplugin_vtep_ip: "{{ hostvars[inventory_hostname]['ansible_' + netplugin_interface].ipv4.address }}"
+contiv_netplugin_vtep_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netplugin_interface].ipv4.address }}"

 # Interface used to bind Netmaster service
-netmaster_interface: "{{ netplugin_interface }}"
+contiv_netmaster_interface: "{{ contiv_netplugin_interface }}"
+
+# IP address of the interface used for control communication within the cluster
+# It needs to be reachable from all nodes in the cluster.
+contiv_netmaster_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address }}"

 # Path to the contiv binaries
-bin_dir: /usr/bin
+contiv_bin_dir: /usr/bin

 # Path to the contivk8s cni binary
-cni_bin_dir: /opt/cni/bin
+contiv_cni_bin_dir: /opt/cni/bin

 # Path to cni archive download directory
-cni_download_dir: /tmp
+contiv_cni_download_dir: /tmp

 # URL for cni binaries
-cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
-cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tbz2"
+contiv_cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
+contiv_cni_bin_url: "{{ contiv_cni_bin_url_base }}/{{ contiv_cni_version }}/cni-{{ contiv_cni_version }}.tbz2"

 # Contiv config directory
@@ -60,11 +72,11 @@ contiv_download_url_base: "https://github.com/contiv/netplugin/releases/download
 contiv_download_url: "{{ contiv_download_url_base }}/{{ contiv_version }}/netplugin-{{ contiv_version }}.tar.bz2"

 # This is where kubelet looks for plugin files
-kube_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/net/exec
+contiv_kube_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/net/exec

 # Specifies routed mode vs bridged mode for networking (bridge | routing)
 # if you are using an external router for all routing, you should select bridge here
-netplugin_fwd_mode: bridge
+contiv_netplugin_fwd_mode: routing

 # Contiv fabric mode aci|default
 contiv_fabric_mode: default
@@ -73,10 +85,10 @@ contiv_fabric_mode: default
 contiv_vlan_range: "2900-3000"

 # Encapsulation type vlan|vxlan to use for instantiating container networks
-contiv_encap_mode: vlan
+contiv_encap_mode: vxlan

 # Backend used by Netplugin for instantiating container networks
-netplugin_driver: ovs
+contiv_netplugin_driver: ovs

 # Create a default Contiv network for use by pods
 contiv_default_network: true
@@ -85,38 +97,80 @@ contiv_default_network: true
 contiv_default_network_tag: ""

 #SRFIXME (use the openshift variables)
-https_proxy: ""
-http_proxy: ""
-no_proxy: ""
+contiv_https_proxy: ""
+contiv_http_proxy: ""
+contiv_no_proxy: ""

 # The following are aci specific parameters when contiv_fabric_mode: aci is set.
 # Otherwise, you can ignore these.
-apic_url: ""
-apic_username: ""
-apic_password: ""
-apic_leaf_nodes: ""
-apic_phys_dom: ""
-apic_contracts_unrestricted_mode: no
-apic_epg_bridge_domain: not_specified
+contiv_apic_url: ""
+contiv_apic_username: ""
+contiv_apic_password: ""
+contiv_apic_leaf_nodes: ""
+contiv_apic_phys_dom: ""
+contiv_apic_contracts_unrestricted_mode: no
+contiv_apic_epg_bridge_domain: not_specified
 apic_configure_default_policy: false
-apic_default_external_contract: "uni/tn-common/brc-default"
-apic_default_app_profile: "contiv-infra-app-profile"
-kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
-master_name: "{{ groups['masters'][0] }}"
-contiv_etcd_port: 22379
-etcd_url: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ contiv_etcd_port }}"
-kube_ca_cert: "{{ kube_cert_dir }}/ca.crt"
-kube_key: "{{ kube_cert_dir }}/admin.key"
-kube_cert: "{{ kube_cert_dir }}/admin.crt"
-kube_master_api_port: 8443
+contiv_apic_default_external_contract: "uni/tn-common/brc-default"
+contiv_apic_default_app_profile: "contiv-infra-app-profile"
+contiv_kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
+contiv_kube_ca_cert: "{{ contiv_kube_cert_dir }}/ca.crt"
+contiv_kube_key: "{{ contiv_kube_cert_dir }}/admin.key"
+contiv_kube_cert: "{{ contiv_kube_cert_dir }}/admin.crt"
+contiv_kube_master_api_port: 8443
+contiv_kube_master_api_port_proto: tcp

 # contivh1 default subnet and gateway
-#contiv_h1_subnet_default: "132.1.1.0/24"
-#contiv_h1_gw_default: "132.1.1.1"
 contiv_h1_subnet_default: "10.129.0.0/16"
 contiv_h1_gw_default: "10.129.0.1"

 # contiv default private subnet for ext access
 contiv_private_ext_subnet: "10.130.0.0/16"

-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
+contiv_openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
+
+contiv_api_proxy_port: 10000
+contiv_api_proxy_port_proto: tcp
+contiv_api_proxy_image_repo: contiv/auth_proxy
+contiv_api_proxy_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address }}"
+
+contiv_etcd_system_user: contivetcd
+contiv_etcd_system_uid: 823
+contiv_etcd_system_group: contivetcd
+contiv_etcd_system_gid: 823
+contiv_etcd_port: 22379
+contiv_etcd_port_proto: tcp
+contiv_etcd_peer_port: 22380
+contiv_etcd_peer_port_proto: tcp
+contiv_etcd_url: "http://127.0.0.1:{{ contiv_etcd_port }}"
+contiv_etcd_init_image_repo: ferest/etcd-initer
+contiv_etcd_init_image_tag: latest
+contiv_etcd_image_repo: quay.io/coreos/etcd
+contiv_etcd_image_tag: v3.2.4
+contiv_etcd_conf_dir: /etc/contiv-etcd
+contiv_etcd_data_dir: /var/lib/contiv-etcd
+contiv_etcd_peers: |-
+  {% for host in groups.oo_masters_to_config -%}
+    {{ host }}=http://{{ hostvars[host]['ip'] | default(hostvars[host].ansible_default_ipv4['address']) }}:{{ contiv_etcd_peer_port }}{% if not loop.last %},{% endif %}
+  {%- endfor %}
+
+# List of port/protocol pairs to allow inbound access to on every host
+# netplugin runs on, from all host IPs in the cluster.
+contiv_netplugin_internal: [ "{{ contiv_ofnet_vxlan_port }}/{{ contiv_ofnet_vxlan_port_proto }}",
+                             "{{ contiv_ovs_port }}/{{ contiv_ovs_port_proto }}",
+                             "{{ contiv_vxlan_port }}/{{ contiv_vxlan_port_proto }}" ]
+# Allow all forwarded traffic in and out of these interfaces.
+contiv_netplugin_forward_interfaces: [ contivh0, contivh1 ]
+
+# List of port/protocol pairs to allow inbound access to on every host
+# netmaster runs on, from all host IPs in the cluster. Note that every host
+# that runs netmaster also runs netplugin, so the above netplugin rules will
+# apply as well.
+contiv_netmaster_internal: [ "{{ contiv_ofnet_master_port }}/{{ contiv_ofnet_master_port_proto }}",
+                             "{{ contiv_netmaster_port }}/{{ contiv_netmaster_port_proto }}",
+                             "{{ contiv_etcd_port }}/{{ contiv_etcd_port_proto }}",
+                             "{{ contiv_etcd_peer_port }}/{{ contiv_etcd_peer_port_proto }}",
+                             "{{ contiv_kube_master_api_port }}/{{ contiv_kube_master_api_port_proto }}" ]
+
+# List of port/protocol pairs to allow inbound access to on every host
+# netmaster runs on, from any host anywhere.
+contiv_netmaster_external: [ "{{ contiv_api_proxy_port }}/{{ contiv_api_proxy_port_proto }}" ]
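The contiv_etcd_peers block above builds the etcd initial-cluster string by looping over the master group. A sketch of what it renders to, assuming a hypothetical inventory with two masters, master1 at 10.0.0.10 and master2 at 10.0.0.11, and the default peer port:

```yaml
# Hypothetical rendering check: print the peers string the Jinja loop
# above would produce. Host names and IPs are invented for illustration.
- hosts: oo_masters_to_config
  gather_facts: false
  tasks:
    - debug:
        msg: "{{ contiv_etcd_peers }}"
      run_once: true
      # Expected output with the two hypothetical masters:
      #   master1=http://10.0.0.10:22380,master2=http://10.0.0.11:22380
```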
diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml
index 67fb23db8..e8607cc90 100644
--- a/roles/contiv/meta/main.yml
+++ b/roles/contiv/meta/main.yml
@@ -15,17 +15,3 @@ galaxy_info:
 dependencies:
 - role: lib_utils
 - role: contiv_facts
-- role: etcd
-  etcd_service: contiv-etcd
-  etcd_is_thirdparty: True
-  etcd_peer_port: 22380
-  etcd_client_port: 22379
-  etcd_conf_dir: /etc/contiv-etcd/
-  etcd_data_dir: /var/lib/contiv-etcd/
-  etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
-  etcd_cert_config_dir: /etc/contiv-etcd/
-  etcd_url_scheme: http
-  etcd_peer_url_scheme: http
-  when: contiv_role == "netmaster"
-- role: contiv_auth_proxy
-  when: contiv_role == "netmaster"
diff --git a/roles/contiv/tasks/aci.yml b/roles/contiv/tasks/aci.yml
index 30d2eb339..8a56b3590 100644
--- a/roles/contiv/tasks/aci.yml
+++ b/roles/contiv/tasks/aci.yml
@@ -11,7 +11,7 @@
 - name: ACI | Copy shell script used by aci-gw service
   template:
     src: aci_gw.j2
-    dest: "{{ bin_dir }}/aci_gw.sh"
+    dest: "{{ contiv_bin_dir }}/aci_gw.sh"
     mode: u=rwx,g=rx,o=rx

 - name: ACI | Copy systemd units for aci-gw
diff --git a/roles/contiv/tasks/api_proxy.yml b/roles/contiv/tasks/api_proxy.yml
new file mode 100644
index 000000000..8b524dd6e
--- /dev/null
+++ b/roles/contiv/tasks/api_proxy.yml
@@ -0,0 +1,120 @@
+---
+- name: API proxy | Create contiv-api-proxy openshift user
+  oc_serviceaccount:
+    state: present
+    name: contiv-api-proxy
+    namespace: kube-system
+  run_once: true
+
+- name: API proxy | Set contiv-api-proxy openshift user permissions
+  oc_adm_policy_user:
+    user: system:serviceaccount:kube-system:contiv-api-proxy
+    resource_kind: scc
+    resource_name: hostnetwork
+    state: present
+  run_once: true
+
+- name: API proxy | Create temp directory for doing work
+  command: mktemp -d /tmp/openshift-contiv-XXXXXX
+  register: mktemp
+  changed_when: False
+  # For things that pass temp files between steps, we want to make sure they
+  # run on the same node.
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Check for existing api proxy secret volume
+  oc_obj:
+    namespace: kube-system
+    kind: secret
+    state: list
+    selector: "name=contiv-api-proxy-secret"
+  register: existing_secret_volume
+  run_once: true
+
+- name: API proxy | Generate a self signed certificate for api proxy
+  command: openssl req -new -nodes -x509 -subj "/C=US/ST=/L=/O=/CN=localhost" -days 3650 -keyout "{{ mktemp.stdout }}/key.pem" -out "{{ mktemp.stdout }}/cert.pem" -extensions v3_ca
+  when: (contiv_api_proxy_cert is not defined or contiv_api_proxy_key is not defined)
+        and not existing_secret_volume.results.results[0]['items']
+  register: created_self_signed_cert
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Read self signed certificate file
+  command: cat "{{ mktemp.stdout }}/cert.pem"
+  register: generated_cert
+  when: created_self_signed_cert.changed
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Read self signed key file
+  command: cat "{{ mktemp.stdout }}/key.pem"
+  register: generated_key
+  when: created_self_signed_cert.changed
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Create api-proxy-secrets.yml from template using generated cert
+  template:
+    src: api-proxy-secrets.yml.j2
+    dest: "{{ mktemp.stdout }}/api-proxy-secrets.yml"
+  vars:
+    key: "{{ generated_key.stdout }}"
+    cert: "{{ generated_cert.stdout }}"
+  when: created_self_signed_cert.changed
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Create api-proxy-secrets.yml from template using user defined cert
+  template:
+    src: api-proxy-secrets.yml.j2
+    dest: "{{ mktemp.stdout }}/api-proxy-secrets.yml"
+  vars:
+    key: "{{ lookup('file', contiv_api_proxy_key) }}"
+    cert: "{{ lookup('file', contiv_api_proxy_cert) }}"
+  when: contiv_api_proxy_cert is defined and contiv_api_proxy_key is defined
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Create secret certificate volume
+  oc_obj:
+    state: present
+    namespace: "kube-system"
+    kind: secret
+    name: contiv-api-proxy-secret
+    files:
+    - "{{ mktemp.stdout }}/api-proxy-secrets.yml"
+  when: (contiv_api_proxy_cert is defined and contiv_api_proxy_key is defined)
+        or created_self_signed_cert.changed
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Create api-proxy-daemonset.yml from template
+  template:
+    src: api-proxy-daemonset.yml.j2
+    dest: "{{ mktemp.stdout }}/api-proxy-daemonset.yml"
+  vars:
+    etcd_host: "etcd://{{ groups.oo_etcd_to_config.0 }}:{{ contiv_etcd_port }}"
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+# Always "import" this file, k8s won't do anything if it matches exactly what
+# is already in the cluster.
+- name: API proxy | Add API proxy daemonset
+  oc_obj:
+    state: present
+    namespace: "kube-system"
+    kind: daemonset
+    name: contiv-api-proxy
+    files:
+    - "{{ mktemp.stdout }}/api-proxy-daemonset.yml"
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: API proxy | Delete temp directory
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
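The api_proxy tasks above rely on a pattern worth naming: every step that touches the temp directory is pinned to the same host with delegate_to and deduplicated with run_once, because mktemp's output path only exists on the node that ran it. A stripped-down sketch of the pattern; the file name and content are invented for illustration:

```yaml
# Minimal sketch of the delegate_to + run_once temp-file pattern.
- name: Create a scratch directory on one master
  command: mktemp -d /tmp/scratch-XXXXXX
  register: scratch
  changed_when: false
  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
  run_once: true

- name: Render a file into the same directory on the same host
  copy:
    dest: "{{ scratch.stdout }}/example.txt"
    content: "rendered once, on one node"
  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
  run_once: true

- name: Clean up
  file:
    path: "{{ scratch.stdout }}"
    state: absent
  changed_when: false
  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
  run_once: true
```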
diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml
index 8a928ea54..e9763d34a 100644
--- a/roles/contiv/tasks/default_network.yml
+++ b/roles/contiv/tasks/default_network.yml
@@ -1,71 +1,71 @@
 ---
-- name: Contiv | Wait for netmaster
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" tenant ls'
+- name: Default network | Wait for netmaster
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" tenant ls'
   register: tenant_result
   until: tenant_result.stdout.find("default") != -1
   retries: 9
   delay: 10

-- name: Contiv | Set globals
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
+- name: Default network | Set globals
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ contiv_netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
   run_once: true

-- name: Contiv | Set arp mode to flood if ACI
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --arp-mode flood'
+- name: Default network | Set arp mode to flood if ACI
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" global set --arp-mode flood'
   when: contiv_fabric_mode == "aci"
   run_once: true

-- name: Contiv | Check if default-net exists
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls'
+- name: Default network | Check if default-net exists
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net ls'
   register: net_result
   run_once: true

-- name: Contiv | Create default-net
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
+- name: Default network | Create default-net
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
   when: net_result.stdout.find("default-net") == -1
   run_once: true

-- name: Contiv | Create host access infra network for VxLan routing case
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
-  when: (contiv_encap_mode == "vxlan") and (netplugin_fwd_mode == "routing")
+- name: Default network | Create host access infra network for VxLan routing case
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
+  when: (contiv_encap_mode == "vxlan") and (contiv_netplugin_fwd_mode == "routing")
   run_once: true

-#- name: Contiv | Create an allow-all policy for the default-group
-#  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy'
+#- name: Default network | Create an allow-all policy for the default-group
+#  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy create ose-allow-all-policy'
 #  when: contiv_fabric_mode == "aci"
 #  run_once: true

-- name: Contiv | Set up aci external contract to consume default external contract
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume'
+- name: Default network | Set up aci external contract to consume default external contract
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" external-contracts create -c -a {{ contiv_apic_default_external_contract }} oseExtToConsume'
   when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
   run_once: true

-- name: Contiv | Set up aci external contract to provide default external contract
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide'
+- name: Default network | Set up aci external contract to provide default external contract
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" external-contracts create -p -a {{ contiv_apic_default_external_contract }} oseExtToProvide'
   when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
   run_once: true

-- name: Contiv | Create aci default-group
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group'
+- name: Default network | Create aci default-group
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" group create default-net default-group'
   when: contiv_fabric_mode == "aci"
   run_once: true

-- name: Contiv | Add external contracts to the default-group
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
+- name: Default network | Add external contracts to the default-group
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
   when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
   run_once: true

-#- name: Contiv | Add policy rule 1 for allow-all policy
-#  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
+#- name: Default network | Add policy rule 1 for allow-all policy
+#  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
 #  when: contiv_fabric_mode == "aci"
 #  run_once: true
-#- name: Contiv | Add policy rule 2 for allow-all policy
-#  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
+#- name: Default network | Add policy rule 2 for allow-all policy
+#  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
 #  when: contiv_fabric_mode == "aci"
 #  run_once: true

-- name: Contiv | Create default aci app profile
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}'
+- name: Default network | Create default aci app profile
+  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" app-profile create -g default-group {{ contiv_apic_default_app_profile }}'
   when: contiv_fabric_mode == "aci"
   run_once: true
diff --git a/roles/contiv/tasks/download_bins.yml b/roles/contiv/tasks/download_bins.yml
index 831fd360a..47d74da9c 100644
--- a/roles/contiv/tasks/download_bins.yml
+++ b/roles/contiv/tasks/download_bins.yml
@@ -4,7 +4,7 @@
     path: "{{ contiv_current_release_directory }}"
     state: directory

-- name: Install bzip2
+- name: Download Bins | Install bzip2
   yum:
     name: bzip2
     state: installed
@@ -18,9 +18,9 @@
     mode: 0755
     validate_certs: False
   environment:
-    http_proxy: "{{ http_proxy|default('') }}"
-    https_proxy: "{{ https_proxy|default('') }}"
-    no_proxy: "{{ no_proxy|default('') }}"
+    http_proxy: "{{ contiv_http_proxy|default('') }}"
+    https_proxy: "{{ contiv_https_proxy|default('') }}"
+    no_proxy: "{{ contiv_no_proxy|default('') }}"

 - name: Download Bins | Extract Contiv tar file
   unarchive:
@@ -30,19 +30,19 @@
 - name: Download Bins | Download cni tar file
   get_url:
-    url: "{{ cni_bin_url }}"
-    dest: "{{ cni_download_dir }}"
+    url: "{{ contiv_cni_bin_url }}"
+    dest: "{{ contiv_cni_download_dir }}"
     mode: 0755
     validate_certs: False
   environment:
-    http_proxy: "{{ http_proxy|default('') }}"
-    https_proxy: "{{ https_proxy|default('') }}"
-    no_proxy: "{{ no_proxy|default('') }}"
+    http_proxy: "{{ contiv_http_proxy|default('') }}"
+    https_proxy: "{{ contiv_https_proxy|default('') }}"
+    no_proxy: "{{ contiv_no_proxy|default('') }}"
   register: download_file

 - name: Download Bins | Extract cni tar file
   unarchive:
     src: "{{ download_file.dest }}"
-    dest: "{{ cni_download_dir }}"
+    dest: "{{ contiv_cni_download_dir }}"
     copy: no
   when: download_file.changed
diff --git a/roles/contiv/tasks/etcd.yml b/roles/contiv/tasks/etcd.yml
new file mode 100644
index 000000000..b08ead982
--- /dev/null
+++ b/roles/contiv/tasks/etcd.yml
@@ -0,0 +1,114 @@
+---
+# To run contiv-etcd in a container as non-root, we need to match the uid/gid
+# with the filesystem permissions on the host.
+- name: Contiv etcd | Create local unix group
+  group:
+    name: "{{ contiv_etcd_system_group }}"
+    gid: "{{ contiv_etcd_system_gid }}"
+    system: yes
+
+- name: Contiv etcd | Create local unix user
+  user:
+    name: "{{ contiv_etcd_system_user }}"
+    createhome: no
+    uid: "{{ contiv_etcd_system_uid }}"
+    group: "{{ contiv_etcd_system_group }}"
+    home: "{{ contiv_etcd_data_dir }}"
+    shell: /bin/false
+    system: yes
+
+- name: Contiv etcd | Create directories
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: g-rwx,o-rwx
+    owner: "{{ contiv_etcd_system_user }}"
+    group: "{{ contiv_etcd_system_group }}"
+    setype: svirt_sandbox_file_t
+    seuser: system_u
+    serole: object_r
+    selevel: s0
+    recurse: yes
+  with_items:
+  - "{{ contiv_etcd_data_dir }}"
+  - "{{ contiv_etcd_conf_dir }}"
+
+- name: Contiv etcd | Create contiv-etcd openshift user
+  oc_serviceaccount:
+    state: present
+    name: contiv-etcd
+    namespace: kube-system
+  run_once: true
+
+- name: Contiv etcd | Create temp directory for doing work
+  command: mktemp -d /tmp/openshift-contiv-XXXXXX
+  register: mktemp
+  changed_when: False
+  # For things that pass temp files between steps, we want to make sure they
+  # run on the same node.
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: Contiv etcd | Create etcd-scc.yml from template
+  template:
+    src: etcd-scc.yml.j2
+    dest: "{{ mktemp.stdout }}/etcd-scc.yml"
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: Contiv etcd | Create etcd.yml from template
+  template:
+    src: etcd-daemonset.yml.j2
+    dest: "{{ mktemp.stdout }}/etcd-daemonset.yml"
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: Contiv etcd | Create etcd-proxy.yml from template
+  template:
+    src: etcd-proxy-daemonset.yml.j2
+    dest: "{{ mktemp.stdout }}/etcd-proxy-daemonset.yml"
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: Contiv etcd | Add etcd scc
+  oc_obj:
+    state: present
+    namespace: "kube-system"
+    kind: SecurityContextConstraints
+    name: contiv-etcd
+    files:
+    - "{{ mktemp.stdout }}/etcd-scc.yml"
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+# Always "import" this file, k8s won't do anything if it matches exactly what
+# is already in the cluster.
+- name: Contiv etcd | Add etcd daemonset
+  oc_obj:
+    state: present
+    namespace: "kube-system"
+    kind: daemonset
+    name: contiv-etcd
+    files:
+    - "{{ mktemp.stdout }}/etcd-daemonset.yml"
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: Contiv etcd | Add etcd-proxy daemonset
+  oc_obj:
+    state: present
+    namespace: "kube-system"
+    kind: daemonset
+    name: contiv-etcd-proxy
+    files:
+    - "{{ mktemp.stdout }}/etcd-proxy-daemonset.yml"
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
+
+- name: Contiv etcd | Delete temp directory
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False
+  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+  run_once: true
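The etcd tasks above encode a containerization detail: the host uid/gid (823) must line up with the user the contiv-etcd container runs as, otherwise the bind-mounted data directory is unwritable. A hypothetical verification sketch, reusing only values from the role defaults above:

```yaml
# Hypothetical check that host ownership matches the uid the container
# will run as; 823 mirrors contiv_etcd_system_uid/gid above.
- name: Verify contiv-etcd data dir ownership matches container uid
  hosts: oo_masters_to_config
  tasks:
    - stat:
        path: /var/lib/contiv-etcd
      register: etcd_dir

    - assert:
        that:
          - etcd_dir.stat.uid == 823
          - etcd_dir.stat.gid == 823
        msg: "contiv-etcd runs as uid 823 and could not write its data dir"
```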
diff --git a/roles/contiv/tasks/main.yml b/roles/contiv/tasks/main.yml
index cb9196a71..4d530ae90 100644
--- a/roles/contiv/tasks/main.yml
+++ b/roles/contiv/tasks/main.yml
@@ -1,14 +1,15 @@
 ---
-- name: Ensure bin_dir exists
+- include_tasks: old_version_cleanup.yml
+
+- name: Ensure contiv_bin_dir exists
   file:
-    path: "{{ bin_dir }}"
+    path: "{{ contiv_bin_dir }}"
     recurse: yes
     state: directory

 - include_tasks: download_bins.yml

 - include_tasks: netmaster.yml
-  when: contiv_role == "netmaster"
+  when: contiv_master

 - include_tasks: netplugin.yml
-  when: contiv_role == "netplugin"
diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml
index 6f15af8c2..bb22fb801 100644
--- a/roles/contiv/tasks/netmaster.yml
+++ b/roles/contiv/tasks/netmaster.yml
@@ -1,34 +1,16 @@
 ---
 - include_tasks: netmaster_firewalld.yml
-  when: has_firewalld
+  when: contiv_has_firewalld

 - include_tasks: netmaster_iptables.yml
-  when: not has_firewalld and has_iptables
+  when: not contiv_has_firewalld and contiv_has_iptables

-- name: Netmaster | Check is /etc/hosts file exists
-  stat:
-    path: /etc/hosts
-  register: hosts
-
-- name: Netmaster | Create hosts file if it is not present
-  file:
-    path: /etc/hosts
-    state: touch
-  when: not hosts.stat.exists
-
-- name: Netmaster | Build hosts file
-  lineinfile:
-    dest: /etc/hosts
-    regexp: .*netmaster$
-    line: "{{ hostvars[item]['ansible_' + netmaster_interface].ipv4.address }} netmaster"
-    state: present
-  when: hostvars[item]['ansible_' + netmaster_interface].ipv4.address is defined
-  with_items: "{{ groups['masters'] }}"
+- include_tasks: etcd.yml

 - name: Netmaster | Create netmaster symlinks
   file:
     src: "{{ contiv_current_release_directory }}/{{ item }}"
-    dest: "{{ bin_dir }}/{{ item }}"
+    dest: "{{ contiv_bin_dir }}/{{ item }}"
     state: link
   with_items:
   - netmaster
@@ -36,7 +18,7 @@
 - name: Netmaster | Copy environment file for netmaster
   template:
-    src: netmaster.env.j2
+    src: netmaster.j2
     dest: /etc/default/netmaster
     mode: 0644
   notify: restart netmaster
@@ -75,3 +57,5 @@
 - include_tasks: default_network.yml
   when: contiv_default_network == true
+
+- include_tasks: api_proxy.yml
diff --git a/roles/contiv/tasks/netmaster_firewalld.yml b/roles/contiv/tasks/netmaster_firewalld.yml
index 2975351ac..0d52f821d 100644
--- a/roles/contiv/tasks/netmaster_firewalld.yml
+++ b/roles/contiv/tasks/netmaster_firewalld.yml
@@ -1,16 +1,17 @@
 ---
-- name: Netmaster Firewalld | Open Netmaster port
+- name: Netmaster Firewalld | Add internal rules
   firewalld:
-    port: "{{ netmaster_port }}/tcp"
-    permanent: false
-    state: enabled
-  # in case this is also a node where firewalld turned off
-  ignore_errors: yes
+    immediate: true
+    permanent: true
+    port: "{{ item[0] }}"
+    source: "{{ item[1] }}"
+  with_nested:
+  - "{{ contiv_netmaster_internal }}"
+  - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"

-- name: Netmaster Firewalld | Save Netmaster port
+- name: Netmaster Firewalld | Add external rules
   firewalld:
-    port: "{{ netmaster_port }}/tcp"
+    immediate: true
     permanent: true
-    state: enabled
-  # in case this is also a node where firewalld turned off
-  ignore_errors: yes
+    port: "{{ item }}"
+  with_items: "{{ contiv_netmaster_external }}"
diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml
index c98e7b6a5..3b68ea0c3 100644
--- a/roles/contiv/tasks/netmaster_iptables.yml
+++ b/roles/contiv/tasks/netmaster_iptables.yml
@@ -1,27 +1,32 @@
 ---
-- name: Netmaster IPtables | Get iptables rules
-  command: iptables -L --wait
-  register: iptablesrules
-  check_mode: no
-
-- name: Netmaster IPtables | Enable iptables at boot
-  service:
-    name: iptables
-    enabled: yes
-    state: started
-
-- name: Netmaster IPtables | Open Netmaster with iptables
-  command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv"
-  with_items:
-  - "{{ contiv_rpc_port1 }}"
-  - "{{ contiv_rpc_port2 }}"
-  - "{{ contiv_rpc_port3 }}"
-  when: iptablesrules.stdout.find("contiv") == -1
+- name: Netmaster IPtables | Add internal rules
+  iptables:
+    action: insert
+    chain: INPUT
+    # Parsed from the contiv_netmaster_internal list, this will be tcp or udp.
+    protocol: "{{ item[0].split('/')[1] }}"
+    match: "{{ item[0].split('/')[1] }}"
+    # Parsed from the contiv_netmaster_internal list, this will be a port number.
+    destination_port: "{{ item[0].split('/')[0] }}"
+    # This is an IP address from a node in the cluster.
+    source: "{{ item[1] }}"
+    jump: ACCEPT
+    comment: contiv
+  with_nested:
+  - "{{ contiv_netmaster_internal }}"
+  - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
   notify: Save iptables rules

-- name: Netmaster IPtables | Open netmaster main port
-  command: /sbin/iptables -I INPUT 1 -p tcp -s {{ item }} --dport {{ netmaster_port }} -j ACCEPT -m comment --comment "contiv"
-  with_items:
-  - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + netmaster_interface].ipv4.address)|list }}"
-  when: iptablesrules.stdout.find("contiv") == -1
+- name: Netmaster IPtables | Add external rules
+  iptables:
+    action: insert
+    chain: INPUT
+    # Parsed from the contiv_netmaster_external list, this will be tcp or udp.
+    protocol: "{{ item.split('/')[1] }}"
+    match: "{{ item.split('/')[1] }}"
+    # Parsed from the contiv_netmaster_external list, this will be a port number.
+    destination_port: "{{ item.split('/')[0] }}"
+    jump: ACCEPT
+    comment: contiv
+  with_items: "{{ contiv_netmaster_external }}"
   notify: Save iptables rules
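The rules above derive protocol and port by splitting strings like "9999/tcp". A small sketch of that parsing, as a hypothetical standalone debug play:

```yaml
# Hypothetical illustration of the "port/proto" split used by the rules
# above: "9999/tcp" yields port 9999 and protocol tcp.
- hosts: localhost
  gather_facts: false
  vars:
    rule: "9999/tcp"
  tasks:
    - debug:
        msg: "port={{ rule.split('/')[0] }} proto={{ rule.split('/')[1] }}"
```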
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index 540f6e4bc..60f432202 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -1,9 +1,9 @@
 ---
 - include_tasks: netplugin_firewalld.yml
-  when: has_firewalld
+  when: contiv_has_firewalld

 - include_tasks: netplugin_iptables.yml
-  when: has_iptables
+  when: not contiv_has_firewalld and contiv_has_iptables

 - name: Netplugin | Ensure localhost entry correct in /etc/hosts
   lineinfile:
@@ -20,41 +20,40 @@
     state: absent

 - include_tasks: ovs.yml
-  when: netplugin_driver == "ovs"
+  when: contiv_netplugin_driver == "ovs"

 - name: Netplugin | Create Netplugin bin symlink
   file:
     src: "{{ contiv_current_release_directory }}/netplugin"
-    dest: "{{ bin_dir }}/netplugin"
+    dest: "{{ contiv_bin_dir }}/netplugin"
     state: link

-
-- name: Netplugin | Ensure cni_bin_dir exists
+- name: Netplugin | Ensure contiv_cni_bin_dir exists
   file:
-    path: "{{ cni_bin_dir }}"
+    path: "{{ contiv_cni_bin_dir }}"
     recurse: yes
     state: directory

 - name: Netplugin | Create CNI bin symlink
   file:
     src: "{{ contiv_current_release_directory }}/contivk8s"
-    dest: "{{ cni_bin_dir }}/contivk8s"
+    dest: "{{ contiv_cni_bin_dir }}/contivk8s"
     state: link

 - name: Netplugin | Copy CNI loopback bin
   copy:
-    src: "{{ cni_download_dir }}/loopback"
-    dest: "{{ cni_bin_dir }}/loopback"
+    src: "{{ contiv_cni_download_dir }}/loopback"
+    dest: "{{ contiv_cni_bin_dir }}/loopback"
     remote_src: True
     mode: 0755

-- name: Netplugin | Ensure kube_plugin_dir and cni/net.d directories exist
+- name: Netplugin | Ensure contiv_kube_plugin_dir and cni/net.d directories exist
   file:
     path: "{{ item }}"
     recurse: yes
     state: directory
   with_items:
-  - "{{ kube_plugin_dir }}"
+  - "{{ contiv_kube_plugin_dir }}"
   - "/etc/cni/net.d"

 - name: Netplugin | Ensure contiv_config_dir exists
@@ -68,7 +67,7 @@
     src: contiv_cni.conf
     dest: "{{ item }}"
   with_items:
-  - "{{ kube_plugin_dir }}/contiv_cni.conf"
+  - "{{ contiv_kube_plugin_dir }}/contiv_cni.conf"
   - "/etc/cni/net.d"
 #  notify: restart kubelet

@@ -85,11 +84,11 @@
     mode: 0644
   notify: restart netplugin

-- name: Docker | Make sure proxy setting exists
+- name: Netplugin | Make sure docker proxy setting exists
   lineinfile:
     dest: /etc/sysconfig/docker-network
     regexp: '^https_proxy.*'
-    line: 'https_proxy={{ https_proxy }}'
+    line: 'https_proxy={{ contiv_https_proxy }}'
     state: present
   register: docker_updated

@@ -103,9 +102,9 @@
   command: systemctl daemon-reload
   when: docker_updated is changed

-- name: Docker | Restart docker
+- name: Netplugin | Restart docker
   service:
-    name: "{{ openshift_docker_service_name }}"
+    name: "{{ contiv_openshift_docker_service_name }}"
     state: restarted
   when: docker_updated is changed
   register: l_docker_restart_docker_in_contiv_result
diff --git a/roles/contiv/tasks/netplugin_firewalld.yml b/roles/contiv/tasks/netplugin_firewalld.yml
index 3aeffae56..5ac531ec6 100644
--- a/roles/contiv/tasks/netplugin_firewalld.yml
+++ b/roles/contiv/tasks/netplugin_firewalld.yml
@@ -1,34 +1,17 @@
 ---
-- name: Netplugin Firewalld | Open Netplugin port
+- name: Netplugin Firewalld | Add internal rules
   firewalld:
-    port: "{{ netplugin_port }}/tcp"
-    permanent: false
-    state: enabled
-  # in case this is also a node where firewalld turned off
-  ignore_errors: yes
-
-- name: Netplugin Firewalld | Save Netplugin port
-  firewalld:
-    port: "{{ netplugin_port }}/tcp"
+    immediate: true
     permanent: true
-    state: enabled
-  # in case this is also a node where firewalld turned off
-  ignore_errors: yes
-
-- name: Netplugin Firewalld | Open vxlan port
-  firewalld:
-    port: "8472/udp"
-    permanent: false
-    state: enabled
-  # in case this is also a node where firewalld turned off
-  ignore_errors: yes
-  when: contiv_encap_mode == "vxlan"
+    port: "{{ item[0] }}"
+    source: "{{ item[1] }}"
+  with_nested:
+  - "{{ contiv_netplugin_internal }}"
+  - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"

-- name: Netplugin Firewalld | Save firewalld vxlan port for flanneld
+- name: Netplugin Firewalld | Add dns rule
   firewalld:
-    port: "8472/udp"
+    immediate: true
     permanent: true
-    state: enabled
-  # in case this is also a node where firewalld turned off
-  ignore_errors: yes
-  when: contiv_encap_mode == "vxlan"
+    port: "53/udp"
+    interface: contivh0
diff --git a/roles/contiv/tasks/netplugin_iptables.yml b/roles/contiv/tasks/netplugin_iptables.yml
index 3ea34645d..9d376f4e5 100644
--- a/roles/contiv/tasks/netplugin_iptables.yml
+++ b/roles/contiv/tasks/netplugin_iptables.yml
@@ -1,58 +1,52 @@
 ---
-- name: Netplugin IPtables | Get iptables rules
-  command: iptables -L --wait
-  register: iptablesrules
-  check_mode: no
+- name: Netplugin IPtables | Add internal rules
+  iptables:
+    action: insert
+    chain: INPUT
+    protocol: "{{ item[0].split('/')[1] }}"
+    match: "{{ item[0].split('/')[1] }}"
+    destination_port: "{{ item[0].split('/')[0] }}"
+    source: "{{ item[1] }}"
+    jump: ACCEPT
+    comment: contiv
+  with_nested:
+  - "{{ contiv_netplugin_internal }}"
+  - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
+  notify: Save iptables rules
+
+- name: Netplugin IPtables | Add [in] forward rules
+  iptables:
+    action: insert
+    chain: FORWARD
+    in_interface: "{{ item }}"
+    jump: ACCEPT
+    comment: contiv
+  with_items: "{{ contiv_netplugin_forward_interfaces }}"
+  notify: Save iptables rules
+
+- name: Netplugin IPtables | Add [out] forward rules
+  iptables:
+    action: insert
+    chain: FORWARD
+    out_interface: "{{ item }}"
+    jump: ACCEPT
+    comment: contiv
+  with_items: "{{ contiv_netplugin_forward_interfaces }}"
+  notify: Save iptables rules
+
+- name: Netplugin IPtables | Add dns rule
+  iptables:
+    action: insert
+    chain: INPUT
+    protocol: udp
+    match: udp
+    destination_port: 53
+    in_interface: contivh0
+    jump: ACCEPT
+    comment: contiv
+  notify: Save iptables rules

 - name: Netplugin IPtables | Enable iptables at boot
   service:
     name: iptables
     enabled: yes
-    state: started
-
-- name: Netplugin IPtables | Open Netmaster with iptables
-  command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv"
-  with_items:
-  - "{{ netmaster_port }}"
-  - "{{ contiv_rpc_port1 }}"
-  - "{{ contiv_rpc_port2 }}"
-  - "{{ contiv_rpc_port3 }}"
-  - "{{ contiv_etcd_port }}"
-  - "{{ kube_master_api_port }}"
-  when: iptablesrules.stdout.find("contiv") == -1
-  notify: Save iptables rules
-
-- name: Netplugin IPtables | Open vxlan port with iptables
-  command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "netplugin vxlan 8472"
-  when: iptablesrules.stdout.find("netplugin vxlan 8472") == -1
-  notify: Save iptables rules
-
-- name: Netplugin IPtables | Open vxlan port with iptables
-  command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "netplugin vxlan 4789"
-  when: iptablesrules.stdout.find("netplugin vxlan 4789") == -1
iptablesrules.stdout.find("netplugin vxlan 4789") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow from contivh0 - command: /sbin/iptables -I FORWARD 1 -i contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD input" - when: iptablesrules.stdout.find("contivh0 FORWARD input") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow to contivh0 - command: /sbin/iptables -I FORWARD 1 -o contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD output" - when: iptablesrules.stdout.find("contivh0 FORWARD output") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow from contivh1 - command: /sbin/iptables -I FORWARD 1 -i contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD input" - when: iptablesrules.stdout.find("contivh1 FORWARD input") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow to contivh1 - command: /sbin/iptables -I FORWARD 1 -o contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD output" - when: iptablesrules.stdout.find("contivh1 FORWARD output") == -1 - notify: Save iptables rules - -- name: Netplugin IPtables | Allow dns - command: /sbin/iptables -I INPUT 1 -p udp --dport 53 -j ACCEPT -m comment --comment "contiv dns" - when: iptablesrules.stdout.find("contiv dns") == -1 - notify: Save iptables rules diff --git a/roles/contiv/tasks/old_version_cleanup.yml b/roles/contiv/tasks/old_version_cleanup.yml new file mode 100644 index 000000000..8b3d88096 --- /dev/null +++ b/roles/contiv/tasks/old_version_cleanup.yml @@ -0,0 +1,43 @@ +--- +- name: Old version cleanup | Check if old auth proxy service exists + stat: + path: /etc/systemd/system/auth-proxy.service + register: auth_proxy_stat + +- name: Old version cleanup | Stop old auth proxy + service: + name: auth-proxy + enabled: no + state: stopped + when: auth_proxy_stat.stat.exists + +# Note(NB): The new containerized contiv-etcd service uses the same data +# directory on the host, so etcd data is not lost. 
+- name: Old version cleanup | Check if old contiv-etcd service exists + stat: + path: /etc/systemd/system/contiv-etcd.service + register: contiv_etcd_stat + +- name: Old version cleanup | Stop old contiv-etcd + service: + name: contiv-etcd + enabled: no + state: stopped + when: contiv_etcd_stat.stat.exists + +- name: Old version cleanup | Delete old files + file: + state: absent + path: "{{ item }}" + with_items: + - /etc/systemd/system/auth-proxy.service + - /var/contiv/certs + - /usr/bin/auth_proxy.sh + - /etc/systemd/system/contiv-etcd.service + - /etc/systemd/system/contiv-etcd.service.d + +- include_tasks: old_version_cleanup_iptables.yml + when: not contiv_has_firewalld and contiv_has_iptables + +- include_tasks: old_version_cleanup_firewalld.yml + when: contiv_has_firewalld diff --git a/roles/contiv/tasks/old_version_cleanup_firewalld.yml b/roles/contiv/tasks/old_version_cleanup_firewalld.yml new file mode 100644 index 000000000..675a6358a --- /dev/null +++ b/roles/contiv/tasks/old_version_cleanup_firewalld.yml @@ -0,0 +1,11 @@ +--- +- name: Old version cleanup | Delete old firewalld rules + firewalld: + state: absent + immediate: true + permanent: true + port: "{{ item }}" + with_items: + - "9999/tcp" + - "6640/tcp" + - "8472/udp" diff --git a/roles/contiv/tasks/old_version_cleanup_iptables.yml b/roles/contiv/tasks/old_version_cleanup_iptables.yml new file mode 100644 index 000000000..513357606 --- /dev/null +++ b/roles/contiv/tasks/old_version_cleanup_iptables.yml @@ -0,0 +1,44 @@ +--- +- name: Old version cleanup | Delete old forward [in] iptables rules + iptables: + state: absent + chain: FORWARD + in_interface: "{{ item }}" + jump: ACCEPT + comment: "{{ item }} FORWARD input" + with_items: + - contivh0 + - contivh1 + notify: Save iptables rules + +- name: Old version cleanup | Delete old forward [out] iptables rules + iptables: + state: absent + chain: FORWARD + out_interface: "{{ item }}" + jump: ACCEPT + comment: "{{ item }} FORWARD output" + with_items: + - contivh0 + - contivh1 + notify: Save iptables rules + +- name: Old version cleanup | Delete old input iptables rules + iptables: + state: absent + chain: INPUT + protocol: "{{ item.split('/')[1] }}" + match: "{{ item.split('/')[1] }}" + destination_port: "{{ item.split('/')[0] }}" + comment: "{{ item.split('/')[2] }}" + jump: ACCEPT + with_items: + - "53/udp/contiv dns" + - "4789/udp/netplugin vxlan 4789" + - "8472/udp/netplugin vxlan 8472" + - "9003/tcp/contiv" + - "9002/tcp/contiv" + - "9001/tcp/contiv" + - "9999/tcp/contiv" + - "10000/tcp/Contiv auth proxy service (10000)" + notify: Save iptables rules diff --git a/roles/contiv/tasks/ovs.yml b/roles/contiv/tasks/ovs.yml index 5c92e90e9..21ba6ead4 100644 --- a/roles/contiv/tasks/ovs.yml +++ b/roles/contiv/tasks/ovs.yml @@ -1,6 +1,6 @@ --- - include_tasks: packageManagerInstall.yml - when: source_type == "packageManager" + when: contiv_source_type == "packageManager" tags: - binary-update diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml index 3367844a8..8c8e7a7bd 100644 --- a/roles/contiv/tasks/packageManagerInstall.yml +++ b/roles/contiv/tasks/packageManagerInstall.yml @@ -4,10 +4,9 @@ did_install: false - include_tasks: pkgMgrInstallers/centos-install.yml - when: (ansible_os_family == "RedHat") and - not openshift_is_atomic + when: ansible_os_family == "RedHat" and not openshift_is_atomic | bool - name: Package Manager | Set fact saying we did CentOS package install set_fact: did_install: true - when: 
(ansible_os_family == "RedHat") + when: ansible_os_family == "RedHat" diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml index 53c5b4099..2c82973d6 100644 --- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml +++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml @@ -12,9 +12,9 @@ dest: /tmp/rdo-release-ocata-2.noarch.rpm validate_certs: False environment: - http_proxy: "{{ http_proxy|default('') }}" - https_proxy: "{{ https_proxy|default('') }}" - no_proxy: "{{ no_proxy|default('') }}" + http_proxy: "{{ contiv_http_proxy|default('') }}" + https_proxy: "{{ contiv_https_proxy|default('') }}" + no_proxy: "{{ contiv_no_proxy|default('') }}" tags: - ovs_install @@ -30,9 +30,9 @@ pkg=openvswitch state=present environment: - http_proxy: "{{ http_proxy|default('') }}" - https_proxy: "{{ https_proxy|default('') }}" - no_proxy: "{{ no_proxy|default('') }}" + http_proxy: "{{ contiv_http_proxy|default('') }}" + https_proxy: "{{ contiv_https_proxy|default('') }}" + no_proxy: "{{ contiv_no_proxy|default('') }}" tags: - ovs_install register: result diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service index 9b3f12567..e2813c99d 100644 --- a/roles/contiv/templates/aci-gw.service +++ b/roles/contiv/templates/aci-gw.service @@ -1,10 +1,10 @@ [Unit] Description=Contiv ACI gw -After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift_docker_service_name }}.service +After=auditd.service systemd-user-sessions.service time-sync.target {{ contiv_openshift_docker_service_name }}.service [Service] -ExecStart={{ bin_dir }}/aci_gw.sh start -ExecStop={{ bin_dir }}/aci_gw.sh stop +ExecStart={{ contiv_bin_dir }}/aci_gw.sh start +ExecStop={{ contiv_bin_dir }}/aci_gw.sh stop KillMode=control-group Restart=always RestartSec=10 diff --git a/roles/contiv/templates/aci_gw.j2 b/roles/contiv/templates/aci_gw.j2 index ab4ad46a6..5ff349945 100644 --- a/roles/contiv/templates/aci_gw.j2 +++ b/roles/contiv/templates/aci_gw.j2 @@ -11,13 +11,13 @@ start) set -e docker run --net=host \ - -e "APIC_URL={{ apic_url }}" \ - -e "APIC_USERNAME={{ apic_username }}" \ - -e "APIC_PASSWORD={{ apic_password }}" \ - -e "APIC_LEAF_NODE={{ apic_leaf_nodes }}" \ - -e "APIC_PHYS_DOMAIN={{ apic_phys_dom }}" \ - -e "APIC_EPG_BRIDGE_DOMAIN={{ apic_epg_bridge_domain }}" \ - -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ apic_contracts_unrestricted_mode }}" \ + -e "APIC_URL={{ contiv_apic_url }}" \ + -e "APIC_USERNAME={{ contiv_apic_username }}" \ + -e "APIC_PASSWORD={{ contiv_apic_password }}" \ + -e "APIC_LEAF_NODE={{ contiv_apic_leaf_nodes }}" \ + -e "APIC_PHYS_DOMAIN={{ contiv_apic_phys_dom }}" \ + -e "APIC_EPG_BRIDGE_DOMAIN={{ contiv_apic_epg_bridge_domain }}" \ + -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ contiv_apic_contracts_unrestricted_mode }}" \ --name=contiv-aci-gw \ contiv/aci-gw ;; diff --git a/roles/contiv/templates/api-proxy-daemonset.yml.j2 b/roles/contiv/templates/api-proxy-daemonset.yml.j2 new file mode 100644 index 000000000..a15073580 --- /dev/null +++ b/roles/contiv/templates/api-proxy-daemonset.yml.j2 @@ -0,0 +1,57 @@ +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: contiv-api-proxy + namespace: kube-system +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + name: contiv-api-proxy + template: + metadata: + namespace: kube-system + labels: + name: contiv-api-proxy + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + spec: + 
serviceAccountName: contiv-api-proxy + hostNetwork: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: +{% for node in groups.oo_masters_to_config %} + - "{{ node }}" +{% endfor %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: contiv-api-proxy + image: "{{ contiv_api_proxy_image_repo }}:{{ contiv_version }}" + args: + - "--listen-address=0.0.0.0:{{ contiv_api_proxy_port }}" + - --tls-key-file=/var/contiv/api_proxy_key.pem + - --tls-certificate=/var/contiv/api_proxy_cert.pem + - "--data-store-address={{ etcd_host }}" + - --data-store-driver=etcd + - "--netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}" + ports: + - containerPort: "{{ contiv_api_proxy_port }}" + hostPort: "{{ contiv_api_proxy_port }}" + volumeMounts: + - name: secret-volume + mountPath: /var/contiv + readOnly: true + volumes: + - name: secret-volume + secret: + secretName: contiv-api-proxy-secret diff --git a/roles/contiv/templates/api-proxy-secrets.yml.j2 b/roles/contiv/templates/api-proxy-secrets.yml.j2 new file mode 100644 index 000000000..cd800c97d --- /dev/null +++ b/roles/contiv/templates/api-proxy-secrets.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: contiv-api-proxy-secret + namespace: kube-system + labels: + name: contiv-api-proxy-secret +# Use data+b64encode, because stringData doesn't preserve newlines. +data: + api_proxy_key.pem: "{{ key | b64encode }}" + api_proxy_cert.pem: "{{ cert | b64encode }}" diff --git a/roles/contiv/templates/contiv.cfg.j2 b/roles/contiv/templates/contiv.cfg.j2 index f0e99c556..1dce9fcc2 100644 --- a/roles/contiv/templates/contiv.cfg.j2 +++ b/roles/contiv/templates/contiv.cfg.j2 @@ -1,5 +1,5 @@ { - "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}", + "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + contiv_netmaster_interface].ipv4.address }}:{{ contiv_kube_master_api_port }}", "K8S_CA": "{{ openshift.common.config_base }}/node/ca.crt", "K8S_KEY": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.key", "K8S_CERT": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.crt", diff --git a/roles/contiv/templates/contiv.cfg.master.j2 b/roles/contiv/templates/contiv.cfg.master.j2 index fac8e3c4c..ca29b8001 100644 --- a/roles/contiv/templates/contiv.cfg.master.j2 +++ b/roles/contiv/templates/contiv.cfg.master.j2 @@ -1,5 +1,5 @@ { - "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}", + "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + contiv_netmaster_interface].ipv4.address }}:{{ contiv_kube_master_api_port }}", "K8S_CA": "{{ openshift.common.config_base }}/master/ca.crt", "K8S_KEY": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.key", "K8S_CERT": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.crt", diff --git a/roles/contiv/templates/etcd-daemonset.yml.j2 b/roles/contiv/templates/etcd-daemonset.yml.j2 new file mode 100644 index 000000000..76937e670 --- /dev/null +++ b/roles/contiv/templates/etcd-daemonset.yml.j2 @@ -0,0 +1,83 @@ +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: 
contiv-etcd + namespace: kube-system +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + name: contiv-etcd + template: + metadata: + namespace: kube-system + labels: + name: contiv-etcd + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + spec: + serviceAccountName: contiv-etcd + hostNetwork: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: +{% for node in groups.oo_masters_to_config %} + - "{{ node }}" +{% endfor %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + initContainers: + - name: contiv-etcd-init + image: "{{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }}" + env: + - name: ETCD_INIT_ARGSFILE + value: "{{ contiv_etcd_conf_dir }}/contiv-etcd-args" + - name: ETCD_INIT_LISTEN_PORT + value: "{{ contiv_etcd_port }}" + - name: ETCD_INIT_PEER_PORT + value: "{{ contiv_etcd_peer_port }}" + - name: ETCD_INIT_CLUSTER + value: "{{ contiv_etcd_peers }}" + - name: ETCD_INIT_DATA_DIR + value: "{{ contiv_etcd_data_dir }}" + volumeMounts: + - name: contiv-etcd-conf-dir + mountPath: "{{ contiv_etcd_conf_dir }}" + securityContext: + runAsUser: "{{ contiv_etcd_system_uid }}" + fsGroup: "{{ contiv_etcd_system_gid }}" + containers: + - name: contiv-etcd + image: "{{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}" + command: + - sh + - -c + - 'exec etcd $(cat "$ETCD_INIT_ARGSFILE")' + env: + - name: ETCD_INIT_ARGSFILE + value: "{{ contiv_etcd_conf_dir }}/contiv-etcd-args" + volumeMounts: + - name: contiv-etcd-conf-dir + mountPath: "{{ contiv_etcd_conf_dir }}" + - name: contiv-etcd-data-dir + mountPath: "{{ contiv_etcd_data_dir }}" + securityContext: + runAsUser: "{{ contiv_etcd_system_uid }}" + fsGroup: "{{ contiv_etcd_system_gid }}" + volumes: + - name: contiv-etcd-data-dir + hostPath: + type: DirectoryOrCreate + path: "{{ contiv_etcd_data_dir }}" + - name: contiv-etcd-conf-dir + hostPath: + type: DirectoryOrCreate + path: "{{ contiv_etcd_conf_dir }}" diff --git a/roles/contiv/templates/etcd-proxy-daemonset.yml.j2 b/roles/contiv/templates/etcd-proxy-daemonset.yml.j2 new file mode 100644 index 000000000..4ec6cfd76 --- /dev/null +++ b/roles/contiv/templates/etcd-proxy-daemonset.yml.j2 @@ -0,0 +1,55 @@ +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: contiv-etcd-proxy + namespace: kube-system +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + name: contiv-etcd-proxy + template: + metadata: + namespace: kube-system + labels: + name: contiv-etcd-proxy + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + spec: + serviceAccountName: contiv-etcd + hostNetwork: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: NotIn + values: +{% for node in groups.oo_masters_to_config %} + - "{{ node }}" +{% endfor %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: contiv-etcd-proxy + image: "{{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}" + command: + - etcd + - "--proxy=on" + - "--listen-client-urls=http://127.0.0.1:{{ contiv_etcd_port }}" + - "--advertise-client-urls=http://127.0.0.1:{{ contiv_etcd_port }}" + - "--initial-cluster={{ contiv_etcd_peers }}" + - "--data-dir={{ contiv_etcd_data_dir }}" + volumeMounts: + - name: contiv-etcd-data-dir + mountPath: "{{ 
contiv_etcd_data_dir }}" + securityContext: + runAsUser: "{{ contiv_etcd_system_uid }}" + fsGroup: "{{ contiv_etcd_system_gid }}" + volumes: + - name: contiv-etcd-data-dir + emptyDir: {} diff --git a/roles/contiv/templates/etcd-scc.yml.j2 b/roles/contiv/templates/etcd-scc.yml.j2 new file mode 100644 index 000000000..6c4bb1d1e --- /dev/null +++ b/roles/contiv/templates/etcd-scc.yml.j2 @@ -0,0 +1,42 @@ +allowHostDirVolumePlugin: true +allowHostIPC: false +allowHostNetwork: true +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +allowedCapabilities: [] +allowedFlexVolumes: [] +apiVersion: v1 +defaultAddCapabilities: [] +fsGroup: + ranges: + - max: "{{ contiv_etcd_system_gid }}" + min: "{{ contiv_etcd_system_gid }}" + type: MustRunAs +groups: [] +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: 'For contiv-etcd only.' + creationTimestamp: null + name: contiv-etcd +priority: null +readOnlyRootFilesystem: true +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +runAsUser: + type: MustRunAs + uid: "{{ contiv_etcd_system_uid }}" +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: MustRunAs +users: +- system:serviceaccount:kube-system:contiv-etcd +volumes: +- emptyDir +- hostPath +- secret diff --git a/roles/contiv/templates/netmaster.env.j2 b/roles/contiv/templates/netmaster.env.j2 deleted file mode 100644 index 5b5c84a2e..000000000 --- a/roles/contiv/templates/netmaster.env.j2 +++ /dev/null @@ -1,2 +0,0 @@ -NETMASTER_ARGS='--cluster-store etcd://{{ etcd_url }} --cluster-mode=kubernetes' - diff --git a/roles/contiv/templates/netmaster.j2 b/roles/contiv/templates/netmaster.j2 new file mode 100644 index 000000000..c9db122b5 --- /dev/null +++ b/roles/contiv/templates/netmaster.j2 @@ -0,0 +1 @@ +NETMASTER_ARGS='--etcd={{ contiv_etcd_url }} --listen-url=127.0.0.1:{{ contiv_netmaster_port }} --fwdmode={{ contiv_netplugin_fwd_mode }} --infra={{ contiv_fabric_mode }} --control-url={{ contiv_netmaster_ctrl_ip }}:{{ contiv_netmaster_port }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}' diff --git a/roles/contiv/templates/netmaster.service b/roles/contiv/templates/netmaster.service index ce7d0c75e..b7289bc38 100644 --- a/roles/contiv/templates/netmaster.service +++ b/roles/contiv/templates/netmaster.service @@ -4,7 +4,7 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service [Service] EnvironmentFile=/etc/default/netmaster -ExecStart={{ bin_dir }}/netmaster $NETMASTER_ARGS +ExecStart={{ contiv_bin_dir }}/netmaster $NETMASTER_ARGS KillMode=control-group Restart=always RestartSec=10 diff --git a/roles/contiv/templates/netplugin.j2 b/roles/contiv/templates/netplugin.j2 index a4928cc3d..0fd727401 100644 --- a/roles/contiv/templates/netplugin.j2 +++ b/roles/contiv/templates/netplugin.j2 @@ -1,7 +1,6 @@ {% if contiv_encap_mode == "vlan" %} -NETPLUGIN_ARGS='-vlan-if {{ netplugin_interface }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}' +NETPLUGIN_ARGS='--vlan-if={{ contiv_netplugin_interface }} --ctrl-ip={{ contiv_netplugin_ctrl_ip }} --etcd={{ contiv_etcd_url }} --fwdmode={{ contiv_netplugin_fwd_mode }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}' {% endif %} {% if contiv_encap_mode == "vxlan" %} -NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}' +NETPLUGIN_ARGS='--vtep-ip={{ contiv_netplugin_ctrl_ip }} --vxlan-port={{ 
contiv_vxlan_port }} --ctrl-ip={{ contiv_netplugin_ctrl_ip }} --etcd={{ contiv_etcd_url }} --fwdmode={{ contiv_netplugin_fwd_mode }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}' {% endif %} - diff --git a/roles/contiv/templates/netplugin.service b/roles/contiv/templates/netplugin.service index 6358d89ec..2e1ca1bdf 100644 --- a/roles/contiv/templates/netplugin.service +++ b/roles/contiv/templates/netplugin.service @@ -4,7 +4,7 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service [Service] EnvironmentFile=/etc/default/netplugin -ExecStart={{ bin_dir }}/netplugin $NETPLUGIN_ARGS +ExecStart={{ contiv_bin_dir }}/netplugin $NETPLUGIN_ARGS KillMode=control-group Restart=always RestartSec=10 diff --git a/roles/contiv_auth_proxy/README.md b/roles/contiv_auth_proxy/README.md deleted file mode 100644 index 287b6c148..000000000 --- a/roles/contiv_auth_proxy/README.md +++ /dev/null @@ -1,29 +0,0 @@ -Role Name -========= - -Role to install Contiv API Proxy and UI - -Requirements ------------- - -Docker needs to be installed to run the auth proxy container. - -Role Variables --------------- - -auth_proxy_image specifies the image with version tag to be used to spin up the auth proxy container. -auth_proxy_cert, auth_proxy_key specify files to use for the proxy server certificates. -auth_proxy_port is the host port and auth_proxy_datastore the cluster data store address. - -Dependencies ------------- - -docker - -Example Playbook ----------------- - -- hosts: netplugin-node - become: true - roles: - - { role: auth_proxy, auth_proxy_port: 10000, auth_proxy_datastore: etcd://netmaster:22379 } diff --git a/roles/contiv_auth_proxy/defaults/main.yml b/roles/contiv_auth_proxy/defaults/main.yml deleted file mode 100644 index e1d904c6a..000000000 --- a/roles/contiv_auth_proxy/defaults/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -auth_proxy_image: "contiv/auth_proxy:1.1.1" -auth_proxy_port: 10000 -contiv_certs: "/var/contiv/certs" -cluster_store: "etcd://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:22379" -auth_proxy_cert: "{{ contiv_certs }}/auth_proxy_cert.pem" -auth_proxy_key: "{{ contiv_certs }}/auth_proxy_key.pem" -auth_proxy_datastore: "{{ cluster_store }}" -auth_proxy_binaries: "/var/contiv_cache" -auth_proxy_local_install: False -auth_proxy_rule_comment: "Contiv auth proxy service" -service_vip: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}" diff --git a/roles/contiv_auth_proxy/files/auth-proxy.service b/roles/contiv_auth_proxy/files/auth-proxy.service deleted file mode 100644 index 7cd2edff1..000000000 --- a/roles/contiv_auth_proxy/files/auth-proxy.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=Contiv Proxy and UI -After=auditd.service systemd-user-sessions.service time-sync.target docker.service - -[Service] -ExecStart=/usr/bin/auth_proxy.sh start -ExecStop=/usr/bin/auth_proxy.sh stop -KillMode=control-group -Restart=on-failure -RestartSec=10 - -[Install] -WantedBy=multi-user.target diff --git a/roles/contiv_auth_proxy/files/cert.pem b/roles/contiv_auth_proxy/files/cert.pem deleted file mode 100644 index 63df4603f..000000000 --- a/roles/contiv_auth_proxy/files/cert.pem +++ /dev/null @@ -1,33 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFuTCCA6GgAwIBAgIJAOFyylO2zW2EMA0GCSqGSIb3DQEBCwUAMHMxCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UEBwwIU2FuIEpvc2UxDTALBgNVBAoM -BENQU0cxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxHTAbBgNVBAMMFGF1dGgtbG9j 
-YWwuY2lzY28uY29tMB4XDTE3MDcxMzE5NDYwMVoXDTI3MDcxMTE5NDYwMVowczEL -MAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMREwDwYDVQQHDAhTYW4gSm9zZTENMAsG -A1UECgwEQ1BTRzEWMBQGA1UECwwNSVQgRGVwYXJ0bWVudDEdMBsGA1UEAwwUYXV0 -aC1sb2NhbC5jaXNjby5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC -AQDKCg26dvsD1u3f1lCaLlVptyTyGyanaJ73mlHiUnAMcu0A/p3kzluTeQLZJxtl -MToM7rT/lun6fbhQC+7TQep9mufBzLhssyzRnT9rnGSeGwN66mO/rlYPZc5C1D7p -7QZh1uLznzgOA2zMkgnI+n6LB2TZWg+XLhZZIr5SVYE18lj0tnwq3R1uznVv9t06 -grUYK2K7x0Y3Pt2e6yV0e1w2FOGH+7v3mm0c8r1+7U+4EZ2SM3fdG7nyTL/187gl -yE8X4HOnAyYGbAnULJC02LR/DTQpv/RpLN/YJEpHZWApHZCKh+fbFdIhRRwEnT4L -DLy3GJVFDEsmFaC91wf24+HAeUl9/hRIbxo9x/7kXmrhMlK38x2oo3cPh0XZxHje -XmJUGG1OByAuIZaGFwS9lUuGTNvpN8P/v3HN/nORc0RE3fvoXIv4nuhaEfuo32q4 -dvO4aNjmxjz1JcUEx6DiMQe4ECaReYdvI+j9ZkUJj/e89iLsQ8gz5t3FTM+tmBi1 -hrRBAgWyRY5DKECVv2SNFiX55JQGA5vQDGw51qTTuhntfBhkHvhKL7V1FRZazx6N -wqFyynig/jplb1ZNdKZ9ZxngZr6qHIx4RcGaJ9HdVhik7NyUCiHjWeGagzun2Omq -FFXAD9Hmfctac5bGxx0FBi95kO8bd8b0GSIh2CWanETjawIDAQABo1AwTjAdBgNV -HQ4EFgQU5P1g5gFZot//iwEV98MwW2YXzEMwHwYDVR0jBBgwFoAU5P1g5gFZot// -iwEV98MwW2YXzEMwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAbWgN -BkFzzG5sbG7vUb23Ggv/0TCCuMtuKBGOBR0EW5Ssw6Aml7j3AGiy/1+2sdrQMsx2 -nVpexyQW5XS/X+8JjH7H7ifvwl3bVJ8xiR/9ioIJovrQojxQO0cUB2Lljj3bPd/R -/tddAhPj0uN9N7UAejA12kXGa0Rrzb2U1rIpO9jnTbQYJiTOSzFiiGRMZWx3hfsW -SDTpPmsV2Mh+jcmuxvPITl0s+vtqsm7SYoUZHwJ80LvrPbmk/5hTZGRsI3W5jipB -PpOxvBnAWnQH3miMhty2TDaQ9JjYUwnxjFFZvNIYtp8+eH4nlbSldbgZoUeAe8It -X6SsP8gT/uQh3TPvzNIfYROA7qTwoOQ8ZW8ssai/EttHAztFxketgNEfjwUTz8EJ -yKeyAJ7qk3zD5k7p33ZNLWjmN0Awx3fCE9OQmNUyNX7PpYb4i+tHWu3h6Clw0RUf -0gb1I+iyB3PXmpiYtxdMxGSi9CQIyWHzC4bsTQZkrzzIHWFSwewhUWOQ2Wko0hrv -DnkS5k0cMPn5aNxw56H6OI+6hb+y/GGkTxNY9Gbxypx6lgZson0EY80EPZOJAORM -XggJtTjiMpzvKh18DZY/Phmdh0C2tt8KYFdG83qLEhya9WZujbLAm38vIziFHbdX -jOitXBSPyVrV3JvsCVksp+YC8Lnv3FsM494R4kA= ------END CERTIFICATE----- diff --git a/roles/contiv_auth_proxy/files/key.pem b/roles/contiv_auth_proxy/files/key.pem deleted file mode 100644 index 7224e569c..000000000 --- a/roles/contiv_auth_proxy/files/key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAygoNunb7A9bt39ZQmi5Vabck8hsmp2ie95pR4lJwDHLtAP6d -5M5bk3kC2ScbZTE6DO60/5bp+n24UAvu00HqfZrnwcy4bLMs0Z0/a5xknhsDeupj -v65WD2XOQtQ+6e0GYdbi8584DgNszJIJyPp+iwdk2VoPly4WWSK+UlWBNfJY9LZ8 -Kt0dbs51b/bdOoK1GCtiu8dGNz7dnusldHtcNhThh/u795ptHPK9fu1PuBGdkjN3 -3Ru58ky/9fO4JchPF+BzpwMmBmwJ1CyQtNi0fw00Kb/0aSzf2CRKR2VgKR2Qiofn -2xXSIUUcBJ0+Cwy8txiVRQxLJhWgvdcH9uPhwHlJff4USG8aPcf+5F5q4TJSt/Md -qKN3D4dF2cR43l5iVBhtTgcgLiGWhhcEvZVLhkzb6TfD/79xzf5zkXNERN376FyL -+J7oWhH7qN9quHbzuGjY5sY89SXFBMeg4jEHuBAmkXmHbyPo/WZFCY/3vPYi7EPI -M+bdxUzPrZgYtYa0QQIFskWOQyhAlb9kjRYl+eSUBgOb0AxsOdak07oZ7XwYZB74 -Si+1dRUWWs8ejcKhcsp4oP46ZW9WTXSmfWcZ4Ga+qhyMeEXBmifR3VYYpOzclAoh -41nhmoM7p9jpqhRVwA/R5n3LWnOWxscdBQYveZDvG3fG9BkiIdglmpxE42sCAwEA -AQKCAgANVU6EoLd+EGAQZo9ZLXebi2eXxqztXV0oT/nZasFUQP1dFHCNGgU3HURP -2mHXcsE2+0XcnDQCwOs59R+kt3PnKCLlSkJdghGSH8OAsYh+WqAHK5K7oqCxUXGk -PWeNfoPuTwUZOMe1PQqgEX8t0UIqoKlKIsRmoLb+2Okge94UFlNCiwx0s7TujBd5 -9Ruycc/LsYlJhSQgHzj29OO65S03sHcVx0onU/yhbW+OAdFB/3+bl2PwppTF5cTB -UX00mRyHIdvgCLgoslaPtwUxuh9nRxLLMozJqBl5pSN1xL3s2LOiQMfPUIhWg74O -m+XtSsDlgGzRardG4ySBgsBWzcEnGWi5/xyc/6dtERzR382+CLUfOEoucGJHk6kj -RdbVx5FCawpAzjs9Wo49Vr+WQceSiBfb2+ndNUTiD0wu7xLEVPcYC6CMk71qZv5H -0qGlLhtkHF0nSQytbwqwfMz2SGDfkwIHgQ0gTKMpEMWK79E24ewE1BnMiaKC1bgk -evB6WM1YZFMKS5L7fshJcbeMe9dhSF3s+Y0MYVv5MCL1VMZyIzAcj8mkPYZyBRUk -MC87GnaebeTvHNtimvqCuWDGVI1SOoc1xtopkxinTqtIYGuQacrSmfyf9D3Rg4+l -kB0ibtJV+HLP94q266aef/PdpXszs7zo0h6skpLItW/jAuSNuQKCAQEA/VdXpMi8 
-nfOtXwOZlGA2+jShYyHyCl2TKgbpfDGl1yKNkbBrIu2/PEl1DpmzSeG1tdNCzN68 -4vEjpF/jBsdSJj4BDiRY6HEcURXpw4yTZ7oCnUCbzadLIo3wX/gFDEVZz+0nQQ29 -5x0XGuQnJXC2fe/CyrkfltKhFSYoTSjtMbma4Pm3Q3HP3wGOvoUKtKNDO5rF26Qh -YtqJgJSKBAms0wKiy9VVTa6DaXrtSnXTR+Ltud3xnWBrX1Z+idwxYt/Be5W2woHf -M5zPIqMUgry5ujtRxhLmleFXDAYbaIQR9AZXlSS3w+9Gcl5EDRkFXqlaoCfppwTR -wakj2lNjbAidPwKCAQEAzCjgko4/Yss/0dCs8ySKd2IaRF93OwC/E2SHVqe5bATh -rVmDn/KIH4J2fI4FiaIHELT1CU5vmganYbK2k7CoJztjJltM1B7rkpHiVSL+qMqn -yBZFg3LFq9eiBPZHyQEc+HMJUhFRexjdeqLH78HCoPz1QnKo2xRoGHhSQ/Rh6lXo -20tldL9HrSxPRmwxnyLgWGcWopv/92JNxu6FgnZcnsVjkpO2mriLD7+Ty5qfvkwc -RFDBYnq2JjBcvqngrzDIGDzC7hTA5BRuuQdNMZggJwO6nKdZDUrq5NIo9B07FLj1 -IRMVm7D1vJYzYI6HW7Wj4vNRXMY8jG1fwvNG0+xy1QKCAQEA7m14R9bAZWuDnGt3 -7APNWheUWAcHk6fTq/cLYV4cdWfIkvfVLO9STrvXliEjcoIhkPk94jAy1ucZo0a3 -FJccgm9ScOvWXRSvEMUt12ODC1ktwq+esqMi/GdXdgqnPZA7YYwRqJD1TAC90Qou -qXb12Xp/+mjWCQ08mvnpbgz5hxXmZJvAVZJUj84YeMgfdjg9O2iDlB5ZaX7BcCjb -58bvRzww2ONzQAPhG7Gch7pyWTKCh64RCgtHold2CesY87QglV4mvdKarSmEbFXN -JOnXZiUT5fW93AtS8DcDLo81klMxtGT1KksUIukC5MzKl/eNGjPWG+FWRAwaeQyI -ApHs4wKCAQAI10RSVGKeTprm5Rh4Nv7gCJmGmHO7VF7x4gqSUBURfmyfax7uEDyg -0K982VGYEjIoIQ3zZzgh/WPGMU0CvEWr3UB/6rg6/1PINxUMBsXsXUpCueQsuw2g -UWgsutWE+M1eXOzsZt+Waw88PkxWL5fUDOA6DmkNg6a2WI+Hbc/HrAy3Yl50Xcwm -zaJpNEo5z/LTITOzuvmsps8jbDTP33xHS9jyAf+IV7F97xfhW0LLpNQciTq2nwXA -RZvejdCzBXPEyOzQDooD1natAInxOds6lUjBe+W5U6M0YX1whMuILDJBSmhHI7Sg -hAiZh9KIwCbmrw6468S3eA0LjillB/o5AoIBAQCg93syT50nYF2UWWP/rEa7qf6h -+YpBPpJskIl3NDMJtie9OcdsoFpjblpFbsMqsSag9KhGl7wn4f8qXO0HERSb8oYd -1Zu6BgUCuRXuAKNI4f508IooNpXx9y7xxl4giFBnDPa6W3KWqZ2LMDt92htMd/Zm -qvoyYZhFhMSyKFzPDAFdsZijJgahqJRKhHeW9BsPqho5i7Ys+PhE8e/vUZs2zUeS -QEHWhVisDTNKOoJIdz7JXFgEXCPTLAxXIIhYSkIfQxHxsWjt0vs79tzUkV8NlpKt -d7s0iyHnD6kDvoxYOSI9YmSEnnFBFdgeiD+/VD+7enOdqb5MHsjuw+by09ft ------END RSA PRIVATE KEY----- diff --git a/roles/contiv_auth_proxy/handlers/main.yml b/roles/contiv_auth_proxy/handlers/main.yml deleted file mode 100644 index 9cb9bea49..000000000 --- a/roles/contiv_auth_proxy/handlers/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# handlers file for auth_proxy diff --git a/roles/contiv_auth_proxy/tasks/cleanup.yml b/roles/contiv_auth_proxy/tasks/cleanup.yml deleted file mode 100644 index a29659cc9..000000000 --- a/roles/contiv_auth_proxy/tasks/cleanup.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -- name: stop auth-proxy container - service: name=auth-proxy state=stopped - -- name: cleanup iptables for auth proxy - shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})" - become: true - with_items: - - "{{ auth_proxy_port }}" diff --git a/roles/contiv_auth_proxy/tasks/main.yml b/roles/contiv_auth_proxy/tasks/main.yml deleted file mode 100644 index 74e7bf794..000000000 --- a/roles/contiv_auth_proxy/tasks/main.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -# tasks file for auth_proxy -- name: setup iptables for auth proxy - shell: > - ( iptables -L INPUT | grep "{{ auth_proxy_rule_comment }} ({{ item }})" ) || \ - iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})" - become: true - with_items: - - "{{ auth_proxy_port }}" - -# Load the auth-proxy-image from local tar. 
Ignore any errors to handle the -# case where the image is not built in -- name: copy auth-proxy image - copy: src={{ auth_proxy_binaries }}/auth-proxy-image.tar dest=/tmp/auth-proxy-image.tar - when: auth_proxy_local_install == True - -- name: load auth-proxy image - shell: docker load -i /tmp/auth-proxy-image.tar - when: auth_proxy_local_install == True - -- name: create cert folder for proxy - file: path=/var/contiv/certs state=directory - -- name: copy shell script for starting auth-proxy - template: src=auth_proxy.j2 dest=/usr/bin/auth_proxy.sh mode=u=rwx,g=rx,o=rx - -- name: copy cert for starting auth-proxy - copy: src=cert.pem dest=/var/contiv/certs/auth_proxy_cert.pem mode=u=rw,g=r,o=r - -- name: copy key for starting auth-proxy - copy: src=key.pem dest=/var/contiv/certs/auth_proxy_key.pem mode=u=rw,g=r,o=r - -- name: copy systemd units for auth-proxy - copy: src=auth-proxy.service dest=/etc/systemd/system/auth-proxy.service - -- name: start auth-proxy container - systemd: name=auth-proxy daemon_reload=yes state=started enabled=yes diff --git a/roles/contiv_auth_proxy/templates/auth_proxy.j2 b/roles/contiv_auth_proxy/templates/auth_proxy.j2 deleted file mode 100644 index 0ab8c831b..000000000 --- a/roles/contiv_auth_proxy/templates/auth_proxy.j2 +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -usage="$0 start/stop" -if [ $# -ne 1 ]; then - echo USAGE: $usage - exit 1 -fi - -case $1 in -start) - set -e - - /usr/bin/docker run --rm \ - -p 10000:{{ auth_proxy_port }} \ - --net=host --name=auth-proxy \ - -e NO_NETMASTER_STARTUP_CHECK=1 \ - -v /var/contiv:/var/contiv:z \ - {{ auth_proxy_image }} \ - --tls-key-file={{ auth_proxy_key }} \ - --tls-certificate={{ auth_proxy_cert }} \ - --data-store-address={{ auth_proxy_datastore }} \ - --netmaster-address={{ service_vip }}:9999 \ - --listen-address=:10000 - ;; - -stop) - # don't stop on error - /usr/bin/docker stop auth-proxy - /usr/bin/docker rm -f -v auth-proxy - ;; - -*) - echo USAGE: $usage - exit 1 - ;; -esac diff --git a/roles/contiv_auth_proxy/tests/inventory b/roles/contiv_auth_proxy/tests/inventory deleted file mode 100644 index d18580b3c..000000000 --- a/roles/contiv_auth_proxy/tests/inventory +++ /dev/null @@ -1 +0,0 @@ -localhost
\ No newline at end of file diff --git a/roles/contiv_auth_proxy/tests/test.yml b/roles/contiv_auth_proxy/tests/test.yml deleted file mode 100644 index 2af3250cd..000000000 --- a/roles/contiv_auth_proxy/tests/test.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- hosts: localhost - remote_user: root - roles: - - auth_proxy diff --git a/roles/contiv_auth_proxy/vars/main.yml b/roles/contiv_auth_proxy/vars/main.yml deleted file mode 100644 index 9032766c4..000000000 --- a/roles/contiv_auth_proxy/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# vars file for auth_proxy diff --git a/roles/contiv_facts/defaults/main.yaml b/roles/contiv_facts/defaults/main.yaml index 7b8150954..c1622c56a 100644 --- a/roles/contiv_facts/defaults/main.yaml +++ b/roles/contiv_facts/defaults/main.yaml @@ -1,13 +1,10 @@ --- # The directory where binaries are stored on Ansible # managed systems. -bin_dir: /usr/bin +contiv_bin_dir: /usr/bin # The directory used by Ansible to temporarily store # files on Ansible managed systems. -ansible_temp_dir: /tmp/.ansible/files +contiv_ansible_temp_dir: /tmp/.ansible/files -source_type: packageManager - -# Whether or not to also install and enable the Contiv auth_proxy -contiv_enable_auth_proxy: false +contiv_source_type: packageManager diff --git a/roles/contiv_facts/tasks/fedora-install.yml b/roles/contiv_facts/tasks/fedora-install.yml index 932ff091a..b8239a636 100644 --- a/roles/contiv_facts/tasks/fedora-install.yml +++ b/roles/contiv_facts/tasks/fedora-install.yml @@ -11,9 +11,9 @@ retries: 5 delay: 10 environment: - https_proxy: "{{ https_proxy }}" - http_proxy: "{{ http_proxy }}" - no_proxy: "{{ no_proxy }}" + https_proxy: "{{ contiv_https_proxy }}" + http_proxy: "{{ contiv_http_proxy }}" + no_proxy: "{{ contiv_no_proxy }}" - name: Install libselinux-python command: dnf install {{ item }} -y @@ -21,6 +21,6 @@ - python-dnf - libselinux-python environment: - https_proxy: "{{ https_proxy }}" - http_proxy: "{{ http_proxy }}" - no_proxy: "{{ no_proxy }}" + https_proxy: "{{ contiv_https_proxy }}" + http_proxy: "{{ contiv_http_proxy }}" + no_proxy: "{{ contiv_no_proxy }}" diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml index ced04759d..11f1e1369 100644 --- a/roles/contiv_facts/tasks/main.yml +++ b/roles/contiv_facts/tasks/main.yml @@ -4,42 +4,28 @@ register: distro check_mode: no -- name: Init the is_coreos fact +- name: Init the contiv_is_coreos fact set_fact: - is_coreos: false + contiv_is_coreos: false -- name: Set the is_coreos fact +- name: Set the contiv_is_coreos fact set_fact: - is_coreos: true + contiv_is_coreos: true when: "'CoreOS' in distro.stdout" -- name: Set docker config file directory - set_fact: - docker_config_dir: "/etc/sysconfig" - -- name: Override docker config file directory for Debian - set_fact: - docker_config_dir: "/etc/default" - when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu" - -- name: Create config file directory - file: - path: "{{ docker_config_dir }}" - state: directory - - name: Set the bin directory path for CoreOS set_fact: - bin_dir: "/opt/bin" - when: is_coreos + contiv_bin_dir: "/opt/bin" + when: contiv_is_coreos - name: Create the directory used to store binaries file: - path: "{{ bin_dir }}" + path: "{{ contiv_bin_dir }}" state: directory - name: Create Ansible temp directory file: - path: "{{ ansible_temp_dir }}" + path: "{{ contiv_ansible_temp_dir }}" state: directory - name: Determine if has rpm @@ -48,26 +34,26 @@ changed_when: false check_mode: no -- name: Init the has_rpm fact 
+- name: Init the contiv_has_rpm fact set_fact: - has_rpm: false + contiv_has_rpm: false -- name: Set the has_rpm fact +- name: Set the contiv_has_rpm fact set_fact: - has_rpm: true + contiv_has_rpm: true when: s.stat.exists -- name: Init the has_firewalld fact +- name: Init the contiv_has_firewalld fact set_fact: - has_firewalld: false + contiv_has_firewalld: false -- name: Init the has_iptables fact +- name: Init the contiv_has_iptables fact set_fact: - has_iptables: false + contiv_has_iptables: false # collect information about what packages are installed - include_tasks: rpm.yml - when: has_rpm + when: contiv_has_rpm - include_tasks: fedora-install.yml when: not openshift_is_atomic and ansible_distribution == "Fedora" diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml index d12436f96..dc6c5d3b7 100644 --- a/roles/contiv_facts/tasks/rpm.yml +++ b/roles/contiv_facts/tasks/rpm.yml @@ -13,9 +13,9 @@ failed_when: false check_mode: no -- name: Set the has_firewalld fact +- name: Set the contiv_has_firewalld fact set_fact: - has_firewalld: true + contiv_has_firewalld: true when: s.rc == 0 and ss.rc == 0 - name: Determine if iptables-services installed @@ -25,7 +25,7 @@ failed_when: false check_mode: no -- name: Set the has_iptables fact +- name: Set the contiv_has_iptables fact set_fact: - has_iptables: true + contiv_has_iptables: true when: s.rc == 0 diff --git a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml index d4518554c..78578a055 100644 --- a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml @@ -79,13 +79,6 @@ when: etcd_client_certs_missing | bool delegate_to: "{{ etcd_ca_host }}" -- name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/etcd_certificates-XXXXXXX - register: g_etcd_client_mktemp - changed_when: False - when: etcd_client_certs_missing | bool - become: no - - name: Create a tarball of the etcd certs command: > tar -czvf {{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz @@ -101,8 +94,7 @@ - name: Retrieve the etcd cert tarballs fetch: src: "{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz" - dest: "{{ g_etcd_client_mktemp.stdout }}/" - flat: yes + dest: "/tmp" fail_on_missing: yes validate_checksum: yes when: etcd_client_certs_missing | bool @@ -116,10 +108,15 @@ - name: Unarchive etcd cert tarballs unarchive: - src: "{{ g_etcd_client_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz" + src: "/tmp/{{ inventory_hostname }}/{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz" dest: "{{ etcd_cert_config_dir }}" when: etcd_client_certs_missing | bool +- name: Delete temporary directory + local_action: file path="/tmp/{{ inventory_hostname }}" state=absent + changed_when: False + when: etcd_client_certs_missing | bool + - file: path: "{{ etcd_cert_config_dir }}/{{ item }}" owner: root @@ -130,9 +127,3 @@ - "{{ etcd_cert_prefix }}client.key" - "{{ etcd_cert_prefix }}ca.crt" when: etcd_client_certs_missing | bool - -- name: Delete temporary directory - local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent - changed_when: False - when: etcd_client_certs_missing | bool - become: no diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml index 59a6b6590..987380d0c 100644 --- 
a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml @@ -105,13 +105,6 @@ when: etcd_server_certs_missing | bool delegate_to: "{{ etcd_ca_host }}" -- name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/etcd_certificates-XXXXXXX - become: no - register: g_etcd_server_mktemp - changed_when: False - when: etcd_server_certs_missing | bool - - name: Create a tarball of the etcd certs command: > tar -czvf {{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz @@ -127,8 +120,7 @@ - name: Retrieve etcd cert tarball fetch: src: "{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz" - dest: "{{ g_etcd_server_mktemp.stdout }}/" - flat: yes + dest: "/tmp" fail_on_missing: yes validate_checksum: yes when: etcd_server_certs_missing | bool @@ -144,7 +136,7 @@ - name: Unarchive cert tarball unarchive: - src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz" + src: "/tmp/{{ inventory_hostname }}/{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz" dest: "{{ etcd_cert_config_dir }}" when: etcd_server_certs_missing | bool @@ -161,8 +153,7 @@ - name: Retrieve etcd ca cert tarball fetch: src: "{{ etcd_generated_certs_dir }}/{{ etcd_ca_name }}.tgz" - dest: "{{ g_etcd_server_mktemp.stdout }}/" - flat: yes + dest: "/tmp" fail_on_missing: yes validate_checksum: yes when: etcd_server_certs_missing | bool @@ -177,8 +168,7 @@ when: etcd_server_certs_missing | bool - name: Delete temporary directory - local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent - become: no + local_action: file path="/tmp/{{ inventory_hostname }}" state=absent changed_when: False when: etcd_server_certs_missing | bool diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py index 83ca83350..da7e7b1da 100644 --- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py +++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -31,6 +31,7 @@ class CallbackModule(CallbackBase): 'installer_phase_node', 'installer_phase_glusterfs', 'installer_phase_hosted', + 'installer_phase_web_console', 'installer_phase_metrics', 'installer_phase_logging', 'installer_phase_prometheus', @@ -80,6 +81,10 @@ class CallbackModule(CallbackBase): 'title': 'Hosted Install', 'playbook': 'playbooks/openshift-hosted/config.yml' }, + 'installer_phase_web_console': { + 'title': 'Web Console Install', + 'playbook': 'playbooks/openshift-web-console/config.yml' + }, 'installer_phase_metrics': { 'title': 'Metrics Install', 'playbook': 'playbooks/openshift-metrics/config.yml' diff --git a/roles/lib_openshift/src/test/unit/test_oc_scale.py b/roles/lib_openshift/src/test/unit/test_oc_scale.py index d810735f2..9d10c84f3 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_scale.py +++ b/roles/lib_openshift/src/test/unit/test_oc_scale.py @@ -27,7 +27,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('oc_scale.Utils.create_tmpfile_copy') @mock.patch('oc_scale.OCScale.openshift_cmd') def test_state_list(self, mock_openshift_cmd, mock_tmpfile_copy): - ''' Testing a get ''' + ''' Testing a list ''' params = {'name': 'router', 'namespace': 'default', 'replicas': 2, @@ -71,8 +71,296 @@ class OCScaleTest(unittest.TestCase): @mock.patch('oc_scale.Utils.create_tmpfile_copy') @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_state_present(self, mock_openshift_cmd, 
mock_tmpfile_copy): + ''' Testing a state present ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 2, + 'state': 'present', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertFalse(results['changed']) + self.assertEqual(results['state'], 'present') + self.assertEqual(results['result'][0], 2) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_scale_up(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing a scale up ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 3, + 'state': 'present', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + dc_updated = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6559", + "generation": 9, + "creationTimestamp": "2017-01-24T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 3, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc replace', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc_updated, + 'returncode': 0}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertTrue(results['changed']) + self.assertEqual(results['state'], 'present') + self.assertEqual(results['result'][0], 3) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_scale_down(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing a scale down ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 1, + 'state': 'present', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": 
"6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + dc_updated = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6560", + "generation": 9, + "creationTimestamp": "2017-01-24T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 1, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc replace', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc_updated, + 'returncode': 0}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertTrue(results['changed']) + self.assertEqual(results['state'], 'present') + self.assertEqual(results['result'][0], 1) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_scale_failed(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing a scale failure ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 1, + 'state': 'present', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + error_message = "foo" + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc replace', + 'results': error_message, + 'returncode': 1}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertTrue(results['failed']) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_state_unknown(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing an unknown state ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 2, + 'state': 'unknown-state', + 'kind': 'dc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + dc = '''{"kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 2, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get dc router -n default', + 'results': dc, + 'returncode': 0}] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = 
OCScale.run_ansible(params, False) + + self.assertFalse('changed' in results) + self.assertEqual(results['failed'], True) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') def test_scale(self, mock_openshift_cmd, mock_tmpfile_copy): - ''' Testing a get ''' + ''' Testing scale ''' params = {'name': 'router', 'namespace': 'default', 'replicas': 3, @@ -120,8 +408,57 @@ class OCScaleTest(unittest.TestCase): @mock.patch('oc_scale.Utils.create_tmpfile_copy') @mock.patch('oc_scale.OCScale.openshift_cmd') + def test_scale_rc(self, mock_openshift_cmd, mock_tmpfile_copy): + ''' Testing scale for replication controllers ''' + params = {'name': 'router', + 'namespace': 'default', + 'replicas': 3, + 'state': 'list', + 'kind': 'rc', + 'kubeconfig': '/etc/origin/master/admin.kubeconfig', + 'debug': False} + + rc = '''{"kind": "ReplicationController", + "apiVersion": "v1", + "metadata": { + "name": "router", + "namespace": "default", + "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router", + "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42", + "resourceVersion": "6558", + "generation": 8, + "creationTimestamp": "2017-01-23T20:58:07Z", + "labels": { + "router": "router" + } + }, + "spec": { + "replicas": 3, + } + }''' + + mock_openshift_cmd.side_effect = [ + {"cmd": '/usr/bin/oc get rc router -n default', + 'results': rc, + 'returncode': 0}, + {"cmd": '/usr/bin/oc create -f /tmp/router -n default', + 'results': '', + 'returncode': 0} + ] + + mock_tmpfile_copy.side_effect = [ + '/tmp/mocked_kubeconfig', + ] + + results = OCScale.run_ansible(params, False) + + self.assertFalse(results['changed']) + self.assertEqual(results['result'][0], 3) + + @mock.patch('oc_scale.Utils.create_tmpfile_copy') + @mock.patch('oc_scale.OCScale.openshift_cmd') def test_no_dc_scale(self, mock_openshift_cmd, mock_tmpfile_copy): - ''' Testing a get ''' + ''' Testing scale for a nonexistent dc ''' params = {'name': 'not_there', 'namespace': 'default', 'replicas': 3, @@ -205,7 +542,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which): - ''' Testing binary lookup fallback ''' + ''' Testing binary lookup fallback in py3 ''' mock_env_get.side_effect = lambda _v, _d: '' @@ -217,7 +554,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which): - ''' Testing binary lookup in path ''' + ''' Testing binary lookup in path in py3 ''' oc_bin = '/usr/bin/oc' @@ -231,7 +568,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which): - ''' Testing binary lookup in /usr/local/bin ''' + ''' Testing binary lookup in /usr/local/bin in py3 ''' oc_bin = '/usr/local/bin/oc' @@ -245,7 +582,7 @@ class OCScaleTest(unittest.TestCase): @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which): - ''' Testing binary lookup in ~/bin ''' + ''' Testing binary lookup in ~/bin in py3 ''' oc_bin = os.path.expanduser('~/bin/oc') diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml index 696b323c0..786db1570 100644 --- a/roles/openshift_aws/tasks/provision_instance.yml +++ b/roles/openshift_aws/tasks/provision_instance.yml @@
-14,11 +14,7 @@ instance_type: m4.xlarge vpc_subnet_id: "{{ openshift_aws_subnet_id | default(subnetout.subnets[0].id) }}" image: "{{ openshift_aws_base_ami }}" - volumes: - - device_name: /dev/sdb - volume_type: gp2 - volume_size: 100 - delete_on_termination: true + volumes: "{{ openshift_aws_node_group_config_node_volumes }}" wait: yes exact_count: 1 count_tag: @@ -46,5 +42,5 @@ - name: add host to nodes add_host: - groups: nodes + groups: nodes,g_new_node_hosts name: "{{ instancesout.instances[0].public_dns_name }}" diff --git a/roles/openshift_cli/defaults/main.yml b/roles/openshift_cli/defaults/main.yml index 631a0455e..9faec639f 100644 --- a/roles/openshift_cli/defaults/main.yml +++ b/roles/openshift_cli/defaults/main.yml @@ -8,4 +8,4 @@ system_images_registry: "{{ system_images_registry_dict[openshift_deployment_typ openshift_use_crio_only: False l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(False)) | bool }}" -l_use_cli_atomic_image: "{{ openshift_use_crio_only or l_is_system_container_image }}" +l_use_cli_atomic_image: "{{ (openshift_use_crio_only | bool) or (l_is_system_container_image | bool) }}" diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml index a09a598bd..ff04cdf9c 100644 --- a/roles/openshift_examples/tasks/main.yml +++ b/roles/openshift_examples/tasks/main.yml @@ -13,18 +13,23 @@ # use it either due to changes introduced in Ansible 2.x. - name: Create local temp dir for OpenShift examples copy local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - become: False register: copy_examples_mktemp run_once: True +- name: Create local temp dir for OpenShift examples copy + local_action: command chmod 755 "{{ copy_examples_mktemp.stdout }}" + run_once: True + - name: Create tar of OpenShift examples local_action: command tar -C "{{ role_path }}/files/examples/{{ content_version }}/" -cvf "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar" . args: # Disables the following warning: # Consider using unarchive module rather than running tar warn: no - become: False - register: copy_examples_tar + +- name: Create local temp dir for OpenShift examples copy + local_action: command chmod 744 "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar" + run_once: True - name: Create the remote OpenShift examples directory file: @@ -38,7 +43,6 @@ dest: "{{ examples_base }}/" - name: Cleanup the OpenShift Examples temp dir - become: False local_action: file dest="{{ copy_examples_mktemp.stdout }}" state=absent # Done copying examples diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py index b7b16e0ea..83e551b5d 100644 --- a/roles/openshift_health_checker/openshift_checks/__init__.py +++ b/roles/openshift_health_checker/openshift_checks/__init__.py @@ -95,6 +95,13 @@ class OpenShiftCheck(object): # These are intended to be a sequential record of what the check observed and determined. 
self.logs = [] + def template_var(self, var_to_template): + """Return a templated variable if self._templar is not None, else + just return the variable as-is""" + if self._templar is not None: + return self._templar.template(var_to_template) + return var_to_template + @abstractproperty def name(self): """The name of this check, usually derived from the class name.""" diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 744b79c1a..7afb8f730 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -64,7 +64,9 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): self.registries["configured"] = regs # for the oreg_url registry there may be credentials specified - components = self.get_var("oreg_url", default="").split('/') + oreg_url = self.get_var("oreg_url", default="") + oreg_url = self.template_var(oreg_url) + components = oreg_url.split('/') self.registries["oreg"] = "" if len(components) < 3 else components[0] # Retrieve and template registry credentials, if provided @@ -72,9 +74,8 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): oreg_auth_user = self.get_var('oreg_auth_user', default='') oreg_auth_password = self.get_var('oreg_auth_password', default='') if oreg_auth_user != '' and oreg_auth_password != '': - if self._templar is not None: - oreg_auth_user = self._templar.template(oreg_auth_user) - oreg_auth_password = self._templar.template(oreg_auth_password) + oreg_auth_user = self.template_var(oreg_auth_user) + oreg_auth_password = self.template_var(oreg_auth_password) self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password)) # record whether we could reach a registry or not (and remember results) @@ -153,6 +154,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # template for images that run on top of OpenShift image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}") image_url = self.get_var("oreg_url", default="") or image_url + image_url = self.template_var(image_url) if 'oo_nodes_to_config' in host_groups: for suffix in NODE_IMAGE_SUFFIXES: required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag)) diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml index fca3485fd..d7cc1e288 100644 --- a/roles/openshift_hosted_templates/meta/main.yml +++ b/roles/openshift_hosted_templates/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_utils +- role: openshift_facts diff --git a/roles/openshift_hosted_templates/tasks/main.yml b/roles/openshift_hosted_templates/tasks/main.yml index b2313c297..672d25b4d 100644 --- a/roles/openshift_hosted_templates/tasks/main.yml +++ b/roles/openshift_hosted_templates/tasks/main.yml @@ -1,20 +1,25 @@ --- - name: Create local temp dir for OpenShift hosted templates copy local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - become: False register: copy_hosted_templates_mktemp run_once: True # AUDIT:changed_when: not set here because this task actually # creates something +- name: Create local temp dir for OpenShift examples copy + local_action: command chmod 755 "{{ copy_hosted_templates_mktemp.stdout }}" + run_once: True + - name: Create tar of OpenShift 
examples local_action: command tar -C "{{ role_path }}/files/{{ content_version }}/{{ hosted_deployment_type }}" -cvf "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" . args: # Disables the following warning: # Consider using unarchive module rather than running tar warn: no - become: False - register: copy_hosted_templates_tar + +- name: Chmod local tar of OpenShift hosted templates + local_action: command chmod 744 "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" + run_once: True - name: Create remote OpenShift hosted templates directory file: @@ -28,7 +33,6 @@ dest: "{{ hosted_base }}/" - name: Cleanup the OpenShift hosted templates temp dir - become: False local_action: file dest="{{ copy_hosted_templates_mktemp.stdout }}" state=absent - name: Modify registry paths if registry_url is not registry.access.redhat.com diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index b1ceade88..fbc3e3fd1 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -130,3 +130,14 @@ name: openshift_logging_eventrouter when: not openshift_logging_install_eventrouter | default(false) | bool + +# Update asset config in openshift-web-console namespace +- name: Remove Kibana route information from web console asset config + include_role: + name: openshift_web_console + tasks_from: update_asset_config.yml + vars: + asset_config_edits: + - key: loggingPublicURL + value: "" + when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 6aae251c1..67904a9d3 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -75,7 +75,7 @@ elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir') }}" # We don't allow scaling down of ES nodes currently -- import_role: +- include_role: name: openshift_logging_elasticsearch vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -103,7 +103,7 @@ - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0 # Create any new DC that may be required -- import_role: +- include_role: name: openshift_logging_elasticsearch vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -137,7 +137,7 @@ when: - openshift_logging_use_ops | bool -- import_role: +- include_role: name: openshift_logging_elasticsearch vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -180,7 +180,7 @@ - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0 # Create any new DC that may be required -- import_role: +- include_role: name: openshift_logging_elasticsearch vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -314,4 +314,16 @@ openshift_logging_install_eventrouter | default(false) | bool + +# TODO: Remove when asset config is removed from master-config.yaml - include_tasks: update_master_config.yaml + +# Update asset config in openshift-web-console namespace +- name: Add Kibana route information to web console asset config + include_role: + name: openshift_web_console + tasks_from: update_asset_config.yml + vars: +
asset_config_edits: + - key: loggingPublicURL + value: "https://{{ openshift_logging_kibana_hostname }}" + when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml index b96b8e29d..c0f42ba97 100644 --- a/roles/openshift_logging/tasks/update_master_config.yaml +++ b/roles/openshift_logging/tasks/update_master_config.yaml @@ -1,4 +1,5 @@ --- +# TODO: Remove when asset config is removed from master-config.yaml - name: Adding Kibana route information to loggingPublicURL modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 9bd37f33c..bf3b743af 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -181,7 +181,9 @@ changed_when: no # create diff between current configmap files and our current files -- import_role: +# NOTE: include_role must be used instead of import_role because +# this task file is looped over from another role. +- include_role: name: openshift_logging tasks_from: patch_configmap_files.yaml vars: diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml index 106909941..0866fe0d2 100644 --- a/roles/openshift_metrics/tasks/install_metrics.yaml +++ b/roles/openshift_metrics/tasks/install_metrics.yaml @@ -67,8 +67,20 @@ with_items: "{{ hawkular_agent_object_defs.results }}" when: openshift_metrics_install_hawkular_agent | bool +# TODO: Remove when asset config is removed from master-config.yaml - include_tasks: update_master_config.yaml +# Update asset config in openshift-web-console namespace +- name: Add metrics route information to web console asset config + include_role: + name: openshift_web_console + tasks_from: update_asset_config.yml + vars: + asset_config_edits: + - key: metricsPublicURL + value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics" + when: openshift_web_console_install | default(true) | bool + - command: > {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml index 0ab0eec4b..610c7b4e5 100644 --- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml +++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml @@ -18,3 +18,14 @@ clusterrolebinding/heapster-cluster-reader clusterrolebinding/hawkular-metrics changed_when: delete_metrics.stdout != 'No resources found' + +# Update asset config in openshift-web-console namespace +- name: Remove metrics route information from web console asset config + include_role: + name: openshift_web_console + tasks_from: update_asset_config.yml + vars: + asset_config_edits: + - key: metricsPublicURL + value: "" + when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml index 5059d8d94..6567fcb4f 100644 --- a/roles/openshift_metrics/tasks/update_master_config.yaml +++ b/roles/openshift_metrics/tasks/update_master_config.yaml @@ -1,4 +1,5 @@ --- +# TODO: Remove when asset config is removed from master-config.yaml - name: Adding metrics route information to metricsPublicURL modify_yaml: dest: "{{ openshift.common.config_base 
}}/master/master-config.yaml" diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 27fe2f5c0..c1fab4382 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -169,7 +169,7 @@ oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker" oreg_auth_credentials_replace: False l_bind_docker_reg_auth: False openshift_use_crio: False -openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False)) }}" +openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False) | bool) }}" openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index 86a2ca16f..59e743dce 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -13,6 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift -- role: openshift_cloud_provider - when: not (openshift_node_upgrade_in_progress | default(False)) - role: lib_utils diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 103572291..754ecacaf 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -4,7 +4,7 @@ when: - (not ansible_selinux or ansible_selinux.status != 'enabled') - openshift_deployment_type == 'openshift-enterprise' - - not openshift_use_crio + - not openshift_use_crio | bool - include_tasks: dnsmasq_install.yml - include_tasks: dnsmasq.yml @@ -50,7 +50,7 @@ name: cri-o enabled: yes state: restarted - when: openshift_use_crio + when: openshift_use_crio | bool register: task_result failed_when: - task_result is failed diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml index 30ef9ef44..d7dce6969 100644 --- a/roles/openshift_node/tasks/openvswitch_system_container.yml +++ b/roles/openshift_node/tasks/openvswitch_system_container.yml @@ -1,11 +1,11 @@ --- - set_fact: l_service_name: "cri-o" - when: openshift_use_crio + when: openshift_use_crio | bool - set_fact: l_service_name: "{{ openshift_docker_service_name }}" - when: not openshift_use_crio + when: not openshift_use_crio | bool - name: Pre-pull OpenVSwitch system container image command: > diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml index 50044eb3e..210d174c2 100644 --- a/roles/openshift_node/tasks/upgrade/config_changes.yml +++ b/roles/openshift_node/tasks/upgrade/config_changes.yml @@ -60,6 +60,7 @@ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service" src: "node.service.j2" register: l_node_unit + when: not openshift_is_containerized | bool - name: Reset selinux context command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes @@ -74,4 +75,3 @@ # require a service to be part of the call. 
- name: Reload systemd units command: systemctl daemon-reload - when: l_node_unit is changed diff --git a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml index 0a14e5174..e5477f389 100644 --- a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml +++ b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml @@ -10,6 +10,6 @@ docker pull {{ osn_ovs_image }}:{{ openshift_image_tag }} register: pull_result changed_when: "'Downloaded newer image' in pull_result.stdout" - when: openshift_use_openshift_sdn | bool + when: openshift_node_use_openshift_sdn | bool - include_tasks: ../container_images.yml diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2 index da751bd65..777f4a449 100644 --- a/roles/openshift_node/templates/node.service.j2 +++ b/roles/openshift_node/templates/node.service.j2 @@ -8,7 +8,7 @@ Wants={{ openshift_docker_service_name }}.service Documentation=https://github.com/openshift/origin Requires=dnsmasq.service After=dnsmasq.service -{% if openshift_use_crio %}Wants=cri-o.service{% endif %} +{% if openshift_use_crio | bool %}Wants=cri-o.service{% endif %} [Service] Type=notify diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index f091263f5..5f2a94ea2 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -14,7 +14,7 @@ imageConfig: latest: {{ openshift_node_image_config_latest }} kind: NodeConfig kubeletArguments: {{ l2_openshift_node_kubelet_args | default(None) | lib_utils_to_padded_yaml(level=1) }} -{% if openshift_use_crio %} +{% if openshift_use_crio | bool %} container-runtime: - remote container-runtime-endpoint: diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service index 873744f34..9fe779057 100644 --- a/roles/openshift_node/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node/templates/openshift.docker.node.dep.service @@ -3,7 +3,7 @@ Requires={{ openshift_docker_service_name }}.service After={{ openshift_docker_service_name }}.service PartOf={{ openshift_service_type }}-node.service Before={{ openshift_service_type }}-node.service -{% if openshift_use_crio %}Wants=cri-o.service{% endif %} +{% if openshift_use_crio | bool %}Wants=cri-o.service{% endif %} [Service] ExecStart=/bin/bash -c 'if [[ -f /usr/bin/docker-current ]]; \ diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml index e95e38fdf..5f73f3bdc 100644 --- a/roles/openshift_node_certificates/tasks/main.yml +++ b/roles/openshift_node_certificates/tasks/main.yml @@ -94,13 +94,6 @@ delegate_to: "{{ openshift_ca_host }}" run_once: true -- name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - register: node_cert_mktemp - changed_when: False - when: node_certs_missing | bool - become: no - - name: Create a tarball of the node config directories command: > tar -czvf {{ openshift_node_generated_config_dir }}.tgz @@ -117,8 +110,7 @@ - name: Retrieve the node config tarballs from the master fetch: src: "{{ openshift_node_generated_config_dir }}.tgz" - dest: "{{ node_cert_mktemp.stdout }}/" - flat: yes + dest: "/tmp" fail_on_missing: yes validate_checksum: yes when: node_certs_missing | bool @@ -132,15 +124,14 
@@ - name: Unarchive the tarball on the node unarchive: - src: "{{ node_cert_mktemp.stdout }}/{{ openshift_node_cert_subdir }}.tgz" + src: "/tmp/{{ inventory_hostname }}/{{ openshift_node_generated_config_dir }}.tgz" dest: "{{ openshift_node_cert_dir }}" when: node_certs_missing | bool - name: Delete local temp directory - local_action: file path="{{ node_cert_mktemp.stdout }}" state=absent + local_action: file path="/tmp/{{ inventory_hostname }}" state=absent changed_when: False when: node_certs_missing | bool - become: no - name: Copy OpenShift CA to system CA trust copy: diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml index e02a75eab..a808f050e 100644 --- a/roles/openshift_version/tasks/set_version_containerized.yml +++ b/roles/openshift_version/tasks/set_version_containerized.yml @@ -21,7 +21,7 @@ register: cli_image_version when: - openshift_version is not defined - - not openshift_use_crio_only + - not openshift_use_crio_only | bool # Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a) - set_fact: @@ -30,7 +30,7 @@ - openshift_version is not defined - openshift.common.deployment_type == 'origin' - cli_image_version.stdout_lines[0].split('-') | length > 1 - - not openshift_use_crio_only + - not openshift_use_crio_only | bool - set_fact: openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}" @@ -45,14 +45,14 @@ when: - openshift_version is defined - openshift_version.split('.') | length == 2 - - not openshift_use_crio_only + - not openshift_use_crio_only | bool - set_fact: openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}" when: - openshift_version is defined - openshift_version.split('.') | length == 2 - - not openshift_use_crio_only + - not openshift_use_crio_only | bool # TODO: figure out a way to check for the openshift_version when using CRI-O. # We should do that using the images in the ostree storage so we don't have diff --git a/roles/openshift_web_console/defaults/main.yml b/roles/openshift_web_console/defaults/main.yml new file mode 100644 index 000000000..4f395398c --- /dev/null +++ b/roles/openshift_web_console/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# TODO: This is temporary and will be updated to use taints and tolerations so that the console runs on the masters +openshift_web_console_nodeselector: {"region":"infra"} diff --git a/roles/openshift_web_console/meta/main.yaml b/roles/openshift_web_console/meta/main.yaml new file mode 100644 index 000000000..033c1e3a3 --- /dev/null +++ b/roles/openshift_web_console/meta/main.yaml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: OpenShift Development <dev@lists.openshift.redhat.com> + description: Deploy OpenShift web console + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 2.4 + platforms: + - name: EL + versions: + - 7 + - name: Fedora + versions: + - all + categories: + - openshift +dependencies: +- role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml new file mode 100644 index 000000000..8120c13e3 --- /dev/null +++ b/roles/openshift_web_console/tasks/install.yml @@ -0,0 +1,79 @@ +--- +# Fact setting +- name: Set default image variables based on deployment type + include_vars: "{{ item }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + +- name: Set openshift_web_console facts + set_fact: + openshift_web_console_prefix: "{{ openshift_web_console_prefix | default(__openshift_web_console_prefix) }}" + openshift_web_console_version: "{{ openshift_web_console_version | default(__openshift_web_console_version) }}" + openshift_web_console_image_name: "{{ openshift_web_console_image_name | default(__openshift_web_console_image_name) }}" + # Default the replica count to the number of masters. + openshift_web_console_replica_count: "{{ openshift_web_console_replica_count | default(groups.oo_masters_to_config | length) }}" + +- name: Ensure openshift-web-console project exists + oc_project: + name: openshift-web-console + state: present + +- name: Make temp directory for asset config files + command: mktemp -d /tmp/console-ansible-XXXXXX + register: mktemp + changed_when: False + become: no + +- name: Copy asset config template to temp directory + copy: + src: "{{ __console_files_location }}/{{ item }}" + dest: "{{ mktemp.stdout }}/{{ item }}" + with_items: + - "{{ __console_template_file }}" + - "{{ __console_config_file }}" + +- name: Update asset config properties + yedit: + src: "{{ mktemp.stdout }}/{{ __console_config_file }}" + edits: + - key: logoutURL + value: "{{ openshift.master.logout_url | default('') }}" + - key: publicURL + # Must have a trailing slash + value: "{{ openshift.master.public_console_url }}/" + - key: masterPublicURL + value: "{{ openshift.master.public_api_url }}" + +- slurp: + src: "{{ mktemp.stdout }}/{{ __console_config_file }}" + register: config + +- name: Apply template file + shell: > + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_template_file }}" + --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}" + --param IMAGE="{{ openshift_web_console_prefix }}{{ openshift_web_console_image_name }}:{{ openshift_web_console_version }}" + --param NODE_SELECTOR={{ openshift_web_console_nodeselector | to_json | quote }} + --param REPLICA_COUNT="{{ openshift_web_console_replica_count }}" + | {{ openshift_client_binary }} apply -f - + +- name: Verify that the web console is running + command: > + curl -k https://webconsole.openshift-web-console.svc/healthz + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: console_health + until: console_health.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false + +- name: Remove temp directory + file: + state: absent + name: "{{ mktemp.stdout }}" + changed_when: False + become: no diff --git a/roles/openshift_web_console/tasks/main.yml b/roles/openshift_web_console/tasks/main.yml new file mode 100644 index 000000000..937bebf25 --- /dev/null +++ b/roles/openshift_web_console/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# do any asserts here + +- include_tasks: 
install.yml + when: openshift_web_console_install | default(true) | bool + +- include_tasks: remove.yml + when: not openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_web_console/tasks/remove.yml b/roles/openshift_web_console/tasks/remove.yml new file mode 100644 index 000000000..f0712a993 --- /dev/null +++ b/roles/openshift_web_console/tasks/remove.yml @@ -0,0 +1,5 @@ +--- +- name: Remove openshift-web-console project + oc_project: + name: openshift-web-console + state: absent diff --git a/roles/openshift_web_console/tasks/update_asset_config.yml b/roles/openshift_web_console/tasks/update_asset_config.yml new file mode 100644 index 000000000..36e37e35d --- /dev/null +++ b/roles/openshift_web_console/tasks/update_asset_config.yml @@ -0,0 +1,70 @@ +--- +# This task file updates asset config values in the webconsole-config config map in +# the openshift-web-console namespace. The values to set are passed in the +# variable `asset_config_edits`, which is an array of objects with `key` and +# `value` properties in the same format as `yedit` module `edits`. Only +# properties passed are updated. +# +# Note that this triggers a redeployment of the console and a brief downtime +# since it uses a `Recreate` strategy. +# +# Example usage: +# +# - include_role: +# name: openshift_web_console +# tasks_from: update_asset_config.yml +# vars: +# asset_config_edits: +# - key: loggingPublicURL +# value: "https://{{ openshift_logging_kibana_hostname }}" +# when: openshift_web_console_install | default(true) | bool + +- name: Read web console config map + oc_configmap: + namespace: openshift-web-console + name: webconsole-config + state: list + register: webconsole_config + +- name: Make temp directory + command: mktemp -d /tmp/console-ansible-XXXXXX + register: mktemp + changed_when: False + become: no + +- name: Copy asset config to temp file + copy: + content: "{{webconsole_config.results.results[0].data['webconsole-config.yaml']}}" + dest: "{{ mktemp.stdout }}/webconsole-config.yaml" + +- name: Change asset config properties + yedit: + src: "{{ mktemp.stdout }}/webconsole-config.yaml" + edits: "{{asset_config_edits}}" + +- name: Update web console config map + oc_configmap: + namespace: openshift-web-console + name: webconsole-config + state: present + from_file: + webconsole-config.yaml: "{{ mktemp.stdout }}/webconsole-config.yaml" + +- name: Remove temp directory + file: + state: absent + name: "{{ mktemp.stdout }}" + changed_when: False + become: no + +# There's currently no command to trigger a rollout for a k8s deployment +# without changing the pod spec. Add an annotation to force a rollout after +# the config map has been edited.
+- name: Roll out the updated web console deployment + oc_edit: + kind: deployments + name: webconsole + namespace: openshift-web-console + separator: '#' + content: + spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}" diff --git a/roles/openshift_web_console/vars/default_images.yml b/roles/openshift_web_console/vars/default_images.yml new file mode 100644 index 000000000..7adb8a0d0 --- /dev/null +++ b/roles/openshift_web_console/vars/default_images.yml @@ -0,0 +1,4 @@ +--- +__openshift_web_console_prefix: "docker.io/openshift/" +__openshift_web_console_version: "latest" +__openshift_web_console_image_name: "origin-web-console" diff --git a/roles/openshift_web_console/vars/main.yml b/roles/openshift_web_console/vars/main.yml new file mode 100644 index 000000000..80bc56a17 --- /dev/null +++ b/roles/openshift_web_console/vars/main.yml @@ -0,0 +1,5 @@ +--- +__console_files_location: "../../../files/origin-components/" + +__console_template_file: "console-template.yaml" +__console_config_file: "console-config.yaml" diff --git a/roles/openshift_web_console/vars/openshift-enterprise.yml b/roles/openshift_web_console/vars/openshift-enterprise.yml new file mode 100644 index 000000000..721ac1d27 --- /dev/null +++ b/roles/openshift_web_console/vars/openshift-enterprise.yml @@ -0,0 +1,4 @@ +--- +__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/" +__openshift_web_console_version: "v3.9" +__openshift_web_console_image_name: "ose-web-console" diff --git a/test/tox-inventory.txt b/test/tox-inventory.txt index 6e57d224b..ed9e946ab 100644 --- a/test/tox-inventory.txt +++ b/test/tox-inventory.txt @@ -13,6 +13,7 @@ oo_first_etcd oo_etcd_hosts_to_backup oo_etcd_hosts_to_upgrade oo_etcd_to_migrate +oo_hosts_containerized_managed_true oo_masters oo_masters_to_config oo_first_master @@ -103,3 +104,6 @@ localhost [glusterfs_registry] localhost + +[oo_hosts_containerized_managed_true] +localhost
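
The template_var helper added to openshift_checks/__init__.py above exists because values pulled from hostvars (oreg_url, oreg_auth_user, oreg_auth_password) can still contain unexpanded Jinja2 references, and the checks pass them straight to external tools such as skopeo. Below is a minimal, self-contained sketch of that contract; DemoCheck and StubTemplar are hypothetical stand-ins for OpenShiftCheck and Ansible's Templar, and registry.example.com is a placeholder registry, not a value from the patch:

# Hypothetical stand-in for ansible.template.Templar; it resolves a single
# hard-coded variable so the example runs on its own. The real Templar
# resolves against the full set of hostvars.
class StubTemplar(object):
    def template(self, value):
        return value.replace("{{ openshift_deployment_type }}", "origin")

# Hypothetical stand-in for OpenShiftCheck; template_var is copied verbatim
# from the patch above.
class DemoCheck(object):
    def __init__(self, templar=None):
        self._templar = templar

    def template_var(self, var_to_template):
        """Return a templated variable if self._templar is not None, else
        just return the variable as-is"""
        if self._templar is not None:
            return self._templar.template(var_to_template)
        return var_to_template

oreg_url = "registry.example.com/{{ openshift_deployment_type }}/ose-${component}:${version}"

# With a templar attached, embedded Jinja2 is resolved before the URL is used:
print(DemoCheck(StubTemplar()).template_var(oreg_url))
# -> registry.example.com/origin/ose-${component}:${version}

# With no templar (as in unit tests), the raw value passes through unchanged:
print(DemoCheck().template_var(oreg_url))
# -> registry.example.com/{{ openshift_deployment_type }}/ose-${component}:${version}

The ${component} and ${version} placeholders are deliberately left intact: docker_image_availability.py substitutes them later with plain .replace() calls, so template_var resolves only the Jinja2 layer.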