-rw-r--r--  README_origin.md | 15
-rw-r--r--  filter_plugins/openshift_master.py | 4
-rw-r--r--  inventory/byo/hosts.example | 9
-rw-r--r--  playbooks/adhoc/bootstrap-fedora.yml | 5
-rw-r--r--  playbooks/adhoc/uninstall.yml | 37
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 8
-rw-r--r--  playbooks/byo/openshift_facts.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/versions.sh | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 7
-rw-r--r--  playbooks/common/openshift-master/config.yml | 16
-rw-r--r--  roles/ansible/tasks/main.yml | 7
-rw-r--r--  roles/cockpit/tasks/main.yml | 12
-rw-r--r--  roles/copr_cli/tasks/main.yml | 6
-rw-r--r--  roles/docker/tasks/main.yml | 5
-rw-r--r--  roles/etcd/README.md | 2
-rw-r--r--  roles/etcd/tasks/main.yml | 5
-rw-r--r--  roles/flannel/README.md | 3
-rw-r--r--  roles/flannel/tasks/main.yml | 6
-rw-r--r--  roles/fluentd_master/tasks/main.yml | 7
-rw-r--r--  roles/fluentd_node/tasks/main.yml | 7
-rw-r--r--  roles/haproxy/tasks/main.yml | 7
-rw-r--r--  roles/kube_nfs_volumes/tasks/main.yml | 5
-rw-r--r--  roles/kube_nfs_volumes/tasks/nfs.yml | 5
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml | 10
-rw-r--r--  roles/openshift_cluster_metrics/tasks/main.yml | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/metrics-deployer.yaml | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/metrics-deployer.yaml | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/metrics-deployer.yaml | 4
-rw-r--r--  roles/openshift_expand_partition/README.md | 2
-rw-r--r--  roles/openshift_expand_partition/tasks/main.yml | 5
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 8
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 7
-rw-r--r--  roles/openshift_master/tasks/main.yml | 28
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 2
-rw-r--r--  roles/openshift_master_ca/tasks/main.yml | 6
-rw-r--r--  roles/openshift_node/tasks/main.yml | 20
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/ceph.yml | 7
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/glusterfs.yml | 9
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 6
-rw-r--r--  roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo | 8
-rw-r--r--  roles/openshift_repos/handlers/main.yml | 6
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 37
-rw-r--r--  roles/openshift_serviceaccounts/tasks/main.yml | 6
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/nfs.yml | 5
-rw-r--r--  roles/os_env_extras/tasks/main.yaml | 7
-rw-r--r--  roles/os_firewall/tasks/firewall/firewalld.yml | 8
-rw-r--r--  roles/os_firewall/tasks/firewall/iptables.yml | 11
-rw-r--r--  roles/os_update_latest/tasks/main.yml | 5
-rw-r--r--  roles/os_zabbix/vars/template_openshift_master.yml | 104
-rw-r--r--  roles/os_zabbix/vars/template_os_linux.yml | 31
-rw-r--r--  roles/yum_repos/README.md | 2
-rwxr-xr-x  utils/site_assets/oo-install-bootstrap.sh | 9
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 210
-rw-r--r--  utils/src/ooinstall/oo_config.py | 30
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 37
-rw-r--r--  utils/test/cli_installer_tests.py | 441
-rw-r--r--  utils/test/fixture.py | 221
59 files changed, 1064 insertions(+), 434 deletions(-)
diff --git a/README_origin.md b/README_origin.md
index cb213a93a..343ecda3d 100644
--- a/README_origin.md
+++ b/README_origin.md
@@ -39,6 +39,12 @@ subscription-manager repos \
```
* Configuration of router is not automated yet
* Configuration of docker-registry is not automated yet
+* Fedora 23+ doesn't come with python2 and will need a quick bootstrap. Set up
+ your inventory as described below and run the following (replacing
+ `$PATH_TO_INVENTORY_FILE` with the actual path to your inventory file):
+```sh
+ansible-playbook ./playbooks/adhoc/bootstrap-fedora.yml -i $PATH_TO_INVENTORY_FILE
+```
## Configuring the host inventory
[Ansible docs](http://docs.ansible.com/intro_inventory.html)
@@ -59,6 +65,7 @@ nodes
# Set variables common for all OSEv3 hosts
[OSv3:vars]
+
# SSH user, this user should allow ssh based auth without requiring a password
ansible_ssh_user=root
@@ -75,6 +82,14 @@ osv3-master.example.com
[nodes]
osv3-master.example.com
osv3-node[1:2].example.com
+
+# host group for etcd
+[etcd]
+osv3-etcd[1:3].example.com
+
+[lb]
+osv3-lb.example.com
+
```
The hostnames above should resolve both from the hosts themselves and
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index 76fe610a0..f12017967 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -290,8 +290,8 @@ class BasicAuthPasswordIdentityProvider(IdentityProviderBase):
def __init__(self, api_version, idp):
IdentityProviderBase.__init__(self, api_version, idp)
self._allow_additional = False
- self._required += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
- self._optional += [['key']]
+ self._required += [['url']]
+ self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
class IdentityProviderOauthBase(IdentityProviderBase):
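
With this change, `BasicAuthPasswordIdentityProvider` validation requires only `url`, while `ca`, `certFile`, and `keyFile` become optional. A hypothetical sketch of a now-valid provider definition (the endpoint and path are placeholders, not from this commit):

```yaml
# placeholder values -- only 'url' is mandatory after this change
openshift_master_identity_providers:
- name: my_remote_basic_auth
  login: 'true'
  challenge: 'true'
  kind: BasicAuthPasswordIdentityProvider
  url: https://auth.example.com/validate   # required
  ca: /etc/openshift/basicauth-ca.crt      # optional, as are certFile/keyFile
```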
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index ef0736b63..799725a37 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -36,6 +36,10 @@ deployment_type=atomic-enterprise
# Origin copr repo
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+# Origin Fedora copr repo
+# Use this if you are installing on Fedora
+#openshift_additional_repos=[{'id': 'fedora-openshift-origin-copr', 'name': 'OpenShift Origin COPR for Fedora', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg'}]
+
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}]
@@ -154,6 +158,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# entries for the interfaces attached to the host.
#openshift_set_hostname=True
+# Configure dnsIP in the node config
+#openshift_dns_ip=172.30.0.1
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
@@ -166,7 +173,7 @@ ose3-lb-ansible.test.example.com
# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_scheduleable=False any node that's also a master.
+# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
[nodes]
ose3-master[1:3]-ansible.test.example.com
ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
new file mode 100644
index 000000000..de9f36c8a
--- /dev/null
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -0,0 +1,5 @@
+- hosts: OSv3
+ gather_facts: false
+ tasks:
+ - name: install python and deps for ansible modules
+ raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python
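
This new playbook has to use the `raw` module with `gather_facts: false` because both fact gathering and ordinary modules need a Python interpreter on the target, which Fedora 23+ no longer ships by default. A minimal follow-up sketch (not part of this commit) to verify the bootstrap worked:

```yaml
# after bootstrap, Python-backed modules should succeed again
- hosts: OSv3
  gather_facts: false
  tasks:
    - name: confirm the target can run ordinary (Python-based) modules
      ping:
```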
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 538414508..08a2ea6fb 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -48,7 +48,39 @@
- pcsd
- yum: name={{ item }} state=absent
- when: not is_atomic | bool
+ when: ansible_pkg_mgr == "yum" and not is_atomic | bool
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-master
+ - atomic-enterprise-node
+ - atomic-enterprise-sdn-ovs
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-master
+ - atomic-openshift-node
+ - atomic-openshift-sdn-ovs
+ - corosync
+ - etcd
+ - openshift
+ - openshift-master
+ - openshift-node
+ - openshift-sdn
+ - openshift-sdn-ovs
+ - openvswitch
+ - origin
+ - origin-clients
+ - origin-master
+ - origin-node
+ - origin-sdn-ovs
+ - pacemaker
+ - pcs
+ - tuned-profiles-atomic-enterprise-node
+ - tuned-profiles-atomic-openshift-node
+ - tuned-profiles-openshift-node
+ - tuned-profiles-origin-node
+
+ - dnf: name={{ item }} state=absent
+ when: ansible_pkg_mgr == "dnf" and not is_atomic | bool
with_items:
- atomic-enterprise
- atomic-enterprise-master
@@ -181,5 +213,8 @@
- name: Reload systemd manager configuration
command: systemctl daemon-reload
+- hosts: nodes
+ sudo: yes
+ tasks:
- name: restart docker
service: name=docker state=restarted
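
The yum/dnf task duplication keyed on `ansible_pkg_mgr` recurs throughout this commit. Where the module parameters are identical, the same effect is possible in a single task via the `action` syntax, which this commit itself uses for the openssl install in `playbooks/common/openshift-master/config.yml`; a sketch:

```yaml
# single-task form: dispatch to yum or dnf via the detected package manager
- name: Remove a package with whichever package manager the host uses
  action: "{{ ansible_pkg_mgr }} name=etcd state=absent"
  when: not is_atomic | bool
```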
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 15e775770..99f0577fc 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -33,25 +33,25 @@
when: ec2_assign_public_ip is not defined
- set_fact:
- ec2_instance_type: "{{ ec2_master_instance_type | default(lookup('env', 'ec2_master_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type))) }}"
+ ec2_instance_type: "{{ ec2_master_instance_type | default(lookup('env', 'ec2_master_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
ec2_security_groups: "{{ ec2_master_security_groups
| default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "master" and sub_host_type == "default"
- set_fact:
- ec2_instance_type: "{{ ec2_etcd_instance_type | default(lookup('env', 'ec2_etcd_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type))) }}"
+ ec2_instance_type: "{{ ec2_etcd_instance_type | default(lookup('env', 'ec2_etcd_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
ec2_security_groups: "{{ ec2_etcd_security_groups
| default(deployment_vars[deployment_type].security_groups, true)}}"
when: host_type == "etcd" and sub_host_type == "default"
- set_fact:
- ec2_instance_type: "{{ ec2_infra_instance_type | default(lookup('env', 'ec2_infra_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type))) }}"
+ ec2_instance_type: "{{ ec2_infra_instance_type | default(lookup('env', 'ec2_infra_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
ec2_security_groups: "{{ ec2_infra_security_groups
| default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "node" and sub_host_type == "infra"
- set_fact:
- ec2_instance_type: "{{ ec2_node_instance_type | default(lookup('env', 'ec2_node_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type))) }}"
+ ec2_instance_type: "{{ ec2_node_instance_type | default(lookup('env', 'ec2_node_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
ec2_security_groups: "{{ ec2_node_security_groups
| default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "node" and sub_host_type == "compute"
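
The second argument added to each `default()` makes falsy values fall through as well: `lookup('env', ...)` returns an empty string for an unset variable, and `default(x)` alone only fires on undefined values, so without `true` the empty string would mask the later fallbacks. A minimal sketch with a hypothetical variable name:

```yaml
# default(x) replaces only undefined values; default(x, true) also
# replaces falsy ones such as the '' an unset env var produces
- set_fact:
    chosen_type: "{{ lookup('env', 'EC2_TYPE') | default('m4.large', true) }}"
```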
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 6d7c12fd4..babdfb952 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,7 +1,6 @@
---
- name: Gather Cluster facts
- hosts: all
- gather_facts: no
+ hosts: OSEv3
roles:
- openshift_facts
tasks:
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index a8bd634d3..482fa8441 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -6,6 +6,3 @@
- include: ../openshift-master/config.yml
- include: ../openshift-node/config.yml
- vars:
- osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
- osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
index f90719cab..c7c966b60 100644
--- a/playbooks/common/openshift-cluster/upgrades/files/versions.sh
+++ b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
@@ -2,9 +2,9 @@
yum_installed=$(yum list installed "$@" 2>&1 | tail -n +2 | grep -v 'Installed Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
-yum_available=$(yum list available "$@" 2>&1 | tail -n +2 | grep -v 'Available Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'el7ose' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
+yum_available=$(yum list available -q "$@" 2>&1 | tail -n +2 | grep -v 'Available Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'el7ose' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
echo "---"
-echo "curr_version: ${yum_installed}"
+echo "curr_version: ${yum_installed}"
echo "avail_version: ${yum_available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index eea147229..00ebf4ce6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -517,24 +517,28 @@
- _default_router.rc == 0
- "'false' in _scc.stdout"
command: >
- {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
+ {{ oc_cmd }} patch scc/privileged -p
+ '{"allowHostPorts":true,"allowHostNetwork":true}' --api-version=v1
- name: Update deployment config to 1.0.4/3.0.1 spec
when: _default_router.rc == 0
command: >
{{ oc_cmd }} patch dc/router -p
'{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
+ --api-version=v1
- name: Switch to hostNetwork=true
when: _default_router.rc == 0
command: >
{{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
+ --api-version=v1
- name: Update router image to current version
when: _default_router.rc == 0
command: >
{{ oc_cmd }} patch dc/router -p
'{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+ --api-version=v1
- name: Check for default registry
command: >
@@ -548,3 +552,4 @@
command: >
{{ oc_cmd }} patch dc/docker-registry -p
'{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 7bdaca2c9..becd68dbe 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -244,6 +244,8 @@
- fail:
msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length"
when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length)
+ - name: Install OpenSSL package
+ action: "{{ansible_pkg_mgr}} pkg=openssl state=present"
- name: Generate session authentication key
command: /usr/bin/openssl rand -base64 24
register: session_auth_output
@@ -353,20 +355,6 @@
- role: openshift_manageiq
when: openshift.common.use_manageiq | bool
-- name: Determine cluster dns ip
- hosts: oo_first_master
- tasks:
- - name: Get master service ip
- command: "{{ openshift.common.client_binary }} get -o template svc kubernetes --template=\\{\\{.spec.clusterIP\\}\\}"
- register: master_service_ip_output
- when: openshift.common.version_greater_than_3_1_or_1_1 | bool
- - set_fact:
- cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
- when: not openshift.common.version_greater_than_3_1_or_1_1 | bool
- - set_fact:
- cluster_dns_ip: "{{ master_service_ip_output.stdout }}"
- when: openshift.common.version_greater_than_3_1_or_1_1 | bool
-
- name: Enable cockpit
hosts: oo_first_master
vars:
diff --git a/roles/ansible/tasks/main.yml b/roles/ansible/tasks/main.yml
index 5d20a3b35..f79273824 100644
--- a/roles/ansible/tasks/main.yml
+++ b/roles/ansible/tasks/main.yml
@@ -5,6 +5,13 @@
yum:
pkg: ansible
state: installed
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install Ansible
+ dnf:
+ pkg: ansible
+ state: installed
+ when: ansible_pkg_mgr == "dnf"
- include: config.yml
vars:
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index 875cbad21..8410e7c90 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -8,6 +8,18 @@
- cockpit-shell
- cockpit-bridge
- "{{ cockpit_plugins }}"
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install cockpit-ws
+ dnf:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - cockpit-ws
+ - cockpit-shell
+ - cockpit-bridge
+ - "{{ cockpit_plugins }}"
+ when: ansible_pkg_mgr == "dnf"
- name: Enable cockpit-ws
service:
diff --git a/roles/copr_cli/tasks/main.yml b/roles/copr_cli/tasks/main.yml
index f7ef1c26e..f8496199d 100644
--- a/roles/copr_cli/tasks/main.yml
+++ b/roles/copr_cli/tasks/main.yml
@@ -2,3 +2,9 @@
- yum:
name: copr-cli
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- dnf:
+ name: copr-cli
+ state: present
+ when: ansible_pkg_mgr == "dnf"
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 96949230d..dd4401389 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -2,6 +2,11 @@
# tasks file for docker
- name: Install docker
yum: pkg=docker
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install docker
+ dnf: pkg=docker
+ when: ansible_pkg_mgr == "dnf"
- name: enable and start the docker service
service: name=docker enabled=yes state=started
diff --git a/roles/etcd/README.md b/roles/etcd/README.md
index 88e4ff874..329a926c0 100644
--- a/roles/etcd/README.md
+++ b/roles/etcd/README.md
@@ -7,7 +7,7 @@ Requirements
------------
This role assumes it's being deployed on a RHEL/Fedora based host with package
-named 'etcd' available via yum.
+named 'etcd' available via yum or dnf (depending on the host's package manager).
Role Variables
--------------
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index fcbdecd37..efaab5f31 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -9,6 +9,11 @@
- name: Install etcd
yum: pkg=etcd-2.* state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install etcd
+ dnf: pkg=etcd* state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Validate permissions on the config dir
file:
diff --git a/roles/flannel/README.md b/roles/flannel/README.md
index b8aa830ac..8f271aada 100644
--- a/roles/flannel/README.md
+++ b/roles/flannel/README.md
@@ -7,7 +7,8 @@ Requirements
------------
This role assumes it's being deployed on a RHEL/Fedora based host with package
-named 'flannel' available via yum, in version superior to 0.3.
+named 'flannel' available via yum or dnf (depending on the host's package
+manager), in a version newer than 0.3.
Role Variables
--------------
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index acfb009ec..86e1bc96e 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -2,6 +2,12 @@
- name: Install flannel
sudo: true
yum: pkg=flannel state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install flannel
+ sudo: true
+ dnf: pkg=flannel state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Set flannel etcd url
sudo: true
diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml
index 55cd94460..43c499b4d 100644
--- a/roles/fluentd_master/tasks/main.yml
+++ b/roles/fluentd_master/tasks/main.yml
@@ -4,6 +4,13 @@
yum:
name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- name: download and install td-agent
+ dnf:
+ name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
+ state: present
+ when: ansible_pkg_mgr == "dnf"
- name: Verify fluentd plugin installed
command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
diff --git a/roles/fluentd_node/tasks/main.yml b/roles/fluentd_node/tasks/main.yml
index f9ef30b83..827a1c075 100644
--- a/roles/fluentd_node/tasks/main.yml
+++ b/roles/fluentd_node/tasks/main.yml
@@ -4,6 +4,13 @@
yum:
name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- name: download and install td-agent
+ dnf:
+ name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
+ state: present
+ when: ansible_pkg_mgr == "dnf"
- name: Verify fluentd plugin installed
command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
index 5638b7313..5d015fadd 100644
--- a/roles/haproxy/tasks/main.yml
+++ b/roles/haproxy/tasks/main.yml
@@ -3,6 +3,13 @@
yum:
pkg: haproxy
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install haproxy
+ dnf:
+ pkg: haproxy
+ state: present
+ when: ansible_pkg_mgr == "dnf"
- name: Configure haproxy
template:
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml
index d1dcf261a..3fcb9fd18 100644
--- a/roles/kube_nfs_volumes/tasks/main.yml
+++ b/roles/kube_nfs_volumes/tasks/main.yml
@@ -1,6 +1,11 @@
---
- name: Install pyparted (RedHat/Fedora)
yum: name=pyparted,python-httplib2 state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install pyparted (RedHat/Fedora)
+ dnf: name=pyparted,python-httplib2 state=present
+ when: ansible_pkg_mgr == "dnf"
- name: partition the drives
partitionpool: disks={{ disks }} force={{ force }} sizes={{ sizes }}
diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml
index 559fcf17c..a58a7b824 100644
--- a/roles/kube_nfs_volumes/tasks/nfs.yml
+++ b/roles/kube_nfs_volumes/tasks/nfs.yml
@@ -1,6 +1,11 @@
---
- name: Install NFS server on Fedora/Red Hat
yum: name=nfs-utils state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install NFS server on Fedora/Red Hat
+ dnf: name=nfs-utils state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Start rpcbind on Fedora/Red Hat
service: name=rpcbind state=started enabled=yes
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index f6919dada..2b99f8bcd 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -2,6 +2,16 @@
- yum:
name: "{{ item }}"
state: present
+ when: ansible_pkg_mgr == "yum"
+ with_items:
+ - openshift-ansible-inventory
+ - openshift-ansible-inventory-aws
+ - openshift-ansible-inventory-gce
+
+- dnf:
+ name: "{{ item }}"
+ state: present
+ when: ansible_pkg_mgr == "dnf"
with_items:
- openshift-ansible-inventory
- openshift-ansible-inventory-aws
diff --git a/roles/openshift_cluster_metrics/tasks/main.yml b/roles/openshift_cluster_metrics/tasks/main.yml
index 3938aba4c..9b7735e54 100644
--- a/roles/openshift_cluster_metrics/tasks/main.yml
+++ b/roles/openshift_cluster_metrics/tasks/main.yml
@@ -7,7 +7,7 @@
- name: Create InfluxDB Services
command: >
- {{ openshift.common.client_binary }} create -f
+ {{ openshift.common.client_binary }} create -f
/etc/openshift/cluster-metrics/influxdb.yaml
register: oex_influxdb_services
failed_when: "'already exists' not in oex_influxdb_services.stderr and oex_influxdb_services.rc != 0"
@@ -15,14 +15,14 @@
- name: Create Heapster Service Account
command: >
- {{ openshift.common.client_binary }} create -f
+ {{ openshift.common.client_binary }} create -f
/etc/openshift/cluster-metrics/heapster-serviceaccount.yaml
register: oex_heapster_serviceaccount
failed_when: "'already exists' not in oex_heapster_serviceaccount.stderr and oex_heapster_serviceaccount.rc != 0"
changed_when: false
- name: Add cluster-reader role to Heapster
- command: >
+ command: >
{{ openshift.common.admin_binary }} policy
add-cluster-role-to-user
cluster-reader
diff --git a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/metrics-deployer.yaml
index d823b2587..ddd9f2f75 100644
--- a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/metrics-deployer.yaml
@@ -81,11 +81,11 @@ parameters:
-
description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
name: IMAGE_PREFIX
- value: "hawkular/"
+ value: "registry.access.redhat.com/openshift3/"
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
name: IMAGE_VERSION
- value: "0.7.0-SNAPSHOT"
+ value: "3.1.0"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
diff --git a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/metrics-deployer.yaml
index d823b2587..3e9bcde5b 100644
--- a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/metrics-deployer.yaml
@@ -81,11 +81,11 @@ parameters:
-
description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
name: IMAGE_PREFIX
- value: "hawkular/"
+ value: "docker.io/openshift/origin-"
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
name: IMAGE_VERSION
- value: "0.7.0-SNAPSHOT"
+ value: "latest"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml
index d823b2587..ddd9f2f75 100644
--- a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml
@@ -81,11 +81,11 @@ parameters:
-
description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
name: IMAGE_PREFIX
- value: "hawkular/"
+ value: "registry.access.redhat.com/openshift3/"
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
name: IMAGE_VERSION
- value: "0.7.0-SNAPSHOT"
+ value: "3.1.0"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/metrics-deployer.yaml
index d823b2587..3e9bcde5b 100644
--- a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/metrics-deployer.yaml
@@ -81,11 +81,11 @@ parameters:
-
description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
name: IMAGE_PREFIX
- value: "hawkular/"
+ value: "docker.io/openshift/origin-"
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
name: IMAGE_VERSION
- value: "0.7.0-SNAPSHOT"
+ value: "latest"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
diff --git a/roles/openshift_expand_partition/README.md b/roles/openshift_expand_partition/README.md
index cd394e1ba..aed4ec871 100644
--- a/roles/openshift_expand_partition/README.md
+++ b/roles/openshift_expand_partition/README.md
@@ -8,7 +8,7 @@ partition, and then expanding the file system on the partition.
* A machine with a disk that is not fully utilized
-* cloud-utils-growpart rpm (either installed or avialable via yum)
+* cloud-utils-growpart rpm (either installed or available via yum or dnf)
* The partition you are expanding needs to be at the end of the partition list
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index 8bc399070..42e7903fd 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -1,6 +1,11 @@
---
- name: Ensure growpart is installed
yum: pkg=cloud-utils-growpart state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Ensure growpart is installed
+ dnf: pkg=cloud-utils-growpart state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Grow the partitions
command: "growpart {{oep_drive}} {{oep_partition}}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 0100aa54f..085d59b71 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -528,9 +528,9 @@ def set_aggregate_facts(facts):
internal_hostnames.add(facts['common']['hostname'])
internal_hostnames.add(facts['common']['ip'])
+ cluster_domain = facts['common']['dns_domain']
+
if 'master' in facts:
- # FIXME: not sure why but facts['dns']['domain'] fails
- cluster_domain = 'cluster.local'
if 'cluster_hostname' in facts['master']:
all_hostnames.add(facts['master']['cluster_hostname'])
if 'cluster_public_hostname' in facts['master']:
@@ -985,7 +985,7 @@ class OpenShiftFacts(object):
Raises:
OpenShiftFactsUnsupportedRoleError:
"""
- known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns', 'etcd']
+ known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'etcd']
def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False):
self.changed = False
@@ -1056,6 +1056,7 @@ class OpenShiftFacts(object):
public_hostname=hostname, use_manageiq=False)
common['client_binary'] = 'oc' if os.path.isfile('/usr/bin/oc') else 'osc'
common['admin_binary'] = 'oadm' if os.path.isfile('/usr/bin/oadm') else 'osadm'
+ common['dns_domain'] = 'cluster.local'
defaults['common'] = common
if 'master' in roles:
@@ -1076,7 +1077,6 @@ class OpenShiftFacts(object):
node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16',
iptables_sync_period='5s', set_node_ip=False)
defaults['node'] = node
-
return defaults
def guess_host_provider(self):
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 913f0dc78..2e889d7d5 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -8,6 +8,13 @@
- name: Ensure PyYaml is installed
yum: pkg={{ item }} state=installed
+ when: ansible_pkg_mgr == "yum"
+ with_items:
+ - PyYAML
+
+- name: Ensure PyYaml is installed
+ dnf: pkg={{ item }} state=installed
+ when: ansible_pkg_mgr == "dnf"
with_items:
- PyYAML
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 2cf2a53c4..8a78f8f2a 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -79,16 +79,16 @@
- name: Install Master package
yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "yum"
register: install_result
-# TODO: These values need to be configurable
-- name: Set dns facts
+- name: Install Master package
+ dnf: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "dnf"
+ register: install_result
+
+- name: Re-gather package dependent master facts
openshift_facts:
- role: dns
- local_facts:
- ip: "{{ openshift_master_cluster_vip | default(openshift.common.ip, true) | default(None) }}"
- domain: cluster.local
- when: openshift.master.embedded_dns
- name: Create config parent directory if it does not exist
file:
@@ -118,7 +118,12 @@
- name: Install httpd-tools if needed
yum: pkg=httpd-tools state=present
- when: item.kind == 'HTPasswdPasswordIdentityProvider'
+ when: (ansible_pkg_mgr == "yum") and (item.kind == 'HTPasswdPasswordIdentityProvider')
+ with_items: openshift.master.identity_providers
+
+- name: Install httpd-tools if needed
+ dnf: pkg=httpd-tools state=present
+ when: (ansible_pkg_mgr == "dnf") and (item.kind == 'HTPasswdPasswordIdentityProvider')
with_items: openshift.master.identity_providers
- name: Ensure htpasswd directory exists
@@ -263,7 +268,12 @@
- name: Install cluster packages
yum: pkg=pcs state=present
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
+ when: (ansible_pkg_mgr == "yum") and openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
+ register: install_result
+
+- name: Install cluster packages
+ dnf: pkg=pcs state=present
+ when: (ansible_pkg_mgr == "dnf") and openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
register: install_result
- name: Start and enable cluster service
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 9f4a17f0a..cadb02fa3 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -83,7 +83,7 @@ kubernetesMasterConfig:
{% endif %}
apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }}
controllerArguments: {{ controller_args if controller_args is defined else 'null' }}
- masterCount: {{ openshift.master.master_count }}
+ masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
masterIP: {{ openshift.common.ip }}
podEvictionTimeout: ""
proxyClientInfo:
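
The template now reports the real master count only under the native HA method; with pacemaker (or no cluster method set) only one master is active behind the VIP, so the count is pinned to 1. A hypothetical rendering, assuming a three-master inventory:

```yaml
# cluster_method == 'native'            -> masterCount: 3
# cluster_method == 'pacemaker' / unset -> masterCount: 1
```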
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index 314f068e7..caac13be3 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -1,6 +1,12 @@
---
- name: Install the base package for admin tooling
yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "yum"
+ register: install_result
+
+- name: Install the base package for admin tooling
+ dnf: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "dnf"
register: install_result
- name: Reload generated facts
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 42d984a09..29e7eb532 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -1,12 +1,6 @@
---
# TODO: allow for overriding default ports where possible
- fail:
- msg: This role requres that osn_cluster_dns_domain is set
- when: osn_cluster_dns_domain is not defined or not osn_cluster_dns_domain
-- fail:
- msg: This role requres that osn_cluster_dns_ip is set
- when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip
-- fail:
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
@@ -20,6 +14,7 @@
hostname: "{{ openshift_hostname | default(none) }}"
public_hostname: "{{ openshift_public_hostname | default(none) }}"
deployment_type: "{{ openshift_deployment_type }}"
+ dns_ip: "{{ openshift_dns_ip | default(openshift_master_cluster_vip | default(None, true), true) }}"
- role: node
local_facts:
annotations: "{{ openshift_node_annotations | default(none) }}"
@@ -40,12 +35,23 @@
# problems because the rpms don't pin the version properly.
- name: Install Node package
yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "yum"
+ register: node_install_result
+
+- name: Install Node package
+ dnf: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "dnf"
register: node_install_result
- name: Install sdn-ovs package
yum: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present
register: sdn_install_result
- when: openshift.common.use_openshift_sdn
+ when: ansible_pkg_mgr == "yum" and openshift.common.use_openshift_sdn
+
+- name: Install sdn-ovs package
+ dnf: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present
+ register: sdn_install_result
+ when: ansible_pkg_mgr == "dnf" and openshift.common.use_openshift_sdn
# TODO: add the validate parameter when there is a validation command to run
- name: Create the Node config
diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml
index b6936618a..b5146dcac 100644
--- a/roles/openshift_node/tasks/storage_plugins/ceph.yml
+++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml
@@ -3,3 +3,10 @@
yum:
pkg: ceph-common
state: installed
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install Ceph storage plugin dependencies
+ dnf:
+ pkg: ceph-common
+ state: installed
+ when: ansible_pkg_mgr == "dnf"
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index 5cd4a6041..a357023e1 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -3,6 +3,13 @@
yum:
pkg: glusterfs-fuse
state: installed
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install GlusterFS storage plugin dependencies
+ dnf:
+ pkg: glusterfs-fuse
+ state: installed
+ when: ansible_pkg_mgr == "dnf"
- name: Set sebooleans to allow gluster storage plugin access from containers
seboolean:
@@ -14,4 +21,4 @@
- virt_use_fusefs
- virt_sandbox_use_fusefs
register: sebool_result
- failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean item does not exist' not in sebool_result.msg"
+ failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean {{ item }} does not exist' not in sebool_result.msg"
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 41a303dee..23bd81f91 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -1,7 +1,9 @@
allowDisabledDocker: false
apiVersion: v1
-dnsDomain: {{ osn_cluster_dns_domain }}
-dnsIP: {{ osn_cluster_dns_ip }}
+dnsDomain: {{ openshift.common.dns_domain }}
+{% if 'dns_ip' in openshift.common %}
+dnsIP: {{ openshift.common.dns_ip }}
+{% endif %}
dockerConfig:
execHandlerName: ""
iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}"
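
`dnsDomain` now comes from the `openshift.common.dns_domain` fact (defaulted to `cluster.local` by the openshift_facts change above), and `dnsIP` is rendered only when a `dns_ip` fact exists. A hypothetical rendered fragment, assuming the `openshift_dns_ip=172.30.0.1` value from the hosts.example comment earlier in this commit:

```yaml
allowDisabledDocker: false
apiVersion: v1
dnsDomain: cluster.local
dnsIP: 172.30.0.1   # this line is omitted entirely when no dns_ip fact is set
```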
diff --git a/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo b/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo
new file mode 100644
index 000000000..bc0435d82
--- /dev/null
+++ b/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo
@@ -0,0 +1,8 @@
+[maxamillion-fedora-openshift]
+name=Copr repo for fedora-openshift owned by maxamillion
+baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg
+enabled=1
+enabled_metadata=1
\ No newline at end of file
diff --git a/roles/openshift_repos/handlers/main.yml b/roles/openshift_repos/handlers/main.yml
new file mode 100644
index 000000000..fed4ab2f0
--- /dev/null
+++ b/roles/openshift_repos/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+- name: refresh yum cache
+ command: yum clean all
+
+- name: refresh dnf cache
+ command: dnf clean all
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index aa696ae12..c55b5df89 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -14,33 +14,64 @@
yum:
pkg: libselinux-python
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Ensure libselinux-python is installed
+ dnf:
+ pkg: libselinux-python
+ state: present
+ when: ansible_pkg_mgr == "dnf"
- name: Create any additional repos that are defined
template:
src: yum_repo.j2
dest: /etc/yum.repos.d/openshift_additional.repo
when: openshift_additional_repos | length > 0
+ notify: refresh yum cache
- name: Remove the additional repos if no longer defined
file:
dest: /etc/yum.repos.d/openshift_additional.repo
state: absent
when: openshift_additional_repos | length == 0
+ notify: refresh yum cache
-- name: Remove any yum repo files for other deployment types
+- name: Remove any yum repo files for other deployment types RHEL/CentOS
file:
path: "/etc/yum.repos.d/{{ item | basename }}"
state: absent
with_fileglob:
- '*/repos/*'
- when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos"))
+ when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos")) and
+ (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
+ notify: refresh yum cache
+
+- name: Remove any yum repo files for other deployment types Fedora
+ file:
+ path: "/etc/yum.repos.d/{{ item | basename }}"
+ state: absent
+ with_fileglob:
+ - '*/repos/*'
+ when: not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos")) and
+ (ansible_distribution == "Fedora")
+ notify: refresh dnf cache
- name: Configure gpg keys if needed
copy: src={{ item }} dest=/etc/pki/rpm-gpg/
with_fileglob:
- "{{ openshift_deployment_type }}/gpg_keys/*"
+ notify: refresh yum cache
-- name: Configure yum repositories
+- name: Configure yum repositories RHEL/CentOS
copy: src={{ item }} dest=/etc/yum.repos.d/
with_fileglob:
- "{{ openshift_deployment_type }}/repos/*"
+ notify: refresh yum cache
+ when: (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
+
+- name: Configure yum repositories Fedora
+ copy: src={{ item }} dest=/etc/yum.repos.d/
+ with_fileglob:
+ - "fedora-{{ openshift_deployment_type }}/repos/*"
+ notify: refresh dnf cache
+ when: (ansible_distribution == "Fedora")
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
index d93a25a21..e558a83a2 100644
--- a/roles/openshift_serviceaccounts/tasks/main.yml
+++ b/roles/openshift_serviceaccounts/tasks/main.yml
@@ -13,7 +13,9 @@
changed_when: "'serviceaccounts \"{{ item }}\" already exists' not in _sa_result.stderr and _sa_result.rc == 0"
- name: Get current security context constraints
- shell: "{{ openshift.common.client_binary }} get scc privileged -o yaml > /tmp/scc.yaml"
+ shell: >
+ {{ openshift.common.client_binary }} get scc privileged -o yaml
+ --output-version=v1 > /tmp/scc.yaml
- name: Add security context constraint for {{ item }}
lineinfile:
@@ -23,4 +25,4 @@
with_items: accounts
- name: Apply new scc rules for service accounts
- command: "{{ openshift.common.client_binary }} update -f /tmp/scc.yaml"
+ command: "{{ openshift.common.client_binary }} update -f /tmp/scc.yaml --api-version=v1"
diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
index 65ae069df..bf23dfe98 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
@@ -1,6 +1,11 @@
---
- name: Install NFS server
yum: name=nfs-utils state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install NFS server
+ dnf: name=nfs-utils state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Start rpcbind
service: name=rpcbind state=started enabled=yes
diff --git a/roles/os_env_extras/tasks/main.yaml b/roles/os_env_extras/tasks/main.yaml
index 96b12ad5b..29599559c 100644
--- a/roles/os_env_extras/tasks/main.yaml
+++ b/roles/os_env_extras/tasks/main.yaml
@@ -15,3 +15,10 @@
yum:
pkg: bash-completion
state: installed
+ when: ansible_pkg_mgr == "yum"
+
+- name: Bash Completion
+ dnf:
+ pkg: bash-completion
+ state: installed
+ when: ansible_pkg_mgr == "dnf"
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 5089eb3e0..cf2a2c733 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -3,6 +3,14 @@
yum:
name: firewalld
state: present
+ when: ansible_pkg_mgr == "yum"
+ register: install_result
+
+- name: Install firewalld packages
+ dnf:
+ name: firewalld
+ state: present
+ when: ansible_pkg_mgr == "dnf"
register: install_result
- name: Check if iptables-services is installed
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 9af9d8d29..36d51504c 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -6,6 +6,17 @@
with_items:
- iptables
- iptables-services
+ when: ansible_pkg_mgr == "yum"
+ register: install_result
+
+- name: Install iptables packages
+ dnf:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - iptables
+ - iptables-services
+ when: ansible_pkg_mgr == "dnf"
register: install_result
- name: Check if firewalld is installed
diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml
index 4a2c3d47a..40eec8d35 100644
--- a/roles/os_update_latest/tasks/main.yml
+++ b/roles/os_update_latest/tasks/main.yml
@@ -1,3 +1,8 @@
---
- name: Update all packages
yum: name=* state=latest
+ when: ansible_pkg_mgr == "yum"
+
+- name: Update all packages
+ dnf: name=* state=latest
+ when: ansible_pkg_mgr == "dnf"
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index 8236cf135..514d6fd24 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -7,6 +7,12 @@ g_template_openshift_master:
- Openshift Master
key: create_app
+ - key: openshift.master.registry.healthz
+ description: "Shows the health status of the cluster's docker registry"
+ type: int
+ applications:
+ - Openshift Master
+
- key: openshift.master.process.count
description: Shows number of master processes running
type: int
@@ -62,6 +68,36 @@ g_template_openshift_master:
applications:
- Openshift Master
+ - key: openshift.master.pv.total.count
+ description: Total number of Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pv.available.count
+ description: Total number of Available Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pv.released.count
+ description: Total number of Released Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pv.bound.count
+ description: Total number of Bound Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pv.failed.count
+ description: Total number of Failed Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
- key: openshift.master.etcd.create.success
description: Show number of successful create actions
type: int
@@ -195,26 +231,6 @@ g_template_openshift_master:
- Openshift Master Metrics
ztriggers:
- - name: 'Application creation has failed on {HOST.NAME}'
- expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
- priority: avg
-
- - name: 'Openshift Master API health check is failing on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.api.healthz.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- priority: high
-
- - name: 'Openshift Master API PING check is failing on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.api.ping.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- priority: high
-
- - name: 'Openshift Master metric PING check is failing on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.metric.ping.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- priority: avg
-
- name: 'Openshift Master process not running on {HOST.NAME}'
expression: '{Template Openshift Master:openshift.master.process.count.max(#3)}<1'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
@@ -225,6 +241,16 @@ g_template_openshift_master:
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
priority: high
+ - name: 'Low number of etcd watchers on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ priority: avg
+
+ - name: 'Etcd ping failed on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ priority: high
+
- name: 'Number of users for Openshift Master on {HOST.NAME}'
expression: '{Template Openshift Master:openshift.master.user.count.last()}=0'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
@@ -235,14 +261,40 @@ g_template_openshift_master:
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
priority: info
- - name: 'Low number of etcd watchers on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ # Put triggers that depend on other triggers here (deps must be created first)
+ - name: 'Application creation has failed on {HOST.NAME}'
+ expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
priority: avg
- - name: 'Etcd ping failed on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ - name: 'Openshift Master API health check is failing on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.api.healthz.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
+ priority: high
+
+ - name: 'Openshift Master API PING check is failing on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.api.ping.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
+ priority: high
+
+ - name: 'Openshift Master metric PING check is failing on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.metric.ping.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
+ priority: avg
+
+ - name: 'Docker Registry check failed on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.registry.healthz.max(#2)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
priority: high
zgraphs:
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
index 79d52ef9b..778e4341d 100644
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ b/roles/os_zabbix/vars/template_os_linux.yml
@@ -258,26 +258,31 @@ g_template_os_linux:
- Network
ztriggerprototypes:
- - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
- expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
- priority: warn
-
- name: 'Filesystem: {#OSO_FILESYS} has less than 10% free disk space on {HOST.NAME}'
expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>90'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
priority: high
- - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free inodes on {HOST.NAME}'
- expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>90'
+ # This has a dependency on the previous trigger
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
priority: warn
+ dependencies:
+ - 'Filesystem: {#OSO_FILESYS} has less than 10% free disk space on {HOST.NAME}'
- name: 'Filesystem: {#OSO_FILESYS} has less than 5% free inodes on {HOST.NAME}'
expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>95'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
priority: high
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free inodes on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>90'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
+ priority: warn
+ dependencies:
+ - 'Filesystem: {#OSO_FILESYS} has less than 5% free inodes on {HOST.NAME}'
+
ztriggers:
- name: 'Too many TOTAL processes on {HOST.NAME}'
expression: '{Template OS Linux:proc.nprocs.last()}>5000'
@@ -304,15 +309,3 @@ g_template_os_linux:
description: 'CPU is less than 10% idle'
dependencies:
- 'CPU idle less than 5% on {HOST.NAME}'
-
- zgraphprototypes:
- - name: Network Interface Usage
- width: 1000
- height: 400
- graph_items:
- - item_name: "Bytes per second IN on network interface {#OSO_NET_INTERFACE}"
- item_type: prototype
- color: red
- - item_name: "Bytes per second OUT on network interface {#OSO_NET_INTERFACE}"
- item_type: prototype
- color: blue
diff --git a/roles/yum_repos/README.md b/roles/yum_repos/README.md
index 51ecd5d34..908ab4972 100644
--- a/roles/yum_repos/README.md
+++ b/roles/yum_repos/README.md
@@ -6,7 +6,7 @@ This role allows easy deployment of yum repository config files.
Requirements
------------
-Yum
+Yum or dnf
Role Variables
--------------
diff --git a/utils/site_assets/oo-install-bootstrap.sh b/utils/site_assets/oo-install-bootstrap.sh
index e1b2cec90..3847c029a 100755
--- a/utils/site_assets/oo-install-bootstrap.sh
+++ b/utils/site_assets/oo-install-bootstrap.sh
@@ -9,6 +9,13 @@ cmdlnargs="$@"
: ${OO_INSTALL_LOG:=${TMPDIR}/INSTALLPKGNAME.log}
[[ $TMPDIR != */ ]] && TMPDIR="${TMPDIR}/"
+if rpm -q dnf >/dev/null 2>&1;
+then
+ PKG_MGR="dnf"
+else
+ PKG_MGR="yum"
+fi
+
if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
then
clear
@@ -18,7 +25,7 @@ if [ -e /etc/redhat-release ]
then
for i in python python-virtualenv openssh-clients gcc
do
- rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. Please run \"yum install ${i}\"."; exit 1; }
+ rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. Please run \"${PKG_MGR} install ${i}\"."; exit 1; }
done
fi
for i in python virtualenv ssh gcc
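
The same dnf-first fallback could be expressed on the Python side of the
installer; a minimal sketch (detect_pkg_mgr is a hypothetical helper, and it
checks the PATH rather than the rpm database):

    from distutils.spawn import find_executable

    def detect_pkg_mgr():
        # Prefer dnf (Fedora 22+); fall back to yum everywhere else.
        return 'dnf' if find_executable('dnf') else 'yum'
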
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index d7c06745e..8cabe5431 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -79,21 +79,32 @@ def collect_hosts(version=None, masters_set=False, print_summary=True):
Returns: a list of host information collected from the user
"""
- min_masters_for_ha = 3
click.clear()
- click.echo('***Host Configuration***')
+ click.echo('*** Host Configuration ***')
message = """
-The OpenShift Master serves the API and web console. It also coordinates the
-jobs that have to run across the environment. It can even run the datastore.
-For wizard based installations the database will be embedded. It's possible to
-change this later using etcd from Red Hat Enterprise Linux 7.
+You must now specify the hosts that will compose your OpenShift cluster.
+
+Please enter an IP or hostname to connect to for each system in the cluster.
+You will then be prompted to identify what role you would like this system to
+serve in the cluster.
+
+OpenShift Masters serve the API and web console and coordinate the jobs to run
+across the environment. If desired you can specify multiple Master systems for
+an HA deployment, in which case you will be prompted to identify a *separate*
+system to act as the load balancer for your cluster after all Masters and Nodes
+are defined.
+
+If only one Master is specified, an etcd instance embedded within the OpenShift
+Master service will be used as the datastore. This can be later replaced with a
+separate etcd instance if desired. If multiple Masters are specified, a
+separate etcd cluster will be configured with each Master serving as a member.
Any Masters configured as part of this installation process will also be
configured as Nodes. This is so that the Master will be able to proxy to Pods
-from the API. By default this Node will be unscheduleable but this can be changed
+from the API. By default this Node will be unschedulable, but this can be changed
after installation with 'oadm manage-node'.
-The OpenShift Node provides the runtime environments for containers. It will
+OpenShift Nodes provide the runtime environments for containers. They will
host the required services to be managed by the Master.
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
@@ -106,8 +117,7 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
num_masters = 0
while more_hosts:
host_props = {}
- host_props['connect_to'] = click.prompt('Enter hostname or IP address:',
- default='',
+ host_props['connect_to'] = click.prompt('Enter hostname or IP address',
value_proc=validate_prompt_hostname)
if not masters_set:
@@ -115,7 +125,7 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
host_props['master'] = True
num_masters += 1
- if num_masters >= min_masters_for_ha or version == '3.0':
+ if version == '3.0':
masters_set = True
host_props['node'] = True
@@ -134,31 +144,111 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
hosts.append(host)
if print_summary:
- click.echo('')
- click.echo('Current Masters: {}'.format(num_masters))
- click.echo('Current Nodes: {}'.format(len(hosts)))
- click.echo('Additional Masters required for HA: {}'.format(max(min_masters_for_ha - num_masters, 0)))
- click.echo('')
+ print_installation_summary(hosts)
- if num_masters <= 1 or num_masters >= min_masters_for_ha:
+        # Ask whether to add more hosts unless we are sitting at exactly two
+        # masters: that is an invalid HA configuration, so the question is
+        # skipped and the user must keep adding hosts.
+ if masters_set or num_masters != 2:
more_hosts = click.confirm('Do you want to add additional hosts?')
- if num_masters > 1:
- hosts.append(collect_master_lb())
+ if num_masters >= 3:
+ collect_master_lb(hosts)
return hosts
-def collect_master_lb():
+
+def print_installation_summary(hosts):
+ """
+ Displays a summary of all hosts configured thus far, and what role each
+ will play.
+
+    Shows total nodes/masters, hints for performing/modifying the deployment
+    with additional setup, and warnings for invalid or sub-optimal
+    configurations.
+ """
+ click.clear()
+ click.echo('*** Installation Summary ***\n')
+ click.echo('Hosts:')
+ for host in hosts:
+ print_host_summary(hosts, host)
+
+ masters = [host for host in hosts if host.master]
+ nodes = [host for host in hosts if host.node]
+ dedicated_nodes = [host for host in hosts if host.node and not host.master]
+ click.echo('')
+ click.echo('Total OpenShift Masters: %s' % len(masters))
+ click.echo('Total OpenShift Nodes: %s' % len(nodes))
+
+ if len(masters) == 1:
+ ha_hint_message = """
+NOTE: Add a total of 3 or more Masters to perform an HA installation."""
+ click.echo(ha_hint_message)
+ elif len(masters) == 2:
+ min_masters_message = """
+WARNING: A minimum of 3 masters is required to perform an HA installation.
+Please add one more to proceed."""
+ click.echo(min_masters_message)
+ elif len(masters) >= 3:
+ ha_message = """
+NOTE: Multiple Masters specified; this will be an HA deployment with a separate
+etcd cluster. You will be prompted to provide the FQDN of a load balancer once
+finished entering hosts."""
+ click.echo(ha_message)
+
+ dedicated_nodes_message = """
+WARNING: Dedicated Nodes are recommended for an HA deployment. If no dedicated
+Nodes are specified, each configured Master will be marked as a schedulable
+Node."""
+
+ min_ha_nodes_message = """
+WARNING: A minimum of 3 dedicated Nodes is recommended for an HA
+deployment."""
+ if len(dedicated_nodes) == 0:
+ click.echo(dedicated_nodes_message)
+ elif len(dedicated_nodes) < 3:
+ click.echo(min_ha_nodes_message)
+
+ click.echo('')
+
+
+def print_host_summary(all_hosts, host):
+ click.echo("- %s" % host.connect_to)
+ if host.master:
+ click.echo(" - OpenShift Master")
+ if host.node:
+ if host.is_dedicated_node():
+ click.echo(" - OpenShift Node (Dedicated)")
+ elif host.is_schedulable_node(all_hosts):
+ click.echo(" - OpenShift Node")
+ else:
+ click.echo(" - OpenShift Node (Unscheduled)")
+ if host.master_lb:
+ if host.preconfigured:
+ click.echo(" - Load Balancer (Preconfigured)")
+ else:
+ click.echo(" - Load Balancer (HAProxy)")
+ if host.master:
+ if host.is_etcd_member(all_hosts):
+ click.echo(" - Etcd Member")
+ else:
+ click.echo(" - Etcd (Embedded)")
+
+
+def collect_master_lb(hosts):
"""
- Get an HA proxy from the user
+ Get a valid load balancer from the user and append it to the list of
+ hosts.
+
+    Ensure the user does not specify a system already in use as a master or
+    node, as this is an invalid configuration.
"""
message = """
Setting up High Availability Masters requires a load balancing solution.
-Please provide a host that will be configured as a proxy. This can either be
-an existing load balancer configured to balance all masters on port 8443 or a
-new host that will have HAProxy installed on it.
+Please provide the FQDN of a host that will be configured as a proxy. This
+can be either an existing load balancer configured to balance all masters on
+port 8443 or a new host that will have HAProxy installed on it.
-If the host provided does is not yet configured a reference haproxy load
+If the host provided is not yet configured, a reference haproxy load
balancer will be installed. It's important to note that while the rest of the
environment will be fault tolerant this reference load balancer will not be.
It can be replaced post-installation with a load balancer with the same
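
The summary logic added above reduces to a small decision table; a sketch of
the same sizing rules (ha_status is a hypothetical helper mirroring
print_installation_summary's branches):

    def ha_status(num_masters, num_dedicated_nodes):
        if num_masters == 1:
            return 'single master, embedded etcd'
        if num_masters == 2:
            return 'invalid: a third master is required for HA'
        if num_dedicated_nodes == 0:
            return 'HA, but masters will double as schedulable nodes'
        if num_dedicated_nodes < 3:
            return 'HA, but fewer than 3 dedicated nodes'
        return 'HA with dedicated nodes'
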
@@ -166,17 +256,28 @@ hostname.
"""
click.echo(message)
host_props = {}
- host_props['connect_to'] = click.prompt('Enter hostname or IP address:',
- default='',
- value_proc=validate_prompt_hostname)
+
+ # Using an embedded function here so we have access to the hosts list:
+ def validate_prompt_lb(hostname):
+ # Run the standard hostname check first:
+ hostname = validate_prompt_hostname(hostname)
+
+ # Make sure this host wasn't already specified:
+ for host in hosts:
+ if host.connect_to == hostname and (host.master or host.node):
+ raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
+ 'please specify a separate host' % hostname)
+ return hostname
+
+ host_props['connect_to'] = click.prompt('Enter hostname or IP address',
+ value_proc=validate_prompt_lb)
install_haproxy = click.confirm('Should the reference haproxy load balancer be installed on this host?')
host_props['preconfigured'] = not install_haproxy
host_props['master'] = False
host_props['node'] = False
host_props['master_lb'] = True
master_lb = Host(**host_props)
-
- return master_lb
+ hosts.append(master_lb)
def confirm_hosts_facts(oo_cfg, callback_facts):
hosts = oo_cfg.hosts
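
The embedded validator above is a closure over the hosts list; a standalone
equivalent for illustration (validate_lb is hypothetical, and the real code
also runs validate_prompt_hostname on the input first):

    import click

    def validate_lb(hostname, hosts):
        # Reject any host already entered as a master or node.
        for host in hosts:
            if host.connect_to == hostname and (host.master or host.node):
                raise click.BadParameter('Cannot re-use "%s" as a load '
                                         'balancer, please specify a '
                                         'separate host' % hostname)
        return hostname
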
@@ -249,35 +350,44 @@ Edit %s with the desired values and run `atomic-openshift-installer --unattended
-def check_hosts_config(oo_cfg):
+def check_hosts_config(oo_cfg, unattended):
click.clear()
masters = [host for host in oo_cfg.hosts if host.master]
+
+ if len(masters) == 2:
+ click.echo("A minimum of 3 Masters are required for HA deployments.")
+ sys.exit(1)
+
if len(masters) > 1:
master_lb = [host for host in oo_cfg.hosts if host.master_lb]
if len(master_lb) > 1:
- click.echo('More than one Master load balancer specified. Only one is allowed.')
- sys.exit(0)
+ click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.')
+ sys.exit(1)
elif len(master_lb) == 1:
if master_lb[0].master or master_lb[0].node:
- click.echo('The Master load balancer is configured as a master or node. Please correct this.')
- sys.exit(0)
+ click.echo('ERROR: The Master load balancer is configured as a master or node. Please correct this.')
+ sys.exit(1)
else:
message = """
-No HAProxy given in config. Either specify one or provide a load balancing solution
-of your choice to balance the master API (port 8443) on all master hosts.
+ERROR: No master load balancer specified in config. You must provide the FQDN
+of a load balancer to balance the API (port 8443) on all Master hosts.
https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
"""
- confirm_continue(message)
+ click.echo(message)
+ sys.exit(1)
- nodes = [host for host in oo_cfg.hosts if host.node]
- if len(masters) == len(nodes):
+ dedicated_nodes = [host for host in oo_cfg.hosts if host.node and not host.master]
+ if len(dedicated_nodes) == 0:
message = """
-No dedicated Nodes specified. By default, colocated Masters have their Nodes
-set to unscheduleable. Continuing at this point will label all nodes as
-scheduleable.
+WARNING: No dedicated Nodes specified. By default, colocated Masters have
+their Nodes set to unschedulable. If you proceed, all nodes will be labelled
+as schedulable.
"""
- confirm_continue(message)
+ if unattended:
+ click.echo(message)
+ else:
+ confirm_continue(message)
return
@@ -301,7 +411,8 @@ def get_variant_and_version(multi_master=False):
return product, version
def confirm_continue(message):
- click.echo(message)
+ if message:
+ click.echo(message)
click.confirm("Are you ready to continue?", default=False, abort=True)
return
@@ -391,7 +502,7 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h
def collect_new_nodes():
click.clear()
- click.echo('***New Node Configuration***')
+ click.echo('*** New Node Configuration ***')
message = """
Add new nodes here
"""
@@ -639,9 +750,10 @@ def install(ctx, force):
else:
oo_cfg = get_missing_info_from_user(oo_cfg)
- check_hosts_config(oo_cfg)
+ check_hosts_config(oo_cfg, ctx.obj['unattended'])
click.echo('Gathering information from hosts...')
+ print_installation_summary(oo_cfg.hosts)
callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
verbose)
if error:
@@ -666,8 +778,8 @@ def install(ctx, force):
click.echo('Ready to run installation process.')
message = """
-If changes are needed to the values recorded by the installer please update {}.
-""".format(oo_cfg.config_path)
+If changes are needed, please edit the config file above and re-run.
+"""
if not ctx.obj['unattended']:
confirm_continue(message)
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index b6f0cdce3..1be85bc1d 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -50,8 +50,8 @@ class Host(object):
self.containerized = kwargs.get('containerized', False)
if self.connect_to is None:
- raise OOConfigInvalidHostError("You must specify either and 'ip' " \
- "or 'hostname' to connect to.")
+ raise OOConfigInvalidHostError("You must specify either an ip " \
+ "or hostname as 'connect_to'")
if self.master is False and self.node is False and self.master_lb is False:
raise OOConfigInvalidHostError(
@@ -73,6 +73,32 @@ class Host(object):
d[prop] = getattr(self, prop)
return d
+    def is_etcd_member(self, all_hosts):
+        """ Will this host be a member of a standalone etcd cluster? """
+        if not self.master:
+            return False
+        masters = [host for host in all_hosts if host.master]
+        return len(masters) > 1
+
+ def is_dedicated_node(self):
+ """ Will this host be a dedicated node. (not a master) """
+ return self.node and not self.master
+
+    def is_schedulable_node(self, all_hosts):
+        """ Will this host be a node marked as schedulable? """
+        if not self.node:
+            return False
+        if not self.master:
+            return True
+
+        masters = [host for host in all_hosts if host.master]
+        nodes = [host for host in all_hosts if host.node]
+        return len(masters) == len(nodes)
+
class OOConfig(object):
default_dir = os.path.normpath(
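
To see how the three new predicates interact, consider a three-master cluster
with no dedicated nodes (values are illustrative; results in comments):

    hosts = [Host(connect_to='10.0.0.%d' % i, master=True, node=True)
             for i in (1, 2, 3)]

    hosts[0].is_etcd_member(hosts)       # True  - more than one master
    hosts[0].is_dedicated_node()         # False - it is also a master
    hosts[0].is_schedulable_node(hosts)  # True  - every node is a master
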
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 9afc9a644..17196a813 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -58,19 +58,14 @@ def generate_inventory(hosts):
base_inventory.write('\n[nodes]\n')
- # TODO: It would be much better to calculate the scheduleability elsewhere
- # and store it on the Node object.
- if set(nodes) == set(masters):
- for node in nodes:
- write_host(node, base_inventory)
- else:
- for node in nodes:
- # TODO: Until the Master can run the SDN itself we have to configure the Masters
- # as Nodes too.
- scheduleable = True
- if node in masters:
- scheduleable = False
- write_host(node, base_inventory, scheduleable)
+ for node in nodes:
+ # Let the fact defaults decide if we're not a master:
+ schedulable = None
+
+        # If the node is also a master, we must explicitly set schedulability:
+ if node.master:
+ schedulable = node.is_schedulable_node(hosts)
+ write_host(node, base_inventory, schedulable)
if not getattr(proxy, 'preconfigured', True):
base_inventory.write('\n[lb]\n')
@@ -106,13 +101,13 @@ def write_inventory_vars(base_inventory, multiple_masters, proxy):
base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
if CFG.settings['ansible_ssh_user'] != 'root':
base_inventory.write('ansible_become=true\n')
- if multiple_masters:
+ if multiple_masters and proxy is not None:
base_inventory.write('openshift_master_cluster_method=native\n')
base_inventory.write("openshift_master_cluster_hostname={}\n".format(proxy.hostname))
base_inventory.write("openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname))
-def write_host(host, inventory, scheduleable=True):
+def write_host(host, inventory, schedulable=None):
global CFG
facts = ''
@@ -126,8 +121,16 @@ def write_host(host, inventory, scheduleable=True):
facts += ' openshift_public_hostname={}'.format(host.public_hostname)
# TODO: For now write_host handles both masters and nodes.
# Technically only nodes will ever need this.
- if not scheduleable:
- facts += ' openshift_scheduleable=False'
+
+    # Distinguish between three states: schedulability unspecified (use the
+    # fact defaults), explicitly set to True, or explicitly set to False:
+    if schedulable is None:
+        pass
+    elif schedulable:
+        facts += ' openshift_schedulable=True'
+    else:
+        facts += ' openshift_schedulable=False'
+
installer_host = socket.gethostname()
if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
facts += ' ansible_connection=local'
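
The three-state handling yields one of three inventory forms per host; a
condensed sketch of the fact rendering (schedulable_fact is a hypothetical
helper):

    def schedulable_fact(schedulable):
        # None -> omit the fact and let the openshift_facts defaults decide.
        if schedulable is None:
            return ''
        return ' openshift_schedulable=%s' % schedulable

    assert schedulable_fact(None) == ''
    assert schedulable_fact(True) == ' openshift_schedulable=True'
    assert schedulable_fact(False) == ' openshift_schedulable=False'
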
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index c951b6580..d028bf472 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -5,12 +5,10 @@
import copy
import os
import ConfigParser
-import yaml
import ooinstall.cli_installer as cli
-from click.testing import CliRunner
-from test.oo_config_tests import OOInstallFixture
+from test.fixture import OOCliFixture, SAMPLE_CONFIG, build_input, read_yaml
from mock import patch
@@ -76,8 +74,32 @@ MOCK_FACTS_QUICKHA = {
},
}
-# Substitute in a product name before use:
-SAMPLE_CONFIG = """
+# Missing connect_to on some hosts:
+BAD_CONFIG = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ node: true
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+"""
+
+QUICKHA_CONFIG = """
variant: %s
ansible_ssh_user: root
hosts:
@@ -93,6 +115,7 @@ hosts:
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
+ master: true
node: true
- connect_to: 10.0.0.3
ip: 10.0.0.3
@@ -100,9 +123,22 @@ hosts:
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
+ master: true
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ node: true
+ - connect_to: 10.0.0.5
+ ip: 10.0.0.5
+ hostname: proxy-private.example.com
+ public_ip: 24.222.0.5
+ public_hostname: proxy.example.com
+ master_lb: true
"""
-BAD_CONFIG = """
+QUICKHA_2_MASTER_CONFIG = """
variant: %s
ansible_ssh_user: root
hosts:
@@ -113,20 +149,56 @@ hosts:
public_hostname: master.example.com
master: true
node: true
- - ip: 10.0.0.2
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
node: true
+ - connect_to: 10.0.0.5
+ ip: 10.0.0.5
+ hostname: proxy-private.example.com
+ public_ip: 24.222.0.5
+ public_hostname: proxy.example.com
+ master_lb: true
+"""
+
+QUICKHA_CONFIG_REUSED_LB = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ master: true
+ node: true
+ master_lb: true
- connect_to: 10.0.0.3
ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
+ master: true
"""
-QUICKHA_CONFIG = """
+QUICKHA_CONFIG_NO_LB = """
variant: %s
ansible_ssh_user: root
hosts:
@@ -150,116 +222,9 @@ hosts:
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
- - connect_to: 10.0.0.4
- ip: 10.0.0.4
- hostname: proxy-private.example.com
- public_ip: 24.222.0.4
- public_hostname: proxy.example.com
- master_lb: true
+ master: true
"""
-class OOCliFixture(OOInstallFixture):
-
- def setUp(self):
- OOInstallFixture.setUp(self)
- self.runner = CliRunner()
-
- # Add any arguments you would like to test here, the defaults ensure
- # we only do unattended invocations here, and using temporary files/dirs.
- self.cli_args = ["-a", self.work_dir]
-
- def run_cli(self):
- return self.runner.invoke(cli.cli, self.cli_args)
-
- def assert_result(self, result, exit_code):
- if result.exception is not None or result.exit_code != exit_code:
- print "Unexpected result from CLI execution"
- print "Exit code: %s" % result.exit_code
- print "Exception: %s" % result.exception
- print result.exc_info
- import traceback
- traceback.print_exception(*result.exc_info)
- print "Output:\n%s" % result.output
- self.fail("Exception during CLI execution")
-
- def _read_yaml(self, config_file_path):
- f = open(config_file_path, 'r')
- config = yaml.safe_load(f.read())
- f.close()
- return config
-
- def _verify_load_facts(self, load_facts_mock):
- """ Check that we ran load facts with expected inputs. """
- load_facts_args = load_facts_mock.call_args[0]
- self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
- load_facts_args[0])
- self.assertEquals(os.path.join(self.work_dir,
- "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
- env_vars = load_facts_args[2]
- self.assertEquals(os.path.join(self.work_dir,
- '.ansible/callback_facts.yaml'),
- env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
- self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
-
- def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
- """ Check that we ran playbook with expected inputs. """
- hosts = run_playbook_mock.call_args[0][0]
- hosts_to_run_on = run_playbook_mock.call_args[0][1]
- self.assertEquals(exp_hosts_len, len(hosts))
- self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
-
- def _verify_config_hosts(self, written_config, host_count):
- print written_config['hosts']
- self.assertEquals(host_count, len(written_config['hosts']))
- for h in written_config['hosts']:
- self.assertTrue('hostname' in h)
- self.assertTrue('public_hostname' in h)
- if 'preconfigured' not in h:
- self.assertTrue(h['node'])
- self.assertTrue('ip' in h)
- self.assertTrue('public_ip' in h)
-
- #pylint: disable=too-many-arguments
- def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
- run_playbook_mock, cli_input,
- exp_hosts_len=None, exp_hosts_to_run_on_len=None,
- force=None):
- """
- Tests cli_installer.py:get_hosts_to_run_on. That method has quite a
- few subtle branches in the logic. The goal with this method is simply
- to handle all the messy stuff here and allow the main test cases to be
- easily read. The basic idea is to modify mock_facts to return a
- version indicating OpenShift is already installed on particular hosts.
- """
- load_facts_mock.return_value = (mock_facts, 0)
- run_playbook_mock.return_value = 0
-
- if cli_input:
- self.cli_args.append("install")
- result = self.runner.invoke(cli.cli,
- self.cli_args,
- input=cli_input)
- else:
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
-
- self.cli_args.extend(["-c", config_file, "install"])
- if force:
- self.cli_args.append("--force")
- result = self.runner.invoke(cli.cli, self.cli_args)
- written_config = self._read_yaml(config_file)
- self._verify_config_hosts(written_config, exp_hosts_len)
-
- self.assert_result(result, 0)
- self._verify_load_facts(load_facts_mock)
- self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)
-
- # Make sure we ran on the expected masters and nodes:
- hosts = run_playbook_mock.call_args[0][0]
- hosts_to_run_on = run_playbook_mock.call_args[0][1]
- self.assertEquals(exp_hosts_len, len(hosts))
- self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
-
class UnattendedCliTests(OOCliFixture):
def setUp(self):
@@ -438,7 +403,7 @@ class UnattendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
- written_config = self._read_yaml(config_file)
+ written_config = read_yaml(config_file)
self.assertEquals('openshift-enterprise', written_config['variant'])
# We didn't specify a version so the latest should have been assumed,
@@ -467,7 +432,7 @@ class UnattendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
- written_config = self._read_yaml(config_file)
+ written_config = read_yaml(config_file)
self.assertEquals('openshift-enterprise', written_config['variant'])
# Make sure our older version was preserved:
@@ -569,10 +534,11 @@ class UnattendedCliTests(OOCliFixture):
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
- assert result.exit_code == 1
- assert result.output == "You must specify either and 'ip' or 'hostname' to connect to.\n"
+ self.assertEquals(1, result.exit_code)
+ self.assertTrue("You must specify either an ip or hostname"
+ in result.output)
- #unattended with two masters, one node, and haproxy
+ #unattended with three masters, one node, and haproxy
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_full_run(self, load_facts_mock, run_playbook_mock):
@@ -586,25 +552,62 @@ class UnattendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
- load_facts_args = load_facts_mock.call_args[0]
- self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
- load_facts_args[0])
- self.assertEquals(os.path.join(self.work_dir,
- "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
- env_vars = load_facts_args[2]
- self.assertEquals(os.path.join(self.work_dir,
- '.ansible/callback_facts.yaml'),
- env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
- self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
- # If user running test has rpm installed, this might be set to default:
- self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
- env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
-
# Make sure we ran on the expected masters and nodes:
hosts = run_playbook_mock.call_args[0][0]
hosts_to_run_on = run_playbook_mock.call_args[0][1]
- self.assertEquals(4, len(hosts))
- self.assertEquals(4, len(hosts_to_run_on))
+ self.assertEquals(5, len(hosts))
+ self.assertEquals(5, len(hosts_to_run_on))
+
+ #unattended with two masters, one node, and haproxy
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_only_2_masters(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_2_MASTER_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is an invalid config:
+ self.assert_result(result, 1)
+ self.assertTrue("A minimum of 3 Masters are required" in result.output)
+
+ #unattended with three masters, one node, but no load balancer specified:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_no_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG_NO_LB % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is not a valid input:
+ self.assert_result(result, 1)
+ self.assertTrue('No master load balancer specified in config' in result.output)
+
+ #unattended with three masters, one node, and one of the masters reused as load balancer:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_reused_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG_REUSED_LB % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is not a valid configuration:
+ self.assert_result(result, 1)
+
class AttendedCliTests(OOCliFixture):
@@ -614,84 +617,13 @@ class AttendedCliTests(OOCliFixture):
self.config_file = os.path.join(self.work_dir, 'config.yml')
self.cli_args.extend(["-c", self.config_file])
- #pylint: disable=too-many-arguments,too-many-branches
- def _build_input(self, ssh_user=None, hosts=None, variant_num=None,
- add_nodes=None, confirm_facts=None, scheduleable_masters_ok=None,
- master_lb=None):
- """
- Builds a CLI input string with newline characters to simulate
- the full run.
- This gives us only one place to update when the input prompts change.
- """
-
- inputs = [
- 'y', # let's proceed
- ]
- if ssh_user:
- inputs.append(ssh_user)
-
- if variant_num:
- inputs.append(str(variant_num)) # Choose variant + version
-
- num_masters = 0
- if hosts:
- i = 0
- min_masters_for_ha = 3
- for (host, is_master) in hosts:
- inputs.append(host)
- if is_master:
- inputs.append('y')
- num_masters += 1
- else:
- inputs.append('n')
- #inputs.append('rpm')
- if i < len(hosts) - 1:
- if num_masters <= 1 or num_masters >= min_masters_for_ha:
- inputs.append('y') # Add more hosts
- else:
- inputs.append('n') # Done adding hosts
- i += 1
-
- if master_lb:
- inputs.append(master_lb[0])
- inputs.append('y' if master_lb[1] else 'n')
-
- # TODO: support option 2, fresh install
- if add_nodes:
- if scheduleable_masters_ok:
- inputs.append('y')
- inputs.append('1') # Add more nodes
- i = 0
- for (host, is_master) in add_nodes:
- inputs.append(host)
- #inputs.append('rpm')
- if i < len(add_nodes) - 1:
- inputs.append('y') # Add more hosts
- else:
- inputs.append('n') # Done adding hosts
- i += 1
-
- if add_nodes is None:
- total_hosts = hosts
- else:
- total_hosts = hosts + add_nodes
- if total_hosts is not None and num_masters == len(total_hosts):
- inputs.append('y')
-
- inputs.extend([
- confirm_facts,
- 'y', # lets do this
- ])
-
- return '\n'.join(inputs)
-
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_full_run(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', False),
('10.0.0.3', False)],
@@ -706,13 +638,13 @@ class AttendedCliTests(OOCliFixture):
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 3, 3)
- written_config = self._read_yaml(self.config_file)
+ written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 3)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
self.assertEquals('False',
- inventory.get('nodes', '10.0.0.1 openshift_scheduleable'))
+ inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
self.assertEquals(None,
inventory.get('nodes', '10.0.0.2'))
self.assertEquals(None,
@@ -732,7 +664,7 @@ class AttendedCliTests(OOCliFixture):
load_facts_mock.return_value = (mock_facts, 0)
run_playbook_mock.return_value = 0
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', False),
],
@@ -744,13 +676,12 @@ class AttendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli,
self.cli_args,
input=cli_input)
- print result
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 3, 2)
- written_config = self._read_yaml(self.config_file)
+ written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 3)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@@ -762,7 +693,7 @@ class AttendedCliTests(OOCliFixture):
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'),
SAMPLE_CONFIG % 'openshift-enterprise')
- cli_input = self._build_input(confirm_facts='y')
+ cli_input = build_input(confirm_facts='y')
self.cli_args.extend(["-c", config_file])
self.cli_args.append("install")
result = self.runner.invoke(cli.cli,
@@ -773,7 +704,7 @@ class AttendedCliTests(OOCliFixture):
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 3, 3)
- written_config = self._read_yaml(config_file)
+ written_config = read_yaml(config_file)
self._verify_config_hosts(written_config, 3)
#interactive with config file and all installed hosts
@@ -784,13 +715,13 @@ class AttendedCliTests(OOCliFixture):
mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True),
],
add_nodes=[('10.0.0.2', False)],
ssh_user='root',
variant_num=1,
- scheduleable_masters_ok=True,
+ schedulable_masters_ok=True,
confirm_facts='y')
self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
@@ -803,15 +734,15 @@ class AttendedCliTests(OOCliFixture):
#interactive multimaster: one more node than master
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
- def test_quick_ha1(self, load_facts_mock, run_playbook_mock):
+ def test_ha_dedicated_node(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', True),
- ('10.0.0.3', False),
- ('10.0.0.4', True)],
+ ('10.0.0.3', True),
+ ('10.0.0.4', False)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
@@ -824,30 +755,31 @@ class AttendedCliTests(OOCliFixture):
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 5, 5)
- written_config = self._read_yaml(self.config_file)
+ written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 5)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
self.assertEquals('False',
- inventory.get('nodes', '10.0.0.1 openshift_scheduleable'))
+ inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
self.assertEquals('False',
- inventory.get('nodes', '10.0.0.2 openshift_scheduleable'))
- self.assertEquals(None,
- inventory.get('nodes', '10.0.0.3'))
+ inventory.get('nodes', '10.0.0.2 openshift_schedulable'))
self.assertEquals('False',
- inventory.get('nodes', '10.0.0.4 openshift_scheduleable'))
+ inventory.get('nodes', '10.0.0.3 openshift_schedulable'))
+ self.assertEquals(None,
+ inventory.get('nodes', '10.0.0.4'))
- return
+ self.assertTrue(inventory.has_section('etcd'))
+ self.assertEquals(3, len(inventory.items('etcd')))
- #interactive multimaster: equal number masters and nodes
+ #interactive multimaster: identical masters and nodes
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
- def test_quick_ha2(self, load_facts_mock, run_playbook_mock):
+ def test_ha_no_dedicated_nodes(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', True),
('10.0.0.3', True)],
@@ -863,19 +795,38 @@ class AttendedCliTests(OOCliFixture):
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 4, 4)
- written_config = self._read_yaml(self.config_file)
+ written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 4)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
- self.assertEquals(None,
- inventory.get('nodes', '10.0.0.1'))
- self.assertEquals(None,
- inventory.get('nodes', '10.0.0.2'))
- self.assertEquals(None,
- inventory.get('nodes', '10.0.0.3'))
+ self.assertEquals('True',
+ inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
+ self.assertEquals('True',
+ inventory.get('nodes', '10.0.0.2 openshift_schedulable'))
+ self.assertEquals('True',
+ inventory.get('nodes', '10.0.0.3 openshift_schedulable'))
+
+    #interactive multimaster: a master offered as the load balancer should be rejected and re-prompted:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ha_reuse_master_as_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
- return
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True),
+ ('10.0.0.2', True),
+ ('10.0.0.3', False),
+ ('10.0.0.4', True)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=(['10.0.0.2', '10.0.0.5'], False))
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
#interactive all-in-one
@patch('ooinstall.openshift_ansible.run_main_playbook')
@@ -884,7 +835,7 @@ class AttendedCliTests(OOCliFixture):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True)],
ssh_user='root',
variant_num=1,
@@ -897,15 +848,13 @@ class AttendedCliTests(OOCliFixture):
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 1, 1)
- written_config = self._read_yaml(self.config_file)
+ written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 1)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
- self.assertEquals(None,
- inventory.get('nodes', '10.0.0.1'))
-
- return
+ self.assertEquals('True',
+ inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
# TODO: test with config file, attended add node
# TODO: test with config file, attended new node already in config file
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
new file mode 100644
index 000000000..90bd9e1ef
--- /dev/null
+++ b/utils/test/fixture.py
@@ -0,0 +1,221 @@
+# pylint: disable=missing-docstring
+import os
+import yaml
+
+import ooinstall.cli_installer as cli
+
+from test.oo_config_tests import OOInstallFixture
+from click.testing import CliRunner
+
+# Substitute in a product name before use:
+SAMPLE_CONFIG = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ node: true
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+"""
+
+def read_yaml(config_file_path):
+    with open(config_file_path, 'r') as cfg_f:
+        config = yaml.safe_load(cfg_f.read())
+    return config
+
+
+class OOCliFixture(OOInstallFixture):
+
+ def setUp(self):
+ OOInstallFixture.setUp(self)
+ self.runner = CliRunner()
+
+        # Add any arguments you would like to test here; the defaults ensure
+        # we only do unattended invocations using temporary files/dirs.
+ self.cli_args = ["-a", self.work_dir]
+
+ def run_cli(self):
+ return self.runner.invoke(cli.cli, self.cli_args)
+
+ def assert_result(self, result, exit_code):
+ if result.exit_code != exit_code:
+ print "Unexpected result from CLI execution"
+ print "Exit code: %s" % result.exit_code
+ print "Exception: %s" % result.exception
+ print result.exc_info
+ import traceback
+ traceback.print_exception(*result.exc_info)
+ print "Output:\n%s" % result.output
+ self.fail("Exception during CLI execution")
+
+ def _verify_load_facts(self, load_facts_mock):
+ """ Check that we ran load facts with expected inputs. """
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"),
+ load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+
+ def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
+ """ Check that we ran playbook with expected inputs. """
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+ def _verify_config_hosts(self, written_config, host_count):
+ self.assertEquals(host_count, len(written_config['hosts']))
+ for host in written_config['hosts']:
+ self.assertTrue('hostname' in host)
+ self.assertTrue('public_hostname' in host)
+ if 'preconfigured' not in host:
+ self.assertTrue(host['node'])
+ self.assertTrue('ip' in host)
+ self.assertTrue('public_ip' in host)
+
+ #pylint: disable=too-many-arguments
+ def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
+ run_playbook_mock, cli_input,
+ exp_hosts_len=None, exp_hosts_to_run_on_len=None,
+ force=None):
+ """
+ Tests cli_installer.py:get_hosts_to_run_on. That method has quite a
+ few subtle branches in the logic. The goal with this method is simply
+ to handle all the messy stuff here and allow the main test cases to be
+ easily read. The basic idea is to modify mock_facts to return a
+ version indicating OpenShift is already installed on particular hosts.
+ """
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ if cli_input:
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+ else:
+ config_file = self.write_config(
+ os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ if force:
+ self.cli_args.append("--force")
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ written_config = read_yaml(config_file)
+ self._verify_config_hosts(written_config, exp_hosts_len)
+
+ self.assert_result(result, 0)
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+
+#pylint: disable=too-many-arguments,too-many-branches
+def build_input(ssh_user=None, hosts=None, variant_num=None,
+ add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
+ master_lb=None):
+ """
+ Build an input string simulating a user entering values in an interactive
+ attended install.
+
+ This is intended to give us one place to update when the CLI prompts change.
+    We should aim to keep this driven by optional keyword arguments with
+    sensible defaults so the tests do not become too fragile.
+ """
+
+ inputs = [
+ 'y', # let's proceed
+ ]
+ if ssh_user:
+ inputs.append(ssh_user)
+
+ if variant_num:
+ inputs.append(str(variant_num)) # Choose variant + version
+
+ num_masters = 0
+ if hosts:
+ i = 0
+ for (host, is_master) in hosts:
+ inputs.append(host)
+ if is_master:
+ inputs.append('y')
+ num_masters += 1
+ else:
+ inputs.append('n')
+ #inputs.append('rpm')
+            # With exactly 2 masters the configuration is an invalid HA setup,
+            # so the CLI skips the "add more hosts" question and the user must
+            # enter the next host directly:
+ if num_masters != 2:
+ if i < len(hosts) - 1:
+ if num_masters >= 1:
+ inputs.append('y') # Add more hosts
+ else:
+ inputs.append('n') # Done adding hosts
+ i += 1
+
+ # You can pass a single master_lb or a list if you intend for one to get rejected:
+ if master_lb:
+ if isinstance(master_lb[0], list) or isinstance(master_lb[0], tuple):
+ inputs.extend(master_lb[0])
+ else:
+ inputs.append(master_lb[0])
+ inputs.append('y' if master_lb[1] else 'n')
+
+ # TODO: support option 2, fresh install
+ if add_nodes:
+ if schedulable_masters_ok:
+ inputs.append('y')
+ inputs.append('1') # Add more nodes
+ i = 0
+ for (host, is_master) in add_nodes:
+ inputs.append(host)
+ #inputs.append('rpm')
+ if i < len(add_nodes) - 1:
+ inputs.append('y') # Add more hosts
+ else:
+ inputs.append('n') # Done adding hosts
+ i += 1
+
+ if add_nodes is None:
+ total_hosts = hosts
+ else:
+ total_hosts = hosts + add_nodes
+ if total_hosts is not None and num_masters == len(total_hosts):
+ inputs.append('y')
+
+ inputs.extend([
+ confirm_facts,
+        'y', # let's do this
+ ])
+
+ return '\n'.join(inputs)
+
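
For reference, a typical attended HA invocation of build_input, shaped like
the test_ha_dedicated_node case above (addresses are illustrative):

    cli_input = build_input(hosts=[('10.0.0.1', True),
                                   ('10.0.0.2', True),
                                   ('10.0.0.3', True),
                                   ('10.0.0.4', False)],
                            ssh_user='root',
                            variant_num=1,
                            confirm_facts='y',
                            master_lb=('10.0.0.5', False))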