-rw-r--r--  README_openstack.md | 1
-rw-r--r--  docs/best_practices_guide.adoc | 50
-rw-r--r--  filter_plugins/oo_filters.py | 4
-rw-r--r--  inventory/byo/hosts.origin.example | 4
-rw-r--r--  playbooks/adhoc/uninstall.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml | 9
-rw-r--r--  playbooks/byo/openshift-node/network_manager.yml | 36
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml | 5
-rw-r--r--  playbooks/common/openshift-node/config.yml | 34
-rw-r--r--  playbooks/gce/openshift-cluster/library/gce.py | 543
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 14
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 3
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml | 14
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml | 14
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 2
-rw-r--r--  roles/cockpit/tasks/main.yml | 2
-rw-r--r--  roles/dns/tasks/main.yml | 3
-rw-r--r--  roles/docker/tasks/main.yml | 2
-rw-r--r--  roles/etcd/tasks/etcdctl.yml | 3
-rw-r--r--  roles/etcd/tasks/main.yml | 2
-rw-r--r--  roles/etcd_ca/tasks/main.yml | 2
-rw-r--r--  roles/etcd_server_certificates/tasks/main.yml | 2
-rw-r--r--  roles/flannel/tasks/main.yml | 2
-rw-r--r--  roles/kube_nfs_volumes/tasks/main.yml | 5
-rw-r--r--  roles/kube_nfs_volumes/tasks/nfs.yml | 2
-rw-r--r--  roles/nickhammond.logrotate/tasks/main.yml | 2
-rw-r--r--  roles/nuage_ca/tasks/main.yaml | 4
-rw-r--r--  roles/nuage_node/handlers/main.yaml | 4
-rw-r--r--  roles/nuage_node/meta/main.yml | 13
-rw-r--r--  roles/nuage_node/tasks/iptables.yml | 17
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 4
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 4
-rw-r--r--  roles/openshift_clock/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_common/tasks/main.yml | 4
-rw-r--r--  roles/openshift_expand_partition/tasks/main.yml | 2
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 13
-rw-r--r--  roles/openshift_loadbalancer/meta/main.yml | 5
-rw-r--r--  roles/openshift_loadbalancer/tasks/main.yml | 2
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 52
-rw-r--r--  roles/openshift_manageiq/tasks/main.yaml | 10
-rw-r--r--  roles/openshift_manageiq/vars/main.yml | 15
-rw-r--r--  roles/openshift_master/tasks/main.yml | 8
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 24
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 | 10
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 3
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_metrics/tasks/install.yml | 27
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 6
-rw-r--r--  roles/openshift_node/tasks/main.yml | 21
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/ceph.yml | 4
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/glusterfs.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/iscsi.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/nfs.yml | 2
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 2
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 15
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/main.yml | 3
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/nfs.yml | 4
-rw-r--r--  roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 | 2
-rw-r--r--  roles/os_firewall/tasks/firewall/firewalld.yml | 2
-rw-r--r--  roles/os_firewall/tasks/firewall/iptables.yml | 2
-rw-r--r--  roles/os_update_latest/tasks/main.yml | 2
70 files changed, 354 insertions, 748 deletions
diff --git a/README_openstack.md b/README_openstack.md
index d3d1f9052..2578488c7 100644
--- a/README_openstack.md
+++ b/README_openstack.md
@@ -50,6 +50,7 @@ The following options are used only by `heat_stack.yaml`. They are so used only
* `floating_ip_pool` (default to `external`): comma separated list of floating IP pools
* `ssh_from` (default to `0.0.0.0/0`): IPs authorized to connect to the VMs via ssh
* `node_port_from` (default to `0.0.0.0/0`): IPs authorized to connect to the services exposed via nodePort
+* `heat_timeout` (default to `3`): Timeout (in minutes) passed to heat for create or update stack.
Creating a cluster
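The new `heat_timeout` option goes through the same `oo_option` lookup as the entries above (the wiring appears later in this diff, in playbooks/openstack/openshift-cluster/vars.yml), so it should be overridable the same way as the other options in this list — for example, assuming the usual extra-vars mechanism, `ansible-playbook playbooks/openstack/openshift-cluster/launch.yml -e heat_timeout=5` to give Heat five minutes for the stack operation.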
diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc
index cac9645a6..7f3d85d40 100644
--- a/docs/best_practices_guide.adoc
+++ b/docs/best_practices_guide.adoc
@@ -2,7 +2,7 @@
= openshift-ansible Best Practices Guide
-The purpose of this guide is to describe the preferred patterns and best practices used in this repository (both in ansible and python).
+The purpose of this guide is to describe the preferred patterns and best practices used in this repository (both in Ansible and Python).
It is important to note that this repository may not currently comply with all best practices, but the intention is that it will.
@@ -52,11 +52,11 @@ If mode lines for other editors are needed, please open a GitHub issue.
=== Method Signatures
'''
-[[When-adding-a-new-paramemter-to-an-existing-method-a-default-value-SHOULD-be-used]]
+[[When-adding-a-new-parameter-to-an-existing-method-a-default-value-SHOULD-be-used]]
[cols="2v,v"]
|===
-| <<When-adding-a-new-paramemter-to-an-existing-method-a-default-value-SHOULD-be-used, Rule>>
-| When adding a new paramemter to an existing method, a default value SHOULD be used
+| <<When-adding-a-new-parameter-to-an-existing-method-a-default-value-SHOULD-be-used, Rule>>
+| When adding a new parameter to an existing method, a default value SHOULD be used
|===
The purpose of this rule is to make it so that method signatures are backwards compatible.
@@ -76,7 +76,7 @@ def add_person(first_name, last_name, age=None):
=== PyLint
-http://www.pylint.org/[PyLint] is used in an attempt to keep the python code as clean and as manageable as possible. The build bot runs each pull request through PyLint and any warnings or errors cause the build bot to fail the pull request.
+http://www.pylint.org/[PyLint] is used in an attempt to keep the Python code as clean and as manageable as possible. The build bot runs each pull request through PyLint and any warnings or errors cause the build bot to fail the pull request.
'''
[[PyLint-rules-MUST-NOT-be-disabled-on-a-whole-file]]
@@ -194,7 +194,7 @@ The purpose of this rule is to make it easy to include custom modules in our pla
| Parameters to Ansible modules SHOULD use the Yaml dictionary format when 3 or more parameters are being passed
|===
-When a module has several parameters that are being passed in, it's hard to see exactly what value each parameter is getting. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each paramemter.
+When a module has several parameters that are being passed in, it's hard to see exactly what value each parameter is getting. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each parameter.
.Bad:
[source,yaml]
@@ -222,7 +222,7 @@ When a module has several parameters that are being passed in, it's hard to see
| Parameters to Ansible modules SHOULD use the Yaml dictionary format when the line length exceeds 120 characters
|===
-Lines that are long quickly become a wall of text that isn't easily parsable. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each paramemter.
+Lines that are long quickly become a wall of text that isn't easily parsable. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each parameter.
.Bad:
[source,yaml]
@@ -338,9 +338,9 @@ If an Ansible role requires certain variables to be set, it's best to check for
[cols="2v,v"]
|===
| <<Ansible-tasks-SHOULD-NOT-be-used-in-ansible-playbooks-Instead-use-pre_tasks-and-post_tasks, Rule>>
-| Ansible tasks SHOULD NOT be used in ansible playbooks. Instead, use pre_tasks and post_tasks.
+| Ansible tasks SHOULD NOT be used in Ansible playbooks. Instead, use pre_tasks and post_tasks.
|===
-An Ansible play is defined as a Yaml dictionary. Because of that, ansible doesn't know if the play's tasks list or roles list was specified first. Therefore Ansible always runs tasks after roles.
+An Ansible play is defined as a Yaml dictionary. Because of that, Ansible doesn't know if the play's tasks list or roles list was specified first. Therefore Ansible always runs tasks after roles.
This can be quite confusing if the tasks list is defined in the playbook before the roles list because people assume in order execution in Ansible.
@@ -432,7 +432,7 @@ This is very useful when developing and debugging new tasks. It can also signifi
[[Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent]]
[cols="2v,v"]
|===
-| [[Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent, Rule]]
+| <<Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent, Rule>>
| Ansible Roles SHOULD be named like technology_component[_subcomponent].
|===
@@ -484,31 +484,23 @@ If you want to use default with variables that evaluate to false you have to set
----
-In other words, normally the `default` filter will only replace the value if it's undefined. By setting the second parameter to `true`, it will also replace the value if it defaults to a false value in python, so None, empty list, empty string, etc.
+In other words, normally the `default` filter will only replace the value if it's undefined. By setting the second parameter to `true`, it will also replace the value if it defaults to a false value in Python, so None, empty list, empty string, etc.
This is almost always more desirable than an empty list, string, etc.
=== Yum and DNF
'''
-[[Package-installation-MUST-use-ansible-action-module-to-abstract-away-dnf-yum]]
+[[Package-installation-MUST-use-ansible-package-module-to-abstract-away-dnf-yum]]
[cols="2v,v"]
|===
-| <<Package-installation-MUST-use-ansible-action-module-to-abstract-away-dnf-yum, Rule>>
-| Package installation MUST use ansible action module to abstract away dnf/yum.
+| <<Package-installation-MUST-use-ansible-package-module-to-abstract-away-dnf-yum, Rule>>
+| Package installation MUST use Ansible `package` module to abstract away dnf/yum.
|===
-[[Package-installation-MUST-use-name-and-state-present-rather-than-pkg-and-state-installed-respectively]]
-[cols="2v,v"]
-|===
-| <<Package-installation-MUST-use-name-and-state-present-rather-than-pkg-and-state-installed-respectively, Rule>>
-| Package installation MUST use name= and state=present rather than pkg= and state=installed respectively.
-|===
+The Ansible `package` module calls the associated package manager for the underlying OS.
-This is done primarily because if you're registering the result of the
-installation and you have two conditional tasks based on whether or not yum or
-dnf are in use you'll end up inadvertently overwriting the value. It also
-reduces duplication. name= and state=present are common between dnf and yum
-modules.
+.Reference
+* https://docs.ansible.com/ansible/package_module.html[Ansible package module]
.Bad:
[source,yaml]
@@ -516,12 +508,12 @@ modules.
---
# tasks.yml
- name: Install etcd (for etcdctl)
- yum: name=etcd state=latest"
+ yum: name=etcd state=latest
when: "ansible_pkg_mgr == yum"
register: install_result
- name: Install etcd (for etcdctl)
- dnf: name=etcd state=latest"
+ dnf: name=etcd state=latest
when: "ansible_pkg_mgr == dnf"
register: install_result
----
@@ -533,6 +525,6 @@ modules.
---
# tasks.yml
- name: Install etcd (for etcdctl)
- action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+ package: name=etcd state=latest
register: install_result
- ----
+----
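A minimal sketch tying the guide's two preferences together — the `package` module combined with the YAML dictionary parameter format recommended earlier in the guide (the package name is illustrative):

[source,yaml]
----
- name: Install etcd (for etcdctl)
  package:
    name: etcd
    state: present
  register: install_result
----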
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 97eacf9bf..38bc3ad6b 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -608,8 +608,8 @@ class FilterModule(object):
host_type=_get_tag_value(host['group_names'], 'host-type'),
sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
host={'name': host['inventory_hostname'],
- 'public IP': host['ansible_ssh_host'],
- 'private IP': host['ansible_default_ipv4']['address']})
+ 'public IP': host['oo_public_ipv4'],
+ 'private IP': host['oo_private_ipv4']})
except KeyError:
pass
return clusters
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index e769537f9..5a95ecf94 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -634,6 +634,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Enable API service auditing, available as of 1.3
#openshift_master_audit_config={"basicAuditEnabled": true}
+# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
+# by deployment_type=origin
+#openshift_enable_origin_repo=false
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 4ea639cbe..be1070f73 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -84,7 +84,7 @@
- firewalld
- name: Remove packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ package: name={{ item }} state=absent
when: not is_atomic | bool
with_items:
- atomic-enterprise
@@ -114,7 +114,7 @@
- tuned-profiles-origin-node
- name: Remove flannel package
- action: "{{ ansible_pkg_mgr }} name=flannel state=absent"
+ package: name=flannel state=absent
when: openshift_use_flannel | default(false) | bool and not is_atomic | bool
- shell: systemctl reset-failed
@@ -247,7 +247,7 @@
- atomic-openshift-master
- name: Remove packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ package: name={{ item }} state=absent
when: not is_atomic | bool
with_items:
- atomic-enterprise
@@ -349,7 +349,7 @@
failed_when: false
- name: Remove packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ package: name={{ item }} state=absent
when: not is_atomic | bool
with_items:
- etcd
@@ -388,7 +388,7 @@
- firewalld
- name: Remove packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ package: name={{ item }} state=absent
when: not is_atomic | bool
with_items:
- haproxy
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index 4934ae6d0..ed8aac398 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -16,11 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}"
+ oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}"
with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
- gather_facts: no
- tasks:
- debug:
- msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}"
+ msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
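The removed per-host "List Hosts" play is folded into the `oo_pretty_print_cluster` filter call, which now reads the `oo_public_ipv4`/`oo_private_ipv4` host variables set via `add_host` above; the matching change in filter_plugins/oo_filters.py earlier in this diff is what switched the filter from `ansible_ssh_host`/`ansible_default_ipv4` to those keys.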
diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml
new file mode 100644
index 000000000..8c810096f
--- /dev/null
+++ b/playbooks/byo/openshift-node/network_manager.yml
@@ -0,0 +1,36 @@
+---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: "{{ g_all_hosts }}"
+
+- hosts: l_oo_all_hosts
+ become: yes
+ tasks:
+ - name: install NetworkManager
+ package:
+ name: 'NetworkManager'
+ state: present
+
+ - name: configure NetworkManager
+ lineinfile:
+ dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
+ regexp: '^{{ item }}='
+ line: '{{ item }}=yes'
+ state: present
+ create: yes
+ with_items:
+ - 'USE_PEERDNS'
+ - 'NM_CONTROLLED'
+
+ - name: enable and start NetworkManager
+ service:
+ name: 'NetworkManager'
+ state: started
+ enabled: yes
\ No newline at end of file
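A usage note (the inventory path is assumed): the new playbook is self-contained and can be run on its own, e.g. `ansible-playbook -i <inventory> playbooks/byo/openshift-node/network_manager.yml`. The `lineinfile` tasks match each key with the `^KEY=` regexp and rewrite the line to `KEY=yes`, so repeated runs are idempotent, and `create: yes` handles hosts where the interface config file does not exist yet.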
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 6d83d2527..18f99728c 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -1,7 +1,11 @@
---
+- name: Ensure that all non-node hosts are accessible
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config
+ any_errors_fatal: true
+ tasks:
+
- name: Initialize host facts
hosts: oo_all_hosts
- any_errors_fatal: true
roles:
- openshift_facts
tasks:
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 7112a6084..a1bd1bd92 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,5 +1,22 @@
---
# NOTE: requires openshift_facts be run
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ # See:
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1395047
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1282961
+ # https://github.com/openshift/openshift-ansible/issues/1138
+ - name: Check for bad combinations of yum and subscription-manager
+ command: >
+ {{ repoquery_cmd }} --installed --qf '%{version}' "yum"
+ register: yum_ver_test
+ changed_when: false
+ when: not openshift.common.is_atomic | bool
+ - fail:
+ msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
+ when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
+
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
roles:
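Assuming `repoquery_cmd` resolves to plain `repoquery` (its `set_fact` lives in the openshift_facts role, further down in this diff), the new check expands to roughly `repoquery --installed --qf '%{version}' yum`; the follow-up `fail` task only fires when that specific `search-disabled-repos` plugin error string appears in the command output, i.e. on the broken yum/subscription-manager combinations tracked in the linked bugs.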
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
index 417096dd0..5d753447c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -35,7 +35,7 @@
- service: name=docker state=stopped
- name: Upgrade Docker
- action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version }} state=present"
+ package: name=docker{{ '-' + docker_version }} state=present
- service: name=docker state=started
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 57b156b1c..57d4fe4b6 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -42,7 +42,7 @@
when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
- name: Install etcd (for etcdctl)
- action: "{{ ansible_pkg_mgr }} name=etcd state=present"
+ package: name=etcd state=present
when: not openshift.common.is_atomic | bool
- name: Generate etcd backup
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
index 35f391f8c..f88981a0b 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
@@ -30,7 +30,7 @@
## will fail on atomic host. We need to revisit how to do etcd backups there as
## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1)
- name: Upgrade etcd for etcdctl when not atomic
- action: "{{ ansible_pkg_mgr }} name=etcd ensure=latest"
+ package: name=etcd state=latest
when: not openshift.common.is_atomic | bool
- name: Verify cluster is healthy
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index cd1139b29..d7d1fe548 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -1,9 +1,10 @@
+---
# We verified latest rpm available is suitable, so just yum update.
- name: Upgrade packages
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+ package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
- name: Ensure python-yaml present for config upgrade
- action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ package: name=PyYAML state=present
when: not openshift.common.is_atomic | bool
- name: Restart node service
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 4824eeef3..e28da5713 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -139,6 +139,8 @@
- role: nuage_node
when: openshift.common.use_nuage | bool
- role: nickhammond.logrotate
+ - role: openshift_manage_node
+ openshift_master_host: "{{ groups.oo_first_master.0 }}"
tasks:
- name: Create group for deployment type
group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
@@ -152,35 +154,3 @@
tasks:
- file: name={{ mktemp.stdout }} state=absent
changed_when: False
-
-- name: Set node schedulability
- hosts: oo_first_master
- vars:
- openshift_nodes: "{{ groups.oo_nodes_to_config | default([]) }}"
- pre_tasks:
- # Necessary because when you're on a node that's also a master the master will be
- # restarted after the node restarts docker and it will take up to 60 seconds for
- # systemd to start the master again
- - name: Wait for master API to become available before proceeding
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
- when: openshift.common.is_containerized | bool
- roles:
- - openshift_manage_node
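The removed "Wait for master API" block is not dropped: it reappears below in roles/openshift_manage_node/tasks/main.yml, delegated to `{{ openshift_master_host }}` (the first master) and guarded with `run_once`, which is what lets the role move from a separate oo_first_master play into the node config play above.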
diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py
deleted file mode 100644
index fcaa3b850..000000000
--- a/playbooks/gce/openshift-cluster/library/gce.py
+++ /dev/null
@@ -1,543 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: gce
-version_added: "1.4"
-short_description: create or terminate GCE instances
-description:
- - Creates or terminates Google Compute Engine (GCE) instances. See
- U(https://cloud.google.com/products/compute-engine) for an overview.
- Full install/configuration instructions for the gce* modules can
- be found in the comments of ansible/test/gce_tests.py.
-options:
- image:
- description:
- - image string to use for the instance
- required: false
- default: "debian-7"
- instance_names:
- description:
- - a comma-separated list of instance names to create or destroy
- required: false
- default: null
- machine_type:
- description:
- - machine type to use for the instance, use 'n1-standard-1' by default
- required: false
- default: "n1-standard-1"
- metadata:
- description:
- - a hash/dictionary of custom data for the instance;
- '{"key":"value", ...}'
- required: false
- default: null
- service_account_email:
- version_added: "1.5.1"
- description:
- - service account email
- required: false
- default: null
- service_account_permissions:
- version_added: "2.0"
- description:
- - service account permissions (see
- U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
- --scopes section for detailed information)
- required: false
- default: null
- choices: [
- "bigquery", "cloud-platform", "compute-ro", "compute-rw",
- "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write",
- "monitoring", "sql", "sql-admin", "storage-full", "storage-ro",
- "storage-rw", "taskqueue", "userinfo-email"
- ]
- pem_file:
- version_added: "1.5.1"
- description:
- - path to the pem file associated with the service account email
- required: false
- default: null
- project_id:
- version_added: "1.5.1"
- description:
- - your GCE project ID
- required: false
- default: null
- name:
- description:
- - identifier when working with a single instance
- required: false
- network:
- description:
- - name of the network, 'default' will be used if not specified
- required: false
- default: "default"
- persistent_boot_disk:
- description:
- - if set, create the instance with a persistent boot disk
- required: false
- default: "false"
- disks:
- description:
- - a list of persistent disks to attach to the instance; a string value
- gives the name of the disk; alternatively, a dictionary value can
- define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
- will be the boot disk (which must be READ_WRITE).
- required: false
- default: null
- version_added: "1.7"
- state:
- description:
- - desired state of the resource
- required: false
- default: "present"
- choices: ["active", "present", "absent", "deleted"]
- tags:
- description:
- - a comma-separated list of tags to associate with the instance
- required: false
- default: null
- zone:
- description:
- - the GCE zone to use
- required: true
- default: "us-central1-a"
- ip_forward:
- version_added: "1.9"
- description:
- - set to true if the instance can forward ip packets (useful for
- gateways)
- required: false
- default: "false"
- external_ip:
- version_added: "1.9"
- description:
- - type of external ip, ephemeral by default
- required: false
- default: "ephemeral"
- disk_auto_delete:
- version_added: "1.9"
- description:
- - if set boot disk will be removed after instance destruction
- required: false
- default: "true"
-
-requirements:
- - "python >= 2.6"
- - "apache-libcloud >= 0.13.3"
-notes:
- - Either I(name) or I(instance_names) is required.
-author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
-'''
-
-EXAMPLES = '''
-# Basic provisioning example. Create a single Debian 7 instance in the
-# us-central1-a Zone of n1-standard-1 machine type.
-- local_action:
- module: gce
- name: test-instance
- zone: us-central1-a
- machine_type: n1-standard-1
- image: debian-7
-
-# Example using defaults and with metadata to create a single 'foo' instance
-- local_action:
- module: gce
- name: foo
- metadata: '{"db":"postgres", "group":"qa", "id":500}'
-
-
-# Launch instances from a control node, runs some tasks on the new instances,
-# and then terminate them
-- name: Create a sandbox instance
- hosts: localhost
- vars:
- names: foo,bar
- machine_type: n1-standard-1
- image: debian-6
- zone: us-central1-a
- service_account_email: unique-email@developer.gserviceaccount.com
- pem_file: /path/to/pem_file
- project_id: project-id
- tasks:
- - name: Launch instances
- local_action: gce instance_names={{names}} machine_type={{machine_type}}
- image={{image}} zone={{zone}}
- service_account_email={{ service_account_email }}
- pem_file={{ pem_file }} project_id={{ project_id }}
- register: gce
- - name: Wait for SSH to come up
- local_action: wait_for host={{item.public_ip}} port=22 delay=10
- timeout=60 state=started
- with_items: {{gce.instance_data}}
-
-- name: Configure instance(s)
- hosts: launched
- sudo: True
- roles:
- - my_awesome_role
- - my_awesome_tasks
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- tasks:
- - name: Terminate instances that were previously launched
- local_action:
- module: gce
- state: 'absent'
- instance_names: {{gce.instance_names}}
-
-'''
-
-try:
- import libcloud
- from libcloud.compute.types import Provider
- from libcloud.compute.providers import get_driver
- from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
- ResourceExistsError, ResourceInUseError, ResourceNotFoundError
- _ = Provider.GCE
- HAS_LIBCLOUD = True
-except ImportError:
- HAS_LIBCLOUD = False
-
-try:
- from ast import literal_eval
- HAS_PYTHON26 = True
-except ImportError:
- HAS_PYTHON26 = False
-
-
-def get_instance_info(inst):
- """Retrieves instance information from an instance object and returns it
- as a dictionary.
-
- """
- metadata = {}
- if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
- for md in inst.extra['metadata']['items']:
- metadata[md['key']] = md['value']
-
- try:
- netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
- except:
- netname = None
- if 'disks' in inst.extra:
- disk_names = [disk_info['source'].split('/')[-1]
- for disk_info
- in sorted(inst.extra['disks'],
- key=lambda disk_info: disk_info['index'])]
- else:
- disk_names = []
-
- if len(inst.public_ips) == 0:
- public_ip = None
- else:
- public_ip = inst.public_ips[0]
-
- return({
- 'image': inst.image is not None and inst.image.split('/')[-1] or None,
- 'disks': disk_names,
- 'machine_type': inst.size,
- 'metadata': metadata,
- 'name': inst.name,
- 'network': netname,
- 'private_ip': inst.private_ips[0],
- 'public_ip': public_ip,
- 'status': ('status' in inst.extra) and inst.extra['status'] or None,
- 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
- 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
- })
-
-
-def create_instances(module, gce, instance_names):
- """Creates new instances. Attributes other than instance_names are picked
- up from 'module'
-
- module : AnsibleModule object
- gce: authenticated GCE libcloud driver
- instance_names: python list of instance names to create
-
- Returns:
- A list of dictionaries with instance information
- about the instances that were launched.
-
- """
- image = module.params.get('image')
- machine_type = module.params.get('machine_type')
- metadata = module.params.get('metadata')
- network = module.params.get('network')
- persistent_boot_disk = module.params.get('persistent_boot_disk')
- disks = module.params.get('disks')
- state = module.params.get('state')
- tags = module.params.get('tags')
- zone = module.params.get('zone')
- ip_forward = module.params.get('ip_forward')
- external_ip = module.params.get('external_ip')
- disk_auto_delete = module.params.get('disk_auto_delete')
- service_account_permissions = module.params.get('service_account_permissions')
- service_account_email = module.params.get('service_account_email')
-
- if external_ip == "none":
- external_ip = None
-
- new_instances = []
- changed = False
-
- lc_image = gce.ex_get_image(image)
- lc_disks = []
- disk_modes = []
- for i, disk in enumerate(disks or []):
- if isinstance(disk, dict):
- lc_disks.append(gce.ex_get_volume(disk['name']))
- disk_modes.append(disk['mode'])
- else:
- lc_disks.append(gce.ex_get_volume(disk))
- # boot disk is implicitly READ_WRITE
- disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
- lc_network = gce.ex_get_network(network)
- lc_machine_type = gce.ex_get_size(machine_type)
- lc_zone = gce.ex_get_zone(zone)
-
- # Try to convert the user's metadata value into the format expected
- # by GCE. First try to ensure user has proper quoting of a
- # dictionary-like syntax using 'literal_eval', then convert the python
- # dict into a python list of 'key' / 'value' dicts. Should end up
- # with:
- # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
- if metadata:
- if isinstance(metadata, dict):
- md = metadata
- else:
- try:
- md = literal_eval(str(metadata))
- if not isinstance(md, dict):
- raise ValueError('metadata must be a dict')
- except ValueError as e:
- module.fail_json(msg='bad metadata: %s' % str(e))
- except SyntaxError as e:
- module.fail_json(msg='bad metadata syntax')
-
- if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
- items = []
- for k, v in md.items():
- items.append({"key": k, "value": v})
- metadata = {'items': items}
- else:
- metadata = md
-
- ex_sa_perms = []
- bad_perms = []
- if service_account_permissions:
- for perm in service_account_permissions:
- if perm not in gce.SA_SCOPES_MAP.keys():
- bad_perms.append(perm)
- if len(bad_perms) > 0:
- module.fail_json(msg='bad permissions: %s' % str(bad_perms))
- if service_account_email:
- ex_sa_perms.append({'email': service_account_email})
- else:
- ex_sa_perms.append({'email': "default"})
- ex_sa_perms[0]['scopes'] = service_account_permissions
-
- # These variables all have default values but check just in case
- if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
- module.fail_json(msg='Missing required create instance variable',
- changed=False)
-
- for name in instance_names:
- pd = None
- if lc_disks:
- pd = lc_disks[0]
- elif persistent_boot_disk:
- try:
- pd = gce.create_volume(None, "%s" % name, image=lc_image)
- except ResourceExistsError:
- pd = gce.ex_get_volume("%s" % name, lc_zone)
- inst = None
- try:
- inst = gce.create_node(
- name, lc_machine_type, lc_image, location=lc_zone,
- ex_network=network, ex_tags=tags, ex_metadata=metadata,
- ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
- external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete,
- ex_service_accounts=ex_sa_perms
- )
- changed = True
- except ResourceExistsError:
- inst = gce.ex_get_node(name, lc_zone)
- except GoogleBaseError as e:
- module.fail_json(msg='Unexpected error attempting to create ' +
- 'instance %s, error: %s' % (name, e.value))
-
- for i, lc_disk in enumerate(lc_disks):
- # Check whether the disk is already attached
- if (len(inst.extra['disks']) > i):
- attached_disk = inst.extra['disks'][i]
- if attached_disk['source'] != lc_disk.extra['selfLink']:
- module.fail_json(
- msg=("Disk at index %d does not match: requested=%s found=%s" % (
- i, lc_disk.extra['selfLink'], attached_disk['source'])))
- elif attached_disk['mode'] != disk_modes[i]:
- module.fail_json(
- msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
- i, disk_modes[i], attached_disk['mode'])))
- else:
- continue
- gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
- # Work around libcloud bug: attached volumes don't get added
- # to the instance metadata. get_instance_info() only cares about
- # source and index.
- if len(inst.extra['disks']) != i+1:
- inst.extra['disks'].append(
- {'source': lc_disk.extra['selfLink'], 'index': i})
-
- if inst:
- new_instances.append(inst)
-
- instance_names = []
- instance_json_data = []
- for inst in new_instances:
- d = get_instance_info(inst)
- instance_names.append(d['name'])
- instance_json_data.append(d)
-
- return (changed, instance_json_data, instance_names)
-
-
-def terminate_instances(module, gce, instance_names, zone_name):
- """Terminates a list of instances.
-
- module: Ansible module object
- gce: authenticated GCE connection object
- instance_names: a list of instance names to terminate
- zone_name: the zone where the instances reside prior to termination
-
- Returns a dictionary of instance names that were terminated.
-
- """
- changed = False
- terminated_instance_names = []
- for name in instance_names:
- inst = None
- try:
- inst = gce.ex_get_node(name, zone_name)
- except ResourceNotFoundError:
- pass
- except Exception as e:
- module.fail_json(msg=unexpected_error_msg(e), changed=False)
- if inst:
- gce.destroy_node(inst)
- terminated_instance_names.append(inst.name)
- changed = True
-
- return (changed, terminated_instance_names)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- image=dict(default='debian-7'),
- instance_names=dict(),
- machine_type=dict(default='n1-standard-1'),
- metadata=dict(),
- name=dict(),
- network=dict(default='default'),
- persistent_boot_disk=dict(type='bool', default=False),
- disks=dict(type='list'),
- state=dict(choices=['active', 'present', 'absent', 'deleted'],
- default='present'),
- tags=dict(type='list'),
- zone=dict(default='us-central1-a'),
- service_account_email=dict(),
- service_account_permissions=dict(type='list'),
- pem_file=dict(),
- project_id=dict(),
- ip_forward=dict(type='bool', default=False),
- external_ip=dict(choices=['ephemeral', 'none'],
- default='ephemeral'),
- disk_auto_delete=dict(type='bool', default=True),
- )
- )
-
- if not HAS_PYTHON26:
- module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
- if not HAS_LIBCLOUD:
- module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module')
-
- gce = gce_connect(module)
-
- image = module.params.get('image')
- instance_names = module.params.get('instance_names')
- machine_type = module.params.get('machine_type')
- metadata = module.params.get('metadata')
- name = module.params.get('name')
- network = module.params.get('network')
- persistent_boot_disk = module.params.get('persistent_boot_disk')
- state = module.params.get('state')
- tags = module.params.get('tags')
- zone = module.params.get('zone')
- ip_forward = module.params.get('ip_forward')
- changed = False
-
- inames = []
- if isinstance(instance_names, list):
- inames = instance_names
- elif isinstance(instance_names, str):
- inames = instance_names.split(',')
- if name:
- inames.append(name)
- if not inames:
- module.fail_json(msg='Must specify a "name" or "instance_names"',
- changed=False)
- if not zone:
- module.fail_json(msg='Must specify a "zone"', changed=False)
-
- json_output = {'zone': zone}
- if state in ['absent', 'deleted']:
- json_output['state'] = 'absent'
- (changed, terminated_instance_names) = terminate_instances(
- module, gce, inames, zone)
-
- # based on what user specified, return the same variable, although
- # value could be different if an instance could not be destroyed
- if instance_names:
- json_output['instance_names'] = terminated_instance_names
- elif name:
- json_output['name'] = name
-
- elif state in ['active', 'present']:
- json_output['state'] = 'present'
- (changed, instance_data, instance_name_list) = create_instances(
- module, gce, inames)
- json_output['instance_data'] = instance_data
- if instance_names:
- json_output['instance_names'] = instance_name_list
- elif name:
- json_output['name'] = name
-
- json_output['changed'] = changed
- module.exit_json(**json_output)
-
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.gce import *
-if __name__ == '__main__':
- main()
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 34dcd2496..34ab09533 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -16,18 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}"
+ oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}"
with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 7c8189224..87b30aee4 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -9,10 +9,11 @@
project_id: "{{ lookup('env', 'gce_project_id') }}"
zone: "{{ lookup('env', 'zone') }}"
network: "{{ lookup('env', 'network') }}"
+ subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}"
# unsupported in 1.9.+
#service_account_permissions: "datastore,logging-write"
tags:
- - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
+ - created-by-{{ lookup('env', 'LOGNAME') | regex_replace('[^a-z0-9]+', '') | default(cluster, true) }}
- environment-{{ cluster_env }}
- clusterid-{{ cluster_id }}
- host-type-{{ type }}
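The added `regex_replace` strips every character outside `[a-z0-9]` so the value is valid in a GCE tag (tags only allow lowercase letters, digits, and dashes). A quick sketch with a made-up login name:

  - debug:
      msg: "created-by-{{ 'John.Doe' | regex_replace('[^a-z0-9]+', '') }}"  # prints created-by-ohnoe

The filter order also matters: if the sanitized LOGNAME comes out empty, the trailing `default(cluster, true)` still falls back to the cluster name.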
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index 86d5d0aad..579cd7ac6 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -16,18 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: ""
+ oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}"
with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index eb2c4269a..7e037f2af 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -25,7 +25,7 @@
- name: Create or Update OpenStack Stack
command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
- --timeout 3
+ --timeout {{ openstack_heat_timeout }}
-P cluster_env={{ cluster_env }}
-P cluster_id={{ cluster_id }}
-P subnet_24_prefix={{ openstack_subnet_24_prefix }}
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index de68f5207..6c6f671be 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -17,18 +17,8 @@
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}"
+ oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}"
with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}"
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index 62111dacf..79b336ce7 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -14,6 +14,8 @@ openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
default('0.0.0.0/0', True) }}"
openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') |
default('0.0.0.0/0', True) }}"
+openstack_heat_timeout: "{{ lookup('oo_option', 'heat_timeout') |
+ default('3', True) }}"
openstack_flavor:
etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index 681029332..1975b92e6 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install cockpit-ws
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ package: name={{ item }} state=present
with_items:
- cockpit-ws
- cockpit-shell
diff --git a/roles/dns/tasks/main.yml b/roles/dns/tasks/main.yml
index 57a7e6269..2abe0d9dd 100644
--- a/roles/dns/tasks/main.yml
+++ b/roles/dns/tasks/main.yml
@@ -1,5 +1,6 @@
+---
- name: Install Bind
- action: "{{ ansible_pkg_mgr }} name=bind"
+ package: name=bind state=present
when: not openshift.common.is_containerized | bool
- name: Create docker build dir
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 9b7ef0830..a2b18baa1 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -40,7 +40,7 @@
# Make sure Docker is installed, but does not update a running version.
# Docker upgrades are handled by a separate playbook.
- name: Install Docker
- action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present"
+ package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
when: not openshift.common.is_atomic | bool
- name: Ensure docker.service.d directory exists
diff --git a/roles/etcd/tasks/etcdctl.yml b/roles/etcd/tasks/etcdctl.yml
index 32c176449..bb6fabf64 100644
--- a/roles/etcd/tasks/etcdctl.yml
+++ b/roles/etcd/tasks/etcdctl.yml
@@ -1,5 +1,6 @@
+---
- name: Install etcd for etcdctl
- action: "{{ ansible_pkg_mgr }} name=etcd state=present"
+ package: name=etcd state=present
when: not openshift.common.is_atomic | bool
- name: Configure etcd profile.d alises
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 790eb3c5a..7b61e9b73 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -7,7 +7,7 @@
etcd_ip: "{{ etcd_ip }}"
- name: Install etcd
- action: "{{ ansible_pkg_mgr }} name=etcd state=present"
+ package: name=etcd state=present
when: not etcd_is_containerized | bool
- name: Pull etcd container
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml
index 4e68bc962..c4d5efa14 100644
--- a/roles/etcd_ca/tasks/main.yml
+++ b/roles/etcd_ca/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install openssl
- action: "{{ ansible_pkg_mgr }} name=openssl state=present"
+ package: name=openssl state=present
when: not etcd_is_atomic | bool
delegate_to: "{{ etcd_ca_host }}"
run_once: true
diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd_server_certificates/tasks/main.yml
index d66a0a7bf..b0fd117ed 100644
--- a/roles/etcd_server_certificates/tasks/main.yml
+++ b/roles/etcd_server_certificates/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install etcd
- action: "{{ ansible_pkg_mgr }} name=etcd state=present"
+ package: name=etcd state=present
when: not etcd_is_containerized | bool
- name: Check status of etcd certificates
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index bf400cfe8..a51455bae 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: Install flannel
become: yes
- action: "{{ ansible_pkg_mgr }} name=flannel state=present"
+ package: name=flannel state=present
when: not openshift.common.is_atomic | bool
- name: Set flannel etcd options
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml
index 5eff30f6f..67f709c8c 100644
--- a/roles/kube_nfs_volumes/tasks/main.yml
+++ b/roles/kube_nfs_volumes/tasks/main.yml
@@ -4,7 +4,10 @@
when: openshift.common.is_atomic | bool
- name: Install pyparted (RedHat/Fedora)
- action: "{{ ansible_pkg_mgr }} name=pyparted,python-httplib2 state=present"
+ package: name={{ item }} state=present
+ with_items:
+ - pyparted
+ - python-httplib2
when: not openshift.common.is_containerized | bool
- name: partition the drives
diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml
index 474ec69e5..ebd3d349a 100644
--- a/roles/kube_nfs_volumes/tasks/nfs.yml
+++ b/roles/kube_nfs_volumes/tasks/nfs.yml
@@ -1,6 +1,6 @@
---
- name: Install NFS server
- action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present"
+ package: name=nfs-utils state=present
when: not openshift.common.is_containerized | bool
- name: Start rpcbind on Fedora/Red Hat
diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml
index 1979c851f..657cb10ec 100644
--- a/roles/nickhammond.logrotate/tasks/main.yml
+++ b/roles/nickhammond.logrotate/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: nickhammond.logrotate | Install logrotate
- action: "{{ ansible_pkg_mgr }} name=logrotate state=present"
+ package: name=logrotate state=present
when: not openshift.common.is_atomic | bool
- name: nickhammond.logrotate | Setup logrotate.d scripts
diff --git a/roles/nuage_ca/tasks/main.yaml b/roles/nuage_ca/tasks/main.yaml
index 9cfa40b8a..8d73e6840 100644
--- a/roles/nuage_ca/tasks/main.yaml
+++ b/roles/nuage_ca/tasks/main.yaml
@@ -1,6 +1,6 @@
---
- name: Install openssl
- action: "{{ ansible_pkg_mgr }} name=openssl state=present"
+ package: name=openssl state=present
when: not openshift.common.is_atomic | bool
- name: Create CA directory
@@ -41,6 +41,6 @@
delegate_to: "{{ nuage_ca_master }}"
- name: Copy SSL config file
- copy: src=openssl.cnf dest="{{ nuage_ca_dir }}/openssl.cnf"
+ copy: src=openssl.cnf dest="{{ nuage_ca_dir }}/openssl.cnf"
run_once: true
delegate_to: "{{ nuage_ca_master }}"
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
index 5f2b97ae2..fd06d9025 100644
--- a/roles/nuage_node/handlers/main.yaml
+++ b/roles/nuage_node/handlers/main.yaml
@@ -6,3 +6,7 @@
- name: restart node
become: yes
service: name={{ openshift.common.service_type }}-node state=restarted
+
+- name: save iptable rules
+ become: yes
+ command: iptables-save
diff --git a/roles/nuage_node/meta/main.yml b/roles/nuage_node/meta/main.yml
index 9f84eacf6..a6fbcba61 100644
--- a/roles/nuage_node/meta/main.yml
+++ b/roles/nuage_node/meta/main.yml
@@ -13,8 +13,11 @@ galaxy_info:
- cloud
- system
dependencies:
-- role: nuage_ca
-- role: os_firewall
- os_firewall_allow:
- - service: vxlan
- port: 4789/udp
+ - role: nuage_common
+ - role: nuage_ca
+ - role: os_firewall
+ os_firewall_allow:
+ - service: vxlan
+ port: 4789/udp
+ - service: nuage-monitor
+ port: "{{ nuage_mon_rest_server_port }}/tcp"
diff --git a/roles/nuage_node/tasks/iptables.yml b/roles/nuage_node/tasks/iptables.yml
new file mode 100644
index 000000000..52935f075
--- /dev/null
+++ b/roles/nuage_node/tasks/iptables.yml
@@ -0,0 +1,17 @@
+---
+- name: IPtables | Get iptables rules
+ command: iptables -L --wait
+ register: iptablesrules
+ always_run: yes
+
+- name: Allow traffic from overlay to underlay
+ command: /sbin/iptables --wait -I FORWARD 1 -s {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -j ACCEPT -m comment --comment "nuage-overlay-underlay"
+ when: "'nuage-overlay-underlay' not in iptablesrules.stdout"
+ notify:
+ - save iptable rules
+
+- name: Allow traffic from underlay to overlay
+ command: /sbin/iptables --wait -I FORWARD 1 -d {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -j ACCEPT -m comment --comment "nuage-underlay-overlay"
+ when: "'nuage-underlay-overlay' not in iptablesrules.stdout"
+ notify:
+ - save iptable rules
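The `-m comment` tags are what make these `command` tasks idempotent: the first task lists the current rules, each insert only runs when its comment string is missing from that output, and both notify the `save iptable rules` handler added above. Assuming a cluster CIDR of `10.1.0.0/16`, the first rule would show up in `iptables-save` output roughly as:

  -A FORWARD -s 10.1.0.0/16 -m comment --comment "nuage-overlay-underlay" -j ACCEPT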
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index 1146573d3..2ec4be2c2 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -37,3 +37,5 @@
notify:
- restart vrs
- restart node
+
+- include: iptables.yml
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index b6d403067..e2a12e5ff 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -8,7 +8,9 @@
when: openshift_master_ca_certificate is defined and ('certfile' not in openshift_master_ca_certificate or 'keyfile' not in openshift_master_ca_certificate)
- name: Install the base package for admin tooling
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
+ package:
+ name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: present
when: not openshift.common.is_containerized | bool
register: install_result
delegate_to: "{{ openshift_ca_host }}"
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 11c73b25c..07a00189c 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install clients
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-clients state=present"
+ package: name={{ openshift.common.service_type }}-clients state=present
when: not openshift.common.is_containerized | bool
- name: Pull CLI Image
@@ -20,5 +20,5 @@
openshift_facts:
- name: Install bash completion for oc tools
- action: "{{ ansible_pkg_mgr }} name=bash-completion state=present"
+ package: name=bash-completion state=present
when: not openshift.common.is_containerized | bool
diff --git a/roles/openshift_clock/tasks/main.yaml b/roles/openshift_clock/tasks/main.yaml
index 5a8403f68..3911201ea 100644
--- a/roles/openshift_clock/tasks/main.yaml
+++ b/roles/openshift_clock/tasks/main.yaml
@@ -6,7 +6,7 @@
enabled: "{{ openshift_clock_enabled | default(None) }}"
- name: Install ntp package
- action: "{{ ansible_pkg_mgr }} name=ntp state=present"
+ package: name=ntp state=present
when: openshift.clock.enabled | bool and not openshift.clock.chrony_installed | bool
- name: Start and enable ntpd/chronyd
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 3f8ea5dce..c9a44b3f5 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -29,7 +29,9 @@
use_dnsmasq: "{{ openshift_use_dnsmasq | default(None) }}"
- name: Install the base package for versioning
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
+ package:
+ name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: present
when: not openshift.common.is_containerized | bool
- name: Set version facts
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index cdd813e6a..00603f4fa 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Ensure growpart is installed
- action: "{{ ansible_pkg_mgr }} name=cloud-utils-growpart state=present"
+ package: name=cloud-utils-growpart state=present
when: not openshift.common.is_containerized | bool
- name: Determine if growpart is installed
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index b0785a9e4..70cf49dd4 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -10,12 +10,11 @@
- set_fact:
l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
-- name: Ensure PyYaml is installed
- action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
- when: not l_is_atomic | bool
-
-- name: Ensure yum-utils is installed
- action: "{{ ansible_pkg_mgr }} name=yum-utils state=present"
+- name: Ensure PyYaml and yum-utils are installed
+ package: name={{ item }} state=present
+ with_items:
+ - PyYAML
+ - yum-utils
when: not l_is_atomic | bool
- name: Gather Cluster facts and set is_containerized if needed
@@ -38,6 +37,8 @@
no_proxy: "{{ openshift_no_proxy | default(None) }}"
generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+ sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
+ use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
- name: Set repoquery command
set_fact:
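
The two new entries feed `openshift.common.sdn_network_plugin_name` and `openshift.common.use_openshift_sdn` from inventory variables. A hedged sketch of how they might be set in group_vars (values are illustrative, not defaults):

    # Illustrative group_vars entries; 'cni' is an example value only.
    openshift_use_openshift_sdn: false
    os_sdn_network_plugin_name: cni
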
diff --git a/roles/openshift_loadbalancer/meta/main.yml b/roles/openshift_loadbalancer/meta/main.yml
index e1d78cfd0..0b29df2a0 100644
--- a/roles/openshift_loadbalancer/meta/main.yml
+++ b/roles/openshift_loadbalancer/meta/main.yml
@@ -17,4 +17,9 @@ dependencies:
port: "9000/tcp"
- service: haproxy balance
port: "{{ openshift_master_api_port | default(8443) }}/tcp"
+- role: os_firewall
+ os_firewall_allow:
+ - service: nuage mon
+ port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp"
+ when: openshift_use_nuage | default(false) | bool
- role: openshift_repos
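
Role dependencies in meta/main.yml accept a `when` condition, so the firewall rule is pulled in only on Nuage deployments. A generic sketch of the construct, with placeholder names:

    # Sketch of a conditional role dependency; service name and flag are placeholders.
    dependencies:
    - role: os_firewall
      os_firewall_allow:
      - service: example service
        port: "9443/tcp"
      when: example_feature_enabled | default(false) | bool
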
diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
index 863738143..1d2804279 100644
--- a/roles/openshift_loadbalancer/tasks/main.yml
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -3,7 +3,7 @@
when: openshift.common.is_containerized | bool
- name: Install haproxy
- action: "{{ ansible_pkg_mgr }} name=haproxy state=present"
+ package: name=haproxy state=present
- name: Configure systemd service directory for haproxy
file:
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index 28e4e46e9..88cdd2d89 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -3,18 +3,51 @@
command: mktemp -d /tmp/openshift-ansible-XXXXXX
register: mktemp
changed_when: False
+ delegate_to: "{{ openshift_master_host }}"
+ run_once: true
- set_fact:
openshift_manage_node_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ delegate_to: "{{ openshift_master_host }}"
+ run_once: true
- name: Copy the admin client config(s)
command: >
cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ openshift_manage_node_kubeconfig }}
changed_when: False
+ delegate_to: "{{ openshift_master_host }}"
+ run_once: true
+
+# Necessary because, on a host that is both a node and a master, the master
+# is restarted after the node restarts docker, and it can take up to 60
+# seconds for systemd to start the master again.
+- name: Wait for master API to become available before proceeding
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift_node_master_api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+ when: openshift.common.is_containerized | bool
+ delegate_to: "{{ openshift_master_host }}"
+ run_once: true
- name: Wait for Node Registration
command: >
- {{ openshift.common.client_binary }} get node {{ hostvars[item].openshift.node.nodename }}
+ {{ openshift.common.client_binary }} get node {{ openshift.node.nodename }}
--config={{ openshift_manage_node_kubeconfig }}
-n default
register: omd_get_node
@@ -22,26 +55,29 @@
retries: 50
delay: 5
changed_when: false
- with_items: "{{ openshift_nodes }}"
+ when: "'nodename' in openshift.node"
+ delegate_to: "{{ openshift_master_host }}"
- name: Set node schedulability
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ hostvars[item].openshift.node.nodename }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }}
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable={{ 'true' if openshift.node.schedulable | bool else 'false' }}
--config={{ openshift_manage_node_kubeconfig }}
-n default
- with_items: "{{ openshift_nodes }}"
- when: hostvars[item].openshift.node.nodename is defined
+ when: "'nodename' in openshift.node"
+ delegate_to: "{{ openshift_master_host }}"
- name: Label nodes
command: >
- {{ openshift.common.client_binary }} label --overwrite node {{ hostvars[item].openshift.node.nodename }} {{ hostvars[item].openshift.node.labels | oo_combine_dict }}
+ {{ openshift.common.client_binary }} label --overwrite node {{ openshift.node.nodename }} {{ openshift.node.labels | oo_combine_dict }}
--config={{ openshift_manage_node_kubeconfig }}
-n default
- with_items: "{{ openshift_nodes }}"
- when: hostvars[item].openshift.node.nodename is defined and 'labels' in hostvars[item].openshift.node and hostvars[item].openshift.node.labels != {}
+ when: "'nodename' in openshift.node and 'labels' in openshift.node and openshift.node.labels != {}"
+ delegate_to: "{{ openshift_master_host }}"
- name: Delete temp directory
file:
name: "{{ mktemp.stdout }}"
state: absent
changed_when: False
+ delegate_to: "{{ openshift_master_host }}"
+ run_once: true
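
Every task in this file now carries `delegate_to` (plus `run_once` where the work is cluster-wide rather than per-node), so the oc commands execute on the master instead of on each node. A minimal sketch of the pattern, with a placeholder command:

    # Sketch: runs once, on the delegated master, no matter how many nodes
    # are in the play; openshift_master_host must be set by the calling play.
    - name: Run a cluster-wide command from a single master
      command: echo cluster-wide  # placeholder
      delegate_to: "{{ openshift_master_host }}"
      run_once: true
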
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
index bdaf64b3f..a7214482f 100644
--- a/roles/openshift_manageiq/tasks/main.yaml
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -50,6 +50,16 @@
failed_when: "'already exists' not in osmiq_create_cluster_role.stderr and osmiq_create_cluster_role.rc != 0"
changed_when: osmiq_create_cluster_role.rc == 0
+- name: Create Hawkular Metrics Admin Cluster Role
+ shell: >
+ echo {{ manageiq_metrics_admin_clusterrole | to_json | quote }} |
+ {{ openshift.common.client_binary }}
+ --config={{manage_iq_tmp_conf}}
+ create -f -
+ register: oshawkular_create_cluster_role
+ failed_when: "'already exists' not in oshawkular_create_cluster_role.stderr and oshawkular_create_cluster_role.rc != 0"
+ changed_when: oshawkular_create_cluster_role.rc == 0
+
- name: Configure role/user permissions
command: >
{{ openshift.common.client_binary }} adm {{item}}
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 6a0c5b41b..37d4679ef 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -9,6 +9,20 @@ manageiq_cluster_role:
verbs:
- '*'
+manageiq_metrics_admin_clusterrole:
+ apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: hawkular-metrics-admin
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - hawkular-metrics
+ - hawkular-alerts
+ verbs:
+ - '*'
+
manageiq_service_account:
apiVersion: v1
kind: ServiceAccount
@@ -31,6 +45,7 @@ manage_iq_tasks:
- policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
- policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
- policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
+ - policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
manage_iq_openshift_3_2_tasks:
- policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 1d6758c4a..79c62e985 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -24,7 +24,9 @@
when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and openshift.common.is_containerized | bool
- name: Install Master package
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
+ package:
+ name: "{{ openshift.common.service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: present
when: not openshift.common.is_containerized | bool
- name: Pull master image
@@ -77,7 +79,7 @@
- restart master controllers
- name: Install httpd-tools if needed
- action: "{{ ansible_pkg_mgr }} name=httpd-tools state=present"
+ package: name=httpd-tools state=present
when: (item.kind == 'HTPasswdPasswordIdentityProvider') and
not openshift.common.is_atomic | bool
with_items: "{{ openshift.master.identity_providers }}"
@@ -292,7 +294,7 @@
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
- name: Install cluster packages
- action: "{{ ansible_pkg_mgr }} name=pcs state=present"
+ package: name=pcs state=present
when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
and not openshift.common.is_containerized | bool
register: install_result
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 56110c28f..11e010716 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -127,16 +127,22 @@
- name: Preserve Master Proxy Config options
command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master
- register: master_proxy
+ register: master_proxy_result
failed_when: false
changed_when: false
+- set_fact:
+ master_proxy: "{{ master_proxy_result.stdout_lines | default([]) }}"
+
- name: Preserve Master AWS options
command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master
- register: master_aws
+ register: master_aws_result
failed_when: false
changed_when: false
+- set_fact:
+ master_aws: "{{ master_aws_result.stdout_lines | default([]) }}"
+
- name: Create the master service env file
template:
src: "atomic-openshift-master.j2"
@@ -144,17 +150,3 @@
backup: true
notify:
- restart master
-
-- name: Restore Master Proxy Config Options
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
- line: "{{ item }}"
- with_items: "{{ master_proxy.stdout_lines | default([]) }}"
- when: master_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common
-
-- name: Restore Master AWS Options
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
- line: "{{ item }}"
- with_items: "{{ master_aws.stdout_lines | default([]) }}"
- when: master_aws.rc == 0 and not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 26f0240ec..7aea89578 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -8,6 +8,11 @@ IMAGE_VERSION={{ openshift_image_tag }}
AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key }}
AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }}
{% endif %}
+{% if not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) %}
+{% for item in master_aws %}
+{{ item }}
+{% endfor %}
+{% endif %}
{% if 'api_env_vars' in openshift.master or 'controllers_env_vars' in openshift.master -%}
{% for key, value in openshift.master.api_env_vars.items() | default([]) | union(openshift.master.controllers_env_vars.items() | default([])) -%}
@@ -26,3 +31,8 @@ HTTPS_PROXY={{ openshift.common.https_proxy | default('')}}
{% if 'no_proxy' in openshift.common %}
NO_PROXY={{ openshift.common.no_proxy | default('') | join(',') }},{{ openshift.common.portal_net }},{{ openshift.master.sdn_cluster_network_cidr }}
{% endif %}
+{% if not ('http_proxy' in openshift.common or 'https_proxy' in openshift.common or 'no_proxy' in openshift.common) %}
+{% for item in master_proxy %}
+{{ item }}
+{% endfor %}
+{% endif %}
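
Together, the last two files replace the old restore-by-lineinfile approach: the grep results are captured as list facts (`master_proxy`, `master_aws`) and the template re-emits them, so rendering the env file no longer clobbers preserved settings. A reduced sketch of the capture side, with an illustrative path:

    # Sketch: capture existing lines as a fact for the template to re-emit.
    - command: grep PROXY /etc/sysconfig/example-master  # path is illustrative
      register: preserved_result
      failed_when: false
      changed_when: false
    - set_fact:
        preserved_lines: "{{ preserved_result.stdout_lines | default([]) }}"
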
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 4d45e8591..a52ae578c 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -131,6 +131,7 @@ kubernetesMasterConfig:
proxyClientInfo:
certFile: master.proxy-client.crt
keyFile: master.proxy-client.key
+ schedulerArguments: {{ openshift_master_scheduler_args | default(None) | to_padded_yaml( level=3 ) }}
schedulerConfigFile: {{ openshift_master_scheduler_conf }}
servicesNodePortRange: ""
servicesSubnet: {{ openshift.common.portal_net }}
@@ -158,7 +159,7 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage %}
+{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage or openshift.common.sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index e0c0fc644..62ac1aef5 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -80,3 +80,4 @@
controllers_env_vars: "{{ openshift_master_controllers_env_vars | default(None) }}"
audit_config: "{{ openshift_master_audit_config | default(None) }}"
metrics_public_url: "{% if openshift_hosted_metrics_deploy | default(false) %}https://{{ metrics_hostname }}/hawkular/metrics{% endif %}"
+ scheduler_args: "{{ openshift_master_scheduler_args | default(None) }}"
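
The new `scheduler_args` fact is what `schedulerArguments` in master.yaml.v1.j2 consumes via `to_padded_yaml(level=3)`. An illustrative inventory value (the argument name is an example, not a recommendation):

    # Illustrative only; scheduler arguments map names to lists of string values.
    openshift_master_scheduler_args:
      kube-api-burst:
      - "800"
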
diff --git a/roles/openshift_metrics/tasks/install.yml b/roles/openshift_metrics/tasks/install.yml
index 4976c7153..98e21375a 100644
--- a/roles/openshift_metrics/tasks/install.yml
+++ b/roles/openshift_metrics/tasks/install.yml
@@ -37,6 +37,24 @@
system:serviceaccount:openshift-infra:metrics-deployer
when: "'system:serviceaccount:openshift-infra:metrics-deployer' not in edit_rolebindings.stdout"
+- name: Test hawkular view permissions
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ get rolebindings -o jsonpath='{.items[?(@.metadata.name == "view")].userNames}'
+ register: view_rolebindings
+ changed_when: false
+
+- name: Add view permissions to hawkular SA
+ command: >
+ {{ openshift.common.client_binary }} adm
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ policy add-role-to-user view
+ system:serviceaccount:openshift-infra:hawkular
+ when: "'system:serviceaccount:openshift-infra:hawkular' not in view_rolebindings"
+
- name: Test cluster-reader permissions
command: >
{{ openshift.common.client_binary }}
@@ -71,7 +89,14 @@
set_fact:
deployer_cmd: "{{ openshift.common.client_binary }} process -f \
{{ hosted_base }}/metrics-deployer.yaml -v \
- HAWKULAR_METRICS_HOSTNAME={{ metrics_hostname }},USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }},DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }},METRIC_DURATION={{ openshift.hosted.metrics.duration }},METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }}{{ image_prefix }}{{ image_version }},MODE={{ deployment_mode }} \
+ HAWKULAR_METRICS_HOSTNAME={{ metrics_hostname }} \
+ -v USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }} \
+ -v DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }} \
+ -v METRIC_DURATION={{ openshift.hosted.metrics.duration }} \
+ -v METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }} \
+ {{ image_prefix }} \
+ {{ image_version }} \
+ -v MODE={{ deployment_mode }} \
| {{ openshift.common.client_binary }} --namespace openshift-infra \
--config={{ openshift_metrics_kubeconfig }} \
create -o name -f -"
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 88432a9f8..26af279b1 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -36,10 +36,8 @@
metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}"
metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}"
metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
- cassandra_nodes: "{{ ',CASSANDRA_NODES=' ~ openshift.hosted.metrics.cassandra_nodes if 'cassandra' in openshift.hosted.metrics else '' }}"
- cassandra_pv_size: "{{ ',CASSANDRA_PV_SIZE=' ~ openshift.hosted.metrics.storage_volume_size if openshift.hosted.metrics.storage_volume_size | default(none) is not none else '' }}"
- image_prefix: "{{ ',IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer_prefix if 'deployer_prefix' in openshift.hosted.metrics else '' }}"
- image_version: "{{ ',IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer_version if 'deployer_version' in openshift.hosted.metrics else '' }}"
+ image_prefix: "{{ '-v IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer.prefix if 'prefix' in openshift.hosted.metrics.deployer else '' }}"
+ image_version: "{{ '-v IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer.version if 'version' in openshift.hosted.metrics.deployer else '' }}"
- name: Check for existing metrics pods
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 474df497e..612cc0e20 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -35,15 +35,25 @@
# We have to add tuned-profiles in the same transaction; otherwise we run into
# depsolving problems because the rpms don't pin the version properly. This was
# fixed in 3.1 packaging.
- name: Install Node package
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
+ package:
+ name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: present
when: not openshift.common.is_containerized | bool
+- name: Check for tuned package
+ command: rpm -q tuned
+ register: tuned_installed
+ changed_when: false
+ failed_when: false
+
- name: Set atomic-guest tuned profile
command: "tuned-adm profile atomic-guest"
- when: openshift.common.is_atomic | bool
+ when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool
- name: Install sdn-ovs package
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
+ package:
+ name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: present
when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool
- name: Pull node image
@@ -77,6 +87,11 @@
- set_fact:
ovs_service_status_changed: "{{ ovs_start_result | changed }}"
+- file:
+ dest: "{{ (openshift_node_kubelet_args|default({'config':None})).config}}"
+ state: directory
+ when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args
+
# TODO: add the validate parameter when there is a validation command to run
- name: Create the Node config
template:
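
The new `file` task ensures the directory named by the kubelet `config` argument exists before the node config is laid down. A hedged sketch of an inventory value that would exercise it (the path is hypothetical):

    # Example only; the path is a placeholder, not a project default.
    openshift_node_kubelet_args:
      config: /etc/origin/node/pods
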
diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml
index eed3c99a3..037efe81a 100644
--- a/roles/openshift_node/tasks/storage_plugins/ceph.yml
+++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml
@@ -1,4 +1,4 @@
---
- name: Install Ceph storage plugin dependencies
- action: "{{ ansible_pkg_mgr }} name=ceph-common state=present"
- when: not openshift.common.is_atomic | bool
\ No newline at end of file
+ package: name=ceph-common state=present
+ when: not openshift.common.is_atomic | bool
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index 4fd9cd10b..7d8c42ee2 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -1,6 +1,6 @@
---
- name: Install GlusterFS storage plugin dependencies
- action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present"
+ package: name=glusterfs-fuse state=present
when: not openshift.common.is_atomic | bool
- name: Check for existence of virt_use_fusefs seboolean
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
index d6684b34a..1c5478c55 100644
--- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml
+++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
@@ -1,4 +1,4 @@
---
- name: Install iSCSI storage plugin dependencies
- action: "{{ ansible_pkg_mgr }} name=iscsi-initiator-utils state=present"
+ package: name=iscsi-initiator-utils state=present
when: not openshift.common.is_atomic | bool
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index 5f99f129c..d40ae66cb 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -1,6 +1,6 @@
---
- name: Install NFS storage plugin dependencies
- action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present"
+ package: name=nfs-utils state=present
when: not openshift.common.is_atomic | bool
- name: Check for existence of seboolean
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 9bcaf4d84..55ae4bf54 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -27,7 +27,7 @@ networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool %}
+{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool or openshift.common.sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
index ced0fa663..c3d5efb9e 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -36,6 +36,7 @@ if [[ $2 =~ ^(up|dhcp4-change)$ ]]; then
UPSTREAM_DNS_TMP=`mktemp`
UPSTREAM_DNS_TMP_SORTED=`mktemp`
CURRENT_UPSTREAM_DNS_SORTED=`mktemp`
+ NEW_RESOLV_CONF=`mktemp`
######################################################################
# couldn't find an existing method to determine if the interface owns the
@@ -85,13 +86,17 @@ EOF
systemctl restart dnsmasq
fi
- sed -i '0,/^nameserver/ s/^nameserver.*$/nameserver '"${def_route_ip}"'/g' /etc/resolv.conf
-
- if ! grep -q '99-origin-dns.sh' /etc/resolv.conf; then
- echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> /etc/resolv.conf
+ # Make it our only nameserver, but only if dnsmasq is running properly
+ if systemctl -q is-active dnsmasq.service; then
+ sed -e '/^nameserver.*$/d' /etc/resolv.conf > ${NEW_RESOLV_CONF}
+ echo "nameserver ${def_route_ip}" >> ${NEW_RESOLV_CONF}
+ if ! grep -q '99-origin-dns.sh' ${NEW_RESOLV_CONF}; then
+ echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> ${NEW_RESOLV_CONF}
+ fi
+ cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf
fi
fi
# Clean up after yourself
- rm -f $UPSTREAM_DNS_TMP $UPSTREAM_DNS_TMP_SORTED $CURRENT_UPSTREAM_DNS_SORTED
+ rm -f $UPSTREAM_DNS_TMP $UPSTREAM_DNS_TMP_SORTED $CURRENT_UPSTREAM_DNS_SORTED $NEW_RESOLV_CONF
fi
diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml
index 396c27295..0167b02b1 100644
--- a/roles/openshift_node_dnsmasq/tasks/main.yml
+++ b/roles/openshift_node_dnsmasq/tasks/main.yml
@@ -4,13 +4,14 @@
systemctl show NetworkManager
register: nm_show
changed_when: false
+ ignore_errors: True
- name: Set fact using_network_manager
set_fact:
network_manager_active: "{{ True if 'ActiveState=active' in nm_show.stdout else False }}"
- name: Install dnsmasq
- action: "{{ ansible_pkg_mgr }} name=dnsmasq state=installed"
+ package: name=dnsmasq state=installed
when: not openshift.common.is_atomic | bool
- name: Install dnsmasq configuration
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 9be168611..d5ed9c09d 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -12,7 +12,7 @@
when: not openshift.common.is_containerized | bool
- name: Ensure libselinux-python is installed
- action: "{{ ansible_pkg_mgr }} name=libselinux-python state=present"
+ package: name=libselinux-python state=present
when: not openshift.common.is_containerized | bool
- name: Create any additional repos that are defined
@@ -37,6 +37,7 @@
when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
and openshift_deployment_type == 'origin'
and not openshift.common.is_containerized | bool
+ and openshift_enable_origin_repo | default(true)
- name: Configure origin yum repositories RHEL/CentOS
copy:
@@ -46,3 +47,4 @@
when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
and openshift_deployment_type == 'origin'
and not openshift.common.is_containerized | bool
+ and openshift_enable_origin_repo | default(true)
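
Both origin-repo tasks now honor an `openshift_enable_origin_repo` toggle that defaults to true. Opting out in group_vars might look like:

    # Skip the managed origin yum repos (default remains enabled):
    openshift_enable_origin_repo: false
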
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index 4716c77ae..ecc52e4af 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install nfs-utils
- action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present"
+ package: name=nfs-utils state=present
- name: Configure NFS
lineinfile:
diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
index fc8de1cb5..e0be9f0b7 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
@@ -1,8 +1,8 @@
---
- name: Install NFS server
- action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present"
+ package: name=nfs-utils state=present
when: not openshift.common.is_containerized | bool
-
+
- name: Start rpcbind
service: name=rpcbind state=started enabled=yes
diff --git a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
index 0f3d84e75..3c4d2f56c 100644
--- a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
+++ b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
@@ -11,7 +11,7 @@
"capacity": {
"storage": "{{ osnl_volume_size }}Gi"
},
- "accessModes": [ "ReadWriteMany" ],
+ "accessModes": [ "ReadWriteOnce", "ReadWriteMany" ],
"persistentVolumeReclaimPolicy": "Recycle",
"nfs": {
"Server": "{{ inventory_hostname }}",
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 5ddca1fc0..a5b733cb7 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -1,6 +1,6 @@
---
- name: Install firewalld packages
- action: "{{ ansible_pkg_mgr }} name=firewalld state=present"
+ package: name=firewalld state=present
when: not openshift.common.is_containerized | bool
register: install_result
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 470d4f4f9..366ede8fd 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -25,7 +25,7 @@
ignore_errors: yes
- name: Install iptables packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ package: name={{ item }} state=present
with_items:
- iptables
- iptables-services
diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml
index ff2b52275..6b5fd0106 100644
--- a/roles/os_update_latest/tasks/main.yml
+++ b/roles/os_update_latest/tasks/main.yml
@@ -1,3 +1,3 @@
---
- name: Update all packages
- action: "{{ ansible_pkg_mgr }} name=* state=latest"
+ package: name=* state=latest