Diffstat (limited to 'roles')
39 files changed, 411 insertions, 82 deletions
diff --git a/roles/ansible/tasks/main.yml b/roles/ansible/tasks/main.yml
index 5d20a3b35..f79273824 100644
--- a/roles/ansible/tasks/main.yml
+++ b/roles/ansible/tasks/main.yml
@@ -5,6 +5,13 @@
   yum:
     pkg: ansible
     state: installed
+  when: ansible_pkg_mgr == "yum"
+
+- name: Install Ansible
+  dnf:
+    pkg: ansible
+    state: installed
+  when: ansible_pkg_mgr == "dnf"
 
 - include: config.yml
   vars:
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index 875cbad21..8410e7c90 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -8,6 +8,18 @@
     - cockpit-shell
     - cockpit-bridge
    - "{{ cockpit_plugins }}"
+  when: ansible_pkg_mgr == "yum"
+
+- name: Install cockpit-ws
+  dnf:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - cockpit-ws
+    - cockpit-shell
+    - cockpit-bridge
+    - "{{ cockpit_plugins }}"
+  when: ansible_pkg_mgr == "dnf"
 
 - name: Enable cockpit-ws
   service:
diff --git a/roles/copr_cli/tasks/main.yml b/roles/copr_cli/tasks/main.yml
index f7ef1c26e..f8496199d 100644
--- a/roles/copr_cli/tasks/main.yml
+++ b/roles/copr_cli/tasks/main.yml
@@ -2,3 +2,9 @@
 - yum:
     name: copr-cli
     state: present
+  when: ansible_pkg_mgr == "yum"
+
+- dnf:
+    name: copr-cli
+    state: present
+  when: ansible_pkg_mgr == "dnf"
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 96949230d..dd4401389 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -2,6 +2,11 @@
 # tasks file for docker
 - name: Install docker
   yum: pkg=docker
+  when: ansible_pkg_mgr == "yum"
+
+- name: Install docker
+  dnf: pkg=docker
+  when: ansible_pkg_mgr == "dnf"
 
 - name: enable and start the docker service
   service: name=docker enabled=yes state=started
diff --git a/roles/etcd/README.md b/roles/etcd/README.md
index 88e4ff874..329a926c0 100644
--- a/roles/etcd/README.md
+++ b/roles/etcd/README.md
@@ -7,7 +7,7 @@ Requirements
 ------------
 
 This role assumes it's being deployed on a RHEL/Fedora based host with package
-named 'etcd' available via yum.
+named 'etcd' available via yum or dnf (conditionally).
 
 Role Variables
 --------------
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index fcbdecd37..efaab5f31 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -9,6 +9,11 @@
 - name: Install etcd
   yum: pkg=etcd-2.* state=present
+  when: ansible_pkg_mgr == "yum"
+
+- name: Install etcd
+  dnf: pkg=etcd* state=present
+  when: ansible_pkg_mgr == "dnf"
 
 - name: Validate permissions on the config dir
   file:
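Aside (not part of the patch): every hunk in this change pairs a yum task with an identical dnf task guarded by ansible_pkg_mgr. On Ansible 2.0 and later the generic package module dispatches to the detected package manager, so a single unguarded task could express the same intent, roughly:

- name: Install Ansible
  package:            # generic front-end, picks yum or dnf automatically (Ansible 2.0+)
    name: ansible
    state: present

The duplicated, guarded tasks presumably keep the playbooks compatible with Ansible 1.x, which predates the package module.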
diff --git a/roles/flannel/README.md b/roles/flannel/README.md
index b8aa830ac..8f271aada 100644
--- a/roles/flannel/README.md
+++ b/roles/flannel/README.md
@@ -7,7 +7,8 @@ Requirements
 ------------
 
 This role assumes it's being deployed on a RHEL/Fedora based host with package
-named 'flannel' available via yum, in version superior to 0.3.
+named 'flannel' available via yum or dnf (conditionally), in version superior
+to 0.3.
 
 Role Variables
 --------------
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index acfb009ec..86e1bc96e 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -2,6 +2,12 @@
 - name: Install flannel
   sudo: true
   yum: pkg=flannel state=present
+  when: ansible_pkg_mgr == "yum"
+
+- name: Install flannel
+  sudo: true
+  dnf: pkg=flannel state=present
+  when: ansible_pkg_mgr == "dnf"
 
 - name: Set flannel etcd url
   sudo: true
diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml
index 55cd94460..43c499b4d 100644
--- a/roles/fluentd_master/tasks/main.yml
+++ b/roles/fluentd_master/tasks/main.yml
@@ -4,6 +4,13 @@
   yum:
     name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
     state: present
+  when: ansible_pkg_mgr == "yum"
+
+- name: download and install td-agent
+  dnf:
+    name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
+    state: present
+  when: ansible_pkg_mgr == "dnf"
 
 - name: Verify fluentd plugin installed
   command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
diff --git a/roles/fluentd_node/tasks/main.yml b/roles/fluentd_node/tasks/main.yml
index f9ef30b83..827a1c075 100644
--- a/roles/fluentd_node/tasks/main.yml
+++ b/roles/fluentd_node/tasks/main.yml
@@ -4,6 +4,13 @@
   yum:
     name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
     state: present
+  when: ansible_pkg_mgr == "yum"
+
+- name: download and install td-agent
+  dnf:
+    name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
+    state: present
+  when: ansible_pkg_mgr == "dnf"
 
 - name: Verify fluentd plugin installed
   command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
index 5638b7313..5d015fadd 100644
--- a/roles/haproxy/tasks/main.yml
+++ b/roles/haproxy/tasks/main.yml
@@ -3,6 +3,13 @@
   yum:
     pkg: haproxy
     state: present
+  when: ansible_pkg_mgr == "yum"
+
+- name: Install haproxy
+  dnf:
+    pkg: haproxy
+    state: present
+  when: ansible_pkg_mgr == "dnf"
 
 - name: Configure haproxy
   template:
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml
index d1dcf261a..3fcb9fd18 100644
--- a/roles/kube_nfs_volumes/tasks/main.yml
+++ b/roles/kube_nfs_volumes/tasks/main.yml
@@ -1,6 +1,11 @@
 ---
 - name: Install pyparted (RedHat/Fedora)
   yum: name=pyparted,python-httplib2 state=present
+  when: ansible_pkg_mgr == "yum"
+
+- name: Install pyparted (RedHat/Fedora)
+  dnf: name=pyparted,python-httplib2 state=present
+  when: ansible_pkg_mgr == "dnf"
 
 - name: partition the drives
   partitionpool: disks={{ disks }} force={{ force }} sizes={{ sizes }}
diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml
index 559fcf17c..a58a7b824 100644
--- a/roles/kube_nfs_volumes/tasks/nfs.yml
+++ b/roles/kube_nfs_volumes/tasks/nfs.yml
@@ -1,6 +1,11 @@
 ---
 - name: Install NFS server on Fedora/Red Hat
   yum: name=nfs-utils state=present
+  when: ansible_pkg_mgr == "yum"
+
+- name: Install NFS server on Fedora/Red Hat
+  dnf: name=nfs-utils state=present
+  when: ansible_pkg_mgr == "dnf"
 
 - name: Start rpcbind on Fedora/Red Hat
   service: name=rpcbind state=started enabled=yes
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index f6919dada..2b99f8bcd 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -2,6 +2,16 @@
 - yum:
     name: "{{ item }}"
     state: present
+  when: ansible_pkg_mgr == "yum"
+  with_items:
+    - openshift-ansible-inventory
+    - openshift-ansible-inventory-aws
+    - openshift-ansible-inventory-gce
+
+- dnf:
+    name: "{{ item }}"
+    state: present
+  when: ansible_pkg_mgr == "dnf"
   with_items:
     - openshift-ansible-inventory
     - openshift-ansible-inventory-aws
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 55065b3de..c0982290d 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -22,6 +22,7 @@
       deployment_type: "{{ openshift_deployment_type }}"
       use_fluentd: "{{ openshift_use_fluentd | default(None) }}"
       use_flannel: "{{ openshift_use_flannel | default(None) }}"
+      use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
 
 # For enterprise versions < 3.1 and origin versions < 1.1 we want to set the
 # hostname by default.
diff --git a/roles/openshift_expand_partition/README.md b/roles/openshift_expand_partition/README.md
index cd394e1ba..aed4ec871 100644
--- a/roles/openshift_expand_partition/README.md
+++ b/roles/openshift_expand_partition/README.md
@@ -8,7 +8,7 @@ partition, and then expanding the file system on the partition.
 
 * A machine with a disk that is not fully utilized
 
-* cloud-utils-growpart rpm (either installed or avialable via yum)
+* cloud-utils-growpart rpm (either installed or avialable via yum or dnf)
 
 * The partition you are expanding needs to be at the end of the partition list
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index 8bc399070..42e7903fd 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -1,6 +1,11 @@
 ---
 - name: Ensure growpart is installed
   yum: pkg=cloud-utils-growpart state=present
+  when: ansible_pkg_mgr == "yum"
+
+- name: Ensure growpart is installed
+  dnf: pkg=cloud-utils-growpart state=present
+  when: ansible_pkg_mgr == "dnf"
 
 - name: Grow the partitions
   command: "growpart {{oep_drive}} {{oep_partition}}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 2fd7be07c..8b3402729 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -528,9 +528,9 @@ def set_aggregate_facts(facts):
         internal_hostnames.add(facts['common']['hostname'])
         internal_hostnames.add(facts['common']['ip'])
 
+        cluster_domain = facts['common']['dns_domain']
+
         if 'master' in facts:
-            # FIXME: not sure why but facts['dns']['domain'] fails
-            cluster_domain = 'cluster.local'
             if 'cluster_hostname' in facts['master']:
                 all_hostnames.add(facts['master']['cluster_hostname'])
             if 'cluster_public_hostname' in facts['master']:
@@ -985,7 +985,7 @@ class OpenShiftFacts(object):
         Raises:
             OpenShiftFactsUnsupportedRoleError:
     """
-    known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns', 'etcd']
+    known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'etcd']
 
     def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False):
         self.changed = False
@@ -1053,9 +1053,10 @@ class OpenShiftFacts(object):
         common = dict(use_openshift_sdn=True, ip=ip_addr, public_ip=ip_addr,
                       deployment_type='origin', hostname=hostname,
-                      public_hostname=hostname)
+                      public_hostname=hostname, use_manageiq=False)
         common['client_binary'] = 'oc' if os.path.isfile('/usr/bin/oc') else 'osc'
         common['admin_binary'] = 'oadm' if os.path.isfile('/usr/bin/oadm') else 'osadm'
+        common['dns_domain'] = 'cluster.local'
         defaults['common'] = common
 
         if 'master' in roles:
@@ -1076,7 +1077,6 @@ class OpenShiftFacts(object):
             node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16',
                         iptables_sync_period='5s', set_node_ip=False)
             defaults['node'] = node
-
         return defaults
 
     def guess_host_provider(self):
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 913f0dc78..2e889d7d5 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -8,6 +8,13 @@
 - name: Ensure PyYaml is installed
   yum: pkg={{ item }} state=installed
+  when: ansible_pkg_mgr == "yum"
+  with_items:
+    - PyYAML
+
+- name: Ensure PyYaml is installed
+  dnf: pkg={{ item }} state=installed
+  when: ansible_pkg_mgr == "dnf"
   with_items:
     - PyYAML
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
new file mode 100644
index 000000000..2d3187e21
--- /dev/null
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -0,0 +1,50 @@
+---
+- name: Copy Configuration to temporary conf
+  command: >
+    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}}
+  changed_when: false
+
+- name: Add Managment Infrastructure project
+  command: >
+    {{ openshift.common.admin_binary }} new-project
+    management-infra
+    --description="Management Infrastructure"
+    --config={{manage_iq_tmp_conf}}
+  register: osmiq_create_mi_project
+  failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0"
+  changed_when: osmiq_create_mi_project.rc == 0
+
+- name: Create Service Account
+  shell: >
+    echo {{ manageiq_service_account | to_json | quote }} |
+    {{ openshift.common.client_binary }} create
+    -n management-infra
+    --config={{manage_iq_tmp_conf}}
+    -f -
+  register: osmiq_create_service_account
+  failed_when: "'already exists' not in osmiq_create_service_account.stderr and osmiq_create_service_account.rc != 0"
+  changed_when: osmiq_create_service_account.rc == 0
+
+- name: Create Cluster Role
+  shell: >
+    echo {{ manageiq_cluster_role | to_json | quote }} |
+    {{ openshift.common.client_binary }} create
+    --config={{manage_iq_tmp_conf}}
+    -f -
+  register: osmiq_create_cluster_role
+  failed_when: "'already exists' not in osmiq_create_cluster_role.stderr and osmiq_create_cluster_role.rc != 0"
+  changed_when: osmiq_create_cluster_role.rc == 0
+
+- name: Configure role/user permissions
+  command: >
+    {{ openshift.common.admin_binary }} {{item}}
+    --config={{manage_iq_tmp_conf}}
+  with_items: "{{manage_iq_tasks}}"
+  register: osmiq_perm_task
+  failed_when: "'already exists' not in osmiq_perm_task.stderr and osmiq_perm_task.rc != 0"
+  changed_when: osmiq_perm_task.rc == 0
+
+- name: Clean temporary configuration file
+  command: >
+    rm -f {{manage_iq_tmp_conf}}
+  changed_when: false
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
new file mode 100644
index 000000000..77e1c304b
--- /dev/null
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -0,0 +1,24 @@
+manageiq_cluster_role:
+  apiVersion: v1
+  kind: ClusterRole
+  metadata:
+    name: management-infra-admin
+  rules:
+  - resources:
+    - pods/proxy
+    verbs:
+    - '*'
+
+manageiq_service_account:
+  apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: management-admin
+
+manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig
+
+manage_iq_tasks:
+  - policy add-role-to-user -n management-infra admin -z management-admin
+  - policy add-role-to-user -n management-infra management-infra-admin -z management-admin
+  - policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
+  - policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
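Aside (not part of the patch): a sketch of how the new openshift_manageiq role could be wired into a playbook, gated on the use_manageiq fact introduced above; the play and group names here are hypothetical.

- name: Configure ManageIQ integration
  hosts: first_master              # hypothetical group name
  roles:
    - role: openshift_manageiq     # runs the tasks above against the first master
      when: openshift.common.use_manageiq | bool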
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 2cf2a53c4..8a78f8f2a 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -79,16 +79,16 @@
 - name: Install Master package
   yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
+  when: ansible_pkg_mgr == "yum"
   register: install_result
 
-# TODO: These values need to be configurable
-- name: Set dns facts
+- name: Install Master package
+  dnf: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
+  when: ansible_pkg_mgr == "dnf"
+  register: install_result
+
+- name: Re-gather package dependent master facts
   openshift_facts:
-    role: dns
-    local_facts:
-      ip: "{{ openshift_master_cluster_vip | default(openshift.common.ip, true) | default(None) }}"
-      domain: cluster.local
-  when: openshift.master.embedded_dns
 
 - name: Create config parent directory if it does not exist
   file:
@@ -118,7 +118,12 @@
 - name: Install httpd-tools if needed
   yum: pkg=httpd-tools state=present
-  when: item.kind == 'HTPasswdPasswordIdentityProvider'
+  when: (ansible_pkg_mgr == "yum") and (item.kind == 'HTPasswdPasswordIdentityProvider')
+  with_items: openshift.master.identity_providers
+
+- name: Install httpd-tools if needed
+  dnf: pkg=httpd-tools state=present
+  when: (ansible_pkg_mgr == "dnf") and (item.kind == 'HTPasswdPasswordIdentityProvider')
   with_items: openshift.master.identity_providers
 
 - name: Ensure htpasswd directory exists
@@ -263,7 +268,12 @@
 - name: Install cluster packages
   yum: pkg=pcs state=present
-  when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
+  when: (ansible_pkg_mgr == "yum") and openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
+  register: install_result
+
+- name: Install cluster packages
+  dnf: pkg=pcs state=present
+  when: (ansible_pkg_mgr == "dnf") and openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
   register: install_result
 
 - name: Start and enable cluster service
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 9f4a17f0a..cadb02fa3 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -83,7 +83,7 @@ kubernetesMasterConfig:
 {% endif %}
   apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }}
   controllerArguments: {{ controller_args if controller_args is defined else 'null' }}
-  masterCount: {{ openshift.master.master_count }}
+  masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
   masterIP: {{ openshift.common.ip }}
   podEvictionTimeout: ""
   proxyClientInfo:
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index 314f068e7..caac13be3 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -1,6 +1,12 @@
 ---
 - name: Install the base package for admin tooling
   yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present
+  when: ansible_pkg_mgr == "yum"
+  register: install_result
+
+- name: Install the base package for admin tooling
+  dnf: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present
+  when: ansible_pkg_mgr == "dnf"
   register: install_result
 
 - name: Reload generated facts
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 42d984a09..29e7eb532 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -1,12 +1,6 @@
 ---
 # TODO: allow for overriding default ports where possible
 - fail:
-    msg: This role requres that osn_cluster_dns_domain is set
-  when: osn_cluster_dns_domain is not defined or not osn_cluster_dns_domain
-- fail:
-    msg: This role requres that osn_cluster_dns_ip is set
-  when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip
-- fail:
     msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
   when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
@@ -20,6 +14,7 @@
       hostname: "{{ openshift_hostname | default(none) }}"
       public_hostname: "{{ openshift_public_hostname | default(none) }}"
       deployment_type: "{{ openshift_deployment_type }}"
+      dns_ip: "{{ openshift_dns_ip | default(openshift_master_cluster_vip | default(None, true), true) }}"
   - role: node
     local_facts:
       annotations: "{{ openshift_node_annotations | default(none) }}"
@@ -40,12 +35,23 @@
 # problems because the rpms don't pin the version properly.
 - name: Install Node package
   yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present
+  when: ansible_pkg_mgr == "yum"
+  register: node_install_result
+
+- name: Install Node package
+  dnf: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present
+  when: ansible_pkg_mgr == "dnf"
   register: node_install_result
 
 - name: Install sdn-ovs package
   yum: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present
   register: sdn_install_result
-  when: openshift.common.use_openshift_sdn
+  when: ansible_pkg_mgr == "yum" and openshift.common.use_openshift_sdn
+
+- name: Install sdn-ovs package
+  dnf: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present
+  register: sdn_install_result
+  when: ansible_pkg_mgr == "dnf" and openshift.common.use_openshift_sdn
 
 # TODO: add the validate parameter when there is a validation command to run
 - name: Create the Node config
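Aside (not part of the patch): the dns_ip local fact added above resolves openshift_dns_ip first and only falls back to openshift_master_cluster_vip; when neither is defined the fact stays unset. An illustrative inventory snippet (variable values are examples only):

# group_vars for the cluster (hypothetical file, example addresses)
openshift_dns_ip: 172.30.0.1                # an explicit DNS IP takes precedence
# openshift_master_cluster_vip: 10.1.2.3    # otherwise the cluster VIP is used, if set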
diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml
index b6936618a..b5146dcac 100644
--- a/roles/openshift_node/tasks/storage_plugins/ceph.yml
+++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml
@@ -3,3 +3,10 @@
   yum:
     pkg: ceph-common
     state: installed
+  when: ansible_pkg_mgr == "yum"
+
+- name: Install Ceph storage plugin dependencies
+  dnf:
+    pkg: ceph-common
+    state: installed
+  when: ansible_pkg_mgr == "dnf"
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index decf4f49d..a357023e1 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -3,6 +3,13 @@
   yum:
     pkg: glusterfs-fuse
     state: installed
+  when: ansible_pkg_mgr == "yum"
+
+- name: Install GlusterFS storage plugin dependencies
+  dnf:
+    pkg: glusterfs-fuse
+    state: installed
+  when: ansible_pkg_mgr == "dnf"
 
 - name: Set sebooleans to allow gluster storage plugin access from containers
   seboolean:
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 41a303dee..23bd81f91 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -1,7 +1,9 @@
 allowDisabledDocker: false
 apiVersion: v1
-dnsDomain: {{ osn_cluster_dns_domain }}
-dnsIP: {{ osn_cluster_dns_ip }}
+dnsDomain: {{ openshift.common.dns_domain }}
+{% if 'dns_ip' in openshift.common %}
+dnsIP: {{ openshift.common.dns_ip }}
+{% endif %}
 dockerConfig:
   execHandlerName: ""
 iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}"
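Aside (not part of the patch): with openshift.common.dns_domain now defaulting to cluster.local (see the openshift_facts change earlier in this diff), the template above renders roughly as follows; the dnsIP line is emitted only when a dns_ip fact was set, and the values shown are illustrative:

# rendered node config (excerpt, illustrative values)
allowDisabledDocker: false
apiVersion: v1
dnsDomain: cluster.local
dnsIP: 172.30.0.1
dockerConfig:
  execHandlerName: ""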
diff --git a/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo b/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo
new file mode 100644
index 000000000..bc0435d82
--- /dev/null
+++ b/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo
@@ -0,0 +1,8 @@
+[maxamillion-fedora-openshift]
+name=Copr repo for fedora-openshift owned by maxamillion
+baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg
+enabled=1
+enabled_metadata=1
\ No newline at end of file
diff --git a/roles/openshift_repos/handlers/main.yml b/roles/openshift_repos/handlers/main.yml
index 26558a455..fed4ab2f0 100644
--- a/roles/openshift_repos/handlers/main.yml
+++ b/roles/openshift_repos/handlers/main.yml
@@ -1,3 +1,6 @@
 ---
-- name: refresh package cache
+- name: refresh yum cache
   command: yum clean all
+
+- name: refresh dnf cache
+  command: dnf clean all
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 66be0cb7b..c55b5df89 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -14,38 +14,64 @@
   yum:
     pkg: libselinux-python
     state: present
+  when: ansible_pkg_mgr == "yum"
+
+- name: Ensure libselinux-python is installed
+  dnf:
+    pkg: libselinux-python
+    state: present
+  when: ansible_pkg_mgr == "dnf"
 
 - name: Create any additional repos that are defined
   template:
     src: yum_repo.j2
     dest: /etc/yum.repos.d/openshift_additional.repo
   when: openshift_additional_repos | length > 0
-  notify: refresh package cache
+  notify: refresh yum cache
 
 - name: Remove the additional repos if no longer defined
   file:
     dest: /etc/yum.repos.d/openshift_additional.repo
     state: absent
   when: openshift_additional_repos | length == 0
-  notify: refresh package cache
+  notify: refresh yum cache
 
-- name: Remove any yum repo files for other deployment types
+- name: Remove any yum repo files for other deployment types RHEL/CentOS
   file:
     path: "/etc/yum.repos.d/{{ item | basename }}"
     state: absent
   with_fileglob:
     - '*/repos/*'
-  when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos"))
-  notify: refresh package cache
+  when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos")) and
+        (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
+  notify: refresh yum cache
+
+- name: Remove any yum repo files for other deployment types Fedora
+  file:
+    path: "/etc/yum.repos.d/{{ item | basename }}"
+    state: absent
+  with_fileglob:
+    - '*/repos/*'
+  when: not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos")) and
+        (ansible_distribution == "Fedora")
+  notify: refresh dnf cache
 
 - name: Configure gpg keys if needed
   copy: src={{ item }} dest=/etc/pki/rpm-gpg/
   with_fileglob:
     - "{{ openshift_deployment_type }}/gpg_keys/*"
-  notify: refresh package cache
+  notify: refresh yum cache
 
-- name: Configure yum repositories
+- name: Configure yum repositories RHEL/CentOS
   copy: src={{ item }} dest=/etc/yum.repos.d/
   with_fileglob:
     - "{{ openshift_deployment_type }}/repos/*"
-  notify: refresh package cache
+  notify: refresh yum cache
+  when: (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
+
+- name: Configure yum repositories Fedora
+  copy: src={{ item }} dest=/etc/yum.repos.d/
+  with_fileglob:
+    - "fedora-{{ openshift_deployment_type }}/repos/*"
+  notify: refresh dnf cache
+  when: (ansible_distribution == "Fedora")
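Aside (not part of the patch): the handler split mirrors the task split above. Since ansible_pkg_mgr already drives every when: guard in this change, a single handler could defer to it instead, for example:

- name: refresh package cache
  command: "{{ ansible_pkg_mgr }} clean all"   # expands to 'yum clean all' or 'dnf clean all'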
diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
index 65ae069df..bf23dfe98 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
@@ -1,6 +1,11 @@
 ---
 - name: Install NFS server
   yum: name=nfs-utils state=present
+  when: ansible_pkg_mgr == "yum"
+
+- name: Install NFS server
+  dnf: name=nfs-utils state=present
+  when: ansible_pkg_mgr == "dnf"
 
 - name: Start rpcbind
   service: name=rpcbind state=started enabled=yes
diff --git a/roles/os_env_extras/tasks/main.yaml b/roles/os_env_extras/tasks/main.yaml
index 96b12ad5b..29599559c 100644
--- a/roles/os_env_extras/tasks/main.yaml
+++ b/roles/os_env_extras/tasks/main.yaml
@@ -15,3 +15,10 @@
   yum:
     pkg: bash-completion
     state: installed
+  when: ansible_pkg_mgr == "yum"
+
+- name: Bash Completion
+  dnf:
+    pkg: bash-completion
+    state: installed
+  when: ansible_pkg_mgr == "dnf"
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 5089eb3e0..cf2a2c733 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -3,6 +3,14 @@
   yum:
     name: firewalld
     state: present
+  when: ansible_pkg_mgr == "yum"
+  register: install_result
+
+- name: Install firewalld packages
+  dnf:
+    name: firewalld
+    state: present
+  when: ansible_pkg_mgr == "dnf"
   register: install_result
 
 - name: Check if iptables-services is installed
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 9af9d8d29..36d51504c 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -6,6 +6,17 @@
   with_items:
     - iptables
     - iptables-services
+  when: ansible_pkg_mgr == "yum"
+  register: install_result
+
+- name: Install iptables packages
+  dnf:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - iptables
+    - iptables-services
+  when: ansible_pkg_mgr == "dnf"
   register: install_result
 
 - name: Check if firewalld is installed
diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml
index 4a2c3d47a..40eec8d35 100644
--- a/roles/os_update_latest/tasks/main.yml
+++ b/roles/os_update_latest/tasks/main.yml
@@ -1,3 +1,8 @@
 ---
 - name: Update all packages
   yum: name=* state=latest
+  when: ansible_pkg_mgr == "yum"
+
+- name: Update all packages
+  dnf: name=* state=latest
+  when: ansible_pkg_mgr == "dnf"
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index 8236cf135..514d6fd24 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -7,6 +7,12 @@ g_template_openshift_master:
     - Openshift Master
     key: create_app
 
+  - key: openshift.master.registry.healthz
+    description: "Shows the health status of the cluster's docker registry"
+    type: int
+    applications:
+    - Openshift Master
+
   - key: openshift.master.process.count
     description: Shows number of master processes running
     type: int
@@ -62,6 +68,36 @@
     applications:
     - Openshift Master
 
+  - key: openshift.master.pv.total.count
+    description: Total number of Persistent Volumes in the Openshift Cluster
+    type: int
+    applications:
+    - Openshift Master
+
+  - key: openshift.master.pv.available.count
+    description: Total number of Available Persistent Volumes in the Openshift Cluster
+    type: int
+    applications:
+    - Openshift Master
+
+  - key: openshift.master.pv.released.count
+    description: Total number of Released Persistent Volumes in the Openshift Cluster
+    type: int
+    applications:
+    - Openshift Master
+
+  - key: openshift.master.pv.bound.count
+    description: Total number of Bound Persistent Volumes in the Openshift Cluster
+    type: int
+    applications:
+    - Openshift Master
+
+  - key: openshift.master.pv.failed.count
+    description: Total number of Failed Persistent Volumes in the Openshift Cluster
+    type: int
+    applications:
+    - Openshift Master
+
   - key: openshift.master.etcd.create.success
     description: Show number of successful create actions
     type: int
@@ -195,26 +231,6 @@ g_template_openshift_master:
     - Openshift Master Metrics
 
   ztriggers:
-  - name: 'Application creation has failed on {HOST.NAME}'
-    expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1'
-    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
-    priority: avg
-
-  - name: 'Openshift Master API health check is failing on {HOST.NAME}'
-    expression: '{Template Openshift Master:openshift.master.api.healthz.max(#3)}<1'
-    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
-    priority: high
-
-  - name: 'Openshift Master API PING check is failing on {HOST.NAME}'
-    expression: '{Template Openshift Master:openshift.master.api.ping.max(#3)}<1'
-    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
-    priority: high
-
-  - name: 'Openshift Master metric PING check is failing on {HOST.NAME}'
-    expression: '{Template Openshift Master:openshift.master.metric.ping.max(#3)}<1'
-    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
-    priority: avg
-
   - name: 'Openshift Master process not running on {HOST.NAME}'
     expression: '{Template Openshift Master:openshift.master.process.count.max(#3)}<1'
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
@@ -225,6 +241,16 @@ g_template_openshift_master:
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
     priority: high
 
+  - name: 'Low number of etcd watchers on {HOST.NAME}'
+    expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+    priority: avg
+
+  - name: 'Etcd ping failed on {HOST.NAME}'
+    expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+    priority: high
+
   - name: 'Number of users for Openshift Master on {HOST.NAME}'
     expression: '{Template Openshift Master:openshift.master.user.count.last()}=0'
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
@@ -235,14 +261,40 @@ g_template_openshift_master:
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
     priority: info
 
-  - name: 'Low number of etcd watchers on {HOST.NAME}'
-    expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10'
-    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+  # Put triggers that depend on other triggers here (deps must be created first)
+  - name: 'Application creation has failed on {HOST.NAME}'
+    expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
+    dependencies:
+    - 'Openshift Master process not running on {HOST.NAME}'
     priority: avg
 
-  - name: 'Etcd ping failed on {HOST.NAME}'
-    expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
-    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+  - name: 'Openshift Master API health check is failing on {HOST.NAME}'
+    expression: '{Template Openshift Master:openshift.master.api.healthz.max(#3)}<1'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+    dependencies:
+    - 'Openshift Master process not running on {HOST.NAME}'
+    priority: high
+
+  - name: 'Openshift Master API PING check is failing on {HOST.NAME}'
+    expression: '{Template Openshift Master:openshift.master.api.ping.max(#3)}<1'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+    dependencies:
+    - 'Openshift Master process not running on {HOST.NAME}'
+    priority: high
+
+  - name: 'Openshift Master metric PING check is failing on {HOST.NAME}'
+    expression: '{Template Openshift Master:openshift.master.metric.ping.max(#3)}<1'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+    dependencies:
+    - 'Openshift Master process not running on {HOST.NAME}'
+    priority: avg
+
+  - name: 'Docker Registry check failed on {HOST.NAME}'
+    expression: '{Template Openshift Master:openshift.master.registry.healthz.max(#2)}<1'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+    dependencies:
+    - 'Openshift Master process not running on {HOST.NAME}'
     priority: high
 
   zgraphs:
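Aside (not part of the patch): per the comment added in this hunk, triggers that declare dependencies are grouped after the triggers they depend on, so the parent trigger already exists when the dependent one is created. A minimal sketch of the shape such an entry takes (item key and priority are illustrative):

  - name: 'Example check failing on {HOST.NAME}'
    expression: '{Template Openshift Master:openshift.master.example.check.max(#3)}<1'
    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
    dependencies:
    - 'Openshift Master process not running on {HOST.NAME}'
    priority: avg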
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
index 79d52ef9b..c6e557f12 100644
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ b/roles/os_zabbix/vars/template_os_linux.yml
@@ -258,26 +258,34 @@ g_template_os_linux:
     - Network
 
   ztriggerprototypes:
-  - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
-    expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
-    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
-    priority: warn
-
   - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free disk space on {HOST.NAME}'
     expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>90'
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
     priority: high
 
-  - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free inodes on {HOST.NAME}'
-    expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>90'
+  # This has a dependency on the previous trigger
+  # Trigger Prototypes do not work in 2.4. They will work in Zabbix 3.0
+  - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
+    expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
     priority: warn
+    dependencies:
+    - 'Filesystem: {#OSO_FILESYS} has less than 10% free disk space on {HOST.NAME}'
 
   - name: 'Filesystem: {#OSO_FILESYS} has less than 5% free inodes on {HOST.NAME}'
     expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>95'
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
     priority: high
+
+  # This has a dependency on the previous trigger
+  # Trigger Prototypes do not work in 2.4. They will work in Zabbix 3.0
+  - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free inodes on {HOST.NAME}'
+    expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>90'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
+    priority: warn
+    dependencies:
+    - 'Filesystem: {#OSO_FILESYS} has less than 5% free inodes on {HOST.NAME}'
+
   ztriggers:
   - name: 'Too many TOTAL processes on {HOST.NAME}'
     expression: '{Template OS Linux:proc.nprocs.last()}>5000'
@@ -304,15 +312,3 @@ g_template_os_linux:
     description: 'CPU is less than 10% idle'
     dependencies:
     - 'CPU idle less than 5% on {HOST.NAME}'
-
-  zgraphprototypes:
-  - name: Network Interface Usage
-    width: 1000
-    height: 400
-    graph_items:
-    - item_name: "Bytes per second IN on network interface {#OSO_NET_INTERFACE}"
-      item_type: prototype
-      color: red
-    - item_name: "Bytes per second OUT on network interface {#OSO_NET_INTERFACE}"
-      item_type: prototype
-      color: blue
diff --git a/roles/yum_repos/README.md b/roles/yum_repos/README.md
index 51ecd5d34..908ab4972 100644
--- a/roles/yum_repos/README.md
+++ b/roles/yum_repos/README.md
@@ -6,7 +6,7 @@ This role allows easy deployment of yum repository config files.
 Requirements
 ------------
 
-Yum
+Yum or dnf
 
 Role Variables
 --------------