summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.tito/packages/openshift-ansible2
-rw-r--r--README_AWS.md7
-rw-r--r--README_openstack.md7
-rw-r--r--filter_plugins/oo_filters.py23
-rw-r--r--inventory/byo/hosts.origin.example14
-rw-r--r--inventory/byo/hosts.ose.example33
-rw-r--r--openshift-ansible.spec24
-rw-r--r--playbooks/byo/openshift-cluster/config.yml4
-rw-r--r--playbooks/common/openshift-cluster/additional_config.yml10
-rw-r--r--playbooks/common/openshift-cluster/config.yml22
-rw-r--r--playbooks/common/openshift-cluster/openshift_hosted.yml6
-rw-r--r--playbooks/common/openshift-master/config.yml6
-rw-r--r--roles/flannel/README.md18
-rw-r--r--roles/flannel/defaults/main.yaml7
-rw-r--r--roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml38
-rw-r--r--roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml4
-rwxr-xr-xroles/openshift_facts/library/openshift_facts.py16
-rw-r--r--roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml3
-rw-r--r--roles/openshift_master/templates/master.yaml.v1.j21
-rw-r--r--roles/openshift_metrics/handlers/main.yml31
-rw-r--r--roles/openshift_metrics/tasks/install.yml114
-rw-r--r--roles/openshift_metrics/tasks/main.yaml126
-rw-r--r--roles/openshift_metrics/vars/main.yaml15
-rw-r--r--roles/openshift_node/tasks/storage_plugins/nfs.yml8
-rw-r--r--roles/openshift_repos/files/fedora-openshift-enterprise/gpg_keys/.gitkeep0
-rw-r--r--roles/openshift_repos/files/fedora-openshift-enterprise/repos/.gitkeep0
-rw-r--r--roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo8
-rw-r--r--roles/openshift_repos/files/online/repos/enterprise-v3.repo10
-rw-r--r--roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo11
-rw-r--r--roles/openshift_repos/files/openshift-enterprise/gpg_keys/.gitkeep0
-rw-r--r--roles/openshift_repos/files/openshift-enterprise/repos/.gitkeep0
-rw-r--r--roles/openshift_repos/files/removed/repos/epel7-openshift.repo0
-rw-r--r--roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo7
-rw-r--r--roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo0
-rw-r--r--roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo0
-rw-r--r--roles/openshift_repos/tasks/main.yaml60
-rw-r--r--roles/openshift_storage_nfs/tasks/main.yml22
-rw-r--r--roles/openshift_storage_nfs/templates/exports.j21
-rw-r--r--utils/.gitignore2
-rw-r--r--utils/Makefile89
-rw-r--r--utils/README.md41
-rw-r--r--utils/README.txt24
-rw-r--r--utils/requirements.txt1
-rw-r--r--utils/src/ooinstall/__init__.py4
-rw-r--r--utils/src/ooinstall/cli_installer.py27
-rw-r--r--utils/src/ooinstall/oo_config.py85
-rw-r--r--utils/src/ooinstall/openshift_ansible.py10
-rw-r--r--utils/src/ooinstall/variants.py3
48 files changed, 700 insertions, 244 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 8cbc56bc0..31f25b762 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.3.14-1 ./
+3.3.15-1 ./
diff --git a/README_AWS.md b/README_AWS.md
index f3f98fed5..cccb122f6 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -135,7 +135,12 @@ Install Dependencies
--------------------
1. Ansible requires python-boto for aws operations:
-RHEL/CentOS/Fedora
+Fedora
+```
+ dnf install -y ansible python-boto pyOpenSSL
+```
+
+RHEL/CentOS
```
yum install -y ansible python-boto pyOpenSSL
```
diff --git a/README_openstack.md b/README_openstack.md
index 7a6b24145..1998a5878 100644
--- a/README_openstack.md
+++ b/README_openstack.md
@@ -17,7 +17,12 @@ Install Dependencies
* `python-neutronclient`
* `python-heatclient`
-On RHEL / CentOS / Fedora:
+On Fedora:
+```
+ dnf install -y ansible python-novaclient python-neutronclient python-heatclient
+```
+
+On RHEL / CentOS:
```
yum install -y ansible python-novaclient python-neutronclient python-heatclient
```
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index b57bc0afa..a4dceb679 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -17,6 +17,7 @@ import re
import json
import yaml
from ansible.utils.unicode import to_unicode
+from urlparse import urlparse
# Disabling too-many-public-methods, since filter methods are necessarily
# public
@@ -709,7 +710,7 @@ class FilterModule(object):
fsType=filesystem,
volumeID=volume_id)))
persistent_volumes.append(persistent_volume)
- elif kind != 'object':
+ elif not (kind == 'object' or kind == 'dynamic'):
msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
kind,
component)
@@ -733,7 +734,8 @@ class FilterModule(object):
if 'storage' in hostvars['openshift']['hosted'][component]:
kind = hostvars['openshift']['hosted'][component]['storage']['kind']
create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv']
- if kind != None and create_pv:
+ create_pvc = hostvars['openshift']['hosted'][component]['storage']['create_pvc']
+ if kind != None and create_pv and create_pvc:
volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name']
size = hostvars['openshift']['hosted'][component]['storage']['volume']['size']
access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes']
@@ -829,6 +831,22 @@ class FilterModule(object):
return version
+ @staticmethod
+ def oo_hostname_from_url(url):
+ """ Returns the hostname contained in a URL
+
+ Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
+ """
+ if not isinstance(url, basestring):
+ raise errors.AnsibleFilterError("|failed expects a string or unicode")
+ parse_result = urlparse(url)
+ if parse_result.netloc != '':
+ return parse_result.netloc
+ else:
+ # netloc wasn't parsed, assume url was missing scheme and path
+ return parse_result.path
+
+
def filters(self):
""" returns a mapping of filters to methods """
return {
@@ -859,5 +877,6 @@ class FilterModule(object):
"oo_get_hosts_from_hostvars": self.oo_get_hosts_from_hostvars,
"oo_image_tag_to_rpm_version": self.oo_image_tag_to_rpm_version,
"oo_merge_dicts": self.oo_merge_dicts,
+ "oo_hostname_from_url": self.oo_hostname_from_url,
"oo_merge_hostvars": self.oo_merge_hostvars,
}
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 8e7883f3b..d7db63383 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -109,10 +109,6 @@ openshift_release=v1.2
# Origin copr repo
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
-# Origin Fedora copr repo
-# Use this if you are installing on Fedora
-#openshift_additional_repos=[{'id': 'fedora-openshift-origin-copr', 'name': 'OpenShift Origin COPR for Fedora', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg'}]
-
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
@@ -343,6 +339,16 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#osm_cluster_network_cidr=10.1.0.0/16
#openshift_portal_net=172.30.0.0/16
+
+# ExternalIPNetworkCIDRs controls what values are acceptable for the
+# service external IP field. If empty, no externalIP may be set. It
+# may contain a list of CIDRs which are checked for access. If a CIDR
+# is prefixed with !, IPs in that CIDR will be rejected. Rejections
+# will be applied first, then the IP checked against one of the
+# allowed CIDRs. You should ensure this range does not overlap with
+# your nodes, pods, or service CIDRs for security reasons.
+#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
+
# Configure number of bits to allocate to each host’s subnet e.g. 8
# would mean a /24 network on the host.
#osm_host_subnet_length=8
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 0d358146c..cdcbae723 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -325,6 +325,30 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_registry_storage_s3_chunksize=26214400
#openshift_hosted_registry_pullthrough=true
+# Metrics Storage Options
+#
+# NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/metrics"
+#openshift_hosted_metrics_storage_kind=nfs
+#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_metrics_storage_nfs_directory=/exports
+#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_metrics_storage_volume_name=metrics
+#openshift_hosted_metrics_storage_volume_size=10Gi
+#
+# External NFS Host
+# NFS volume must already exist with path "nfs_directory/volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/metrics"
+#openshift_hosted_metrics_storage_kind=nfs
+#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_metrics_storage_host=nfs.example.com
+#openshift_hosted_metrics_storage_nfs_directory=/exports
+#openshift_hosted_metrics_storage_volume_name=metrics
+#openshift_hosted_metrics_storage_volume_size=10Gi
+
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -339,6 +363,15 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_portal_net=172.30.0.0/16
+# ExternalIPNetworkCIDRs controls what values are acceptable for the
+# service external IP field. If empty, no externalIP may be set. It
+# may contain a list of CIDRs which are checked for access. If a CIDR
+# is prefixed with !, IPs in that CIDR will be rejected. Rejections
+# will be applied first, then the IP checked against one of the
+# allowed CIDRs. You should ensure this range does not overlap with
+# your nodes, pods, or service CIDRs for security reasons.
+#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
+
# Configure number of bits to allocate to each host’s subnet e.g. 8
# would mean a /24 network on the host.
#osm_host_subnet_length=8
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 0b8a644fc..5144c4920 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.3.14
+Version: 3.3.15
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -221,6 +221,28 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Aug 24 2016 Scott Dodson <sdodson@redhat.com> 3.3.15-1
+- simplify repo configuration (jdetiber@redhat.com)
+- don't set virt_sandbox_use_nfs on Fedora, it was replaced by virt_use_nfs
+ (maxamillion@fedoraproject.org)
+- Correct flannel cert variables. (abutcher@redhat.com)
+- Make note about ansible/install logs messing up ci tests
+ (tbielawa@redhat.com)
+- remove fedora origin copr (it's in mainline fedora now), some dnf/yum clean
+ up (maxamillion@fedoraproject.org)
+- Move nested print_read_config_error function into it's own function
+ (tbielawa@redhat.com)
+- Makefile includes ci-pyflakes target now (tbielawa@redhat.com)
+- Fix BZ1368296 by quietly recollecting facts if the cache is removed
+ (tbielawa@redhat.com)
+- Correct masterCA config typo. (abutcher@redhat.com)
+- don't gather facts when bootstrapping ansible for Fedora hosts
+ (maxamillion@fedoraproject.org)
+- a-o-i: Add variant and variant_version to migration (smunilla@redhat.com)
+- Fix upgrade failure when master-config does not have pluginOrderOverride.
+ (dgoodwin@redhat.com)
+- Add externalIPNetworkCIDRs to config (smunilla@redhat.com)
+
* Tue Aug 23 2016 Scott Dodson <sdodson@redhat.com> 3.3.14-1
- a-o-i: Fix ansible_ssh_user question (smunilla@redhat.com)
- Don't run node config upgrade hook if host is not a node.
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 0a931fbe0..389ba3419 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -5,6 +5,8 @@
connection: local
become: no
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- add_host:
@@ -14,6 +16,8 @@
- hosts: l_oo_all_hosts
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index e9fb3de96..ebbd45a67 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -1,11 +1,3 @@
-- name: Configure flannel
- hosts: oo_first_master
- vars:
- etcd_urls: "{{ openshift.master.etcd_urls }}"
- roles:
- - role: flannel_register
- when: openshift.common.use_flannel | bool
-
- name: Additional master configuration
hosts: oo_first_master
vars:
@@ -26,5 +18,3 @@
(osm_use_cockpit | bool or osm_use_cockpit is undefined )
- role: flannel_register
when: openshift.common.use_flannel | bool
-
-
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 5cf5df08e..d6a99fcda 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,14 +1,22 @@
---
- include: evaluate_groups.yml
+ tags:
+ - always
- include: initialize_facts.yml
+ tags:
+ - always
- include: validate_hostnames.yml
+ tags:
+ - node
- include: initialize_openshift_version.yml
- name: Set oo_options
hosts: oo_all_hosts
+ tags:
+ - always
tasks:
- set_fact:
openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
@@ -30,15 +38,29 @@
when: openshift_docker_log_options is not defined
- include: ../openshift-etcd/config.yml
+ tags:
+ - etcd
- include: ../openshift-nfs/config.yml
+ tags:
+ - nfs
- include: ../openshift-loadbalancer/config.yml
+ tags:
+ - loadbalancer
- include: ../openshift-master/config.yml
+ tags:
+ - master
- include: additional_config.yml
+ tags:
+ - master
- include: ../openshift-node/config.yml
+ tags:
+ - node
- include: openshift_hosted.yml
+ tags:
+ - hosted
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 4d4a09828..c90c85cbd 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,5 +1,7 @@
- name: Create persistent volumes
hosts: oo_first_master
+ tags:
+ - hosted
vars:
persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
@@ -9,6 +11,8 @@
- name: Create Hosted Resources
hosts: oo_first_master
+ tags:
+ - hosted
pre_tasks:
- set_fact:
openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
@@ -36,6 +40,6 @@
openshift_serviceaccounts_sccs:
- privileged
when: not openshift.common.version_gte_3_2_or_1_2
+ - role: openshift_hosted
- role: openshift_metrics
when: openshift.hosted.metrics.deploy | bool
- - role: openshift_hosted
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 1d818eea0..7f60cd9e4 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -48,6 +48,12 @@
- set_fact:
openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}"
when: openshift_hosted_metrics_resolution is not defined
+ - set_fact:
+ openshift_hosted_metrics_deployer_prefix: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_prefix') | default('openshift') }}"
+ when: openshift_hosted_metrics_deployer_prefix is not defined
+ - set_fact:
+ openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}"
+ when: openshift_hosted_metrics_deployer_prefix is not defined
roles:
- openshift_facts
post_tasks:
diff --git a/roles/flannel/README.md b/roles/flannel/README.md
index 8f271aada..84e2c5c49 100644
--- a/roles/flannel/README.md
+++ b/roles/flannel/README.md
@@ -13,15 +13,15 @@ to 0.3.
Role Variables
--------------
-| Name | Default value | Description |
-|---------------------|-----------------------------------------|-----------------------------------------------|
-| flannel_interface | ansible_default_ipv4.interface | interface to use for inter-host communication |
-| flannel_etcd_key | /openshift.com/network | etcd prefix |
-| etcd_hosts | etcd_urls | a list of etcd endpoints |
-| etcd_conf_dir | {{ openshift.common.config_base }}/node | SSL certificates directory |
-| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd |
-| etcd_peer_cert_file | Openshift SSL cert | SSL cert to use for etcd |
-| etcd_peer_key_file | Openshift SSL key | SSL key to use for etcd |
+| Name | Default value | Description |
+|----------------------|-----------------------------------------|-----------------------------------------------|
+| flannel_interface | ansible_default_ipv4.interface | interface to use for inter-host communication |
+| flannel_etcd_key | /openshift.com/network | etcd prefix |
+| etcd_hosts | etcd_urls | a list of etcd endpoints |
+| etcd_cert_config_dir | {{ openshift.common.config_base }}/node | SSL certificates directory |
+| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd |
+| etcd_peer_cert_file | Openshift SSL cert | SSL cert to use for etcd |
+| etcd_peer_key_file | Openshift SSL key | SSL key to use for etcd |
Dependencies
------------
diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml
index 34cebda9c..988731ef2 100644
--- a/roles/flannel/defaults/main.yaml
+++ b/roles/flannel/defaults/main.yaml
@@ -2,7 +2,6 @@
flannel_interface: "{{ ansible_default_ipv4.interface }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
-etcd_conf_dir: "{{ openshift.common.config_base }}/node"
-etcd_peer_ca_file: "{{ etcd_conf_dir }}/{{ 'ca' if (embedded_etcd | bool) else 'node.etcd-ca' }}.crt"
-etcd_peer_cert_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.crt"
-etcd_peer_key_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.key"
+etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/{{ 'ca' if (embedded_etcd | bool) else 'flannel.etcd-ca' }}.crt"
+etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.crt"
+etcd_peer_key_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.key"
diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml
index 032f94a18..afd47ec7c 100644
--- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml
@@ -34,9 +34,11 @@ objects:
metadata:
generateName: metrics-deployer-
spec:
+ securityContext: {}
containers:
- image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
name: deployer
+ securityContext: {}
volumeMounts:
- name: secret
mountPath: /secret
@@ -48,6 +50,10 @@ objects:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
- name: IMAGE_PREFIX
value: ${IMAGE_PREFIX}
- name: IMAGE_VERSION
@@ -58,8 +64,12 @@ objects:
value: ${MODE}
- name: REDEPLOY
value: ${REDEPLOY}
+ - name: IGNORE_PREFLIGHT
+ value: ${IGNORE_PREFLIGHT}
- name: USE_PERSISTENT_STORAGE
value: ${USE_PERSISTENT_STORAGE}
+ - name: DYNAMICALLY_PROVISION_STORAGE
+ value: ${DYNAMICALLY_PROVISION_STORAGE}
- name: HAWKULAR_METRICS_HOSTNAME
value: ${HAWKULAR_METRICS_HOSTNAME}
- name: CASSANDRA_NODES
@@ -68,6 +78,10 @@ objects:
value: ${CASSANDRA_PV_SIZE}
- name: METRIC_DURATION
value: ${METRIC_DURATION}
+ - name: USER_WRITE_ACCESS
+ value: ${USER_WRITE_ACCESS}
+ - name: HEAPSTER_NODE_ID
+ value: ${HEAPSTER_NODE_ID}
- name: METRIC_RESOLUTION
value: ${METRIC_RESOLUTION}
dnsPolicy: ClusterFirst
@@ -87,7 +101,7 @@ parameters:
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
name: IMAGE_VERSION
- value: "3.2.1"
+ value: "3.3.0"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
@@ -97,7 +111,7 @@ parameters:
name: HAWKULAR_METRICS_HOSTNAME
required: true
-
- description: "Can be set to: 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process)"
+ description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
name: MODE
value: "deploy"
-
@@ -105,10 +119,18 @@ parameters:
name: REDEPLOY
value: "false"
-
+ description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
+ name: IGNORE_PREFLIGHT
+ value: "false"
+-
description: "Set to true for persistent storage, set to false to use non persistent storage"
name: USE_PERSISTENT_STORAGE
value: "true"
-
+ description: "Set to true to dynamically provision storage, set to false to use pre-created persistent volumes"
+ name: DYNAMICALLY_PROVISION_STORAGE
+ value: "false"
+-
description: "The number of Cassandra Nodes to deploy for the initial cluster"
name: CASSANDRA_NODES
value: "1"
@@ -121,6 +143,14 @@ parameters:
name: METRIC_DURATION
value: "7"
-
- description: "How often metrics should be gathered. Defaults value of '10s' for 10 seconds"
+ description: "Whether user accounts should be allowed to write metrics."
+ name: USER_WRITE_ACCESS
+ value: "false"
+-
+ description: "The identifier used when generating metric ids in Hawkular"
+ name: HEAPSTER_NODE_ID
+ value: "nodename"
+-
+ description: "How often metrics should be gathered. Default value of '15s' for 15 seconds"
name: METRIC_RESOLUTION
- value: "10s"
+ value: "15s"
diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml
index ab62ae76f..ac5098c8a 100644
--- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml
@@ -151,6 +151,6 @@ parameters:
name: HEAPSTER_NODE_ID
value: "nodename"
-
- description: "How often metrics should be gathered. Defaults value of '10s' for 10 seconds"
+ description: "How often metrics should be gathered. Default value of '15s' for 15 seconds"
name: METRIC_RESOLUTION
- value: "10s"
+ value: "15s"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 6fab996b2..ff4d9c946 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -477,6 +477,14 @@ def set_selectors(facts):
facts['hosted']['registry'] = {}
if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']:
facts['hosted']['registry']['selector'] = selector
+ if 'metrics' not in facts['hosted']:
+ facts['hosted']['metrics'] = {}
+ if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
+ facts['hosted']['metrics']['selector'] = None
+ if 'logging' not in facts['hosted']:
+ facts['hosted']['logging'] = {}
+ if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
+ facts['hosted']['logging']['selector'] = None
return facts
@@ -1791,8 +1799,9 @@ class OpenShiftFacts(object):
filesystem='ext4',
volumeID='123'),
host=None,
- access_modes=['ReadWriteMany'],
- create_pv=True
+ access_modes=['ReadWriteOnce'],
+ create_pv=True,
+ create_pvc=False
)
),
registry=dict(
@@ -1807,7 +1816,8 @@ class OpenShiftFacts(object):
options='*(rw,root_squash)'),
host=None,
access_modes=['ReadWriteMany'],
- create_pv=True
+ create_pv=True,
+ create_pvc=True
)
),
router=dict()
diff --git a/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml b/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml
index 6bf859e82..60eefd71a 100644
--- a/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml
@@ -5,10 +5,11 @@
- name: Determine if volume is already attached to dc/docker-registry
command: "{{ openshift.common.client_binary }} get -o template dc/docker-registry --template=\\{\\{.spec.template.spec.volumes\\}\\} --output-version=v1"
changed_when: false
+ failed_when: false
register: registry_volumes_output
- set_fact:
- volume_attached: "{{ registry_volume_claim in registry_volumes_output.stdout }}"
+ volume_attached: "{{ registry_volume_claim in (registry_volumes_output).stdout | default(['']) }}"
- name: Add volume to dc/docker-registry
command: >
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 0430fc779..ced3eb76f 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -163,6 +163,7 @@ networkConfig:
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.common.portal_net }}
+ externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | to_padded_yaml(1,2) }}
oauthConfig:
{% if 'oauth_always_show_provider_selection' in openshift.master %}
alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }}
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
new file mode 100644
index 000000000..edb7369de
--- /dev/null
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -0,0 +1,31 @@
+---
+- name: restart master
+ service: name={{ openshift.common.service_type }}-master state=restarted
+ when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
+ notify: Verify API Server
+
+- name: restart master api
+ service: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
+
+- name: restart master controllers
+ service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
diff --git a/roles/openshift_metrics/tasks/install.yml b/roles/openshift_metrics/tasks/install.yml
new file mode 100644
index 000000000..2fbb7d606
--- /dev/null
+++ b/roles/openshift_metrics/tasks/install.yml
@@ -0,0 +1,114 @@
+---
+
+- name: Test if metrics-deployer service account exists
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace=openshift-infra
+ get serviceaccount metrics-deployer -o json
+ register: serviceaccount
+ changed_when: false
+ failed_when: false
+
+- name: Create metrics-deployer Service Account
+ shell: >
+ echo {{ metrics_deployer_sa | to_json | quote }} |
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ create -f -
+ when: serviceaccount.rc == 1
+
+- name: Test edit permissions
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ get rolebindings -o jsonpath='{.items[?(@.metadata.name == "edit")].userNames}'
+ register: edit_rolebindings
+ changed_when: false
+
+- name: Add edit permission to the openshift-infra project to metrics-deployer SA
+ command: >
+ {{ openshift.common.admin_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ policy add-role-to-user edit
+ system:serviceaccount:openshift-infra:metrics-deployer
+ when: "'system:serviceaccount:openshift-infra:metrics-deployer' not in edit_rolebindings.stdout"
+
+- name: Test cluster-reader permissions
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ get clusterrolebindings -o jsonpath='{.items[?(@.metadata.name == "cluster-reader")].userNames}'
+ register: cluster_reader_clusterrolebindings
+ changed_when: false
+
+- name: Add cluster-reader permission to the openshift-infra project to heapster SA
+ command: >
+ {{ openshift.common.admin_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ policy add-cluster-role-to-user cluster-reader
+ system:serviceaccount:openshift-infra:heapster
+ when: "'system:serviceaccount:openshift-infra:heapster' not in cluster_reader_clusterrolebindings.stdout"
+
+- name: Create metrics-deployer secret
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ secrets new metrics-deployer nothing=/dev/null
+ register: metrics_deployer_secret
+ changed_when: metrics_deployer_secret.rc == 0
+ failed_when: "metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr"
+
+# TODO: extend this to allow user passed in certs or generating cert with
+# OpenShift CA
+- name: Build metrics deployer command
+ set_fact:
+ deployer_cmd: "{{ openshift.common.client_binary }} process -f \
+ {{ metrics_template_dir }}/metrics-deployer.yaml -v \
+ HAWKULAR_METRICS_HOSTNAME={{ metrics_hostname }},USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }},METRIC_DURATION={{ openshift.hosted.metrics.duration }},METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }}{{ image_prefix }}{{ image_version }},MODE={{ deployment_mode }} \
+ | {{ openshift.common.client_binary }} --namespace openshift-infra \
+ --config={{ openshift_metrics_kubeconfig }} \
+ create -f -"
+
+- name: Deploy Metrics
+ shell: "{{ deployer_cmd }}"
+ register: deploy_metrics
+ failed_when: "'already exists' not in deploy_metrics.stderr and deploy_metrics.rc != 0"
+ changed_when: deploy_metrics.rc == 0
+
+- set_fact:
+ deployer_pod: "{{ deploy_metrics.stdout[1:2] }}"
+
+# TODO: re-enable this once the metrics deployer validation issue is fixed
+# when using dynamically provisioned volumes
+- name: "Wait for image pull and deployer pod"
+ shell: >
+ {{ openshift.common.client_binary }}
+ --namespace openshift-infra
+ --config={{ openshift_metrics_kubeconfig }}
+ get {{ deploy_metrics.stdout }}
+ register: deploy_result
+ until: "{{ 'Completed' in deploy_result.stdout }}"
+ failed_when: "{{ 'Completed' not in deploy_result.stdout }}"
+ retries: 60
+ delay: 10
+
+- name: Configure master for metrics
+ modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: assetConfig.metricsPublicURL
+ yaml_value: "https://{{ metrics_hostname }}/hawkular/metrics"
+ notify: restart master
+
+- name: Store metrics public_url
+ openshift_facts:
+ role: master
+ local_facts:
+ metrics_public_url: "https://{{ metrics_hostname }}/hawkular/metrics"
+ when: deploy_result | changed
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 43b85204a..8a6712468 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -1,64 +1,88 @@
---
-- name: Copy Configuration to temporary conf
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{hawkular_tmp_conf}}
- changed_when: false
+- fail:
+    msg: This role requires openshift_master_default_subdomain or openshift_master_metrics_public_url to be set
+ when: openshift.master.metrics_public_url | default(openshift_master_metrics_public_url | default(openshift.master.default_subdomain | default(openshift_master_default_subdomain | default(none)))) is none
-- name: Create metrics-deployer Service Account
- shell: >
- echo {{ deployer_service_account | to_json | quote }} |
- {{ openshift.common.client_binary }} create
- -n openshift-infra
- --config={{hawkular_tmp_conf}}
- -f -
- register: deployer_create_service_account
- failed_when: "'already exists' not in deployer_create_service_account.stderr and deployer_create_service_account.rc != 0"
- changed_when: deployer_create_service_account.rc == 0
+- name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
-- name: Create metrics-deployer Secret
- command: >
- {{ openshift.common.client_binary }}
- secrets new metrics-deployer
- nothing=/dev/null
- --config={{hawkular_tmp_conf}}
- -n openshift-infra
- register: deployer_create_secret
- failed_when: "'already exists' not in deployer_create_secret.stderr and deployer_create_secret.rc !=0"
- changed_when: deployer_create_secret.rc == 0
+- name: Record kubeconfig tmp dir
+ set_fact:
+ openshift_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
-- name: Configure role/user permissions
+- name: Copy the admin client config(s)
command: >
- {{ openshift.common.admin_binary }} {{item}}
- --config={{hawkular_tmp_conf}}
- with_items: "{{hawkular_permission_oc_commands}}"
- register: hawkular_perm_task
- failed_when: "'already exists' not in hawkular_perm_task.stderr and hawkular_perm_task.rc != 0"
- changed_when: hawkular_perm_task.rc == 0
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_metrics_kubeconfig }}
+ changed_when: False
+
+- name: Set hosted metrics facts
+ openshift_facts:
+ role: hosted
+ openshift_env: "{{ hostvars
+ | oo_merge_hostvars(vars, inventory_hostname)
+ | oo_openshift_env }}"
+ openshift_env_structures:
+ - 'openshift.hosted.metrics.*'
+
+- set_fact:
+ # Prefer the master facts over bare variables if present, prefer
+ # metrics_public_url over creating a default using default_subdomain
+ metrics_hostname: "{{ openshift.hosted.metrics.public_url
+ | default('hawkular-metrics.' ~ (openshift.master.default_subdomain
+ | default(openshift_master_default_subdomain )))
+ | oo_hostname_from_url }}"
+ metrics_persistence: True
+ #"{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}"
+ metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}"
+ metrics_template_dir: "/usr/share/openshift/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
+ cassandra_nodes: "{{ ',CASSANDRA_NODES=' ~ openshift.hosted.metrics.cassandra_nodes if 'cassandra' in openshift.hosted.metrics else '' }}"
+ cassandra_pv_size: "{{ ',CASSANDRA_PV_SIZE=' ~ openshift.hosted.metrics.storage_volume_size if openshift.hosted.metrics.storage_volume_size | default(none) is not none else '' }}"
+ image_prefix: "{{ ',IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer_prefix if 'deployer_prefix' in openshift.hosted.metrics else '' }}"
+ image_version: "{{ ',IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer_version if 'deployer_version' in openshift.hosted.metrics else '' }}"
-- name: Check openshift_master_default_subdomain
- fail:
- msg: "Default subdomain should be defined"
- when: openshift.master.default_subdomain is not defined
-- name: Create Heapster and Hawkular/Cassandra Services
+- name: Check for existing metrics pods
shell: >
- {{ openshift.common.client_binary }} process -f \
- /usr/share/openshift/examples/infrastructure-templates/{{ hawkular_type }}/metrics-deployer.yaml -v \
- HAWKULAR_METRICS_HOSTNAME=hawkular-metrics.{{ openshift.master.default_subdomain }},USE_PERSISTENT_STORAGE={{ hawkular_persistence }},METRIC_DURATION={{ openshift.hosted.metrics.duration }},METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }} \
- | {{ openshift.common.client_binary }} create -n openshift-infra --config={{hawkular_tmp_conf}} -f -
- register: oex_heapster_services
- failed_when: "'already exists' not in oex_heapster_services.stderr and oex_heapster_services.rc != 0"
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ get pods -l {{ item }} | grep -q Running
+ register: metrics_pods_status
+ with_items:
+ - metrics-infra=hawkular-metrics
+ - metrics-infra=heapster
+ - metrics-infra=hawkular-cassandra
+ failed_when: false
changed_when: false
-- name: Clean temporary config file
- command: >
- rm -rf {{hawkular_tmp_conf}}
+- name: Check for previous deployer
+ shell: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer-
+ register: metrics_deployer_status
+ failed_when: false
changed_when: false
-- name: "Wait for image pull and deployer pod"
- shell: "{{ openshift.common.client_binary }} get pods -n openshift-infra | grep metrics-deployer.*Completed"
- register: result
- until: result.rc == 0
- retries: 60
- delay: 10
+- name: Record current deployment status
+ set_fact:
+ greenfield: "{{ not metrics_deployer_status.rc == 0 }}"
+ failed_error: "{{ True if 'Error' in metrics_deployer_status.stdout else False }}"
+ metrics_running: "{{ metrics_pods_status.results | oo_collect(attribute='rc') == [0,0,0] }}"
+
+- name: Set deployment mode
+ set_fact:
+ deployment_mode: "{{ 'refresh' if (failed_error | bool or metrics_upgrade | bool) else 'deploy' }}"
+
+# TODO: handle non greenfield deployments in the future
+- include: install.yml
+ when: greenfield
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_metrics/vars/main.yaml b/roles/openshift_metrics/vars/main.yaml
index 82d9d29f7..0331bcb89 100644
--- a/roles/openshift_metrics/vars/main.yaml
+++ b/roles/openshift_metrics/vars/main.yaml
@@ -2,13 +2,13 @@ hawkular_permission_oc_commands:
- policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
- policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
-deployer_service_account:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: metrics-deployer
- secrets:
- - name: metrics-deployer
+metrics_deployer_sa:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: metrics-deployer
+ secrets:
+ - name: metrics-deployer
hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
@@ -17,3 +17,4 @@ hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}tru
hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
+metrics_upgrade: "{{ openshift.hosted.metrics.upgrade | default(False) }}"
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index e384c1bd7..22b539d16 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -17,16 +17,16 @@
persistent: yes
when: ansible_selinux and ansible_selinux.status == "enabled" and virt_use_nfs_output.rc == 0
-- name: Check for existence of virt_sandbox_use_nfs seboolean
+- name: Check for existence of virt_sandbox_use_nfs seboolean (RHEL)
command: getsebool virt_sandbox_use_nfs
register: virt_sandbox_use_nfs_output
- when: ansible_selinux and ansible_selinux.status == "enabled"
+ when: ansible_distribution != "Fedora" and ansible_selinux and ansible_selinux.status == "enabled"
failed_when: false
changed_when: false
-- name: Set seboolean to allow nfs storage plugin access from containers(sandbox)
+- name: Set seboolean to allow nfs storage plugin access from containers(sandbox) (RHEL)
seboolean:
name: virt_sandbox_use_nfs
state: yes
persistent: yes
- when: ansible_selinux and ansible_selinux.status == "enabled" and virt_sandbox_use_nfs_output.rc == 0
+ when: ansible_distribution != "Fedora" and ansible_selinux and ansible_selinux.status == "enabled" and virt_sandbox_use_nfs_output.rc == 0
diff --git a/roles/openshift_repos/files/fedora-openshift-enterprise/gpg_keys/.gitkeep b/roles/openshift_repos/files/fedora-openshift-enterprise/gpg_keys/.gitkeep
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/fedora-openshift-enterprise/gpg_keys/.gitkeep
+++ /dev/null
diff --git a/roles/openshift_repos/files/fedora-openshift-enterprise/repos/.gitkeep b/roles/openshift_repos/files/fedora-openshift-enterprise/repos/.gitkeep
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/fedora-openshift-enterprise/repos/.gitkeep
+++ /dev/null
diff --git a/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo b/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo
deleted file mode 100644
index bc0435d82..000000000
--- a/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo
+++ /dev/null
@@ -1,8 +0,0 @@
-[maxamillion-fedora-openshift]
-name=Copr repo for fedora-openshift owned by maxamillion
-baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/
-skip_if_unavailable=True
-gpgcheck=1
-gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg
-enabled=1
-enabled_metadata=1 \ No newline at end of file
diff --git a/roles/openshift_repos/files/online/repos/enterprise-v3.repo b/roles/openshift_repos/files/online/repos/enterprise-v3.repo
deleted file mode 100644
index 92bd35834..000000000
--- a/roles/openshift_repos/files/online/repos/enterprise-v3.repo
+++ /dev/null
@@ -1,10 +0,0 @@
-[enterprise-v3]
-name=OpenShift Enterprise
-baseurl=https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-ose/
- https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-ose/
-enabled=1
-gpgcheck=0
-failovermethod=priority
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
diff --git a/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo
deleted file mode 100644
index b4215679f..000000000
--- a/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo
+++ /dev/null
@@ -1,11 +0,0 @@
-[rhel-7-libra-candidate]
-name=rhel-7-libra-candidate - \$basearch
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/
- https://mirror.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/
-gpgkey=https://mirror.ops.rhcloud.com/libra/RPM-GPG-KEY-redhat-openshifthosted
-skip_if_unavailable=True
-gpgcheck=0
-enabled=1
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
-sslverify=False
diff --git a/roles/openshift_repos/files/openshift-enterprise/gpg_keys/.gitkeep b/roles/openshift_repos/files/openshift-enterprise/gpg_keys/.gitkeep
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/openshift-enterprise/gpg_keys/.gitkeep
+++ /dev/null
diff --git a/roles/openshift_repos/files/openshift-enterprise/repos/.gitkeep b/roles/openshift_repos/files/openshift-enterprise/repos/.gitkeep
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/openshift-enterprise/repos/.gitkeep
+++ /dev/null
diff --git a/roles/openshift_repos/files/removed/repos/epel7-openshift.repo b/roles/openshift_repos/files/removed/repos/epel7-openshift.repo
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/removed/repos/epel7-openshift.repo
+++ /dev/null
diff --git a/roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo b/roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo
deleted file mode 100644
index 0b21e0a65..000000000
--- a/roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo
+++ /dev/null
@@ -1,7 +0,0 @@
-[maxamillion-origin-next]
-name=Copr repo for origin-next owned by maxamillion
-baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/
-skip_if_unavailable=True
-gpgcheck=1
-gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg
-enabled=1
diff --git a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo
+++ /dev/null
diff --git a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo
+++ /dev/null
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 07a8d28fd..9be168611 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -29,62 +29,20 @@
when: openshift_additional_repos | length == 0 and not openshift.common.is_containerized | bool
notify: refresh cache
-- name: Remove any yum repo files for other deployment types RHEL/CentOS
- file:
- path: "/etc/yum.repos.d/{{ item | basename }}"
- state: absent
- with_fileglob:
- - "fedora-openshift-enterprise/repos/*"
- - "fedora-origin/repos/*"
- - "online/repos/*"
- - "openshift-enterprise/repos/*"
- - "origin/repos/*"
- - "removed/repos/*"
- when: not openshift.common.is_containerized | bool
- and not (item | search("/files/" ~ openshift_deployment_type ~ "/repos"))
- and (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
- notify: refresh cache
-
-- name: Remove any yum repo files for other deployment types Fedora
- file:
- path: "{{ item | basename }}"
- state: absent
- with_fileglob:
- - "fedora-openshift-enterprise/repos/*"
- - "fedora-origin/repos/*"
- - "online/repos/*"
- - "openshift-enterprise/repos/*"
- - "origin/repos/*"
- - "removed/repos/*"
- when: not openshift.common.is_containerized | bool
- and not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos"))
- and (ansible_distribution == "Fedora")
- notify: refresh cache
-
-- name: Configure gpg keys if needed
+- name: Configure origin gpg keys if needed
copy:
- src: "{{ item }}"
+ src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
dest: /etc/pki/rpm-gpg/
- with_fileglob:
- - "{{ openshift_deployment_type }}/gpg_keys/*"
- notify: refresh cache
- when: not openshift.common.is_containerized | bool
-
-- name: Configure yum repositories RHEL/CentOS
- copy:
- src: "{{ item }}"
- dest: /etc/yum.repos.d/
- with_fileglob:
- - "{{ openshift_deployment_type }}/repos/*"
notify: refresh cache
- when: (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
+ when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
+ and openshift_deployment_type == 'origin'
and not openshift.common.is_containerized | bool
-- name: Configure yum repositories Fedora
+- name: Configure origin yum repositories RHEL/CentOS
copy:
- src: "{{ item }}"
+ src: origin/repos/openshift-ansible-centos-paas-sig.repo
dest: /etc/yum.repos.d/
- with_fileglob:
- - "fedora-{{ openshift_deployment_type }}/repos/*"
notify: refresh cache
- when: (ansible_distribution == "Fedora") and not openshift.common.is_containerized | bool
+ when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
+ and openshift_deployment_type == 'origin'
+ and not openshift.common.is_containerized | bool
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index fe7f83cbb..08d0b8540 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -20,21 +20,37 @@
- name: Ensure export directories exist
file:
- path: "{{ openshift.hosted.registry.storage.nfs.directory }}/{{ item }}"
+ path: "{{ item.storage.nfs.directory }}/{{ item.storage.volume.name }}"
state: directory
mode: 0777
owner: nfsnobody
group: nfsnobody
with_items:
- - "{{ openshift.hosted.registry.storage.volume.name }}"
+ - "{{ openshift.hosted.registry }}"
+ - "{{ openshift.hosted.metrics }}"
- name: Configure exports
template:
- dest: /etc/exports
+ dest: /etc/exports.d/openshift-ansible.exports
src: exports.j2
notify:
- restart nfs-server
+# Now that we're putting our exports in our own file clean up the old ones
+- name: register exports
+ command: cat /etc/exports.d/openshift-ansible.exports
+ register: exports_out
+
+- name: remove exports from /etc/exports
+ lineinfile:
+ dest: /etc/exports
+ line: "{{ item }}"
+ state: absent
+ with_items: "{{ exports_out.stdout_lines | default([]) }}"
+ when: exports_out.rc == 0
+ notify:
+ - restart nfs-server
+
- name: Enable and start services
service:
name: "{{ item }}"
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index c1e1994b0..d6d936b72 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1 +1,2 @@
{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
+{{ openshift.hosted.metrics.storage.nfs.directory }}/{{ openshift.hosted.metrics.storage.volume.name }} {{ openshift.hosted.metrics.storage.nfs.options }}
diff --git a/utils/.gitignore b/utils/.gitignore
index 68759c0ba..7e72a43c3 100644
--- a/utils/.gitignore
+++ b/utils/.gitignore
@@ -43,3 +43,5 @@ coverage.xml
# Sphinx documentation
docs/_build/
+oo-install
+oo-installenv
diff --git a/utils/Makefile b/utils/Makefile
new file mode 100644
index 000000000..b1a3874ae
--- /dev/null
+++ b/utils/Makefile
@@ -0,0 +1,89 @@
+########################################################
+
+# Makefile for OpenShift: Atomic Quick Installer
+#
+# useful targets (not all implemented yet!):
+# make clean -- Clean up garbage
+# make ci ------------------- Execute CI steps (for travis or jenkins)
+
+########################################################
+
+# > VARIABLE = value
+#
+# Normal setting of a variable - values within it are recursively
+# expanded when the variable is USED, not when it's declared.
+#
+# > VARIABLE := value
+#
+# Setting of a variable with simple expansion of the values inside -
+# values within it are expanded at DECLARATION time.
+
+########################################################
+
+
+NAME := oo-install
+TESTPACKAGE := oo-install
+SHORTNAME := ooinstall
+
+sdist: clean
+ python setup.py sdist
+ rm -fR $(SHORTNAME).egg-info
+
+clean:
+ @find . -type f -regex ".*\.py[co]$$" -delete
+ @find . -type f \( -name "*~" -or -name "#*" \) -delete
+ @rm -fR build dist rpm-build MANIFEST htmlcov .coverage cover ooinstall.egg-info oo-install
+ @rm -fR $(NAME)env
+
+virtualenv:
+ @echo "#############################################"
+ @echo "# Creating a virtualenv"
+ @echo "#############################################"
+ virtualenv $(NAME)env
+ . $(NAME)env/bin/activate && pip install -r requirements.txt
+ . $(NAME)env/bin/activate && pip install pep8 nose coverage mock flake8 PyYAML click
+
+# If there are any special things to install do it here
+# . $(NAME)env/bin/activate && INSTALL STUFF
+
+ci-unittests:
+ @echo "#############################################"
+ @echo "# Running Unit Tests in virtualenv"
+ @echo "#############################################"
+# . $(NAME)env/bin/activate && nosetests -v --with-cover --cover-html --cover-min-percentage=80 --cover-package=$(TESTPACKAGE) test/
+ . $(NAME)env/bin/activate && nosetests -v test/
+
+ci-pylint:
+ @echo "#############################################"
+ @echo "# Running PyLint Tests in virtualenv"
+ @echo "#############################################"
+ python -m pylint --rcfile ../git/.pylintrc src/ooinstall/cli_installer.py src/ooinstall/oo_config.py src/ooinstall/openshift_ansible.py src/ooinstall/variants.py
+
+ci-list-deps:
+ @echo "#############################################"
+ @echo "# Listing all pip deps"
+ @echo "#############################################"
+ . $(NAME)env/bin/activate && pip freeze
+
+ci-pyflakes:
+ @echo "#################################################"
+ @echo "# Running Pyflakes Compliance Tests in virtualenv"
+ @echo "#################################################"
+ . $(NAME)env/bin/activate && pyflakes src/ooinstall/*.py
+
+ci-pep8:
+ @echo "#############################################"
+ @echo "# Running PEP8 Compliance Tests in virtualenv"
+ @echo "#############################################"
+ @echo "Skipping PEP8 tests until we clean them up"
+# . $(NAME)env/bin/activate && pep8 --ignore=E501,E121,E124 src/$(SHORTNAME)/
+
+ci-pep8-real:
+ @echo "#############################################"
+ @echo "# Running PEP8 Compliance Tests in virtualenv"
+ @echo "#############################################"
+ . $(NAME)env/bin/activate && pep8 --ignore=E501,E121,E124 src/$(SHORTNAME)/
+
+
+ci: clean virtualenv ci-list-deps ci-pylint ci-pep8 ci-unittests ci-pyflakes
+ :
diff --git a/utils/README.md b/utils/README.md
new file mode 100644
index 000000000..2abf2705e
--- /dev/null
+++ b/utils/README.md
@@ -0,0 +1,41 @@
+# Running Tests (NEW)
+
+Run the command:
+
+ make ci
+
+to run an array of unittests locally.
+
+You will get errors if the log files already exist and cannot be
+written to by the current user (`/tmp/ansible.log` and
+`/tmp/installer.txt`). *We're working on it.*
+
+# Running From Source
+
+You will need to setup a **virtualenv** to run from source:
+
+ $ virtualenv oo-install
+ $ source ./oo-install/bin/activate
+ $ virtualenv --relocatable ./oo-install/
+ $ python setup.py install
+
+The virtualenv `bin` directory should now be at the start of your
+`$PATH`, and `oo-install` is ready to use from your shell.
+
+You can exit the virtualenv with:
+
+ $ deactivate
+
+# Testing (OLD)
+
+*This section is deprecated, but still works*
+
+First, run the **virtualenv setup steps** described above.
+
+Install some testing libraries: (we cannot do this via setuptools due to the version virtualenv bundles)
+
+$ pip install mock nose
+
+Then run the tests with:
+
+$ oo-install/bin/nosetests
diff --git a/utils/README.txt b/utils/README.txt
deleted file mode 100644
index 6a6a1d24d..000000000
--- a/utils/README.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-## Running From Source
-
-You will need to setup a virtualenv to run from source and execute the unit tests.
-
-$ virtualenv oo-install
-$ source ./oo-install/bin/activate
-$ virtualenv --relocatable ./oo-install/
-$ python setup.py install
-
-The virtualenv bin directory should now be at the start of your $PATH, and oo-install is ready to use from your shell.
-
-You can exit the virtualenv with:
-
-$ deactivate
-
-## Testing
-
-Install some testing libraries: (we cannot do this via setuptools due to the version virtualenv bundles)
-
-$ pip install mock nose
-
-Then run the tests with:
-
-$ oo-install/bin/nosetests
diff --git a/utils/requirements.txt b/utils/requirements.txt
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/utils/requirements.txt
@@ -0,0 +1 @@
+
diff --git a/utils/src/ooinstall/__init__.py b/utils/src/ooinstall/__init__.py
index 944dea3b5..96e495e19 100644
--- a/utils/src/ooinstall/__init__.py
+++ b/utils/src/ooinstall/__init__.py
@@ -1,5 +1 @@
-# TODO: Temporarily disabled due to importing old code into openshift-ansible
-# repo. We will work on these over time.
# pylint: disable=missing-docstring
-
-from .oo_config import OOConfig
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 2b070a3d2..230891e7f 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -8,11 +8,22 @@ import sys
from distutils.version import LooseVersion
import click
from ooinstall import openshift_ansible
-from ooinstall import OOConfig
+from ooinstall.oo_config import OOConfig
from ooinstall.oo_config import OOConfigInvalidHostError
from ooinstall.oo_config import Host, Role
from ooinstall.variants import find_variant, get_variant_version_combos
+import logging
+installer_log = logging.getLogger('installer')
+installer_log.setLevel(logging.CRITICAL)
+installer_file_handler = logging.FileHandler('/tmp/installer.txt')
+installer_file_handler.setFormatter(
+ logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
+# Example output:
+# 2016-08-23 07:34:58,480 - installer - DEBUG - Going to 'load_system_facts'
+installer_file_handler.setLevel(logging.DEBUG)
+installer_log.addHandler(installer_file_handler)
+
DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
@@ -798,11 +809,14 @@ def set_infra_nodes(hosts):
default="/tmp/ansible.log")
@click.option('-v', '--verbose',
is_flag=True, default=False)
+@click.option('-d', '--debug',
+              help="Enable installer debugging (/tmp/installer.txt)",
+ is_flag=True, default=False)
@click.help_option('--help', '-h')
#pylint: disable=too-many-arguments
#pylint: disable=line-too-long
# Main CLI entrypoint, not much we can do about too many arguments.
-def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
+def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose, debug):
"""
atomic-openshift-installer makes the process for installing OSE or AEP
easier by interactively gathering the data needed to run on each host.
@@ -810,6 +824,14 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
"""
+ if debug:
+ # DEFAULT log level threshold is set to CRITICAL (the
+ # highest), anything below that (we only use debug/warning
+ # presently) is not logged. If '-d' is given though, we'll
+ # lower the threshold to debug (almost everything gets through)
+ installer_log.setLevel(logging.DEBUG)
+ installer_log.debug("Quick Installer debugging initialized")
+
ctx.obj = {}
ctx.obj['unattended'] = unattended
ctx.obj['configuration'] = configuration
@@ -991,7 +1013,6 @@ def install(ctx, force, gen_inventory):
hosts_to_run_on, callback_facts = get_hosts_to_run_on(
oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)
-
# We already verified this is not the case for unattended installs, so this can
# only trigger for live CLI users:
# TODO: if there are *new* nodes and this is a live install, we may need the user
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index f2990662e..0e855f437 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -5,6 +5,9 @@ import sys
import yaml
from pkg_resources import resource_filename
+import logging
+installer_log = logging.getLogger('installer')
+
CONFIG_PERSIST_SETTINGS = [
'ansible_ssh_user',
'ansible_callback_facts_yaml',
@@ -25,6 +28,16 @@ DEPLOYMENT_VARIABLES_BLACKLIST = [
DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']
+def print_read_config_error(error, path='the configuration file'):
+ message = """
+Error loading config. {}.
+
+See https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html#defining-an-installation-configuration-file
+for information on creating a configuration file or delete {} and re-run the installer.
+"""
+ print message.format(error, path)
+
+
class OOConfigFileError(Exception):
"""The provided config file path can't be read/written
"""
@@ -164,25 +177,20 @@ class OOConfig(object):
self._read_config()
self._set_defaults()
-
-# pylint: disable=too-many-branches
+ # pylint: disable=too-many-branches
+ # Lots of different checks ran in a single method, could
+ # use a little refactoring-love some time
def _read_config(self):
- def _print_read_config_error(error, path='the configuration file'):
- message = """
-Error loading config. {}.
-
-See https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html#defining-an-installation-configuration-file
-for information on creating a configuration file or delete {} and re-run the installer.
-"""
- print message.format(error, path)
-
+ installer_log.debug("Attempting to read the OO Config")
try:
+ installer_log.debug("Attempting to see if the provided config file exists: %s", self.config_path)
if os.path.exists(self.config_path):
+ installer_log.debug("We think the config file exists: %s", self.config_path)
with open(self.config_path, 'r') as cfgfile:
loaded_config = yaml.safe_load(cfgfile.read())
if not 'version' in loaded_config:
- _print_read_config_error('Legacy configuration file found', self.config_path)
+ print_read_config_error('Legacy configuration file found', self.config_path)
sys.exit(0)
if loaded_config.get('version', '') == 'v1':
@@ -192,14 +200,31 @@ for information on creating a configuration file or delete {} and re-run the ins
host_list = loaded_config['deployment']['hosts']
role_list = loaded_config['deployment']['roles']
except KeyError as e:
- _print_read_config_error("No such key: {}".format(e), self.config_path)
+ print_read_config_error("No such key: {}".format(e), self.config_path)
+ print "Error loading config, required key missing: {}".format(e)
sys.exit(0)
for setting in CONFIG_PERSIST_SETTINGS:
- try:
- self.settings[setting] = str(loaded_config[setting])
- except KeyError:
- continue
+ persisted_value = loaded_config.get(setting)
+ if persisted_value is not None:
+ self.settings[setting] = str(persisted_value)
+
+ # We've loaded any persisted configs, let's verify any
+ # paths which are required for a correct and complete
+ # install
+
+ # - ansible_callback_facts_yaml - Settings from a
+            #   previous run. If the file doesn't exist then we
+ # will just warn about it for now and recollect the
+ # facts.
+ if self.settings.get('ansible_callback_facts_yaml', None) is not None:
+ if not os.path.exists(self.settings['ansible_callback_facts_yaml']):
+ # Cached callback facts file does not exist
+                installer_log.warning("The specified 'ansible_callback_facts_yaml' "
+ "file does not exist (%s)",
+ self.settings['ansible_callback_facts_yaml'])
+ installer_log.debug("Remote system facts will be collected again later")
+ self.settings.pop('ansible_callback_facts_yaml')
for setting in loaded_config['deployment']:
try:
@@ -224,6 +249,8 @@ for information on creating a configuration file or delete {} and re-run the ins
except yaml.scanner.ScannerError:
raise OOConfigFileError(
'Config file "{}" is not a valid YAML document'.format(self.config_path))
+ installer_log.debug("Parsed the config file")
+
def _upgrade_v1_config(self, config):
new_config_data = {}
@@ -237,6 +264,12 @@ for information on creating a configuration file or delete {} and re-run the ins
if config.get('ansible_ssh_user', False):
new_config_data['deployment']['ansible_ssh_user'] = config['ansible_ssh_user']
+ if config.get('variant', False):
+ new_config_data['variant'] = config['variant']
+
+ if config.get('variant_version', False):
+ new_config_data['variant_version'] = config['variant_version']
+
for host in config['hosts']:
host_props = {}
host_props['roles'] = []
@@ -257,11 +290,21 @@ for information on creating a configuration file or delete {} and re-run the ins
return new_config_data
def _set_defaults(self):
+ installer_log.debug("Setting defaults, current OOConfig settings: %s", self.settings)
if 'ansible_inventory_directory' not in self.settings:
self.settings['ansible_inventory_directory'] = self._default_ansible_inv_dir()
+
if not os.path.exists(self.settings['ansible_inventory_directory']):
+ installer_log.debug("'ansible_inventory_directory' does not exist, "
+ "creating it now (%s)",
+ self.settings['ansible_inventory_directory'])
os.makedirs(self.settings['ansible_inventory_directory'])
+ else:
+ installer_log.debug("We think this 'ansible_inventory_directory' "
+ "is OK: %s",
+ self.settings['ansible_inventory_directory'])
+
if 'ansible_plugins_directory' not in self.settings:
self.settings['ansible_plugins_directory'] = \
resource_filename(__name__, 'ansible_plugins')
@@ -269,8 +312,14 @@ for information on creating a configuration file or delete {} and re-run the ins
self.settings['version'] = 'v2'
if 'ansible_callback_facts_yaml' not in self.settings:
+ installer_log.debug("No 'ansible_callback_facts_yaml' in self.settings")
self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
self.settings['ansible_inventory_directory']
+ installer_log.debug("Value: %s", self.settings['ansible_callback_facts_yaml'])
+ else:
+ installer_log.debug("'ansible_callback_facts_yaml' already set "
+ "in self.settings: %s",
+ self.settings['ansible_callback_facts_yaml'])
if 'ansible_ssh_user' not in self.settings:
self.settings['ansible_ssh_user'] = ''
@@ -283,6 +332,8 @@ for information on creating a configuration file or delete {} and re-run the ins
if not self.settings[setting]:
self.settings.pop(setting)
+ installer_log.debug("Updated OOConfig settings: %s", self.settings)
+
def _default_ansible_inv_dir(self):
return os.path.normpath(
os.path.dirname(self.config_path) + "/.ansible")
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index ef7906828..570b48dda 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -6,6 +6,8 @@ import sys
import os
import yaml
from ooinstall.variants import find_variant
+import logging
+installer_log = logging.getLogger('installer')
CFG = None
@@ -216,17 +218,21 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
"""
Retrieves system facts from the remote systems.
"""
+ installer_log.debug("Inside load_system_facts")
FNULL = open(os.devnull, 'w')
args = ['ansible-playbook', '-v'] if verbose \
else ['ansible-playbook']
args.extend([
'--inventory-file={}'.format(inventory_file),
os_facts_path])
+ installer_log.debug("Going to subprocess out to ansible now with these args: %s", ' '.join(args))
status = subprocess.call(args, env=env_vars, stdout=FNULL)
if not status == 0:
+ installer_log.debug("Exit status from subprocess was not 0")
return [], 1
with open(CFG.settings['ansible_callback_facts_yaml'], 'r') as callback_facts_file:
+ installer_log.debug("Going to try to read this file: %s", CFG.settings['ansible_callback_facts_yaml'])
try:
callback_facts = yaml.safe_load(callback_facts_file)
except yaml.YAMLError, exc:
@@ -239,6 +245,7 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
def default_facts(hosts, verbose=False):
global CFG
+ installer_log.debug("Current global CFG vars here: %s", CFG)
inventory_file = generate_inventory(hosts)
os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory)
@@ -250,6 +257,9 @@ def default_facts(hosts, verbose=False):
facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+
+ installer_log.debug("facts_env: %s", facts_env)
+ installer_log.debug("Going to 'load_system_facts' next")
return load_system_facts(inventory_file, os_facts_path, facts_env, verbose)
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index b32370cd5..ce4d772ee 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -11,6 +11,9 @@ to be specified by the user, and to point the generic variants to the latest
version.
"""
+import logging
+installer_log = logging.getLogger('installer')
+
class Version(object):
def __init__(self, name, ansible_key):