-rw-r--r--  .tito/packages/openshift-ansible  2
-rw-r--r--  inventory/byo/hosts.example  9
-rw-r--r--  openshift-ansible.spec  48
-rw-r--r--  playbooks/aws/BUILD_AMI.md  21
-rw-r--r--  playbooks/aws/PREREQUISITES.md  40
-rw-r--r--  playbooks/aws/README.md  140
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml  8
-rw-r--r--  playbooks/aws/openshift-cluster/prerequisites.yml  8
-rw-r--r--  playbooks/aws/openshift-cluster/provisioning_vars.example.yml  28
-rw-r--r--  playbooks/aws/provisioning-inventory.example.ini  25
-rw-r--r--  playbooks/aws/provisioning_vars.yml.example  120
-rw-r--r--  playbooks/byo/openshift-management/config.yml  2
-rw-r--r--  playbooks/byo/openshift-management/uninstall.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/config.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_management.yml  25
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml  6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml  4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml  10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml  10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml  10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml  10
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml  24
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml  4
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml  10
-rw-r--r--  playbooks/common/openshift-management/config.yml  20
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml  6
-rw-r--r--  roles/etcd/tasks/system_container.yml  1
-rw-r--r--  roles/etcd/templates/etcd.conf.j2  4
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py  2
-rw-r--r--  roles/openshift_aws/README.md  77
-rw-r--r--  roles/openshift_logging/README.md  3
-rw-r--r--  roles/openshift_logging_elasticsearch/defaults/main.yml  2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml  178
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2  4
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/default_images.yml  3
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml  3
-rw-r--r--  roles/openshift_logging_eventrouter/templates/eventrouter-template.j2  4
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml  4
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml  4
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2  22
-rw-r--r--  roles/openshift_master/defaults/main.yml  3
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml  3
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2  9
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml  1
-rw-r--r--  roles/openshift_node/defaults/main.yml  3
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml  2
-rw-r--r--  roles/openshift_node/tasks/config/install-node-docker-service-file.yml  8
-rw-r--r--  roles/openshift_node/tasks/install.yml  2
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml  6
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2  2
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml  8
-rw-r--r--  roles/template_service_broker/defaults/main.yml  1
-rw-r--r--  roles/template_service_broker/tasks/install.yml  16
54 files changed, 614 insertions, 356 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index cbfb1fd48..98f63d791 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.148.0 ./
+3.7.0-0.158.0 ./
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 0b6050891..436135bcf 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -811,8 +811,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Or you may optionally define your own build overrides configuration serialized as json
#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
-# Enable template service broker by specifying one of more namespaces whose
-# templates will be served by the broker
+# Enable service catalog
+#openshift_enable_service_catalog=true
+
+# Enable template service broker (requires service catalog to be enabled, above)
+#template_service_broker_install=true
+
+# Configure one or more namespaces whose templates will be served by the TSB
#openshift_template_service_broker_namespaces=['openshift']
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
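Taken together, these options form a small dependency chain: the TSB requires the service catalog. A minimal uncommented fragment enabling both (illustrative, using the namespace default shown above) would be:

```
[OSEv3:vars]
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
```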
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index aee30b29b..2d3dae4b1 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.148.0%{?dist}
+Release: 0.158.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -276,6 +276,52 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Oct 17 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.158.0
+- Refactor openshift-management entry point (rteague@redhat.com)
+- Add switch to enable/disable container engine's audit log being stored in ES.
+ (jkarasek@redhat.com)
+
+* Mon Oct 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.157.0
+- data migration of embedded etcd not allowed (jchaloup@redhat.com)
+- GlusterFS: remove topology reference from deploy-heketi (jarrpa@redhat.com)
+
+* Mon Oct 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.156.0
+- set initial etcd cluster properly during system container scale up
+ (jchaloup@redhat.com)
+
+* Sun Oct 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.155.0
+-
+
+* Sat Oct 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.154.0
+-
+
+* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.153.0
+- default groups.oo_new_etcd_to_config to an empty list (jchaloup@redhat.com)
+
+* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.152.0
+-
+
+* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.151.0
+- updated dynamic provision section for openshift metrics to support storage
+ class name (elvirkuric@gmail.com)
+
+* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.150.0
+- Ensure upgrade playbook exits on health check failures (rteague@redhat.com)
+- Ensure docker is installed for containerized load balancers
+ (mgugino@redhat.com)
+- Fix containerized node service unit placement order (mgugino@redhat.com)
+- Provisioning Documentation Updates (mgugino@redhat.com)
+
+* Thu Oct 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.149.0
+- Fix broken debug_level (mgugino@redhat.com)
+- Ensure host was reached for proper conditional validation
+ (rteague@redhat.com)
+- Ensure docker service status actually changes (mgugino@redhat.com)
+- Display warnings at the end of the control plane upgrade (sdodson@redhat.com)
+- Force reconciliation of role for 3.6 (simo@redhat.com)
+- Remove etcd health check (sdodson@redhat.com)
+- migrate embedded etcd to external etcd (jchaloup@redhat.com)
+
* Wed Oct 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.148.0
- Bug 1490647 - logging-fluentd deployed with openshift_logging_use_mux=false
fails to start due to missing (nhosoi@redhat.com)
diff --git a/playbooks/aws/BUILD_AMI.md b/playbooks/aws/BUILD_AMI.md
new file mode 100644
index 000000000..468264a9a
--- /dev/null
+++ b/playbooks/aws/BUILD_AMI.md
@@ -0,0 +1,21 @@
+# Build AMI
+
+When building an AMI for use in deploying an openshift cluster, these plays
+perform the following steps:
+
+1. Create an instance, using a specified ssh key.
+2. Run openshift-ansible setup roles to ensure packages and services are correctly configured.
+3. Create the AMI.
+4. If encryption is desired:
+ - A KMS key is created with the name of $clusterid.
+ - An encrypted AMI will be produced with the $clusterid KMS key.
+5. Terminate the instance used to configure the AMI.
+
+More AMI specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI, set openshift_aws_ami_encrypt:
+```
+# openshift_aws_ami_encrypt: True # defaults to false
+```
+
+**Note**: This takes the newly created AMI and encrypts it for later use. If encryption is not desired, leave the value at false (the default). The AMI id will be fetched and used according to its most recent creation date.
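Given the inventory and provisioning_vars.yml described in README.md, an encrypted build can be launched by overriding the flag on the command line (an ordinary `-e` override; the variable may equally be set in provisioning_vars.yml):

```
$ ansible-playbook -i inventory.yml build_ami.yml \
    -e @provisioning_vars.yml -e openshift_aws_ami_encrypt=True
```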
diff --git a/playbooks/aws/PREREQUISITES.md b/playbooks/aws/PREREQUISITES.md
new file mode 100644
index 000000000..4f428dcc3
--- /dev/null
+++ b/playbooks/aws/PREREQUISITES.md
@@ -0,0 +1,40 @@
+# Prerequisites
+
+When seeking to deploy a working openshift cluster using these plays, a few
+items must be in place.
+
+These are:
+
+1) A VPC
+2) A security group to build the AMI in
+3) SSH keys to log into instances
+
+These items can be provisioned ahead of time, or you can utilize the plays here
+to create these items.
+
+If you wish to provision these items yourself, or you already have these items
+provisioned and wish to utilize existing components, please refer to
+provisioning_vars.yml.example.
+
+If you wish to have these items created for you, continue with this document.
+
+# Running prerequisites.yml
+
+Warning: Running these plays will provision items in your AWS account (if not
+present), and you may incur billing charges. These plays are not suitable
+for the free-tier.
+
+## Step 1:
+Ensure you have specified all the necessary provisioning variables. See
+provisioning_vars.yml.example and README.md for more information.
+
+## Step 2:
+```
+$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml
+```
+
+This will create a VPC, security group, and SSH key. These plays are idempotent,
+and multiple runs should result in no additional provisioning of these components.
+
+You can also verify that you will successfully utilize existing components with
+these plays.
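If the components already exist, the reuse path boils down to a few toggles from provisioning_vars.yml.example (the names below are placeholders):

```yaml
openshift_aws_create_vpc: false
openshift_aws_vpc_name: my-existing-vpc          # placeholder
openshift_aws_subnet_name: my-existing-subnet    # placeholder
openshift_aws_create_security_groups: false
openshift_aws_ssh_key_name: myuser_key           # an existing EC2 key pair
```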
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 816cb35b4..fbab61189 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -8,6 +8,13 @@ With recent desire for provisioning from customers and developers alike, the AWS
deploy highly scalable Openshift clusters utilizing AWS auto scale groups and
custom AMIs.
+To speed up the provisioning of medium and large clusters, openshift-node
+instances are created using a pre-built AMI. A list of pre-built AMIs will
+be available soon.
+
+If the deployer wishes to build their own AMI for provisioning, instructions
+to do so are provided here.
+
### Where do I start?
Before any provisioning may occur, AWS account credentials must be present in the environment. This can be done in two ways:
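The two ways themselves fall in the unchanged portion of the README between these hunks; in practice they are Boto's standard credential mechanisms, e.g. the environment-variable form (Boto convention, not quoted from this diff):

```
$ export AWS_ACCESS_KEY_ID=<access key id>
$ export AWS_SECRET_ACCESS_KEY=<secret access key>
```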
@@ -31,8 +38,13 @@ Before any provisioning may occur, AWS account credentials must be present in th
### Let's Provision!
-The newly added playbooks are the following:
-- build_ami.yml - Builds a custom AMI. This currently requires the user to supply a valid AMI with access to repositories that contain openshift repositories.
+Warning: Running these plays will provision items in your AWS account (if not
+present), and you may incur billing charges. These plays are not suitable
+for the free-tier.
+
+#### High-level overview
+- prerequisites.yml - Provision VPC, Security Groups, SSH keys, if needed. See PREREQUISITES.md for more information.
+- build_ami.yml - Builds a custom AMI. See BUILD_AMI.md for more information.
- provision.yml - Create a vpc, elbs, security groups, launch config, asg's, etc.
- install.yml - Calls the openshift-ansible installer on the newly created instances
- provision_nodes.yml - Creates the infra and compute node scale groups
@@ -41,82 +53,38 @@ The newly added playbooks are the following:
The current expected work flow should be to provide an AMI with access to Openshift repositories. There should be a repository specified in the `openshift_additional_repos` parameter of the inventory file. The next expectation is a minimal set of values in the `provisioning_vars.yml` file to configure the desired settings for cluster instances. These settings are AWS specific and should be tailored to the consumer's AWS custom account settings.
+Values specified in provisioning_vars.yml may instead be specified in your inventory group_vars
+under the appropriate groups. Most variables can exist in the 'all' group.
+
```yaml
---
-# when creating an AMI set this to True
-# when installing a cluster set this to False
-openshift_node_bootstrap: True
-
-# specify a clusterid
-# openshift_aws_clusterid: default
-
-# specify a region
-# openshift_aws_region: us-east-1
-
-# must specify a base_ami when building an AMI
-# openshift_aws_base_ami: # base image for AMI to build from
-# specify when using a custom AMI
-# openshift_aws_ami:
-
-# when creating an encrypted AMI please specify use_encryption
-# openshift_aws_ami_encrypt: False
-
-# custom certificates are required for the ELB
-# openshift_aws_iam_cert_path: '/path/to/cert/wildcard.<clusterid>.<domain>.com.crt'
-# openshift_aws_iam_cert_key_path: '/path/to/key/wildcard.<clusterid>.<domain>.com.key'
-# openshift_aws_iam_cert_chain_path: '/path/to/ca_cert_file/ca.crt'
-
-# This is required for any ec2 instances
-# openshift_aws_ssh_key_name: myuser_key
-
-# This will ensure these users are created
-#openshift_aws_users:
-#- key_name: myuser_key
-# username: myuser
-# pub_key: |
-# ssh-rsa AAAA
+# Minimum mandatory provisioning variables. See provisioning_vars.yml.example.
+# for more information.
+openshift_deployment_type: # 'origin' or 'openshift-enterprise'
+openshift_release: # example: v3.7
+openshift_pkg_version: # example: -3.7.0
+openshift_aws_ssh_key_name: # example: myuser_key
+openshift_aws_base_ami: # example: ami-12345678
+openshift_aws_iam_cert_path: # example: '/path/to/wildcard.<clusterid>.example.com.crt'
+openshift_aws_iam_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key'
```
If customization is required for the instances, scale groups, or any other configurable option please see the ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`.
-In order to create the bootstrap-able AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles. The exception here is that there will be no hosts specified by the inventory file. Here is an example:
-
-```ini
-[OSEv3:children]
-masters
-nodes
-etcd
-
-[OSEv3:vars]
-################################################################################
-# Ensure these variables are set for bootstrap
-################################################################################
-# openshift_deployment_type is required for installation
-openshift_deployment_type=origin
+In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. The inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as when deploying
+a cluster using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example.
-# required when building an AMI. This will
-# be dependent on the version provided by the yum repository
-openshift_pkg_version=-3.6.0
-
-openshift_master_bootstrap_enabled=True
-
-openshift_hosted_router_wait=False
-openshift_hosted_registry_wait=False
-
-# Repository for installation
-openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', 'baseurl': 'https://mirror.openshift.com/enterprise/enterprise-3.6/latest/x86_64/os/', 'enabled': 'yes', 'gpgcheck': 0, 'sslverify': 'no', 'sslclientcert': '/var/lib/yum/client-cert.pem', 'sslclientkey': '/var/lib/yum/client-key.pem', 'gpgkey': 'https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted'}]
-
-################################################################################
-# cluster specific settings maybe be placed here
+There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
-[masters]
+#### Step 0 (optional)
-[etcd]
+You may provision a VPC, Security Group, and SSH keypair to build the AMI.
-[nodes]
+```
+$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml
```
-There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
+See PREREQUISITES.md for more information.
#### Step 1
@@ -126,24 +94,6 @@ Once the `inventory` and the `provisioning_vars.yml` file has been updated with
$ ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml
```
-1. This script will build a VPC. Default name will be clusterid if not specified.
-2. Create an ssh key required for the instance.
-3. Create a security group.
-4. Create an instance using the key from step 2 or a specified key.
-5. Run openshift-ansible setup roles to ensure packages and services are correctly configured.
-6. Create the AMI.
-7. If encryption is desired
- - A KMS key is created with the name of $clusterid
- - An encrypted AMI will be produced with $clusterid KMS key
-8. Terminate the instance used to configure the AMI.
-
-More AMI specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify use_encryption:
-```
-# openshift_aws_ami_encrypt: True # defaults to false
-```
-
-**Note**: This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false (defaults to false). The AMI id will be fetched and used according to its most recent creation date.
-
#### Step 2
Now that we have created an AMI for our Openshift installation, there are two ways to use the AMI.
@@ -167,16 +117,14 @@ $ ansible-playbook provision.yml -e @provisioning_vars.yml
```
This playbook runs through the following steps:
-1. Ensures a VPC is created.
-2. Ensures a SSH key exists.
-3. Creates an s3 bucket for the registry named $clusterid-docker-registry
-4. Create master security groups.
-5. Create a master launch config.
-6. Create the master auto scaling groups.
-7. If certificates are desired for ELB, they will be uploaded.
-8. Create internal and external master ELBs.
-9. Add newly created masters to the correct groups.
-10. Set a couple of important facts for the masters.
+1. Create an s3 bucket for the registry named $clusterid-docker-registry.
+2. Create master security groups.
+3. Create a master launch config.
+4. Create the master auto scaling groups.
+5. If certificates are desired for ELB, they will be uploaded.
+6. Create internal and external master ELBs.
+7. Add newly created masters to the correct groups.
+8. Set a couple of important facts for the masters.
At this point we have successfully created the infrastructure including the master nodes.
@@ -195,13 +143,13 @@ Once this playbook completes, the cluster masters should be installed and config
#### Step 5
-Now that we have a cluster deployed it will be more interesting to create some node types. This can be done easily with the following playbook:
+Now that we have the cluster masters deployed, we need to deploy our infrastructure and compute nodes:
```
$ ansible-playbook provision_nodes.yml -e @provisioning_vars.yml
```
-Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator.
+Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator in Step 6.
#### Step 6
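Step 6 itself lies outside this hunk. On OpenShift 3.x, approving the nodes' bootstrap certificate requests is typically done with the stock `oc` certificate commands (shown here as a sketch, not taken from this diff):

```
$ oc get csr
$ oc adm certificate approve <csr-name>
```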
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index 1ab1e8041..559a37cbe 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -17,14 +17,6 @@
- name: openshift_aws_region
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
-- include: provision_vpc.yml
-
-- include: provision_ssh_keypair.yml
-
-- include: provision_sec_group.yml
- vars:
- openshift_aws_node_group_type: compute
-
- include: provision_instance.yml
vars:
openshift_aws_node_group_type: compute
diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml
new file mode 100644
index 000000000..df77fe3bc
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/prerequisites.yml
@@ -0,0 +1,8 @@
+---
+- include: provision_vpc.yml
+
+- include: provision_ssh_keypair.yml
+
+- include: provision_sec_group.yml
+ vars:
+ openshift_aws_node_group_type: compute
diff --git a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
deleted file mode 100644
index 28eb9c993..000000000
--- a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# when creating an AMI set this option to True
-# when installing the cluster, set this to False
-openshift_node_bootstrap: True
-
-# specify a clusterid
-#openshift_aws_clusterid: default
-
-# must specify a base_ami when building an AMI
-#openshift_aws_base_ami:
-
-# when creating an encrypted AMI please specify use_encryption
-#openshift_aws_ami_encrypt: False
-
-# custom certificates are required for the ELB
-#openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
-#openshift_aws_iam_key_path: '/path/to/wildcard.<clusterid>.example.com.key'
-#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
-
-# This is required for any ec2 instances
-#openshift_aws_ssh_key_name: myuser_key
-
-# This will ensure these users are created
-#openshift_aws_users:
-#- key_name: myuser_key
-# username: myuser
-# pub_key: |
-# ssh-rsa AAAA
diff --git a/playbooks/aws/provisioning-inventory.example.ini b/playbooks/aws/provisioning-inventory.example.ini
new file mode 100644
index 000000000..238a7eb2f
--- /dev/null
+++ b/playbooks/aws/provisioning-inventory.example.ini
@@ -0,0 +1,25 @@
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+################################################################################
+# Ensure these variables are set for bootstrap
+################################################################################
+# openshift_deployment_type is required for installation
+openshift_deployment_type=origin
+
+openshift_master_bootstrap_enabled=True
+
+openshift_hosted_router_wait=False
+openshift_hosted_registry_wait=False
+
+################################################################################
+# cluster specific settings may be placed here
+
+[masters]
+
+[etcd]
+
+[nodes]
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
new file mode 100644
index 000000000..aa91363ae
--- /dev/null
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -0,0 +1,120 @@
+---
+# Variables that are commented in this file are optional; uncommented variables
+# are mandatory.
+
+# Default values for each variable are provided, as applicable.
+# Example values for mandatory variables are provided as a comment at the end
+# of the line.
+
+# ------------------------ #
+# Common/Cluster Variables #
+# ------------------------ #
+# Variables in this section affect all areas of the cluster
+
+# Deployment type must be specified.
+openshift_deployment_type: # 'origin' or 'openshift-enterprise'
+
+# openshift_release must be specified. Use any version of openshift that is
+# supported by this version of openshift-ansible.
+openshift_release: # v3.7
+
+# This will be dependent on the version provided by the yum repository
+openshift_pkg_version: # -3.7.0
+
+# specify a clusterid
+# This value is also used as the default value for many other components.
+#openshift_aws_clusterid: default
+
+# AWS region
+# This value will instruct the plays where all items should be created.
+# Multi-region deployments are not supported using these plays at this time.
+#openshift_aws_region: us-east-1
+
+#openshift_aws_create_launch_config: true
+#openshift_aws_create_scale_group: true
+
+# --- #
+# VPC #
+# --- #
+
+# openshift_aws_create_vpc defaults to true. If you don't wish to provision
+# a vpc, set this to false.
+#openshift_aws_create_vpc: true
+
+# Name of the vpc. Needs to be set if using a pre-existing vpc.
+#openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
+
+# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing
+# vpc + subnet.
+#openshift_aws_subnet_name:
+
+# -------------- #
+# Security Group #
+# -------------- #
+
+# openshift_aws_create_security_groups defaults to true. If you wish to use
+# an existing security group, set this to false.
+#openshift_aws_create_security_groups: true
+
+# openshift_aws_build_ami_group is the name of the security group to build the
+# ami in. This defaults to the value of openshift_aws_clusterid.
+#openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
+
+# openshift_aws_launch_config_security_groups specifies the security groups to
+# apply to the launch config. The launch config security groups will be what
+# the cluster actually is deployed in.
+#openshift_aws_launch_config_security_groups: see roles/openshift_aws/defaults.yml
+
+# openshift_aws_node_security_groups are created when
+# openshift_aws_create_security_groups is set to true.
+#openshift_aws_node_security_groups: see roles/openshift_aws/defaults.yml
+
+# -------- #
+# ssh keys #
+# -------- #
+
+# Specify the key pair name here to connect to the provisioned instances. This
+# can be an existing key, or it can be one of the keys specified in
+# openshift_aws_users
+openshift_aws_ssh_key_name: # myuser_key
+
+# This will ensure these user and public keys are created.
+#openshift_aws_users:
+#- key_name: myuser_key
+# username: myuser
+# pub_key: |
+# ssh-rsa AAAA
+
+# When building the AMI, specify the user to ssh to the instance as.
+# openshift_aws_build_ami_ssh_user: root
+
+# --------- #
+# AMI Build #
+# --------- #
+# Variables in this section apply to building a node AMI for use in your
+# openshift cluster.
+
+# must specify a base_ami when building an AMI
+openshift_aws_base_ami: # ami-12345678
+
+# when creating an encrypted AMI please set openshift_aws_ami_encrypt
+#openshift_aws_ami_encrypt: False
+
+# -- #
+# S3 #
+# -- #
+
+# Create an s3 bucket.
+#openshift_aws_create_s3: True
+
+# --- #
+# ELB #
+# --- #
+
+# openshift_aws_elb_name will be the base-name of the ELBs.
+#openshift_aws_elb_name: "{{ openshift_aws_clusterid }}"
+
+# custom certificates are required for the ELB
+openshift_aws_iam_cert_path: # '/path/to/wildcard.<clusterid>.example.com.crt'
+openshift_aws_iam_key_path: # '/path/to/wildcard.<clusterid>.example.com.key'
+#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
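For a throwaway cluster without a CA-issued wildcard certificate, a self-signed pair matching these paths can be generated with openssl (illustrative only; use a real certificate for anything public-facing):

```
$ openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
    -subj '/CN=*.mycluster.example.com' \
    -keyout wildcard.mycluster.example.com.key \
    -out wildcard.mycluster.example.com.crt
```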
diff --git a/playbooks/byo/openshift-management/config.yml b/playbooks/byo/openshift-management/config.yml
index 33a555cc1..e8795ef85 100644
--- a/playbooks/byo/openshift-management/config.yml
+++ b/playbooks/byo/openshift-management/config.yml
@@ -1,7 +1,5 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
- include: ../../common/openshift-cluster/evaluate_groups.yml
diff --git a/playbooks/byo/openshift-management/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml
index ebd6fb261..a1fb1cdc4 100644
--- a/playbooks/byo/openshift-management/uninstall.yml
+++ b/playbooks/byo/openshift-management/uninstall.yml
@@ -1,6 +1,4 @@
---
# - include: ../openshift-cluster/initialize_groups.yml
-# tags:
-# - always
- include: ../../common/openshift-management/uninstall.yml
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index dbe09dce2..395eb51f1 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -55,7 +55,7 @@
- include: service_catalog.yml
when: openshift_enable_service_catalog | default(false) | bool
-- include: openshift_management.yml
+- include: ../openshift-management/config.yml
when: openshift_management_install_management | default(false) | bool
- name: Print deprecated variable warning message if necessary
diff --git a/playbooks/common/openshift-cluster/openshift_management.yml b/playbooks/common/openshift-cluster/openshift_management.yml
deleted file mode 100644
index 6e582920b..000000000
--- a/playbooks/common/openshift-cluster/openshift_management.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Management Install Checkpoint Start
- hosts: localhost
- connection: local
- gather_facts: false
- tasks:
- - name: Set Management install 'In Progress'
- set_stats:
- data:
- installer_phase_Management: "In Progress"
- aggregate: false
-
-- name: Management
- include: ../openshift-management/config.yml
-
-- name: Management Install Checkpoint End
- hosts: localhost
- connection: local
- gather_facts: false
- tasks:
- - name: Set Management install 'Complete'
- set_stats:
- data:
- installer_phase_Management: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 72de63070..fc1cbf32a 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -30,6 +30,7 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: " {{ groups['oo_nodes_to_config'] }}"
when:
+ - hostvars[item].openshift is defined
- hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
changed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
index ad6325ca0..2a8de50a2 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
@@ -1,12 +1,14 @@
---
-- name: Verify Host Requirements
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+ any_errors_fatal: true
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: upgrade
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (upgrade)
+ action: openshift_health_check
args:
checks:
- disk_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index d69472fad..5e7a66171 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -41,12 +41,12 @@
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.servicesServingCert.signer.certFile'
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile'
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
yaml_value: service-signer.key
- modify_yaml:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
index ed89dbe8d..763922439 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -14,3 +14,13 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'kubernetesMasterConfig.admissionConfig'
yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
index ed89dbe8d..763922439 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
@@ -14,3 +14,13 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'kubernetesMasterConfig.admissionConfig'
yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
index ed89dbe8d..763922439 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
@@ -14,3 +14,13 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'kubernetesMasterConfig.admissionConfig'
yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
index df59a8782..c21ab97bc 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
@@ -19,3 +19,13 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'controllerConfig.election.lockName'
yaml_value: 'openshift-master-controllers'
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
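Across these upgrade playbooks the modify_yaml tasks converge master-config.yaml on the same signer stanza; after the upgrade the affected section reads:

```yaml
controllerConfig:
  serviceServingCert:
    signer:
      certFile: service-signer.crt
      keyFile: service-signer.key
```

(v3_3 gets there by correcting the earlier `servicesServingCert` key instead of appending new tasks.)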
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
index 2456ad3a8..31362f2f6 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -1,4 +1,17 @@
---
+- name: Check if the master has embedded etcd
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - fail:
+ msg: "Migration of an embedded etcd is not supported. Please migrate the embedded etcd to an external etcd first."
+ when:
+ - groups.oo_etcd_to_config | default([]) | length == 0
+
- name: Run pre-checks
hosts: oo_etcd_to_migrate
tasks:
@@ -60,12 +73,11 @@
hosts: oo_etcd_to_migrate
gather_facts: no
pre_tasks:
- - set_fact:
- l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
- - name: Disable etcd members
- service:
- name: "{{ l_etcd_service }}"
- state: stopped
+ - include_role:
+ name: etcd
+ tasks_from: disable_etcd
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- name: Migrate data on first etcd
hosts: oo_etcd_to_migrate[0]
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index b5ba2bbba..20061366c 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -46,7 +46,7 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_initial_cluster_state: "existing"
- initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
+ etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
etcd_ca_setup: False
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
@@ -71,7 +71,7 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config']))
+ | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index d4addedee..2a703cb61 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -9,6 +9,15 @@
installer_phase_loadbalancer: "In Progress"
aggregate: false
+- name: Configure firewall and docker for load balancers
+ hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
+ vars:
+ openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_docker
+ when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
+
- name: Configure load balancers
hosts: oo_lb_to_config
vars:
@@ -24,7 +33,6 @@
+ openshift_loadbalancer_additional_backends | default([]) }}"
openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
- - role: os_firewall
- role: openshift_loadbalancer
- role: tuned
diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml
index 0aaafe440..908679e81 100644
--- a/playbooks/common/openshift-management/config.yml
+++ b/playbooks/common/openshift-management/config.yml
@@ -1,4 +1,14 @@
---
+- name: Management Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_management: "In Progress"
+ aggregate: false
+
- name: Setup CFME
hosts: oo_first_master
pre_tasks:
@@ -13,3 +23,13 @@
name: openshift_management
vars:
template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"
+
+- name: Management Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'Complete'
+ set_stats:
+ data:
+ installer_phase_management: "Complete"
+ aggregate: false
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index fdc6cd24a..a79600930 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -36,6 +36,12 @@
state: present
when: not openshift.common.is_atomic | bool
+- name: Check we are not using node as a Docker container with CRI-O
+ fail: msg='Cannot use CRI-O with node configured as a Docker container'
+ when:
+ - openshift.common.is_containerized | bool
+ - not openshift.common.is_node_system_container | bool
+
# Used to pull and install the system container
- name: Ensure atomic is installed
package:
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index e735bf50a..024479fb4 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -17,6 +17,7 @@
{{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }},
{%- endif -%}
{% endfor -%}
+ when: etcd_initial_cluster is undefined
- name: Check etcd system container package
command: >
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 8462bb4c8..3027a9447 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -29,8 +29,8 @@ ETCD_INITIAL_CLUSTER={{ etcd_hostname}}={{ etcd_initial_advertise_peer_urls }}
ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
ETCD_INITIAL_CLUSTER_TOKEN=thirdparty-etcd-cluster-1
{% else %}
-{% if initial_etcd_cluster is defined and initial_etcd_cluster %}
-ETCD_INITIAL_CLUSTER={{ initial_etcd_cluster }}
+{% if etcd_initial_cluster is defined and etcd_initial_cluster %}
+ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }}
{% else %}
ETCD_INITIAL_CLUSTER={{ initial_cluster() }}
{% endif %}
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index ac369b882..25f9405af 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -136,7 +136,7 @@ class CallbackModule(CallbackBase):
},
'installer_phase_management': {
'title': 'Management Install',
- 'playbook': 'playbooks/common/openshift-cluster/openshift_management.yml'
+ 'playbook': 'playbooks/byo/openshift-management/config.yml'
},
}
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
index ff96081fe..4aca5c7a8 100644
--- a/roles/openshift_aws/README.md
+++ b/roles/openshift_aws/README.md
@@ -1,7 +1,29 @@
openshift_aws
==================================
-Provision AWS infrastructure helpers.
+Provision AWS infrastructure and instances.
+
+This role contains many task-areas to provision resources and perform actions
+against an AWS account for the purposes of dynamically building an openshift
+cluster.
+
+This role is primarily intended to be used with "include_role" and "tasks_from".
+
+include_role can be called from the tasks section in a play. See example
+playbook below for reference.
+
+These task-areas are:
+
+* provision a vpc: vpc.yml
+* provision elastic load balancers: elb.yml
+* upload IAM ssl certificates to use with load balancers: iam_cert.yml
+* provision an S3 bucket: s3.yml
+* provision an instance to build an AMI: provision_instance.yml
+* provision a security group in AWS: security_group.yml
+* provision ssh keys and users in AWS: ssh_keys.yml
+* provision an AMI in AWS: seal_ami.yml
+* provision scale groups: scale_group.yml
+* provision launch configs: launch_config.yml
Requirements
------------
@@ -9,56 +31,9 @@ Requirements
* Ansible 2.3
* Boto
-Role Variables
---------------
-
-From this role:
-
-| Name | Default value
-|---------------------------------------------------|-----------------------
-| openshift_aws_clusterid | default
-| openshift_aws_elb_scheme | internet-facing
-| openshift_aws_launch_config_bootstrap_token | ''
-| openshift_aws_node_group_config | {'master': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_master_volumes }}', 'tags': {'host-type': 'master', 'sub-host-type': 'default'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'wait_for_instances': True, 'max_size': 3}, 'tags': '{{ openshift_aws_node_group_config_tags }}', 'compute': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'compute'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'max_size': 100}, 'infra': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'infra'}, 'min_size': 2, 'instance_type': 'm4.xlarge', 'desired_size': 2, 'max_size': 20}}
-| openshift_aws_ami_copy_wait | False
-| openshift_aws_users | []
-| openshift_aws_launch_config_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
-| openshift_aws_node_group_type | master
-| openshift_aws_elb_cert_arn | ''
-| openshift_aws_kubernetes_cluster_status | owned
-| openshift_aws_s3_mode | create
-| openshift_aws_vpc | {'subnets': {'us-east-1': [{'cidr': '172.31.48.0/20', 'az': 'us-east-1c'}, {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'}, {'cidr': '172.31.16.0/20', 'az': 'us-east-1a'}]}, 'cidr': '172.31.0.0/16', 'name': '{{ openshift_aws_vpc_name }}'}
-| openshift_aws_create_ssh_keys | False
-| openshift_aws_iam_kms_alias | alias/{{ openshift_aws_clusterid }}_kms
-| openshift_aws_use_custom_ami | False
-| openshift_aws_ami_copy_src_region | {{ openshift_aws_region }}
-| openshift_aws_s3_bucket_name | {{ openshift_aws_clusterid }}
-| openshift_aws_elb_health_check | {'response_timeout': 5, 'ping_port': 443, 'ping_protocol': 'tcp', 'interval': 30, 'healthy_threshold': 2, 'unhealthy_threshold': 2}
-| openshift_aws_node_security_groups | {'default': {'rules': [{'to_port': 22, 'from_port': 22, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 'all', 'from_port': 'all', 'proto': 'all', 'group_name': '{{ openshift_aws_clusterid }}'}], 'name': '{{ openshift_aws_clusterid }}', 'desc': '{{ openshift_aws_clusterid }} default'}, 'master': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_master', 'desc': '{{ openshift_aws_clusterid }} master instances'}, 'compute': {'name': '{{ openshift_aws_clusterid }}_compute', 'desc': '{{ openshift_aws_clusterid }} compute node instances'}, 'etcd': {'name': '{{ openshift_aws_clusterid }}_etcd', 'desc': '{{ openshift_aws_clusterid }} etcd instances'}, 'infra': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 32000, 'from_port': 30000, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_infra', 'desc': '{{ openshift_aws_clusterid }} infra node instances'}}
-| openshift_aws_elb_security_groups | ['{{ openshift_aws_clusterid }}', '{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}']
-| openshift_aws_vpc_tags | {'Name': '{{ openshift_aws_vpc_name }}'}
-| openshift_aws_create_security_groups | False
-| openshift_aws_create_iam_cert | False
-| openshift_aws_create_scale_group | True
-| openshift_aws_ami_encrypt | False
-| openshift_aws_node_group_config_node_volumes | [{'volume_size': 100, 'delete_on_termination': True, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
-| openshift_aws_elb_instance_filter | {'tag:host-type': '{{ openshift_aws_node_group_type }}', 'tag:clusterid': '{{ openshift_aws_clusterid }}', 'instance-state-name': 'running'}
-| openshift_aws_region | us-east-1
-| openshift_aws_elb_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
-| openshift_aws_elb_idle_timout | 400
-| openshift_aws_subnet_name | us-east-1c
-| openshift_aws_node_group_config_tags | {{ openshift_aws_clusterid | openshift_aws_build_instance_tags(openshift_aws_kubernetes_cluster_status) }}
-| openshift_aws_create_launch_config | True
-| openshift_aws_ami_tags | {'bootstrap': 'true', 'clusterid': '{{ openshift_aws_clusterid }}', 'openshift-created': 'true'}
-| openshift_aws_ami_name | openshift-gi
-| openshift_aws_node_group_config_master_volumes | [{'volume_size': 100, 'delete_on_termination': False, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
-| openshift_aws_vpc_name | {{ openshift_aws_clusterid }}
-| openshift_aws_elb_listeners | {'master': {'internal': [{'instance_port': 80, 'instance_protocol': 'tcp', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'tcp', 'load_balancer_port': 443, 'protocol': 'tcp'}], 'external': [{'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 443, 'ssl_certificate_id': '{{ openshift_aws_elb_cert_arn }}', 'protocol': 'ssl'}]}}
-|
-
-
-Dependencies
-------------
+Appropriate AWS credentials and permissions are required.
+
+
Example Playbook
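The example playbook itself sits in the unchanged portion of the README; based on the task-areas listed above, an include_role call takes roughly this shape (values are placeholders):

```yaml
- name: provision a vpc for the cluster
  hosts: localhost
  connection: local
  tasks:
  - include_role:
      name: openshift_aws
      tasks_from: vpc.yml
    vars:
      openshift_aws_clusterid: mycluster   # placeholder
      openshift_aws_region: us-east-1
```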
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 829c78728..69eb9283d 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -69,6 +69,9 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_fluentd_buffer_size_limit`: Buffer chunk limit for Fluentd. Defaults to 1m.
- `openshift_logging_fluentd_file_buffer_limit`: Fluentd will set the value to the file buffer limit. Defaults to '1Gi' per destination.
+- `openshift_logging_fluentd_audit_container_engine`: When set to `True`, the audit log of the container engine will be collected and stored in ES (see the example below).
+- `openshift_logging_fluentd_audit_file`: Location of the audit log file. The default is `/var/log/audit/audit.log`.
+- `openshift_logging_fluentd_audit_pos_file`: Location of the fluentd in_tail position file for the audit log file. The default is `/var/log/audit/audit.log.pos`.
- `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'.
- `openshift_logging_es_port`: The port for the ES service Fluentd should sent its logs to. Defaults to '9200'.
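A sketch of a variables fragment enabling the container-engine audit switch documented above (the two path variables are shown commented at their defaults):

```yaml
openshift_logging_fluentd_audit_container_engine: True
#openshift_logging_fluentd_audit_file: /var/log/audit/audit.log
#openshift_logging_fluentd_audit_pos_file: /var/log/audit/audit.log.pos
```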
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
index 554aa5bb2..fc48b7f71 100644
--- a/roles/openshift_logging_elasticsearch/defaults/main.yml
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -40,8 +40,6 @@ openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_
# config the es plugin to write kibana index based on the index mode
openshift_logging_elasticsearch_kibana_index_mode: 'unique'
-openshift_logging_elasticsearch_proxy_image_prefix: "openshift/oauth-proxy"
-openshift_logging_elasticsearch_proxy_image_version: "v1.0.0"
openshift_logging_elasticsearch_proxy_cpu_limit: "100m"
openshift_logging_elasticsearch_proxy_memory_limit: "64Mi"
openshift_logging_elasticsearch_prometheus_sa: "system:serviceaccount:{{openshift_prometheus_namespace | default('prometheus')}}:prometheus"
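The removed hard-coded defaults migrate into per-deployment-type vars files loaded by the include_vars task in the next hunk; given the removed values and the underscore-prefixed names referenced there, the origin defaults presumably read:

```yaml
---
__openshift_logging_elasticsearch_proxy_image_prefix: "openshift/oauth-proxy"
__openshift_logging_elasticsearch_proxy_image_version: "v1.0.0"
```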
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index df2c17aa0..aeff2d198 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -17,6 +17,17 @@
- include: determine_version.yaml
+- name: Set default image variables based on deployment_type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: Set elasticsearch_prefix image facts
+ set_fact:
+ openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_elasticsearch_proxy_image_prefix | default(__openshift_logging_elasticsearch_proxy_image_prefix) }}"
+ openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_elasticsearch_proxy_image_version | default(__openshift_logging_elasticsearch_proxy_image_version) }}"
+
# allow passing in a tempdir
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
@@ -52,7 +63,7 @@
name: "aggregated-logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
when:
- - openshift_logging_image_pull_secret == ''
+ - openshift_logging_image_pull_secret == ''
# rolebinding reader
- copy:
@@ -66,7 +77,7 @@
kind: clusterrole
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - "{{ tempdir }}/rolebinding-reader.yml"
+ - "{{ tempdir }}/rolebinding-reader.yml"
delete_after: true
# SA roles
@@ -107,8 +118,8 @@
- fail:
msg: "There was an error creating the logging-metrics-role and binding: {{prometheus_out}}"
when:
- - "prometheus_out.stderr | length > 0"
- - "'already exists' not in prometheus_out.stderr"
+ - "prometheus_out.stderr | length > 0"
+ - "'already exists' not in prometheus_out.stderr"
# View role and binding
- name: Generate logging-elasticsearch-view-role
@@ -120,8 +131,8 @@
roleRef:
name: view
subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
changed_when: no
- name: Set logging-elasticsearch-view-role role
@@ -131,18 +142,18 @@
kind: rolebinding
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
+ - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
delete_after: true
# configmap
- assert:
that:
- - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
+ - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
msg: "The openshift_logging_elasticsearch_kibana_index_mode '{{ openshift_logging_elasticsearch_kibana_index_mode }}' only supports one of: {{ __kibana_index_modes | join(', ') }}"
- assert:
that:
- - "{{ openshift_logging_es_log_appenders | length > 0 }}"
+ - "{{ openshift_logging_es_log_appenders | length > 0 }}"
msg: "The openshift_logging_es_log_appenders '{{ openshift_logging_es_log_appenders }}' has an unrecognized option and only supports the following as a list: {{ __es_log_appenders | join(', ') }}"
- template:
@@ -194,22 +205,22 @@
name: "logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - name: key
- path: "{{ generated_certs_dir }}/logging-es.jks"
- - name: truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: searchguard.key
- path: "{{ generated_certs_dir }}/elasticsearch.jks"
- - name: searchguard.truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: admin-key
- path: "{{ generated_certs_dir }}/system.admin.key"
- - name: admin-cert
- path: "{{ generated_certs_dir }}/system.admin.crt"
- - name: admin-ca
- path: "{{ generated_certs_dir }}/ca.crt"
- - name: admin.jks
- path: "{{ generated_certs_dir }}/system.admin.jks"
+ - name: key
+ path: "{{ generated_certs_dir }}/logging-es.jks"
+ - name: truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: searchguard.key
+ path: "{{ generated_certs_dir }}/elasticsearch.jks"
+ - name: searchguard.truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: admin-key
+ path: "{{ generated_certs_dir }}/system.admin.key"
+ - name: admin-cert
+ path: "{{ generated_certs_dir }}/system.admin.crt"
+ - name: admin-ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: admin.jks
+ path: "{{ generated_certs_dir }}/system.admin.jks"
# services
- name: Set logging-{{ es_component }}-cluster service
@@ -223,7 +234,7 @@
labels:
logging-infra: 'support'
ports:
- - port: 9300
+ - port: 9300
- name: Set logging-{{ es_component }} service
oc_service:
@@ -236,8 +247,8 @@
labels:
logging-infra: 'support'
ports:
- - port: 9200
- targetPort: "restapi"
+ - port: 9200
+ targetPort: "restapi"
- name: Set logging-{{ es_component}}-prometheus service
oc_service:
@@ -247,9 +258,9 @@
labels:
logging-infra: 'support'
ports:
- - name: proxy
- port: 443
- targetPort: 4443
+ - name: proxy
+ port: 443
+ targetPort: 4443
selector:
component: "{{ es_component }}-prometheus"
provider: openshift
@@ -277,46 +288,46 @@
# so we check for the presence of 'stderr' to determine whether the object exists
# the RC is 0 both when the object exists and when it does not
- when:
- - logging_elasticsearch_pvc.results.stderr is defined
- - openshift_logging_elasticsearch_storage_type == "pvc"
+ - logging_elasticsearch_pvc.results.stderr is defined
+ - openshift_logging_elasticsearch_storage_type == "pvc"
block:
- # storageclasses are used by default but if static then disable
- # storageclasses with the storageClassName set to "" in pvc.j2
- - name: Creating ES storage template - static
- template:
- src: pvc.j2
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
- when:
- - not openshift_logging_elasticsearch_pvc_dynamic | bool
-
- # Storageclasses are used by default if configured
- - name: Creating ES storage template - dynamic
- template:
- src: pvc.j2
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- when:
- - openshift_logging_elasticsearch_pvc_dynamic | bool
-
- - name: Set ES storage
- oc_obj:
- state: present
- kind: pvc
- name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- namespace: "{{ openshift_logging_elasticsearch_namespace }}"
- files:
- - "{{ tempdir }}/templates/logging-es-pvc.yml"
- delete_after: true
+ # storageclasses are used by default but if static then disable
+ # storageclasses with the storageClassName set to "" in pvc.j2
+ - name: Creating ES storage template - static
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
+ when:
+ - not openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ # Storageclasses are used by default if configured
+ - name: Creating ES storage template - dynamic
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ when:
+ - openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ - name: Set ES storage
+ oc_obj:
+ state: present
+ kind: pvc
+ name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/templates/logging-es-pvc.yml"
+ delete_after: true
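The static/dynamic pair above renders at most one template: with openshift_logging_elasticsearch_pvc_dynamic false, storageClassName is written out explicitly (an empty string disables storage classes in pvc.j2, per the comment), otherwise the cluster's default storage class is used. A sketch of the inventory toggles involved, with placeholder values:

    openshift_logging_elasticsearch_pvc_dynamic: False
    openshift_logging_elasticsearch_pvc_name: logging-es-0
    openshift_logging_elasticsearch_pvc_size: 50Gi          # an empty value falls back to 10Gi via the ternary above
    openshift_logging_elasticsearch_pvc_storage_class_name: ""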
- set_fact:
es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"
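A note on the suffix: oo_random_word is one of this repo's custom filter plugins; "8 | oo_random_word(alphabet)" yields an 8-character string drawn from the given alphabet, so each deployment gets a unique DC name, e.g. (suffix illustrative only):

    es_deploy_name: logging-es-data-master-4fz09k1q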
@@ -337,6 +348,7 @@
logging_component: elasticsearch
deploy_name: "{{ es_deploy_name }}"
image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"
+ proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}"
es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"
es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
@@ -352,7 +364,7 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: dc
files:
- - "{{ tempdir }}/templates/logging-es-dc.yml"
+ - "{{ tempdir }}/templates/logging-es-dc.yml"
delete_after: true
- name: Retrieving the cert to use when generating secrets for the {{ es_component }} component
@@ -360,37 +372,37 @@
src: "{{ generated_certs_dir }}/{{ item.file }}"
register: key_pairs
with_items:
- - { name: "ca_file", file: "ca.crt" }
- - { name: "es_key", file: "system.logging.es.key" }
- - { name: "es_cert", file: "system.logging.es.crt" }
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "es_key", file: "system.logging.es.key" }
+ - { name: "es_cert", file: "system.logging.es.crt" }
when: openshift_logging_es_allow_external | bool
- set_fact:
es_key: "{{ lookup('file', openshift_logging_es_key) | b64encode }}"
when:
- - openshift_logging_es_key | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_key | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_cert: "{{ lookup('file', openshift_logging_es_cert) | b64encode }}"
when:
- - openshift_logging_es_cert | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_cert | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ lookup('file', openshift_logging_es_ca_ext) | b64encode }}"
when:
- - openshift_logging_es_ca_ext | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_ca_ext | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}"
when:
- - es_ca is not defined
- - openshift_logging_es_allow_external | bool
+ - es_ca is not defined
+ - openshift_logging_es_allow_external | bool
changed_when: false
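Taken together, the set_fact chain above prefers operator-supplied key/cert/CA files (read via lookup('file', ...) on the control host and base64-encoded) and falls back to the generated CA when no external CA is given. A sketch of the inventory variables involved, with placeholder paths:

    openshift_logging_es_allow_external: True
    openshift_logging_es_key: /path/to/external-es.key
    openshift_logging_es_cert: /path/to/external-es.crt
    openshift_logging_es_ca_ext: /path/to/external-ca.crt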
- name: Generating Elasticsearch {{ es_component }} route template
@@ -421,7 +433,7 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: route
files:
- - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
+ - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
when: openshift_logging_es_allow_external | bool
## Placeholder for migration when necessary ##
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 1ed886627..ce3b2eb83 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -40,7 +40,7 @@ spec:
{% endif %}
containers:
- name: proxy
- image: {{openshift_logging_elasticsearch_proxy_image_prefix}}:{{openshift_logging_elasticsearch_proxy_image_version}}
+ image: {{ proxy_image }}
imagePullPolicy: Always
args:
- --upstream-ca=/etc/elasticsearch/secret/admin-ca
@@ -86,7 +86,7 @@ spec:
requests:
memory: "{{es_memory_limit}}"
{% if es_container_security_context %}
- securityContext: {{ es_container_security_context | to_yaml }}
+ securityContext: {{ es_container_security_context | to_yaml }}
{% endif %}
ports:
-
diff --git a/roles/openshift_logging_elasticsearch/vars/default_images.yml b/roles/openshift_logging_elasticsearch/vars/default_images.yml
new file mode 100644
index 000000000..b7d105caf
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/vars/default_images.yml
@@ -0,0 +1,3 @@
+---
+__openshift_logging_elasticsearch_proxy_image_prefix: "docker.io/openshift/"
+__openshift_logging_elasticsearch_proxy_image_version: "v1.0.0"
diff --git a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..c87d48e27
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
@@ -0,0 +1,3 @@
+---
+__openshift_logging_elasticsearch_proxy_image_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_logging_elasticsearch_proxy_image_version: "v3.7"
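These two vars files give the oauth-proxy sidecar a per-deployment-type default, and the play now composes the full reference into proxy_image (prefix + "oauth-proxy:" + version, per the dc vars above). With the enterprise vars the container image renders as:

    registry.access.redhat.com/openshift3/oauth-proxy:v3.7

while origin deployments pull docker.io/openshift/oauth-proxy:v1.0.0.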
diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
index 9ff4c7e80..ea1fd3efd 100644
--- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
+++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
@@ -54,9 +54,9 @@ objects:
serviceAccount: aggregated-logging-eventrouter
serviceAccountName: aggregated-logging-eventrouter
{% if node_selector is iterable and node_selector | length > 0 %}
- nodeSelector:
+ nodeSelector:
{% for key, value in node_selector.iteritems() %}
- {{ key }}: "{{ value }}"
+ {{ key }}: "{{ value }}"
{% endfor %}
{% endif %}
containers:
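One caveat in the surrounding context: the loop uses node_selector.iteritems(), which exists only on Python 2 dicts. If this template is ever rendered under a Python 3 interpreter, a portable form would be (a sketch, not part of this change):

    {% for key, value in node_selector.items() %}
        {{ key }}: "{{ value }}"
    {% endfor %}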
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index 82326bdd1..25f7580a4 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -56,3 +56,7 @@ openshift_logging_fluentd_aggregating_passphrase: none
#fluentd_secureforward_contents:
openshift_logging_fluentd_file_buffer_limit: 1Gi
+
+# Configure fluentd to tail the audit log file and filter out the container engine's logs from it
+# These logs are then stored in the ES operations index
+openshift_logging_fluentd_audit_container_engine: False
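Enabling the new toggle, together with the two related file variables wired up in tasks/main.yaml below, would look roughly like this in an inventory (the file paths are placeholders, not shipped defaults):

    openshift_logging_fluentd_audit_container_engine: True
    openshift_logging_fluentd_audit_file: /var/log/audit/audit.log
    openshift_logging_fluentd_audit_pos_file: /var/log/audit/audit.log.pos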
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 37960afd1..06bb35dbc 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -108,7 +108,6 @@
src: secure-forward.conf
dest: "{{ tempdir }}/secure-forward.conf"
when: fluentd_secureforward_contents is undefined
-
changed_when: no
- copy:
@@ -173,6 +172,9 @@
ops_port: "{{ openshift_logging_fluentd_ops_port }}"
fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
+ audit_container_engine: "{{ openshift_logging_fluentd_audit_container_engine | default(False) | bool }}"
+ audit_log_file: "{{ openshift_logging_fluentd_audit_file | default() }}"
+ audit_pos_log_file: "{{ openshift_logging_fluentd_audit_pos_file | default() }}"
check_mode: no
changed_when: no
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index f286b0656..644b70031 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -172,6 +172,28 @@ spec:
value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}"
{% endif %}
+{% if audit_container_engine %}
+ - name: "AUDIT_CONTAINER_ENGINE"
+ value: "{{ audit_container_engine | lower }}"
+{% endif %}
+
+{% if audit_container_engine %}
+ - name: "NODE_NAME"
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+{% endif %}
+
+{% if audit_log_file != '' %}
+ - name: AUDIT_FILE
+ value: "{{ audit_log_file }}"
+{% endif %}
+
+{% if audit_pos_log_file != '' %}
+ - name: AUDIT_POS_FILE
+ value: "{{ audit_pos_log_file }}"
+{% endif %}
+
volumes:
- name: runlogjournal
hostPath:
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 7e62a8c6d..f861a8e4d 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -51,3 +51,6 @@ r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}"
r_openshift_master_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_plugin_name_default }}"
+
+openshift_master_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
+openshift_master_image_config_latest: "{{ openshift_master_image_config_latest_default }}"
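The two-step default means one inventory switch can flip imageConfig.latest cluster-wide (the openshift_node role gains the matching pair later in this diff) while still allowing a per-role override; a sketch:

    # cluster-wide:
    openshift_image_config_latest: True
    # or master only:
    openshift_master_image_config_latest: True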
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index c6abaeb9b..fcc66044b 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -1,4 +1,7 @@
---
+# systemd_units.yml is included both in the openshift_master role and in the upgrade
+# playbooks.
+
- include: upgrade_facts.yml
when: openshift_master_defaults_in_use is not defined
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 9b3fbcf49..7159ccc7f 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -115,7 +115,7 @@ etcdStorageConfig:
openShiftStorageVersion: v1
imageConfig:
format: {{ openshift.master.registry_url }}
- latest: false
+ latest: {{ openshift_master_image_config_latest }}
{% if 'image_policy_config' in openshift.master %}
imagePolicyConfig:{{ openshift.master.image_policy_config | to_padded_yaml(level=1) }}
{% endif %}
@@ -275,12 +275,5 @@ servingInfo:
- {{ cipher_suite }}
{% endfor %}
{% endif %}
-{% if openshift_template_service_broker_namespaces is defined %}
-templateServiceBrokerConfig:
- templateNamespaces:
-{% for namespace in openshift_template_service_broker_namespaces %}
- - {{ namespace }}
-{% endfor %}
-{% endif %}
volumeConfig:
dynamicProvisioningEnabled: {{ openshift.master.dynamic_provisioning_enabled }}
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 7928a0346..48584bd64 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -54,6 +54,7 @@
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
+    storage_class_name: "{{ openshift_metrics_cassandra_pvc_storage_class_name | default('', true) }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when: openshift_metrics_cassandra_storage_type == 'dynamic'
changed_when: false
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 298d1013f..739b0d968 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -107,3 +107,6 @@ openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}"
openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
+
+openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
+openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}"
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index b83b2c452..6bd2df362 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -29,7 +29,7 @@
line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}"
regexp: "^ExecStart=.*"
-- name: "systemctl enable {{ openshift_service_type }}-node"
+- name: "disable {{ openshift_service_type }}-node and {{ openshift_service_type }}-master services"
systemd:
name: "{{ item }}"
enabled: no
diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
deleted file mode 100644
index f92ff79b5..000000000
--- a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Install Node docker service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
- src: openshift.docker.node.service
- notify:
- - reload systemd units
- - restart node
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 265bf2c46..1539d6e3b 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -27,5 +27,3 @@
docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
-
- - include: config/install-node-docker-service-file.yml
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 6b4490f61..9c182ade6 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,11 +1,9 @@
---
-# This file is included both in the openshift_master role and in the upgrade
-# playbooks.
- name: Install Node service file
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
- src: "node.service.j2"
- when: not openshift.common.is_containerized | bool
+ src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
+ when: not openshift.common.is_node_system_container | bool
notify:
- reload systemd units
- restart node
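The ternary filter collapses the old containerized/non-containerized branches into a single task: it returns its first argument when the test is truthy. A minimal sketch of the semantics:

    "{{ true  | ternary('openshift.docker.node.service', 'node.service.j2') }}"   # -> openshift.docker.node.service
    "{{ false | ternary('openshift.docker.node.service', 'node.service.j2') }}"   # -> node.service.j2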
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 7049f7189..08e1c7f4f 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -13,7 +13,7 @@ dockerConfig:
iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}"
imageConfig:
format: {{ openshift.node.registry_url }}
- latest: false
+ latest: {{ openshift_node_image_config_latest }}
kind: NodeConfig
kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
{% if openshift_use_crio | default(False) %}
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
index 9ebb0d5ec..7b705c2d4 100644
--- a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
@@ -85,8 +85,6 @@ objects:
volumeMounts:
- name: db
mountPath: /var/lib/heketi
- - name: topology
- mountPath: ${TOPOLOGY_PATH}
- name: config
mountPath: /etc/heketi
readinessProbe:
@@ -103,9 +101,6 @@ objects:
port: 8080
volumes:
- name: db
- - name: topology
- secret:
- secretName: heketi-${CLUSTER_NAME}-topology-secret
- name: config
secret:
secretName: heketi-${CLUSTER_NAME}-config-secret
@@ -138,6 +133,3 @@ parameters:
displayName: GlusterFS cluster name
description: A unique name to identify this heketi service, useful for running multiple heketi instances
value: glusterfs
-- name: TOPOLOGY_PATH
- displayName: heketi topology file location
- required: True
diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml
index fb407c4a2..a92a138b0 100644
--- a/roles/template_service_broker/defaults/main.yml
+++ b/roles/template_service_broker/defaults/main.yml
@@ -2,3 +2,4 @@
# placeholder file?
template_service_broker_remove: False
template_service_broker_install: False
+openshift_template_service_broker_namespaces: ['openshift']
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index f5fd6487c..54008bbf1 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -28,10 +28,24 @@
- "{{ __tsb_template_file }}"
- "{{ __tsb_rbac_file }}"
- "{{ __tsb_broker_file }}"
+ - "{{ __tsb_config_file }}"
+
+- yedit:
+ src: "{{ mktemp.stdout }}/{{ __tsb_config_file }}"
+ key: templateNamespaces
+ value: "{{ openshift_template_service_broker_namespaces }}"
+ value_type: list
+
+- slurp:
+ src: "{{ mktemp.stdout }}/{{ __tsb_config_file }}"
+ register: config
- name: Apply template file
shell: >
- oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ lookup('file', __tsb_files_location ~ '/' ~ __tsb_config_file) }}" --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}" | kubectl apply -f -
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
+ --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
+ --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
+ | kubectl apply -f -
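The new flow writes templateNamespaces into the copied config file with yedit, then reads it back with slurp, whose return value is base64-encoded, hence the b64decode before the content is handed to oc process. Overriding the namespace list is now a plain inventory variable; a sketch, with the second entry hypothetical:

    openshift_template_service_broker_namespaces: ['openshift', 'my-template-ns']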
# reconcile with rbac
- name: Reconcile with RBAC file