86 files changed, 1778 insertions, 231 deletions
diff --git a/.dockerignore b/.dockerignore
index 0a70c5bfa..2509d48b5 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -2,7 +2,7 @@
 bin
 docs
 hack
-inventory
+inventory/hosts.*
 test
 utils
 **/*.md
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 065cf9668..d6dd5a3c8 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.22.0 ./
+3.9.0-0.23.0 ./
diff --git a/files/origin-components/console-config.yaml b/files/origin-components/console-config.yaml
index 32a28775f..55c650fbe 100644
--- a/files/origin-components/console-config.yaml
+++ b/files/origin-components/console-config.yaml
@@ -12,6 +12,7 @@ extensions:
   properties: null
 features:
   inactivityTimeoutMinutes: 0
+  clusterResourceOverridesEnabled: false
 servingInfo:
   bindAddress: 0.0.0.0:8443
   bindNetwork: tcp4
diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile
index b1390480a..22a0d06a0 100644
--- a/images/installer/Dockerfile
+++ b/images/installer/Dockerfile
@@ -8,12 +8,14 @@ USER root
 COPY images/installer/origin-extra-root /
 
 # install ansible and deps
-RUN INSTALL_PKGS="python-lxml pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
+RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
  && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
  && EPEL_PKGS="ansible python2-boto python2-boto3 google-cloud-sdk-183.0.0 which" \
  && yum install -y epel-release \
  && yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
- && rpm -V $INSTALL_PKGS $EPEL_PKGS \
+ && EPEL_TESTING_PKGS="python2-libcloud" \
+ && yum install -y --enablerepo=epel-testing --setopt=tsflags=nodocs $EPEL_TESTING_PKGS \
+ && rpm -V $INSTALL_PKGS $EPEL_PKGS $EPEL_TESTING_PKGS \
  && yum clean all
 
 LABEL name="openshift/origin-ansible" \
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 05df6b43a..3b05c1aa6 100644
--- a/images/installer/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -5,7 +5,7 @@ MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
 USER root
 
 # Playbooks, roles, and their dependencies are installed from packages.
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
  && yum repolist > /dev/null \
  && yum-config-manager --enable rhel-7-server-ose-3.7-rpms \
  && yum-config-manager --enable rhel-7-server-rh-common-rpms \
diff --git a/images/installer/root/usr/local/bin/entrypoint-gcp b/images/installer/root/usr/local/bin/entrypoint-gcp
new file mode 100755
index 000000000..d0ffd9904
--- /dev/null
+++ b/images/installer/root/usr/local/bin/entrypoint-gcp
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# This file sets up the user to run in the GCP environment.
+# It provides a dynamic inventory that works well when run in
+# a container environment by setting up a default inventory.
+# It assumes the user has provided a GCP service account token
+# and ssh-privatekey file at "$(pwd)/inventory/dynamic/injected"
+# and automatically links any YAML files found into the group
+# vars directory, which allows the playbook to more easily be
+# run in containerized contexts.
+
+WORK=$(pwd)
+FILES="${WORK}/inventory/dynamic/injected"
+
+# Patch the /etc/passwd file with the current user info.
+# The current user's entry must be correctly defined in this file in order for
+# the `ssh` command to work within the created container.
+if ! whoami &>/dev/null; then
+  echo "${USER:-default}:x:$(id -u):$(id -g):Default User:$HOME:/sbin/nologin" >> /etc/passwd
+fi
+
+# Provide a "files_dir" variable that points to inventory/dynamic/injected
+echo "files_dir: \"${FILES}\"" > "${WORK}/inventory/dynamic/gcp/group_vars/all/00_default_files_dir.yml"
+# Add any injected variable files into the group vars directory
+find "${FILES}" -name '*.yml' -or -name '*.yaml' -or -name vars | xargs -L1 -I {} ln -fs {} "${WORK}/inventory/dynamic/gcp/group_vars/all"
+# Avoid sudo when running locally - nothing in the image requires it.
+mkdir -p "${WORK}/inventory/dynamic/gcp/host_vars/localhost"
+echo "ansible_become: no" > "${WORK}/inventory/dynamic/gcp/host_vars/localhost/00_skip_root.yaml"
+
+if [[ -z "${ANSIBLE_CONFIG-}" ]]; then
+  export ANSIBLE_CONFIG="${WORK}/inventory/dynamic/gcp/ansible.cfg"
+fi
+
+# SSH requires the key file to be owned by the current user, but Docker copies
+# files in as root. Put the file into the ssh dir with the right permissions.
+if [[ -f "${FILES}/ssh-privatekey" ]]; then
+  keyfile="${HOME}/.ssh/google_compute_engine"
+  mkdir "${HOME}/.ssh"
+  rm -f "${keyfile}"
+  cat "${FILES}/ssh-privatekey" > "${keyfile}"
+  chmod 0600 "${keyfile}"
+  ssh-keygen -y -f "${keyfile}" > "${keyfile}.pub"
+fi
+if [[ -f "${FILES}/gce.json" ]]; then
+  gcloud auth activate-service-account --key-file="${FILES}/gce.json"
+else
+  echo "No service account file found at ${FILES}/gce.json, bypassing login"
+fi
+
+exec "$@"
\ No newline at end of file
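As a rough usage sketch, the entrypoint could be exercised from the installer image like this; the host-side paths, the image tag, and the in-container working directory are illustrative assumptions, not part of this change:

    # Stage the credentials the script expects under inventory/dynamic/injected
    mkdir -p injected
    cp ~/.ssh/google_compute_engine injected/ssh-privatekey
    cp ~/gce-service-account.json injected/gce.json
    docker run -u "$(id -u)" \
      -v "$(pwd)/injected:/usr/share/ansible/openshift-ansible/inventory/dynamic/injected:Z" \
      openshift/origin-ansible:latest \
      entrypoint-gcp ansible-playbook playbooks/gcp/openshift-cluster/launch.yml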
diff --git a/images/installer/root/usr/local/bin/user_setup b/images/installer/root/usr/local/bin/user_setup
index b76e60a4d..dba0af3e4 100755
--- a/images/installer/root/usr/local/bin/user_setup
+++ b/images/installer/root/usr/local/bin/user_setup
@@ -12,6 +12,8 @@ chmod g+rw /etc/passwd
 # ensure that the ansible content is accessible
 chmod -R g+r ${WORK_DIR}
 find ${WORK_DIR} -type d -exec chmod g+x {} +
+# ensure that the dynamic inventory dir can have content created
+find ${WORK_DIR} -type d -exec chmod g+wx {} +
 
 # no need for this script to remain in the image after running
 rm $0
diff --git a/inventory/.gitignore b/inventory/.gitignore
index 6ff331c7e..97aa044f6 100644
--- a/inventory/.gitignore
+++ b/inventory/.gitignore
@@ -1 +1,2 @@
 hosts
+/dynamic/gcp/group_vars/all/00_default_files_dir.yml
\ No newline at end of file
diff --git a/inventory/dynamic/gcp/README.md b/inventory/dynamic/gcp/README.md
new file mode 100644
index 000000000..217a035ca
--- /dev/null
+++ b/inventory/dynamic/gcp/README.md
@@ -0,0 +1 @@
+This directory provides dynamic inventory for a GCP cluster configured via the GCP provisioning playbook. Set inventory to `inventory/dynamic/gcp/hosts.sh` to calculate the appropriate host set.
\ No newline at end of file
diff --git a/inventory/dynamic/gcp/ansible.cfg b/inventory/dynamic/gcp/ansible.cfg
new file mode 100644
index 000000000..f87d51f28
--- /dev/null
+++ b/inventory/dynamic/gcp/ansible.cfg
@@ -0,0 +1,45 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts.
+
+[defaults]
+# Set the log_path
+#log_path = /tmp/ansible.log
+
+private_key_file = $HOME/.ssh/google_compute_engine
+
+# Additional default options for OpenShift Ansible
+forks = 50
+host_key_checking = False
+retry_files_enabled = False
+retry_files_save_path = ~/ansible-installer-retries
+nocows = True
+remote_user = cloud-user
+roles_path = ../../../roles/
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = $HOME/ansible/facts
+fact_caching_timeout = 600
+callback_whitelist = profile_tasks
+inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
+# work around privilege escalation timeouts in ansible:
+timeout = 30
+
+# Use the provided example inventory
+inventory = hosts.sh
+
+[inventory]
+# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
+unparsed_is_failed=true
+
+# Additional ssh options for OpenShift Ansible
+[ssh_connection]
+pipelining = True
+ssh_args = -o ControlMaster=auto -o ControlPersist=600s
+timeout = 10
+# shorten the ControlPath which is often too long; when it is,
+# ssh connection reuse silently fails, making everything slower.
+control_path = %(directory)s/%%h-%%r
diff --git a/inventory/dynamic/gcp/group_vars/all/00_defaults.yml b/inventory/dynamic/gcp/group_vars/all/00_defaults.yml
new file mode 100644
index 000000000..2f72e905f
--- /dev/null
+++ b/inventory/dynamic/gcp/group_vars/all/00_defaults.yml
@@ -0,0 +1,42 @@
+# GCP uses non-root users by default, so sudo by default
+---
+ansible_become: yes
+
+openshift_deployment_type: origin
+
+# Debugging settings
+debug_level: 2
+openshift_debug_level: "{{ debug_level }}"
+openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
+openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
+
+# External API settings
+console_port: 443
+internal_console_port: 8443
+openshift_master_api_port: "8443"
+openshift_master_console_port: "8443"
+openshift_master_cluster_hostname: "internal-openshift-master.{{ public_hosted_zone }}"
+openshift_master_cluster_public_hostname: "openshift-master.{{ public_hosted_zone }}"
+openshift_master_default_subdomain: "{{ wildcard_zone }}"
+
+# Cloud specific settings
+openshift_cloudprovider_kind: gce
+openshift_hosted_registry_storage_provider: gcs
+
+openshift_master_access_token_max_seconds: 2419200
+openshift_master_identity_providers:
+
+# Networking settings
+openshift_node_port_range: 30000-32000
+openshift_node_open_ports: [{"service":"Router stats port", "port":"1936/tcp"}, {"service":"Allowed open host ports", "port":"9000-10000/tcp"}, {"service":"Allowed open host ports", "port":"9000-10000/udp"}]
+openshift_node_sdn_mtu: 1410
+osm_cluster_network_cidr: 172.16.0.0/16
+osm_host_subnet_length: 9
+openshift_portal_net: 172.30.0.0/16
+
+# Default cluster configuration
+openshift_master_cluster_method: native
+openshift_schedulable: true
+# TODO: change to upstream conventions
+openshift_hosted_infra_selector: "role=infra"
+osm_default_node_selector: "role=app"
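Because the entrypoint above symlinks any injected YAML into group_vars/all, cluster-specific values that these defaults reference (for example public_hosted_zone and wildcard_zone) can be supplied without editing this file. A hedged sketch; the file name and values below are placeholders:

    cat > inventory/dynamic/injected/10_cluster_vars.yml <<'EOF'
    public_hosted_zone: example.com
    wildcard_zone: apps.example.com
    openshift_gcp_project: my-gcp-project
    EOF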
diff --git a/inventory/dynamic/gcp/hosts.py b/inventory/dynamic/gcp/hosts.py
new file mode 100755
index 000000000..cd1262622
--- /dev/null
+++ b/inventory/dynamic/gcp/hosts.py
@@ -0,0 +1,408 @@
+#!/usr/bin/env python
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a derivative of gce.py that adds support for filtering
+# the returned inventory to only include instances that have tags
+# as specified by GCE_TAGGED_INSTANCES. This prevents dynamic
+# inventory for multiple clusters within the same project from
+# accidentally stomping each other.
+
+# pylint: skip-file
+
+'''
+GCE external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests to
+Google Compute Engine via the libcloud library. Full install/configuration
+instructions for the gce* modules can be found in the comments of
+ansible/test/gce_tests.py.
+
+When run against a specific host, this script returns the following variables
+based on the data obtained from the libcloud Node object:
+ - gce_uuid
+ - gce_id
+ - gce_image
+ - gce_machine_type
+ - gce_private_ip
+ - gce_public_ip
+ - gce_name
+ - gce_description
+ - gce_status
+ - gce_zone
+ - gce_tags
+ - gce_metadata
+ - gce_network
+
+When run in --list mode, instances are grouped by the following categories:
+ - zone:
+   zone group name examples are us-central1-b, europe-west1-a, etc.
+ - instance tags:
+   An entry is created for each tag. For example, if you have two instances
+   with a common tag called 'foo', they will both be grouped together under
+   the 'tag_foo' name.
+ - network name:
+   the name of the network is appended to 'network_' (e.g. the 'default'
+   network will result in a group named 'network_default')
+ - machine type
+   types follow a pattern like n1-standard-4, g1-small, etc.
+ - running status:
+   group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
+ - image:
+   when using an ephemeral/scratch disk, this will be set to the image name
+   used when creating the instance (e.g. debian-7-wheezy-v20130816). when
+   your instance was created with a root persistent disk it will be set to
+   'persistent_disk' since there is no current way to determine the image.
+
+Examples:
+  Execute uname on all instances in the us-central1-a zone
+  $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
+
+  Use the GCE inventory script to print out instance specific information
+  $ contrib/inventory/gce.py --host my_instance
+
+Author: Eric Johnson <erjohnso@google.com>
+Contributors: Matt Hite <mhite@hotmail.com>
+Version: 0.0.2
+'''
+
+__requires__ = ['pycrypto>=2.6']
+try:
+    import pkg_resources
+except ImportError:
+    # Use pkg_resources to find the correct versions of libraries and set
+    # sys.path appropriately when there are multiversion installs. We don't
+    # fail here as there is code that better expresses the errors where the
+    # library is used.
+    pass
+
+USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
+USER_AGENT_VERSION="v2"
+
+import sys
+import os
+import time
+import argparse
+import ConfigParser
+
+import logging
+logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+try:
+    from libcloud.compute.types import Provider
+    from libcloud.compute.providers import get_driver
+    from libcloud.common.google import ResourceNotFoundError
+    _ = Provider.GCE
+except:
+    sys.exit("GCE inventory script requires libcloud >= 0.13")
+
+
+class GceInventory(object):
+    def __init__(self):
+        # Read settings and parse CLI arguments
+        self.parse_cli_args()
+        self.config = self.get_config()
+        self.driver = self.get_gce_driver()
+        self.ip_type = self.get_inventory_options()
+        if self.ip_type:
+            self.ip_type = self.ip_type.lower()
+
+        # Just display data for specific host
+        if self.args.host:
+            print(self.json_format_dict(self.node_to_dict(
+                self.get_instance(self.args.host)),
+                pretty=self.args.pretty))
+            sys.exit(0)
+
+        zones = self.parse_env_zones()
+
+        # Otherwise, assume user wants all instances grouped
+        print(self.json_format_dict(self.group_instances(zones),
+                                    pretty=self.args.pretty))
+        sys.exit(0)
+
+    def get_config(self):
+        """
+        Populates a SafeConfigParser object with defaults and
+        attempts to read an .ini-style configuration from the filename
+        specified in GCE_INI_PATH. If the environment variable is
+        not present, the filename defaults to gce.ini in the current
+        working directory.
+        """
+        gce_ini_default_path = os.path.join(
+            os.path.dirname(os.path.realpath(__file__)), "gce.ini")
+        gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+
+        # Create a ConfigParser.
+        # This provides empty defaults to each key, so that environment
+        # variable configuration (as opposed to INI configuration) is able
+        # to work.
+        config = ConfigParser.SafeConfigParser(defaults={
+            'gce_service_account_email_address': '',
+            'gce_service_account_pem_file_path': '',
+            'gce_project_id': '',
+            'libcloud_secrets': '',
+            'inventory_ip_type': '',
+        })
+        if 'gce' not in config.sections():
+            config.add_section('gce')
+        if 'inventory' not in config.sections():
+            config.add_section('inventory')
+
+        config.read(gce_ini_path)
+
+        #########
+        # Section added for processing ini settings
+        #########
+
+        # Set the instance_states filter based on config file options
+        self.instance_states = []
+        if config.has_option('gce', 'instance_states'):
+            states = config.get('gce', 'instance_states')
+            # Ignore if instance_states is an empty string.
+            if states:
+                self.instance_states = states.split(',')
+
+        return config
+
+    def get_inventory_options(self):
+        """Determine inventory options. Environment variables always
+        take precedence over configuration files."""
+        ip_type = self.config.get('inventory', 'inventory_ip_type')
+        # If the appropriate environment variables are set, they override
+        # other configuration
+        ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
+        return ip_type
+
+    def get_gce_driver(self):
+        """Determine the GCE authorization settings and return a
+        libcloud driver.
+        """
+        # Attempt to get GCE params from a configuration file, if one
+        # exists.
+        secrets_path = self.config.get('gce', 'libcloud_secrets')
+        secrets_found = False
+        try:
+            import secrets
+            args = list(getattr(secrets, 'GCE_PARAMS', []))
+            kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
+            secrets_found = True
+        except:
+            pass
+
+        if not secrets_found and secrets_path:
+            if not secrets_path.endswith('secrets.py'):
+                err = "Must specify libcloud secrets file as "
+                err += "/absolute/path/to/secrets.py"
+                sys.exit(err)
+            sys.path.append(os.path.dirname(secrets_path))
+            try:
+                import secrets
+                args = list(getattr(secrets, 'GCE_PARAMS', []))
+                kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
+                secrets_found = True
+            except:
+                pass
+        if not secrets_found:
+            args = [
+                self.config.get('gce', 'gce_service_account_email_address'),
+                self.config.get('gce', 'gce_service_account_pem_file_path')
+            ]
+            kwargs = {'project': self.config.get('gce', 'gce_project_id')}
+
+        # If the appropriate environment variables are set, they override
+        # other configuration; process those into our args and kwargs.
+        args[0] = os.environ.get('GCE_EMAIL', args[0])
+        args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
+        kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
+
+        # Retrieve and return the GCE driver.
+        gce = get_driver(Provider.GCE)(*args, **kwargs)
+        gce.connection.user_agent_append(
+            '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
+        )
+        return gce
+
+    def parse_env_zones(self):
+        '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
+        If provided, this will be used to filter the results of the grouped_instances call'''
+        import csv
+        reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
+        zones = [r for r in reader]
+        return [z for z in zones[0]]
+
+    def parse_cli_args(self):
+        ''' Command line argument processing '''
+
+        parser = argparse.ArgumentParser(
+            description='Produce an Ansible Inventory file based on GCE')
+        parser.add_argument('--list', action='store_true', default=True,
+                            help='List instances (default: True)')
+        parser.add_argument('--host', action='store',
+                            help='Get all information about an instance')
+        parser.add_argument('--tagged', action='store',
+                            help='Only include instances with this tag')
+        parser.add_argument('--pretty', action='store_true', default=False,
+                            help='Pretty format (default: False)')
+        self.args = parser.parse_args()
+
+        tag_env = os.environ.get('GCE_TAGGED_INSTANCES')
+        if not self.args.tagged and tag_env:
+            self.args.tagged = tag_env
+
+    def node_to_dict(self, inst):
+        md = {}
+
+        if inst is None:
+            return {}
+
+        if 'items' in inst.extra['metadata']:
+            for entry in inst.extra['metadata']['items']:
+                md[entry['key']] = entry['value']
+
+        net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+        # default to external IP unless user has specified they prefer internal
+        if self.ip_type == 'internal':
+            ssh_host = inst.private_ips[0]
+        else:
+            ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+
+        return {
+            'gce_uuid': inst.uuid,
+            'gce_id': inst.id,
+            'gce_image': inst.image,
+            'gce_machine_type': inst.size,
+            'gce_private_ip': inst.private_ips[0],
+            'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
+            'gce_name': inst.name,
+            'gce_description': inst.extra['description'],
+            'gce_status': inst.extra['status'],
+            'gce_zone': inst.extra['zone'].name,
+            'gce_tags': inst.extra['tags'],
+            'gce_metadata': md,
+            'gce_network': net,
+            # Hosts don't have a public name, so we add an IP
+            'ansible_host': ssh_host
+        }
+
+    def get_instance(self, instance_name):
+        '''Gets details about a specific instance '''
+        try:
+            return self.driver.ex_get_node(instance_name)
+        except Exception:
+            return None
+
+    def group_instances(self, zones=None):
+        '''Group all instances'''
+        groups = {}
+        meta = {}
+        meta["hostvars"] = {}
+
+        # list_nodes will fail if a disk is in the process of being deleted
+        # from a node, which is not uncommon if other playbooks are managing
+        # the same project. Retry if we receive a not found error.
+        nodes = []
+        tries = 0
+        while True:
+            try:
+                nodes = self.driver.list_nodes()
+                break
+            except ResourceNotFoundError as e:
+                tries = tries + 1
+                if tries > 15:
+                    raise e
+                time.sleep(1)
+                continue
+
+        for node in nodes:
+
+            # This check filters on the desired instance states defined in the
+            # config file with the instance_states config option.
+            #
+            # If the instance_states list is _empty_ then _ALL_ states are returned.
+            #
+            # If the instance_states list is _populated_ then check the current
+            # state against the instance_states list
+            if self.instance_states and not node.extra['status'] in self.instance_states:
+                continue
+
+            name = node.name
+
+            if self.args.tagged and self.args.tagged not in node.extra['tags']:
+                continue
+
+            meta["hostvars"][name] = self.node_to_dict(node)
+
+            zone = node.extra['zone'].name
+
+            # To avoid making multiple requests per zone
+            # we list all nodes and then filter the results
+            if zones and zone not in zones:
+                continue
+
+            if zone in groups:
+                groups[zone].append(name)
+            else:
+                groups[zone] = [name]
+
+            tags = node.extra['tags']
+            for t in tags:
+                if t.startswith('group-'):
+                    tag = t[6:]
+                else:
+                    tag = 'tag_%s' % t
+                if tag in groups:
+                    groups[tag].append(name)
+                else:
+                    groups[tag] = [name]
+
+            net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
+            net = 'network_%s' % net
+            if net in groups:
+                groups[net].append(name)
+            else:
+                groups[net] = [name]
+
+            machine_type = node.size
+            if machine_type in groups:
+                groups[machine_type].append(name)
+            else:
+                groups[machine_type] = [name]
+
+            image = node.image and node.image or 'persistent_disk'
+            if image in groups:
+                groups[image].append(name)
+            else:
+                groups[image] = [name]
+
+            status = node.extra['status']
+            stat = 'status_%s' % status.lower()
+            if stat in groups:
+                groups[stat].append(name)
+            else:
+                groups[stat] = [name]
+
+        groups["_meta"] = meta
+
+        return groups
+
+    def json_format_dict(self, data, pretty=False):
+        ''' Converts a dict to a JSON object and dumps it as a formatted
+        string '''
+
+        if pretty:
+            return json.dumps(data, sort_keys=True, indent=2)
+        else:
+            return json.dumps(data)
+
+
+# Run the script
+GceInventory()
diff --git a/inventory/dynamic/gcp/hosts.sh b/inventory/dynamic/gcp/hosts.sh
new file mode 100755
index 000000000..0c88e3a6b
--- /dev/null
+++ b/inventory/dynamic/gcp/hosts.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# Use a playbook to calculate the inventory dynamically from
+# the provided cluster variables.
+src="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+if ! out="$( ansible-playbook --inventory-file "${src}/none" "${src}/../../../playbooks/gcp/openshift-cluster/inventory.yml" 2>&1 )"; then
+  echo "error: Inventory configuration failed" 1>&2
+  echo "$out" 1>&2
+  echo "{}"
+  exit 1
+fi
+source "/tmp/inventory.sh"
+exec "${src}/hosts.py"
diff --git a/inventory/dynamic/gcp/none b/inventory/dynamic/gcp/none
new file mode 100644
index 000000000..9e26dfeeb
--- /dev/null
+++ b/inventory/dynamic/gcp/none
@@ -0,0 +1 @@
+{}
\ No newline at end of file
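The tagging filter added by this derivative script can also be exercised by hand, which is useful when debugging which hosts a play will target; the tag value and zone names below are placeholders:

    # Restrict the inventory to instances carrying a given tag
    GCE_TAGGED_INSTANCES=mycluster ./inventory/dynamic/gcp/hosts.py --list --pretty
    # Or filter by zone, mirroring what hosts.sh drives via inventory.yml
    GCE_ZONE=us-central1-a,us-central1-b ./inventory/dynamic/gcp/hosts.py --list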
diff --git a/inventory/dynamic/injected/README.md b/inventory/dynamic/injected/README.md
new file mode 100644
index 000000000..5e2e4c549
--- /dev/null
+++ b/inventory/dynamic/injected/README.md
@@ -0,0 +1,3 @@
+This directory may be used to inject inventory into openshift-ansible
+when used in a container. Other scripts like the cloud provider entrypoints
+will automatically use the content of this directory as inventory.
diff --git a/inventory/hosts.example b/inventory/hosts.example
index da60b63e6..f9f331880 100644
--- a/inventory/hosts.example
+++ b/inventory/hosts.example
@@ -845,12 +845,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # See: https://github.com/nickhammond/ansible-logrotate
 #logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
 
-# openshift-ansible will wait indefinitely for your input when it detects that the
+# The OpenShift-Ansible installer will fail when it detects that the
 # value of openshift_hostname resolves to an IP address not bound to any local
 # interfaces. This mis-configuration is problematic for any pod leveraging host
 # networking and liveness or readiness probes.
-# Setting this variable to true will override that check.
-#openshift_override_hostname_check=true
+# Setting this variable to false will override that check.
+#openshift_hostname_check=true
 
 # openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
 # in versions >= 3.6
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 65ee71c56..719e54eb9 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
 Name:           openshift-ansible
 Version:        3.9.0
-Release:        0.22.0%{?dist}
+Release:        0.23.0%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
 URL:            https://github.com/openshift/openshift-ansible
@@ -48,7 +48,8 @@ popd
 %install
 # Base openshift-ansible install
 mkdir -p %{buildroot}%{_datadir}/%{name}
-mkdir -p %{buildroot}%{_datadir}/ansible/%{name}
+mkdir -p %{buildroot}%{_datadir}/ansible/%{name}/inventory
+cp -rp inventory/dynamic %{buildroot}%{_datadir}/ansible/%{name}/inventory
 
 # openshift-ansible-bin install
 mkdir -p %{buildroot}%{_bindir}
@@ -62,7 +63,7 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
 # openshift-ansible-docs install
 # Install example inventory into docs/examples
 mkdir -p docs/example-inventories
-cp inventory/* docs/example-inventories/
+cp inventory/hosts.* inventory/README.md docs/example-inventories/
 
 # openshift-ansible-files install
 cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/
@@ -101,6 +102,7 @@
 %license LICENSE
 %dir %{_datadir}/ansible/%{name}
 %{_datadir}/ansible/%{name}/files
+%{_datadir}/ansible/%{name}/inventory/dynamic
 %ghost %{_datadir}/ansible/%{name}/playbooks/common/openshift-master/library.rpmmoved
 
 # ----------------------------------------------------------------------------------
@@ -202,6 +204,30 @@ Atomic OpenShift Utilities includes
 
 %changelog
+* Tue Jan 23 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.23.0
+- docker_image_availability: enable skopeo to use proxies (lmeyer@redhat.com)
+- Install base_packages earlier (mgugino@redhat.com)
+- allow uninstalling AWS objects created by prerequisite playbook
+  (jdiaz@redhat.com)
+- Bug 1536262: Default console and TSB node selector to
+  openshift_hosted_infra_selector (spadgett@redhat.com)
+- Migrate master-config.yaml asset config (spadgett@redhat.com)
+- Fix master scaleup play (mgugino@redhat.com)
+- use admin credentials for tsb install operations (bparees@redhat.com)
+- Fix etcd-upgrade sanity checks (mgugino@redhat.com)
+- Bug 1536253: Pass `--config` flag on oc commands when installing console
+  (spadgett@redhat.com)
+- Fix enterprise registry-console prefix (sdodson@redhat.com)
+- [release-3.7] Fix enterprise registry console image prefix
+  (sdodson@redhat.com)
+- [release-3.6] Fix enterprise registry console image prefix
+  (sdodson@redhat.com)
+- Bug 1512825 - add mux pod failed for Serial number 02 has already been issued
+  (nhosoi@redhat.com)
+- Remove old console asset config (spadgett@redhat.com)
+- Add support for Amazon EC2 C5 instance types (rteague@redhat.com)
+- Fix provider network support at openstack playbook (ltomasbo@redhat.com)
+
 * Fri Jan 19 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.22.0
 - Fix OpenStack readme (tomas@sedovic.cz)
 - Quick installer: deprecate upgrades (vrutkovs@redhat.com)
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
deleted file mode 100644
index 9d9ed29de..000000000
--- a/playbooks/aws/openshift-cluster/hosted.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- import_playbook: ../../openshift-hosted/private/config.yml
-
-- import_playbook: ../../openshift-metrics/private/config.yml
-  when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: ../../openshift-logging/private/config.yml
-  when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: ../../openshift-prometheus/private/config.yml
-  when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: ../../openshift-service-catalog/private/config.yml
-  when: openshift_enable_service_catalog | default(false) | bool
-
-- import_playbook: ../../openshift-management/private/config.yml
-  when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
-  hosts: oo_first_master
-  gather_facts: no
-  tasks:
-  - debug: msg="{{__deprecation_message}}"
-    when:
-    - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index a3fc82f9a..938e83f5e 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -18,29 +18,8 @@
 - name: run the init
   import_playbook: ../../init/main.yml
 
-- name: perform the installer openshift-checks
-  import_playbook: ../../openshift-checks/private/install.yml
+- name: configure the control plane
+  import_playbook: ../../common/private/control_plane.yml
 
-- name: etcd install
-  import_playbook: ../../openshift-etcd/private/config.yml
-
-- name: include nfs
-  import_playbook: ../../openshift-nfs/private/config.yml
-  when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- name: include loadbalancer
-  import_playbook: ../../openshift-loadbalancer/private/config.yml
-  when: groups.oo_lb_to_config | default([]) | count > 0
-
-- name: include openshift-master config
-  import_playbook: ../../openshift-master/private/config.yml
-
-- name: include master additional config
-  import_playbook: ../../openshift-master/private/additional_config.yml
-
-- name: include master additional config
+- name: ensure the masters are configured as nodes
   import_playbook: ../../openshift-node/private/config.yml
-
-- name: include openshift-glusterfs
-  import_playbook: ../../openshift-glusterfs/private/config.yml
-  when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index f98f5be9a..bd154fa83 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -15,5 +15,5 @@
 - name: Include the accept.yml playbook to accept nodes into the cluster
   import_playbook: accept.yml
 
-- name: Include the hosted.yml playbook to finish the hosted configuration
-  import_playbook: hosted.yml
+- name: Include the components playbook to finish the hosted configuration
+  import_playbook: ../../common/private/components.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index fc1cbf32a..07be0b0d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -31,7 +31,7 @@
     with_items: " {{ groups['oo_nodes_to_config'] }}"
     when:
     - hostvars[item].openshift is defined
-    - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+    - hostvars[item].openshift.common.hostname | lower in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
     changed_when: false
 
 # Build up the oo_nodes_to_upgrade group, use the list filtered by label if
diff --git a/playbooks/common/private/components.yml b/playbooks/common/private/components.yml
new file mode 100644
index 000000000..089645d07
--- /dev/null
+++ b/playbooks/common/private/components.yml
@@ -0,0 +1,38 @@
+---
+# These are the core component plays that configure the layers above the control
+# plane. A component is generally considered any part of OpenShift that runs on
+# top of the cluster and may be considered optional. Over time, much of OpenShift
+# above the Kubernetes apiserver and masters may be considered components.
+#
+# Preconditions:
+#
+# 1. The control plane is configured and reachable from nodes inside the cluster
+# 2. An admin kubeconfig file in /etc/origin/master/admin.kubeconfig that can
+#    perform root level actions against the cluster
+# 3. On cloud providers, persistent volume provisioners are configured
+# 4. A subset of nodes is available to allow components to schedule - this must
+#    include the masters and usually includes infra nodes.
+# 5. The init/main.yml playbook has been invoked
+
+- import_playbook: ../../openshift-glusterfs/private/config.yml
+  when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-hosted/private/config.yml
+
+- import_playbook: ../../openshift-web-console/private/config.yml
+  when: openshift_web_console_install | default(true) | bool
+
+- import_playbook: ../../openshift-metrics/private/config.yml
+  when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: ../../openshift-logging/private/config.yml
+  when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: ../../openshift-prometheus/private/config.yml
+  when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: ../../openshift-service-catalog/private/config.yml
+  when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: ../../openshift-management/private/config.yml
+  when: openshift_management_install_management | default(false) | bool
diff --git a/playbooks/common/private/control_plane.yml b/playbooks/common/private/control_plane.yml
new file mode 100644
index 000000000..0a5f1142b
--- /dev/null
+++ b/playbooks/common/private/control_plane.yml
@@ -0,0 +1,34 @@
+---
+# These are the control plane plays that configure a control plane on top of hosts
+# identified as masters. Over time, some of the pieces of the current control plane
+# may be moved to the components list.
+#
+# It is not required for any nodes to be configured, or passed to be configured,
+# when this playbook is invoked.
+#
+# Preconditions:
+#
+# 1. A set of machines have been identified to act as masters
+# 2. On cloud providers, a load balancer has been configured to point to the masters
+#    and that load balancer has a DNS name
+# 3. The init/main.yml playbook has been invoked
+#
+# Postconditions:
+#
+# 1. The control plane is reachable from the outside of the cluster
+# 2. The master has an /etc/origin/master/admin.kubeconfig file that gives cluster-admin
+#    access.
+
+- import_playbook: ../../openshift-checks/private/install.yml
+
+- import_playbook: ../../openshift-etcd/private/config.yml
+
+- import_playbook: ../../openshift-nfs/private/config.yml
+  when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-loadbalancer/private/config.yml
+  when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-master/private/config.yml
+
+- import_playbook: ../../openshift-master/private/additional_config.yml
diff --git a/playbooks/container-runtime/private/build_container_groups.yml b/playbooks/container-runtime/private/build_container_groups.yml
index a2361d50c..8fb7b63e8 100644
--- a/playbooks/container-runtime/private/build_container_groups.yml
+++ b/playbooks/container-runtime/private/build_container_groups.yml
@@ -1,6 +1,8 @@
 ---
+# l_build_container_groups_hosts is passed in via prerequisites.yml during
+# etcd scaleup plays.
 - name: create oo_hosts_containerized_managed_true host group
-  hosts: oo_all_hosts:!oo_nodes_to_config
+  hosts: "{{ l_build_container_groups_hosts | default('oo_all_hosts:!oo_nodes_to_config') }}"
   tasks:
   - group_by:
       key: oo_hosts_containerized_managed_{{ (openshift_is_containerized | default(False)) | ternary('true','false') }}
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index 817a8bf30..5396df20a 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -1,9 +1,11 @@
 ---
 # l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd
+# scaleup plays.
 
 - import_playbook: build_container_groups.yml
 
-- hosts: "{{ l_scale_up_hosts | default(l_default_container_runtime_hosts) }}"
+- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_runtime_hosts) }}"
   vars:
     l_default_container_runtime_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
   roles:
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml
index 65630be62..586149b1d 100644
--- a/playbooks/container-runtime/private/setup_storage.yml
+++ b/playbooks/container-runtime/private/setup_storage.yml
@@ -1,9 +1,11 @@
 ---
 # l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd
+# scaleup plays.
 
 - import_playbook: build_container_groups.yml
 
-- hosts: "{{ l_scale_up_hosts | default(l_default_container_storage_hosts) }}"
+- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_storage_hosts) }}"
   vars:
     l_default_container_storage_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
     l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
index 5efdc486a..361553ee4 100644
--- a/playbooks/deploy_cluster.yml
+++ b/playbooks/deploy_cluster.yml
@@ -1,44 +1,11 @@
 ---
 - import_playbook: init/main.yml
 
-- import_playbook: openshift-checks/private/install.yml
-
-- import_playbook: openshift-etcd/private/config.yml
-
-- import_playbook: openshift-nfs/private/config.yml
-  when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-loadbalancer/private/config.yml
-  when: groups.oo_lb_to_config | default([]) | count > 0
-
-- import_playbook: openshift-master/private/config.yml
-
-- import_playbook: openshift-master/private/additional_config.yml
+- import_playbook: common/private/control_plane.yml
 
 - import_playbook: openshift-node/private/config.yml
 
-- import_playbook: openshift-glusterfs/private/config.yml
-  when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-hosted/private/config.yml
-
-- import_playbook: openshift-web-console/private/config.yml
-  when: openshift_web_console_install | default(true) | bool
-
-- import_playbook: openshift-metrics/private/config.yml
-  when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: openshift-logging/private/config.yml
-  when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: openshift-prometheus/private/config.yml
-  when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: openshift-service-catalog/private/config.yml
-  when: openshift_enable_service_catalog | default(true) | bool
-
-- import_playbook: openshift-management/private/config.yml
-  when: openshift_management_install_management | default(false) | bool
+- import_playbook: common/private/components.yml
 
 - name: Print deprecated variable warning message if necessary
   hosts: oo_first_master
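The top-level entry point is unchanged for users; only the internal structure moved. A sketch of the equivalent invocation, with the inventory path as a placeholder:

    # Full install, now routed through the two shared private playbooks
    ansible-playbook -i inventory/hosts playbooks/deploy_cluster.yml
    # Roughly equivalent to running in order: init/main.yml,
    # common/private/control_plane.yml, openshift-node/private/config.yml,
    # common/private/components.yml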
diff --git a/playbooks/gcp/openshift-cluster/build_base_image.yml b/playbooks/gcp/openshift-cluster/build_base_image.yml
new file mode 100644
index 000000000..75d0ddf9d
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/build_base_image.yml
@@ -0,0 +1,162 @@
+---
+# This playbook ensures that a base image is up to date with all of the required settings
+- name: Launch image build instance
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: Require openshift_gcp_root_image
+    fail:
+      msg: "A root OS image name or family is required for base image building. Please ensure `openshift_gcp_root_image` is defined."
+    when: openshift_gcp_root_image is undefined
+
+  - name: Create the image instance disk
+    gce_pd:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      name: "{{ openshift_gcp_prefix }}build-image-instance"
+      disk_type: pd-ssd
+      image: "{{ openshift_gcp_root_image }}"
+      size_gb: 10
+      state: present
+
+  - name: Launch the image build instance
+    gce:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      machine_type: n1-standard-1
+      instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+      state: present
+      tags:
+      - build-image-instance
+      disk_auto_delete: false
+      disks:
+      - "{{ openshift_gcp_prefix }}build-image-instance"
+    register: gce
+
+  - add_host:
+      hostname: "{{ item.public_ip }}"
+      groupname: build_instance_ips
+    with_items: "{{ gce.instance_data }}"
+
+  - name: Wait for instance to respond to SSH
+    wait_for:
+      delay: 1
+      host: "{{ item.public_ip }}"
+      port: 22
+      state: started
+      timeout: 120
+    with_items: "{{ gce.instance_data }}"
+
+- name: Prepare instance content sources
+  pre_tasks:
+  - set_fact:
+      allow_rhel_subscriptions: "{{ rhsub_skip | default('no', True) | lower in ['no', 'false'] }}"
+  - set_fact:
+      using_rhel_subscriptions: "{{ (deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise'] or ansible_distribution == 'RedHat') and allow_rhel_subscriptions }}"
+  hosts: build_instance_ips
+  roles:
+  - role: rhel_subscribe
+    when: using_rhel_subscriptions
+  - role: openshift_repos
+    vars:
+      openshift_additional_repos: []
+  post_tasks:
+  - name: Add custom repositories
+    include_role:
+      name: openshift_gcp
+      tasks_from: add_custom_repositories.yml
+  - name: Add the Google Cloud repo
+    yum_repository:
+      name: google-cloud
+      description: Google Cloud Compute
+      baseurl: https://packages.cloud.google.com/yum/repos/google-cloud-compute-el7-x86_64
+      gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+      gpgcheck: yes
+      repo_gpgcheck: yes
+      state: present
+    when: ansible_os_family == "RedHat"
+  - name: Add the jdetiber-qemu-user-static copr repo
+    yum_repository:
+      name: jdetiber-qemu-user-static
+      description: QEMU user static COPR
+      baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/epel-7-$basearch/
+      gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/pubkey.gpg
+      gpgcheck: yes
+      repo_gpgcheck: no
+      state: present
+    when: ansible_os_family == "RedHat"
+  - name: Install qemu-user-static
+    package:
+      name: qemu-user-static
+      state: present
+  - name: Start and enable systemd-binfmt service
+    systemd:
+      name: systemd-binfmt
+      state: started
+      enabled: yes
+
+- name: Build image
+  hosts: build_instance_ips
+  pre_tasks:
+  - name: Set up core host GCP configuration
+    include_role:
+      name: openshift_gcp
+      tasks_from: configure_gcp_base_image.yml
+  roles:
+  - role: os_update_latest
+  post_tasks:
+  - name: Disable all repos on RHEL
+    command: subscription-manager repos --disable="*"
+    when: using_rhel_subscriptions
+  - name: Enable repos for packages on RHEL
+    command: subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms"
+    when: using_rhel_subscriptions
+  - name: Install common image prerequisites
+    package: name={{ item }} state=latest
+    with_items:
+    # required by Ansible
+    - PyYAML
+    - docker
+    - google-compute-engine
+    - google-compute-engine-init
+    - google-config
+    - wget
+    - git
+    - net-tools
+    - bind-utils
+    - iptables-services
+    - bridge-utils
+    - bash-completion
+  - name: Clean yum metadata
+    command: yum clean all
+    args:
+      warn: no
+    when: ansible_os_family == "RedHat"
+
+- name: Commit image
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: Terminate the image build instance
+    gce:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+      state: absent
+  - name: Save the new image
+    command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_base_image_name | default(openshift_gcp_base_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_base_image }}"
+  - name: Remove the image instance disk
+    gce_pd:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      name: "{{ openshift_gcp_prefix }}build-image-instance"
+      state: absent
diff --git a/playbooks/gcp/openshift-cluster/build_image.yml b/playbooks/gcp/openshift-cluster/build_image.yml
new file mode 100644
index 000000000..787de8ebc
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/build_image.yml
@@ -0,0 +1,106 @@
+---
+- name: Verify prerequisites for image build
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: Require openshift_gcp_base_image
+    fail:
+      msg: "A base image name or family is required for image building. Please ensure `openshift_gcp_base_image` is defined."
+    when: openshift_gcp_base_image is undefined
+
+- name: Launch image build instance
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: Set facts
+    set_fact:
+      openshift_node_bootstrap: True
+      openshift_master_unsupported_embedded_etcd: True
+
+  - name: Create the image instance disk
+    gce_pd:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      name: "{{ openshift_gcp_prefix }}build-image-instance"
+      disk_type: pd-ssd
+      image: "{{ openshift_gcp_base_image }}"
+      size_gb: 10
+      state: present
+
+  - name: Launch the image build instance
+    gce:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      machine_type: n1-standard-1
+      instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+      state: present
+      tags:
+      - build-image-instance
+      disk_auto_delete: false
+      disks:
+      - "{{ openshift_gcp_prefix }}build-image-instance"
+    register: gce
+
+  - name: add host to nodes
+    add_host:
+      hostname: "{{ item.public_ip }}"
+      groupname: nodes
+    with_items: "{{ gce.instance_data }}"
+
+  - name: Wait for instance to respond to SSH
+    wait_for:
+      delay: 1
+      host: "{{ item.public_ip }}"
+      port: 22
+      state: started
+      timeout: 120
+    with_items: "{{ gce.instance_data }}"
+
+- hosts: nodes
+  tasks:
+  - name: Set facts
+    set_fact:
+      openshift_node_bootstrap: True
+
+# This is the part that installs all of the software and configs for the instance
+# to become a node.
+- import_playbook: ../../openshift-node/private/image_prep.yml
+
+# Add additional GCP specific behavior
+- hosts: nodes
+  tasks:
+  - include_role:
+      name: openshift_gcp
+      tasks_from: node_cloud_config.yml
+  - include_role:
+      name: openshift_gcp
+      tasks_from: frequent_log_rotation.yml
+
+- name: Commit image
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: Terminate the image build instance
+    gce:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+      state: absent
+  - name: Save the new image
+    command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_image_name | default(openshift_gcp_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_image }}"
+  - name: Remove the image instance disk
+    gce_pd:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      name: "{{ openshift_gcp_prefix }}build-image-instance"
+      state: absent
diff --git a/playbooks/gcp/openshift-cluster/deprovision.yml b/playbooks/gcp/openshift-cluster/deprovision.yml
new file mode 100644
index 000000000..589fddd2f
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/deprovision.yml
@@ -0,0 +1,10 @@
+# This playbook terminates a running cluster
+---
+- name: Terminate running cluster and remove all supporting resources in GCE
+  hosts: localhost
+  connection: local
+  tasks:
+  - include_role:
+      name: openshift_gcp
+    vars:
+      state: absent
diff --git a/playbooks/gcp/openshift-cluster/install.yml b/playbooks/gcp/openshift-cluster/install.yml
new file mode 100644
index 000000000..fb35b4348
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/install.yml
@@ -0,0 +1,33 @@
+# This playbook installs onto a provisioned cluster
+---
+- hosts: localhost
+  connection: local
+  tasks:
+  - name: place all scale groups into Ansible groups
+    include_role:
+      name: openshift_gcp
+      tasks_from: setup_scale_group_facts.yml
+
+- name: run the init
+  import_playbook: ../../init/main.yml
+
+- name: configure the control plane
+  import_playbook: ../../common/private/control_plane.yml
+
+- name: ensure the masters are configured as nodes
+  import_playbook: ../../openshift-node/private/config.yml
+
+- name: run the GCP specific post steps
+  import_playbook: install_gcp.yml
+
+- name: install components
+  import_playbook: ../../common/private/components.yml
+
+- hosts: primary_master
+  gather_facts: no
+  tasks:
+  - name: Retrieve cluster configuration
+    fetch:
+      src: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+      dest: "/tmp/"
+      flat: yes
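Because the final play fetches admin.kubeconfig to /tmp/ on the Ansible host, a post-install sanity check could look like the following (it assumes the oc client is available locally, which this diff does not guarantee):

    export KUBECONFIG=/tmp/admin.kubeconfig
    oc get nodes
    oc get pods --all-namespaces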
diff --git a/playbooks/gcp/openshift-cluster/install_gcp.yml b/playbooks/gcp/openshift-cluster/install_gcp.yml
new file mode 100644
index 000000000..09db78971
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/install_gcp.yml
@@ -0,0 +1,21 @@
+---
+- hosts: masters
+  gather_facts: no
+  tasks:
+  - name: create master health check service
+    include_role:
+      name: openshift_gcp
+      tasks_from: configure_master_healthcheck.yml
+  - name: configure node bootstrapping
+    include_role:
+      name: openshift_gcp
+      tasks_from: configure_master_bootstrap.yml
+    when:
+    - openshift_master_bootstrap_enabled | default(False)
+  - name: configure node bootstrap autoapprover
+    include_role:
+      name: openshift_bootstrap_autoapprover
+      tasks_from: main
+    when:
+    - openshift_master_bootstrap_enabled | default(False)
+    - openshift_master_bootstrap_auto_approve | default(False) | bool
diff --git a/playbooks/gcp/openshift-cluster/inventory.yml b/playbooks/gcp/openshift-cluster/inventory.yml
new file mode 100644
index 000000000..96de6d6db
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/inventory.yml
@@ -0,0 +1,10 @@
+---
+- name: Set up the connection variables for retrieving inventory from GCE
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: materialize the inventory
+    include_role:
+      name: openshift_gcp
+      tasks_from: dynamic_inventory.yml
diff --git a/playbooks/gcp/openshift-cluster/launch.yml b/playbooks/gcp/openshift-cluster/launch.yml
new file mode 100644
index 000000000..02f00408a
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/launch.yml
@@ -0,0 +1,12 @@
+# This playbook launches a new cluster or converges it if already launched
+---
+- import_playbook: build_image.yml
+  when: openshift_gcp_build_image | default(False) | bool
+
+- import_playbook: provision.yml
+
+- hosts: localhost
+  tasks:
+  - meta: refresh_inventory
+
+- import_playbook: install.yml
diff --git a/playbooks/gcp/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml
index b6edf9961..293a195c9 100644
--- a/playbooks/gcp/provision.yml
+++ b/playbooks/gcp/openshift-cluster/provision.yml
@@ -3,11 +3,10 @@
   hosts: localhost
   connection: local
   gather_facts: no
+  roles:
+  - openshift_gcp
   tasks:
-
-  - name: provision a GCP cluster in the specified project
+  - name: recalculate the dynamic inventory
     import_role:
       name: openshift_gcp
-
-- name: run the cluster deploy
-  import_playbook: ../deploy_cluster.yml
+      tasks_from: dynamic_inventory.yml
diff --git a/playbooks/gcp/openshift-cluster/publish_image.yml b/playbooks/gcp/openshift-cluster/publish_image.yml
new file mode 100644
index 000000000..76fd49e9c
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/publish_image.yml
@@ -0,0 +1,9 @@
+---
+- name: Publish the most recent image
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - import_role:
+      name: openshift_gcp
+      tasks_from: publish_image.yml
diff --git a/playbooks/gcp/openshift-cluster/roles b/playbooks/gcp/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
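A sketch of driving the new GCP entry points end to end; the variable values are placeholders, and the remaining openshift_gcp_* settings would normally come from the injected inventory rather than the command line:

    ansible-playbook playbooks/gcp/openshift-cluster/launch.yml \
      -e openshift_gcp_project=my-project \
      -e openshift_gcp_prefix=mycluster- \
      -e openshift_gcp_build_image=true
    # Tear everything down again
    ansible-playbook playbooks/gcp/openshift-cluster/deprovision.yml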
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index c4cd226c9..924ae481a 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -47,7 +47,7 @@
       msg: >
         Running etcd as an embedded service is no longer supported.
     when:
-    - g_etcd_hosts | default([]) | length not in [3,1]
+    - g_etcd_hosts | default([]) | length not in [5,3,1]
     - not (openshift_node_bootstrap | default(False))
 
 - name: Evaluate oo_all_hosts
diff --git a/playbooks/init/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml
index 86e0b2416..b49f7dd08 100644
--- a/playbooks/init/validate_hostnames.yml
+++ b/playbooks/init/validate_hostnames.yml
@@ -25,7 +25,7 @@
     when:
     - lookupip.stdout != '127.0.0.1'
     - lookupip.stdout not in ansible_all_ipv4_addresses
-    - openshift_hostname_check | default(true)
+    - openshift_hostname_check | default(true) | bool
 
 - name: Validate openshift_ip exists on node when defined
   fail:
@@ -40,4 +40,4 @@
     when:
     - openshift_ip is defined
     - openshift_ip not in ansible_all_ipv4_addresses
-    - openshift_ip_check | default(true)
+    - openshift_ip_check | default(true) | bool
diff --git a/playbooks/openshift-etcd/scaleup.yml b/playbooks/openshift-etcd/scaleup.yml
index 7e9ab6834..656454fe3 100644
--- a/playbooks/openshift-etcd/scaleup.yml
+++ b/playbooks/openshift-etcd/scaleup.yml
@@ -1,4 +1,51 @@
 ---
+- import_playbook: ../init/evaluate_groups.yml
+
+- name: Ensure there are new_etcd
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - fail:
+      msg: >
+        Detected no new_etcd in inventory. Please add hosts to the
+        new_etcd host group to add etcd hosts.
+    when:
+    - g_new_etcd_hosts | default([]) | length == 0
+
+  - fail:
+      msg: >
+        Detected new_etcd host is member of new_masters or new_nodes. Please
+        run playbooks/openshift-master/scaleup.yml or
+        playbooks/openshift-node/scaleup.yml before running this play.
+    when: >
+      inventory_hostname in (groups['new_masters'] | default([]))
+      or inventory_hostname in (groups['new_nodes'] | default([]))
+
+# We only need to run this if etcd is being installed on a standalone host;
+# If etcd is part of master or node group, there's no need to
+# re-run prerequisites
+- import_playbook: ../prerequisites.yml
+  vars:
+    # We need to ensure container_runtime is only processed for containerized
+    # etcd hosts by setting l_build_container_groups_hosts and l_etcd_scale_up_hosts
+    l_build_container_groups_hosts: "oo_new_etcd_to_config"
+    l_etcd_scale_up_hosts: "oo_hosts_containerized_managed_true"
+    l_scale_up_hosts: "oo_new_etcd_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+    l_sanity_check_hosts: "{{ groups['oo_new_etcd_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config']) }}"
+  when:
+  - inventory_hostname not in groups['oo_masters']
+  - inventory_hostname not in groups['oo_nodes_to_config']
+
+# If this etcd host is part of a master or node, we don't need to run
+# prerequisites, we can just init facts as normal.
+- import_playbook: ../init/main.yml
+  vars:
+    skip_verison: True
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+  when:
+  - inventory_hostname in groups['oo_masters']
+  - inventory_hostname in groups['oo_nodes_to_config']
 
 - import_playbook: private/scaleup.yml
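Usage sketch for the reworked etcd scaleup entry point; the host name is a placeholder:

    # Add the new member to the inventory first, e.g.
    #   [new_etcd]
    #   etcd03.example.com
    ansible-playbook -i inventory/hosts playbooks/openshift-etcd/scaleup.yml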
+# If this etcd host is part of a master or node, we don't need to run
+# prerequisites; we can just init facts as normal.
 - import_playbook: ../init/main.yml
+  vars:
+    skip_verison: True
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+  when: >
+    inventory_hostname in groups['oo_masters']
+    or inventory_hostname in groups['oo_nodes_to_config']

 - import_playbook: private/scaleup.yml
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index d64be06e5..842bb34de 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -30,15 +30,17 @@ version 10) or newer. It must also satisfy these requirements:
   - look at the [Minimum Hardware Requirements page][hardware-requirements]
     for production
-* The keypair for SSH must be available in openstack
-* `keystonerc` file that lets you talk to the openstack services
+* The keypair for SSH must be available in OpenStack
+* `keystonerc` file that lets you talk to the OpenStack services
   * NOTE: only Keystone V2 is currently supported
+* A host with the supported version of [Ansible][ansible] installed; see the
+  [Setup section of the openshift-ansible README][openshift-ansible-setup]
+  for details on the requirements.

 Optional:

 * External Neutron network with a floating IP address pool
-
 ## Installation

 There are four main parts to the installation:
@@ -68,12 +70,11 @@
 First, you need to select where to run [Ansible][ansible] from (the
 *Ansible host*). This can be the computer you read this guide on or an
 OpenStack VM you'll create specifically for this purpose.

-We will use
-a
+This guide will use a
 [Docker image that has all the dependencies installed][control-host-image]
 to make things easier. If you don't want to use Docker, take a look
 at the [Ansible host dependencies][ansible-dependencies] and make sure
-they're installed.
+they are installed.

 Your *Ansible host* needs to have the following:
@@ -222,6 +223,7 @@ advanced configuration:
 [ansible]: https://www.ansible.com/
 [openshift-ansible]: https://github.com/openshift/openshift-ansible
+[openshift-ansible-setup]: https://github.com/openshift/openshift-ansible#setup
 [devstack]: https://docs.openstack.org/devstack/
 [tripleo]: http://tripleo.org/
 [ansible-dependencies]: ./advanced-configuration.md#dependencies-for-localhost-ansible-controladmin-node
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index a8663f946..1287b25f3 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -43,7 +43,7 @@ openshift_hosted_registry_wait: True
 # NOTE(shadower): the hostname check seems to always fail because the
 # host's floating IP address doesn't match the address received from
 # inside the host.
-openshift_override_hostname_check: true
+openshift_hostname_check: false

 # For POCs or demo environments that are using smaller instances than
 # the official recommended values for RAM and DISK, uncomment the line below.
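Because validate_hostnames.yml now casts the renamed variable with `| bool`, it can also be flipped from the command line, where extra-vars arrive as strings just like INI inventory values; a bare string "false" would otherwise evaluate truthy. An illustrative one-off override (the provision_install.yml entry point name is an assumption about the OpenStack playbook layout):

  # The string "false" is only interpreted as boolean False because of the | bool cast.
  ansible-playbook -i inventory \
      -e openshift_hostname_check=false \
      playbooks/openstack/openshift-cluster/provision_install.yml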
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index d0e37e2f4..8203d15f5 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -101,45 +101,34 @@ l_crt_crio_image_tag_dict:
   openshift-enterprise: "{{ l_openshift_image_tag }}"
   origin: "{{ openshift_crio_image_tag | default(openshift_crio_image_tag_default) }}"

-l_crt_crio_image_prepend_dict:
-  openshift-enterprise: "registry.access.redhat.com/openshift3"
-  origin: "docker.io/gscrivano"
-
 l_crt_crio_image_dict:
-  Fedora:
-    crio_image_name: "cri-o-fedora"
-    crio_image_tag: "latest"
-  CentOS:
-    crio_image_name: "cri-o-centos"
-    crio_image_tag: "latest"
-  RedHat:
-    crio_image_name: "cri-o"
-    crio_image_tag: "{{ openshift_crio_image_tag | default(l_crt_crio_image_tag_dict[openshift_deployment_type]) }}"
-
-l_crio_image_prepend: "{{ l_crt_crio_image_prepend_dict[openshift_deployment_type] }}"
-l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_name'] }}"
-l_crio_image_tag: "{{ l_crt_crio_image_dict[ansible_distribution] }}"
-
-l_crio_image_default: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
+  Fedora: "registry.fedoraproject.org/latest/cri-o"
+  CentOS: "registry.centos.org/projectatomic/cri-o"
+  RedHat: "registry.access.redhat.com/openshift3/cri-o"
+
+l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution] }}"
+l_crio_image_tag: "{{ l_crt_crio_image_tag_dict[openshift_deployment_type] }}"
+
+l_crio_image_default: "{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
 l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio_image_default) }}"

 # ----------------------- #
 # systemcontainers_docker #
 # ----------------------- #
-l_crt_docker_image_prepend_dict:
-  Fedora: "registry.fedoraproject.org/latest"
-  Centos: "docker.io/gscrivano"
-  RedHat: "registry.access.redhat.com/openshift3"
+l_crt_docker_image_dict:
+  Fedora: "registry.fedoraproject.org/latest/docker"
+  CentOS: "registry.centos.org/projectatomic/docker"
+  RedHat: "registry.access.redhat.com/openshift3/container-engine"

 openshift_docker_image_tag_default: "latest"
 l_crt_docker_image_tag_dict:
   openshift-enterprise: "{{ l_openshift_image_tag }}"
   origin: "{{ openshift_docker_image_tag | default(openshift_docker_image_tag_default) }}"

-l_docker_image_prepend: "{{ l_crt_docker_image_prepend_dict[ansible_distribution] }}"
+l_docker_image_prepend: "{{ l_crt_docker_image_dict[ansible_distribution] }}"
 l_docker_image_tag: "{{ l_crt_docker_image_tag_dict[openshift_deployment_type] }}"
-l_docker_image_default: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
+l_docker_image_default: "{{ l_docker_image_prepend }}:{{ l_docker_image_tag }}"
 l_docker_image: "{{ openshift_docker_systemcontainer_image_override | default(l_docker_image_default) }}"

 l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml
index 7634b8192..38d2f748b 100644
--- a/roles/flannel/meta/main.yml
+++ b/roles/flannel/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
   - system
 dependencies:
 - role: lib_utils
+- role: openshift_facts
diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py
index 9f73510c4..ef996fefe 100644
--- a/roles/lib_utils/filter_plugins/oo_filters.py
+++ b/roles/lib_utils/filter_plugins/oo_filters.py
@@ -4,6 +4,7 @@
 """
 Custom filters for use in openshift-ansible
 """
+import json
 import os
 import pdb
 import random
@@ -586,6 +587,18 @@ that result to this filter plugin.
     return secret_name


+def lib_utils_oo_l_of_d_to_csv(input_list):
+    """Map a list of dictionaries, input_list, into a csv string
+    of json values.
+
+    Example input:
+    [{'var1': 'val1', 'var2': 'val2'}, {'var1': 'val3', 'var2': 'val4'}]
+    Example output:
+    u'{"var1": "val1", "var2": "val2"},{"var1": "val3", "var2": "val4"}'
+    """
+    return ','.join(json.dumps(x) for x in input_list)
+
+
 def map_from_pairs(source, delim="="):
     ''' Returns a dict given the source and delim delimited '''
     if source == '':
@@ -623,5 +636,6 @@ class FilterModule(object):
         "lib_utils_oo_contains_rule": lib_utils_oo_contains_rule,
         "lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list,
         "lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets,
+        "lib_utils_oo_l_of_d_to_csv": lib_utils_oo_l_of_d_to_csv,
         "map_from_pairs": map_from_pairs
     }
diff --git a/roles/lib_utils/library/docker_creds.py b/roles/lib_utils/library/docker_creds.py
index d4674845e..b94c0b779 100644
--- a/roles/lib_utils/library/docker_creds.py
+++ b/roles/lib_utils/library/docker_creds.py
@@ -135,7 +135,7 @@ def update_config(docker_config, registry, username, password):
         docker_config['auths'][registry] = {}

     # base64 encode our username:password string
-    encoded_data = base64.b64encode('{}:{}'.format(username, password))
+    encoded_data = base64.b64encode('{}:{}'.format(username, password).encode()).decode()

     # check if the same value is already present for idempotency.
     if 'auth' in docker_config['auths'][registry]:
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index a729e8dbd..5053823b0 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -59,7 +59,7 @@ openshift_aws_elb_name_dict:
   external: "{{ openshift_aws_elb_basename }}-infra"

 openshift_aws_elb_idle_timout: 400
-openshift_aws_elb_scheme: internet-facing
+
 openshift_aws_elb_cert_arn: ''

 openshift_aws_elb_dict:
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
index 5d371ec7a..6f0028a3d 100644
--- a/roles/openshift_aws/tasks/elb.yml
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -15,7 +15,7 @@
     - "{{ subnetout.subnets[0].id }}"
     health_check: "{{ openshift_aws_elb_health_check }}"
     listeners: "{{ item.value }}"
-    scheme: "{{ openshift_aws_elb_scheme }}"
+    scheme: "{{ (item.key == 'internal') | ternary('internal','internet-facing') }}"
     tags: "{{ openshift_aws_elb_tags }}"
     wait: True
   register: new_elb
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml
new file mode 100644
index 000000000..90ee40943
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml
@@ -0,0 +1,10 @@
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: bootstrap-autoapprover
+roleRef:
+  kind: ClusterRole
+  name: system:node-bootstrap-autoapprover
+subjects:
+- kind: User
+  name: system:serviceaccount:openshift-infra:bootstrap-autoapprover
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml
new file mode 100644
index 000000000..d8143d047
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml
@@ -0,0 +1,21 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: system:node-bootstrap-autoapprover
+rules:
+- apiGroups:
+  - certificates.k8s.io
+  resources:
+  - certificatesigningrequests
+  verbs:
+  - delete
+  - get
+  - list
+  - watch
+- apiGroups:
+  - certificates.k8s.io
+  resources:
+  - certificatesigningrequests/approval
+  verbs:
+  - create
+  - update
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml
new file mode 100644
index 000000000..e22ce6f34
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml
@@ -0,0 +1,5 @@
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  name: bootstrap-autoapprover
+  namespace: openshift-infra
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml
new file mode 100644
index 000000000..dbcedb407
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml
@@ -0,0 +1,68 @@
+kind: StatefulSet
+apiVersion: apps/v1beta1
+metadata:
+  name: bootstrap-autoapprover
+  namespace: openshift-infra
+spec:
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: bootstrap-autoapprover
+    spec:
+      serviceAccountName: bootstrap-autoapprover
+      terminationGracePeriodSeconds: 1
+      containers:
+      - name: signer
+        image: openshift/node:v3.7.0-rc.0
+        command:
+        - /bin/bash
+        - -c
+        args:
+        - |
+          #!/bin/bash
+          set -o errexit
+          set -o nounset
+          set -o pipefail
+
+          unset KUBECONFIG
+          cat <<SCRIPT > /tmp/signer
+          #!/bin/bash
+          #
+          # It approves any CSR that is not yet approved, and deletes any CSR that
+          # expired more than 60 seconds ago.
+          #
+
+          set -o errexit
+          set -o nounset
+          set -o pipefail
+
+          name=\${1}
+          condition=\${2}
+          certificate=\${3}
+          username=\${4}
+
+          # auto approve
+          if [[ -z "\${condition}" && ("\${username}" == "system:serviceaccount:openshift-infra:node-bootstrapper" || "\${username}" == "system:node:"* ) ]]; then
+            oc adm certificate approve "\${name}"
+            exit 0
+          fi
+
+          # check certificate age
+          if [[ -n "\${certificate}" ]]; then
+            text="\$( echo "\${certificate}" | base64 -d - )"
+            if ! echo "\${text}" | openssl x509 -noout; then
+              echo "error: Unable to parse certificate" 1>&2
+              exit 1
+            fi
+            if ! echo "\${text}" | openssl x509 -checkend -60 > /dev/null; then
echo "\${text}" | openssl x509 -checkend -60 > /dev/null; then + echo "Certificate is expired, deleting" + oc delete csr "\${name}" + fi + exit 0 + fi + SCRIPT + chmod u+x /tmp/signer + + exec oc observe csr --maximum-errors=1 --resync-period=10m -a '{.status.conditions[*].type}' -a '{.status.certificate}' -a '{.spec.username}' -- /tmp/signer diff --git a/roles/openshift_bootstrap_autoapprover/tasks/main.yml b/roles/openshift_bootstrap_autoapprover/tasks/main.yml new file mode 100644 index 000000000..88e9d08e7 --- /dev/null +++ b/roles/openshift_bootstrap_autoapprover/tasks/main.yml @@ -0,0 +1,28 @@ +--- +- name: Copy auto-approver config to host + run_once: true + copy: + src: "{{ item }}" + dest: /tmp/openshift-approver/ + owner: root + mode: 0400 + with_fileglob: + - "*.yaml" + +- name: Set auto-approver nodeSelector + run_once: true + yedit: + src: "/tmp/openshift-approver/openshift-bootstrap-controller.yaml" + key: spec.template.spec.nodeSelector + value: "{{ openshift_master_bootstrap_auto_approver_node_selector | default({}) }}" + value_type: list + +- name: Create auto-approver on cluster + run_once: true + command: oc apply -f /tmp/openshift-approver/ + +- name: Remove auto-approver config + run_once: true + file: + path: /tmp/openshift-approver/ + state: absent diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml index ee4048911..395bd304c 100644 --- a/roles/openshift_cloud_provider/tasks/gce.yml +++ b/roles/openshift_cloud_provider/tasks/gce.yml @@ -13,5 +13,11 @@ ini_file: dest: "{{ openshift.common.config_base }}/cloudprovider/gce.conf" section: Global - option: multizone - value: "true" + option: "{{ item.key }}" + value: "{{ item.value }}" + with_items: + - { key: 'project-id', value: '{{ openshift_gcp_project }}' } + - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' } + - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' } + - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' } + - { key: 'multizone', value: 'false' } diff --git a/roles/openshift_gcp/files/bootstrap-script.sh b/roles/openshift_gcp/files/bootstrap-script.sh new file mode 100644 index 000000000..0c3f1999b --- /dev/null +++ b/roles/openshift_gcp/files/bootstrap-script.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# +# This script is a startup script for bootstrapping a GCP node +# from a config stored in the project metadata. It loops until +# it finds the script and then starts the origin-node service. +# TODO: generalize + +set -o errexit +set -o nounset +set -o pipefail + +if [[ "$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/bootstrap" -H "Metadata-Flavor: Google" )" != "true" ]]; then + echo "info: Bootstrap is not enabled for this instance, skipping" 1>&2 + exit 0 +fi + +if ! id=$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-id" -H "Metadata-Flavor: Google" ); then + echo "error: Unable to get cluster-id for instance from cluster metadata" 1>&2 + exit 1 +fi + +if ! node_group=$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/node-group" -H "Metadata-Flavor: Google" ); then + echo "error: Unable to get node-group for instance from cluster metadata" 1>&2 + exit 1 +fi + +if ! 
config=$( curl -f "http://metadata.google.internal/computeMetadata/v1/instance/attributes/bootstrap-config" -H "Metadata-Flavor: Google" 2>/dev/null ); then + while true; do + if config=$( curl -f "http://metadata.google.internal/computeMetadata/v1/project/attributes/${id}-bootstrap-config" -H "Metadata-Flavor: Google" 2>/dev/null ); then + break + fi + echo "info: waiting for ${id}-bootstrap-config to become available in cluster metadata ..." 1>&2 + sleep 5 + done +fi + +echo "Got bootstrap config from metadata" +mkdir -p /etc/origin/node +echo -n "${config}" > /etc/origin/node/bootstrap.kubeconfig +echo "BOOTSTRAP_CONFIG_NAME=node-config-${node_group}" >> /etc/sysconfig/origin-node +systemctl enable origin-node +systemctl start origin-node diff --git a/roles/openshift_gcp/files/openshift-bootstrap-update.service b/roles/openshift_gcp/files/openshift-bootstrap-update.service new file mode 100644 index 000000000..c65b1b34e --- /dev/null +++ b/roles/openshift_gcp/files/openshift-bootstrap-update.service @@ -0,0 +1,7 @@ +[Unit] +Description=Update the OpenShift node bootstrap configuration + +[Service] +Type=oneshot +ExecStart=/usr/bin/openshift-bootstrap-update +User=root diff --git a/roles/openshift_gcp/files/openshift-bootstrap-update.timer b/roles/openshift_gcp/files/openshift-bootstrap-update.timer new file mode 100644 index 000000000..1a517b33e --- /dev/null +++ b/roles/openshift_gcp/files/openshift-bootstrap-update.timer @@ -0,0 +1,10 @@ +[Unit] +Description=Update the OpenShift node bootstrap credentials hourly + +[Timer] +OnBootSec=30s +OnCalendar=hourly +Persistent=true + +[Install] +WantedBy=timers.target
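The unit pair above follows the standard systemd timer pattern: the oneshot service does the work and the timer fires it 30 seconds after boot and hourly thereafter. Once the files are installed, scheduling can be verified with ordinary systemd commands (illustrative; the role itself only starts the timer):

  systemctl daemon-reload
  systemctl enable --now openshift-bootstrap-update.timer
  systemctl list-timers 'openshift-bootstrap-update*'   # shows next/last activation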
diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp/files/partition.conf
index b87e5e0b6..76e65ab9c 100644
--- a/roles/openshift_gcp_image_prep/files/partition.conf
+++ b/roles/openshift_gcp/files/partition.conf
@@ -1,3 +1,3 @@
 [Service]
 ExecStartPost=-/usr/bin/growpart /dev/sda 1
-ExecStartPost=-/sbin/xfs_growfs /
+ExecStartPost=-/sbin/xfs_growfs /
\ No newline at end of file
diff --git a/roles/openshift_gcp/meta/main.yml b/roles/openshift_gcp/meta/main.yml
new file mode 100644
index 000000000..5e428f8de
--- /dev/null
+++ b/roles/openshift_gcp/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+  author: Clayton Coleman
+  description:
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.8
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+  - system
+dependencies:
+- role: lib_utils
+- role: lib_openshift
diff --git a/roles/openshift_gcp/tasks/add_custom_repositories.yml b/roles/openshift_gcp/tasks/add_custom_repositories.yml
new file mode 100644
index 000000000..04718f78e
--- /dev/null
+++ b/roles/openshift_gcp/tasks/add_custom_repositories.yml
@@ -0,0 +1,20 @@
+---
+- name: Copy custom repository secrets
+  copy:
+    src: "{{ files_dir }}/{{ item.1.sslclientcert }}"
+    dest: /var/lib/yum/custom_secret_{{ item.0 }}_cert
+  when: item.1.sslclientcert | default(false)
+  with_indexed_items: "{{ provision_custom_repositories }}"
+- name: Copy custom repository secrets
+  copy:
+    src: "{{ files_dir }}/{{ item.1.sslclientkey }}"
+    dest: /var/lib/yum/custom_secret_{{ item.0 }}_key
+  when: item.1.sslclientkey | default(false)
+  with_indexed_items: "{{ provision_custom_repositories }}"
+
+- name: Create any custom repos that are defined
+  template:
+    src: yum_repo.j2
+    dest: /etc/yum.repos.d/provision_custom_repositories.repo
+  when: provision_custom_repositories | length > 0
+  notify: refresh cache
diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp/tasks/configure_gcp_base_image.yml
index fee5ab618..2c6e2790a 100644
--- a/roles/openshift_gcp_image_prep/tasks/main.yaml
+++ b/roles/openshift_gcp/tasks/configure_gcp_base_image.yml
@@ -1,18 +1,10 @@
----
 # GCE instances are starting with xfs AND barrier=1, which is only for extfs.
+---
 - name: Remove barrier=1 from XFS fstab entries
-  lineinfile:
-    path: /etc/fstab
-    regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$'
-    line: '\1xfs\2 \4'
-    backrefs: yes
+  command: sed -i -e 's/xfs\(.*\)barrier=1/xfs\1/g; s/, / /g' /etc/fstab

 - name: Ensure the root filesystem has XFS group quota turned on
-  lineinfile:
-    path: /boot/grub2/grub.cfg
-    regexp: '^(.*)linux16 (.*)$'
-    line: '\1linux16 \2 rootflags=gquota'
-    backrefs: yes
+  command: sed -i -e 's/linux16 \(.*\)$/linux16 \1 rootflags=gquota/g' /boot/grub2/grub.cfg

 - name: Ensure the root partition grows on startup
   copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/
diff --git a/roles/openshift_gcp/tasks/configure_master_bootstrap.yml b/roles/openshift_gcp/tasks/configure_master_bootstrap.yml
new file mode 100644
index 000000000..591cb593c
--- /dev/null
+++ b/roles/openshift_gcp/tasks/configure_master_bootstrap.yml
@@ -0,0 +1,36 @@
+#
+# These tasks configure the instance to periodically update the project metadata with the
+# latest bootstrap kubeconfig generated from the cluster. This keeps the project metadata
+# in sync with the cluster's configuration. We then invoke a CSR approve on any nodes that
+# are waiting to join the cluster.
+#
+---
+- name: Copy bootstrap update timer unit
+  copy:
+    src: openshift-bootstrap-update.timer
+    dest: /etc/systemd/system/openshift-bootstrap-update.timer
+    owner: root
+    group: root
+    mode: 0664
+
+- name: Copy bootstrap update service unit
+  copy:
+    src: openshift-bootstrap-update.service
+    dest: /etc/systemd/system/openshift-bootstrap-update.service
+    owner: root
+    group: root
+    mode: 0664
+
+- name: Create bootstrap update script
+  template: src=openshift-bootstrap-update.j2 dest=/usr/bin/openshift-bootstrap-update mode=u+rx
+
+- name: Start bootstrap update timer
+  systemd:
+    name: "openshift-bootstrap-update.timer"
+    state: started
+
+- name: Bootstrap all nodes that were identified with bootstrap metadata
+  run_once: true
+  oc_adm_csr:
+    nodes: "{{ groups['all'] | map('extract', hostvars) | selectattr('gce_metadata.bootstrap', 'match', 'true') | map(attribute='gce_name') | list }}"
+    timeout: 60
diff --git a/roles/openshift_gcp/tasks/configure_master_healthcheck.yml b/roles/openshift_gcp/tasks/configure_master_healthcheck.yml
new file mode 100644
index 000000000..aa9655977
--- /dev/null
+++ b/roles/openshift_gcp/tasks/configure_master_healthcheck.yml
@@ -0,0 +1,19 @@
+---
+- name: refresh yum cache
+  command: yum clean all
+  args:
+    warn: no
+  when: ansible_os_family == "RedHat"
+
+- name: install haproxy
+  package: name=haproxy state=present
+  register: result
+  until: '"failed" not in result'
+  retries: 10
+  delay: 10
+
+- name: configure haproxy
+  template: src=master_healthcheck.j2 dest=/etc/haproxy/haproxy.cfg
+
+- name: start and enable haproxy service
+  service: name=haproxy state=started enabled=yes
diff --git a/roles/openshift_gcp/tasks/dynamic_inventory.yml b/roles/openshift_gcp/tasks/dynamic_inventory.yml
new file mode 100644
index 000000000..1637da945
--- /dev/null
+++ b/roles/openshift_gcp/tasks/dynamic_inventory.yml
@@ -0,0 +1,5 @@
+---
+- name: Extract PEM from service account file
+  copy: content="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).private_key }}" dest=/tmp/gce.pem mode=0600
+- name: Templatize environment script
+  template: src=inventory.j2.sh dest=/tmp/inventory.sh mode=u+rx
diff --git a/roles/openshift_gcp/tasks/frequent_log_rotation.yml b/roles/openshift_gcp/tasks/frequent_log_rotation.yml
new file mode 100644
index 000000000..0b4b27f84
--- /dev/null
+++ b/roles/openshift_gcp/tasks/frequent_log_rotation.yml
@@ -0,0 +1,18 @@
+---
+- name: Rotate logs daily
+  replace:
+    dest: /etc/logrotate.conf
+    regexp: '^weekly|monthly|yearly$'
+    replace: daily
+- name: Rotate logs at a smaller size
+  lineinfile:
+    dest: /etc/logrotate.conf
+    state: present
+    regexp: '^size'
+    line: size 10M
+- name: Limit total size of log files
+  lineinfile:
+    dest: /etc/logrotate.conf
+    state: present
+    regexp: '^maxsize'
+    line: maxsize 20M
diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yml
index ad205ba33..fb147bc78 100644
--- a/roles/openshift_gcp/tasks/main.yaml
+++ b/roles/openshift_gcp/tasks/main.yml
@@ -17,7 +17,7 @@
 - name: Provision GCP DNS domain
   command: /tmp/openshift_gcp_provision_dns.sh
   args:
-    chdir: "{{ playbook_dir }}/files"
+    chdir: "{{ files_dir }}"
   register: dns_provision
   when:
   - state | default('present') == 'present'
@@ -33,7 +33,7 @@
 - name: Provision GCP resources
   command: /tmp/openshift_gcp_provision.sh
   args:
-    chdir: "{{ playbook_dir }}/files"
+    chdir: "{{ files_dir }}"
   when:
   - state | default('present') == 'present'
diff --git a/roles/openshift_gcp/tasks/node_cloud_config.yml b/roles/openshift_gcp/tasks/node_cloud_config.yml
new file mode 100644
index 000000000..4e982f497
--- /dev/null
+++ b/roles/openshift_gcp/tasks/node_cloud_config.yml
@@ -0,0 +1,12 @@
+---
+- name: ensure the /etc/origin folder exists
+  file: name=/etc/origin state=directory
+
+- name: configure gce cloud config options
+  ini_file: dest=/etc/origin/cloudprovider/gce.conf section=Global option={{ item.key }} value={{ item.value }} state=present create=yes
+  with_items:
+  - { key: 'project-id', value: '{{ openshift_gcp_project }}' }
+  - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
+  - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
+  - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
+  - { key: 'multizone', value: 'false' }
diff --git a/roles/openshift_gcp/tasks/publish_image.yml b/roles/openshift_gcp/tasks/publish_image.yml
new file mode 100644
index 000000000..db8a7ca69
--- /dev/null
+++ b/roles/openshift_gcp/tasks/publish_image.yml
@@ -0,0 +1,32 @@
+---
+- name: Require openshift_gcp_image
+  fail:
+    msg: "A source image name or family is required for image publishing. Please ensure `openshift_gcp_image` is defined."
+  when: openshift_gcp_image is undefined
+
+- name: Require openshift_gcp_target_image
+  fail:
+    msg: "A target image name or family is required for image publishing. Please ensure `openshift_gcp_target_image` is defined."
+  when: openshift_gcp_target_image is undefined
+
+- block:
+  - name: Retrieve images in the {{ openshift_gcp_target_image }} family
+    command: >
+      gcloud --project "{{ openshift_gcp_project }}" compute images list
+      "--filter=family={{ openshift_gcp_target_image }}"
+      --format=json --sort-by ~creationTimestamp
+    register: images
+  - name: Prune oldest images
+    command: >
+      gcloud --project "{{ openshift_gcp_project }}" compute images delete "{{ item['name'] }}"
+    with_items: "{{ (images.stdout | default('[]') | from_json )[( openshift_gcp_keep_images | int ):] }}"
+  when: openshift_gcp_keep_images is defined
+
+- name: Copy the latest image in the family {{ openshift_gcp_image }} to {{ openshift_gcp_target_image }}
+  command: >
+    gcloud --project "{{ openshift_gcp_target_project | default(openshift_gcp_project) }}"
+    beta compute images create
+    "{{ openshift_gcp_target_image_name | default(openshift_gcp_target_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}"
+    --family "{{ openshift_gcp_target_image }}"
+    --source-image-family "{{ openshift_gcp_image }}"
+    --source-image-project "{{ openshift_gcp_project }}"
diff --git a/roles/openshift_gcp/tasks/setup_scale_group_facts.yml b/roles/openshift_gcp/tasks/setup_scale_group_facts.yml
new file mode 100644
index 000000000..0fda43123
--- /dev/null
+++ b/roles/openshift_gcp/tasks/setup_scale_group_facts.yml
@@ -0,0 +1,44 @@
+---
+- name: Add masters to requisite groups
+  add_host:
+    name: "{{ hostvars[item].gce_name }}"
+    groups: masters, etcd
+  with_items: "{{ groups['tag_ocp-master'] }}"
+
+- name: Add a master to the primary masters group
+  add_host:
+    name: "{{ hostvars[item].gce_name }}"
+    groups: primary_master
+  with_items: "{{ groups['tag_ocp-master'].0 }}"
+
+- name: Add non-bootstrapping master node instances to node group
+  add_host:
+    name: "{{ hostvars[item].gce_name }}"
+    groups: nodes
+    openshift_node_labels:
+      role: infra
+  with_items: "{{ groups['tag_ocp-master'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add infra node instances to node group
+  add_host:
+    name: "{{ hostvars[item].gce_name }}"
+    groups: nodes
+    openshift_node_labels:
+      role: infra
+  with_items: "{{ groups['tag_ocp-infra-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add node instances to node group
+  add_host:
+    name: "{{ hostvars[item].gce_name }}"
+    groups: nodes
+    openshift_node_labels:
+      role: app
+  with_items: "{{ groups['tag_ocp-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add bootstrap node instances
+  add_host:
+    name: "{{ hostvars[item].gce_name }}"
+    groups: bootstrap_nodes
+    openshift_node_bootstrap: True
+  with_items: "{{ groups['tag_ocp-node'] | default([]) | intersect(groups['tag_ocp-bootstrap'] | default([])) }}"
+  when: not (openshift_node_bootstrap | default(False))
diff --git a/roles/openshift_gcp/templates/inventory.j2.sh b/roles/openshift_gcp/templates/inventory.j2.sh
new file mode 100644
index 000000000..dcaffb578
--- /dev/null
+++ b/roles/openshift_gcp/templates/inventory.j2.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+export GCE_PROJECT="{{ openshift_gcp_project }}"
+export GCE_ZONE="{{ openshift_gcp_zone }}"
+export GCE_EMAIL="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+export GCE_PEM_FILE_PATH="/tmp/gce.pem"
+export INVENTORY_IP_TYPE="{{ inventory_ip_type }}"
+export GCE_TAGGED_INSTANCES="{{ openshift_gcp_prefix }}ocp"
\ No newline at end of file
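The rendered script exports the credentials that the GCE dynamic inventory script reads from the environment, so a shell that sources it can query inventory directly. A rough usage sketch (the gce.py path is an assumption about where the contrib inventory script is checked out):

  # Source the generated environment, then list GCE hosts through the dynamic inventory.
  source /tmp/inventory.sh
  ansible-inventory -i path/to/gce.py --list | head -n 20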
diff --git a/roles/openshift_gcp/templates/master_healthcheck.j2 b/roles/openshift_gcp/templates/master_healthcheck.j2
new file mode 100644
index 000000000..189e578c5
--- /dev/null
+++ b/roles/openshift_gcp/templates/master_healthcheck.j2
@@ -0,0 +1,68 @@
+#---------------------------------------------------------------------
+# Example configuration for a possible web application.  See the
+# full configuration options online.
+#
+#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
+#
+#---------------------------------------------------------------------
+
+#---------------------------------------------------------------------
+# Global settings
+#---------------------------------------------------------------------
+global
+    # to have these messages end up in /var/log/haproxy.log you will
+    # need to:
+    #
+    # 1) configure syslog to accept network log events.  This is done
+    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
+    #    /etc/sysconfig/syslog
+    #
+    # 2) configure local2 events to go to the /var/log/haproxy.log
+    #   file. A line like the following can be added to
+    #   /etc/sysconfig/syslog
+    #
+    #    local2.*                       /var/log/haproxy.log
+    #
+    log         127.0.0.1 local2
+
+    chroot      /var/lib/haproxy
+    pidfile     /var/run/haproxy.pid
+    maxconn     4000
+    user        haproxy
+    group       haproxy
+    daemon
+
+    # turn on stats unix socket
+    stats socket /var/lib/haproxy/stats
+
+#---------------------------------------------------------------------
+# common defaults that all the 'listen' and 'backend' sections will
+# use if not designated in their block
+#---------------------------------------------------------------------
+defaults
+    mode                    http
+    log                     global
+    option                  httplog
+    option                  dontlognull
+    option http-server-close
+    option forwardfor       except 127.0.0.0/8
+    option                  redispatch
+    retries                 3
+    timeout http-request    10s
+    timeout queue           1m
+    timeout connect         10s
+    timeout client          1m
+    timeout server          1m
+    timeout http-keep-alive 10s
+    timeout check           10s
+    maxconn                 3000
+
+#---------------------------------------------------------------------
+# main frontend which proxies to the backends
+#---------------------------------------------------------------------
+frontend http-proxy *:8080
+    acl url_healthz  path_beg  -i /healthz
+    use_backend ocp if url_healthz
+
+backend ocp
+    server ocp localhost:{{ internal_console_port }} ssl verify none
diff --git a/roles/openshift_gcp/templates/openshift-bootstrap-update.j2 b/roles/openshift_gcp/templates/openshift-bootstrap-update.j2
new file mode 100644
index 000000000..5b0563724
--- /dev/null
+++ b/roles/openshift_gcp/templates/openshift-bootstrap-update.j2
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -euo pipefail
+
+oc serviceaccounts create-kubeconfig -n openshift-infra node-bootstrapper > /root/bootstrap.kubeconfig
+gcloud compute project-info --project '{{ openshift_gcp_project }}' add-metadata --metadata-from-file '{{ openshift_gcp_prefix + openshift_gcp_clusterid | default("default") }}-bootstrap-config=/root/bootstrap.kubeconfig'
+rm -f /root/bootstrap.kubeconfig
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
index 4d150bc74..794985322 100644
--- a/roles/openshift_gcp/templates/provision.j2.sh
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -9,15 +9,26 @@
 if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then
     ssh-add "{{ openshift_gcp_ssh_private_key }}" || true
 fi

-    # Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there
-    pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub")
-f 2 < "{{ openshift_gcp_ssh_private_key }}.pub") + # Check if the public key is in the project metadata, and if not, add it there + if [ -f "{{ openshift_gcp_ssh_private_key }}.pub" ]; then + pub_file="{{ openshift_gcp_ssh_private_key }}.pub" + pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub") + else + keyfile="${HOME}/.ssh/google_compute_engine" + pub_file="${keyfile}.pub" + mkdir -p "${HOME}/.ssh" + cp "{{ openshift_gcp_ssh_private_key }}" "${keyfile}" + chmod 0600 "${keyfile}" + ssh-keygen -y -f "${keyfile}" > "${pub_file}" + pub_key=$(cut -d ' ' -f 2 < "${pub_file}") + fi key_tmp_file='/tmp/ocp-gce-keys' if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file" fi echo -n 'cloud-user:' >> "$key_tmp_file" - cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file" + cat "${pub_file}" >> "$key_tmp_file" gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}" rm -f "$key_tmp_file" fi diff --git a/roles/openshift_gcp/templates/yum_repo.j2 b/roles/openshift_gcp/templates/yum_repo.j2 new file mode 100644 index 000000000..77919ea75 --- /dev/null +++ b/roles/openshift_gcp/templates/yum_repo.j2 @@ -0,0 +1,20 @@ +{% for repo in provision_custom_repositories %} +[{{ repo.id | default(repo.name) }}] +name={{ repo.name | default(repo.id) }} +baseurl={{ repo.baseurl }} +{% set enable_repo = repo.enabled | default(1) %} +enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }} +{% set enable_gpg_check = repo.gpgcheck | default(1) %} +gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }} +{% if 'sslclientcert' in repo %} +sslclientcert={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_cert" if repo.sslclientcert }} +{% endif %} +{% if 'sslclientkey' in repo %} +sslclientkey={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_key" if repo.sslclientkey }} +{% endif %} +{% for key, value in repo.iteritems() %} +{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck', 'sslclientkey', 'sslclientcert'] and value is defined %} +{{ key }}={{ value }} +{% endif %} +{% endfor %} +{% endfor %} diff --git a/roles/openshift_hosted/tasks/storage/registry_config.j2 b/roles/openshift_hosted/tasks/storage/registry_config.j2 deleted file mode 120000 index f3e82ad4f..000000000 --- a/roles/openshift_hosted/tasks/storage/registry_config.j2 +++ /dev/null @@ -1 +0,0 @@ -../../../templates/registry_config.j2
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
index 057963c1a..30fdde94c 100644
--- a/roles/openshift_metrics/tasks/oc_apply.yaml
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -16,9 +16,7 @@
     apply -f {{ file_name }}
     -n {{namespace}}
   register: generation_apply
-  failed_when:
-  - "'error' in generation_apply.stderr"
-  - "generation_apply.rc != 0"
+  failed_when: "'error' in generation_apply.stderr or (generation_apply.rc | int != 0)"
   changed_when: no

- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
@@ -30,7 +28,5 @@
   register: version_changed
   vars:
     init_version: "{{ (generation_init is defined) | ternary(generation_init.stdout, '0') }}"
-  failed_when:
-  - "'error' in version_changed.stderr"
-  - "version_changed.rc != 0"
+  failed_when: "'error' in version_changed.stderr or version_changed.rc | int != 0"
  changed_when: version_changed.stdout | int > init_version | int
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 0b10413c5..5864d3c03 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -77,6 +77,18 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }
 l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"

+openshift_node_syscon_auth_mounts_l:
+- type: bind
+  source: "{{ oreg_auth_credentials_path }}"
+  destination: "/root/.docker"
+  options:
+  - ro
+
+# If we need to add new mounts in the future, or the user wants to mount data,
+# it should be in the same format as auth_mounts_l above.
+openshift_node_syscon_add_mounts_l: []
+
+
 openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"

 openshift_node_image_dict:
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 06b879050..008f209d7 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -14,4 +14,23 @@
     - "DNS_DOMAIN={{ openshift.common.dns_domain }}"
     - "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
     - "MASTER_SERVICE={{ openshift_service_type }}.service"
+    - 'ADDTL_MOUNTS={{ l_node_syscon_add_mounts2 }}'
     state: latest
+  vars:
+    # We need to evaluate some variables here to ensure
+    # l_bind_docker_reg_auth is evaluated after registry_auth.yml has been
+    # processed.

+    # Determine if we want to include the auth credentials mount.
+    l_node_syscon_auth_mounts_l: "{{ l_bind_docker_reg_auth | ternary(openshift_node_syscon_auth_mounts_l,[]) }}"

+    # Join any user-provided mounts and auth_mounts into a combined list.
+    l_node_syscon_add_mounts_l: "{{ openshift_node_syscon_add_mounts_l | union(l_node_syscon_auth_mounts_l) }}"
+
+    # We must prepend a ',' here to ensure the value is inserted properly into an
+    # existing json list in the container's config.json.
+    # lib_utils_oo_l_of_d_to_csv is a custom filter plugin in roles/lib_utils/filter_plugins/oo_filters.py
+    l_node_syscon_add_mounts: ",{{ l_node_syscon_add_mounts_l | lib_utils_oo_l_of_d_to_csv }}"
+    # If we have just a ',', both mount lists were empty and we don't want to add
+    # anything to config.json.
+    l_node_syscon_add_mounts2: "{{ (l_node_syscon_add_mounts != ',') | bool | ternary(l_node_syscon_add_mounts,'') }}"
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 5f2a94ea2..7d817463c 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -32,7 +32,7 @@ masterClientConnectionOverrides:
   contentType: application/vnd.kubernetes.protobuf
   burst: 200
   qps: 100
-masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
+masterKubeConfig: system:node:{{ openshift.common.hostname | lower }}.kubeconfig
 {% if openshift_node_use_openshift_sdn | bool %}
 networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
 {% endif %}
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 5f73f3bdc..13d9fd718 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -18,9 +18,9 @@
   stat:
     path: "{{ openshift.common.config_base }}/node/{{ item }}"
   with_items:
-  - "system:node:{{ openshift.common.hostname }}.crt"
-  - "system:node:{{ openshift.common.hostname }}.key"
-  - "system:node:{{ openshift.common.hostname }}.kubeconfig"
+  - "system:node:{{ openshift.common.hostname | lower }}.crt"
+  - "system:node:{{ openshift.common.hostname | lower }}.key"
+  - "system:node:{{ openshift.common.hostname | lower }}.kubeconfig"
  - ca.crt
  - server.key
  - server.crt
@@ -59,16 +59,16 @@
     --certificate-authority {{ legacy_ca_certificate }}
     {% endfor %}
     --certificate-authority={{ openshift_ca_cert }}
-    --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}
+    --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}
     --groups=system:nodes
     --master={{ hostvars[openshift_ca_host].openshift.master.api_url }}
     --signer-cert={{ openshift_ca_cert }}
     --signer-key={{ openshift_ca_key }}
     --signer-serial={{ openshift_ca_serial }}
-    --user=system:node:{{ hostvars[item].openshift.common.hostname }}
+    --user=system:node:{{ hostvars[item].openshift.common.hostname | lower }}
     --expire-days={{ openshift_node_cert_expire_days }}
   args:
-    creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}"
+    creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}"
   with_items: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']) | lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
@@ -78,16 +78,16 @@
 - name: Generate the node server certificate
   command: >
     {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert
-      --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt
-      --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key
+      --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.crt
+      --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.key
       --expire-days={{ openshift_node_cert_expire_days }}
       --overwrite=true
-      --hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }}
+      --hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.hostname | lower }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.public_hostname | lower }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }}
       --signer-cert={{ openshift_ca_cert }}
       --signer-key={{ openshift_ca_key }}
       --signer-serial={{ openshift_ca_serial }}
   args:
-    creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt"
+    creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.crt"
   with_items: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']) | lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
diff --git a/roles/openshift_node_certificates/vars/main.yml b/roles/openshift_node_certificates/vars/main.yml
index 17ad8106d..12a6d3f94 100644
--- a/roles/openshift_node_certificates/vars/main.yml
+++ b/roles/openshift_node_certificates/vars/main.yml
@@ -1,7 +1,7 @@
 ---
 openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs"
 openshift_node_cert_dir: "{{ openshift.common.config_base }}/node"
-openshift_node_cert_subdir: "node-{{ openshift.common.hostname }}"
+openshift_node_cert_subdir: "node-{{ openshift.common.hostname | lower }}"
 openshift_node_config_dir: "{{ openshift.common.config_base }}/node"
 openshift_node_generated_config_dir: "{{ openshift_generated_configs_dir }}/{{ openshift_node_cert_subdir }}"
diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml
index 239e1f1cc..27c8a4b81 100644
--- a/roles/openshift_provisioners/tasks/oc_apply.yaml
+++ b/roles/openshift_provisioners/tasks/oc_apply.yaml
@@ -15,9 +15,7 @@
     apply -f {{ file_name }}
     -n {{ namespace }}
   register: generation_apply
-  failed_when:
-  - "'error' in generation_apply.stderr"
-  - "generation_apply.rc != 0"
+  failed_when: "'error' in generation_apply.stderr or generation_apply.rc != 0"
   changed_when: no

- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
@@ -38,9 +36,7 @@
     delete -f {{ file_name }}
     -n {{ namespace }}
   register: generation_delete
-  failed_when:
-  - "'error' in generation_delete.stderr"
-  - "generation_delete.rc != 0"
+  failed_when: "'error' in generation_delete.stderr or generation_delete.rc != 0"
   changed_when: generation_delete.rc == 0
   when: generation_apply.rc != 0

@@ -50,8 +46,6 @@
     apply -f {{ file_name }}
     -n {{ namespace }}
   register: generation_apply
-  failed_when:
-  - "'error' in generation_apply.stderr"
-  - "generation_apply.rc != 0"
+  failed_when: "'error' in generation_apply.stderr or generation_apply.rc | int != 0"
   changed_when: generation_apply.rc == 0
   when: generation_apply.rc != 0
diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml
index ead62799a..cc5eef47d 100644
--- a/roles/openshift_web_console/tasks/install.yml
+++ b/roles/openshift_web_console/tasks/install.yml
@@ -86,6 +86,8 @@
       value: "{{ openshift.master.logout_url | default('') }}"
     - key: features#inactivityTimeoutMinutes
      value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
+    - key: features#clusterResourceOverridesEnabled
+      value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(false) }}"
     - key: extensions#scriptURLs
       value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
     - key: extensions#stylesheetURLs
diff --git a/roles/openshift_web_console/tasks/update_console_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml
index 4d2957977..967222ea4 100644
--- a/roles/openshift_web_console/tasks/update_console_config.yml
+++ b/roles/openshift_web_console/tasks/update_console_config.yml
@@ -19,43 +19,49 @@
 #       value: "https://{{ openshift_logging_kibana_hostname }}"
 #     when: openshift_web_console_install | default(true) | bool

-- name: Read web console config map
+- name: Read the existing web console config map
   oc_configmap:
     namespace: openshift-web-console
     name: webconsole-config
     state: list
-  register: webconsole_config
-
-- name: Make temp directory
-  command: mktemp -d /tmp/console-ansible-XXXXXX
-  register: mktemp_console
-  changed_when: False
-
-- name: Copy web console config to temp file
-  copy:
-    content: "{{webconsole_config.results.results[0].data['webconsole-config.yaml']}}"
-    dest: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
-
-- name: Change web console config properties
-  yedit:
-    src: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
-    edits: "{{console_config_edits}}"
-    separator: '#'
-    state: present
-
-- name: Update web console config map
-  oc_configmap:
-    namespace: openshift-web-console
-    name: webconsole-config
-    state: present
-    from_file:
-      webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
-
-- name: Remove temp directory
-  file:
-    state: absent
-    name: "{{ mktemp_console.stdout }}"
-  changed_when: False
-
-# TODO: Only rollout if config has changed.
-- include_tasks: rollout_console.yml
+  register: webconsole_config_map
+
+- set_fact:
+    existing_config_map_data: "{{ webconsole_config_map.results.results[0].data | default({}) }}"
+
+- when: existing_config_map_data['webconsole-config.yaml'] is defined
+  block:
+  - name: Make temp directory
+    command: mktemp -d /tmp/console-ansible-XXXXXX
+    register: mktemp_console
+    changed_when: False
+
+  - name: Copy the existing web console config to temp directory
+    copy:
+      content: "{{ existing_config_map_data['webconsole-config.yaml'] }}"
+      dest: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+
+  - name: Change web console config properties
+    yedit:
+      src: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+      edits: "{{console_config_edits}}"
+      separator: '#'
+      state: present
+
+  - name: Update web console config map
+    oc_configmap:
+      namespace: openshift-web-console
+      name: webconsole-config
+      state: present
+      from_file:
+        webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+    register: update_console_config_map
+
+  - name: Remove temp directory
+    file:
+      state: absent
+      name: "{{ mktemp_console.stdout }}"
+    changed_when: False
+
+  - include_tasks: rollout_console.yml
+    when: update_console_config_map.changed | bool
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index a85a43bd3..eb42721b5 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -820,7 +820,7 @@
 http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
     click.echo(message)

-@click.group()
+@click.group(context_settings=dict(max_content_width=120))
 @click.pass_context
 @click.option('--unattended', '-u', is_flag=True, default=False)
 @click.option('--configuration', '-c',
@@ -932,7 +932,7 @@
     openshift_ansible.run_uninstall_playbook(hosts, verbose)

-@click.command()
+@click.command(context_settings=dict(max_content_width=120))
 @click.option('--latest-minor', '-l', is_flag=True, default=False)
 @click.option('--next-major', '-n', is_flag=True, default=False)
 @click.pass_context
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 216664cd0..84a76fa53 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -122,7 +122,7 @@ def write_inventory_vars(base_inventory, lb):
     if CFG.deployment.variables['ansible_ssh_user'] != 'root':
         base_inventory.write('ansible_become=yes\n')

-    base_inventory.write('openshift_override_hostname_check=true\n')
+    base_inventory.write('openshift_hostname_check=false\n')

     if lb is not None:
         base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))
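With this last hunk, a quick-install run with a non-root ansible_ssh_user now writes the renamed variable into the generated inventory, which the `| bool` casts added earlier then interpret correctly. A quick check of the result (the generated-inventory path is an assumption about the installer's defaults):

  grep -E 'ansible_become|openshift_hostname_check' ~/.config/openshift/hosts
  # expected:
  # ansible_become=yes
  # openshift_hostname_check=false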