Diffstat (limited to 'images/installer/root')
-rw-r--r--   images/installer/root/exports/config.json.template            234
-rw-r--r--   images/installer/root/exports/manifest.json                     12
-rw-r--r--   images/installer/root/exports/service.template                   6
-rw-r--r--   images/installer/root/exports/tmpfiles.template                   2
-rwxr-xr-x   images/installer/root/usr/local/bin/entrypoint                  17
-rwxr-xr-x   images/installer/root/usr/local/bin/run                         46
-rwxr-xr-x   images/installer/root/usr/local/bin/run-system-container.sh      4
-rwxr-xr-x   images/installer/root/usr/local/bin/usage                       33
-rwxr-xr-x   images/installer/root/usr/local/bin/usage.ocp                   33
-rwxr-xr-x   images/installer/root/usr/local/bin/user_setup                  17
10 files changed, 404 insertions, 0 deletions
diff --git a/images/installer/root/exports/config.json.template b/images/installer/root/exports/config.json.template
new file mode 100644
index 000000000..739c0080f
--- /dev/null
+++ b/images/installer/root/exports/config.json.template
@@ -0,0 +1,234 @@
+{
+    "ociVersion": "1.0.0",
+    "platform": {
+        "os": "linux",
+        "arch": "amd64"
+    },
+    "process": {
+        "terminal": false,
+        "consoleSize": {
+            "height": 0,
+            "width": 0
+        },
+        "user": {
+            "uid": 0,
+            "gid": 0
+        },
+        "args": [
+            "/usr/local/bin/run-system-container.sh"
+        ],
+        "env": [
+            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+            "TERM=xterm",
+            "OPTS=$OPTS",
+            "PLAYBOOK_FILE=$PLAYBOOK_FILE",
+            "ANSIBLE_CONFIG=$ANSIBLE_CONFIG"
+        ],
+        "cwd": "/opt/app-root/src/",
+        "rlimits": [
+            {
+                "type": "RLIMIT_NOFILE",
+                "hard": 1024,
+                "soft": 1024
+            }
+        ],
+        "noNewPrivileges": true
+    },
+    "root": {
+        "path": "rootfs",
+        "readonly": true
+    },
+    "mounts": [
+        {
+            "destination": "/proc",
+            "type": "proc",
+            "source": "proc"
+        },
+        {
+            "destination": "/dev",
+            "type": "tmpfs",
+            "source": "tmpfs",
+            "options": [
+                "nosuid",
+                "strictatime",
+                "mode=755",
+                "size=65536k"
+            ]
+        },
+        {
+            "destination": "/dev/pts",
+            "type": "devpts",
+            "source": "devpts",
+            "options": [
+                "nosuid",
+                "noexec",
+                "newinstance",
+                "ptmxmode=0666",
+                "mode=0620",
+                "gid=5"
+            ]
+        },
+        {
+            "destination": "/dev/shm",
+            "type": "tmpfs",
+            "source": "shm",
+            "options": [
+                "nosuid",
+                "noexec",
+                "nodev",
+                "mode=1777",
+                "size=65536k"
+            ]
+        },
+        {
+            "destination": "/dev/mqueue",
+            "type": "mqueue",
+            "source": "mqueue",
+            "options": [
+                "nosuid",
+                "noexec",
+                "nodev"
+            ]
+        },
+        {
+            "destination": "/sys",
+            "type": "sysfs",
+            "source": "sysfs",
+            "options": [
+                "nosuid",
+                "noexec",
+                "nodev",
+                "ro"
+            ]
+        },
+        {
+            "type": "bind",
+            "source": "$HOME_ROOT/.ssh",
+            "destination": "/opt/app-root/src/.ssh",
+            "options": [
+                "bind",
+                "rw",
+                "mode=755"
+            ]
+        },
+        {
+            "type": "bind",
+            "source": "$HOME_ROOT",
+            "destination": "/root",
+            "options": [
+                "bind",
+                "rw",
+                "mode=755"
+            ]
+        },
+        {
+            "type": "bind",
+            "source": "$VAR_LIB_OPENSHIFT_INSTALLER",
+            "destination": "/var/lib/openshift-installer",
+            "options": [
+                "bind",
+                "rw",
+                "mode=755"
+            ]
+        },
+        {
+            "type": "bind",
+            "source": "$VAR_LOG_OPENSHIFT_LOG",
+            "destination": "/var/log/ansible.log",
+            "options": [
+                "bind",
+                "rw",
+                "mode=755"
+            ]
+        },
+        {
+            "destination": "/root/.ansible",
+            "type": "tmpfs",
+            "source": "tmpfs",
+            "options": [
+                "nosuid",
+                "strictatime",
+                "mode=755"
+            ]
+        },
+        {
+            "destination": "/tmp",
+            "type": "tmpfs",
+            "source": "tmpfs",
+            "options": [
+                "nosuid",
+                "strictatime",
+                "mode=755"
+            ]
+        },
+        {
+            "type": "bind",
+            "source": "$INVENTORY_FILE",
+            "destination": "/etc/ansible/hosts",
+            "options": [
+                "bind",
+                "rw",
+                "mode=755"
+            ]
+        },
+        {
+            "destination": "/etc/resolv.conf",
+            "type": "bind",
+            "source": "/etc/resolv.conf",
+            "options": [
+                "ro",
+                "rbind",
+                "rprivate"
+            ]
+        },
+        {
+            "destination": "/sys/fs/cgroup",
+            "type": "cgroup",
+            "source": "cgroup",
+            "options": [
+                "nosuid",
+                "noexec",
+                "nodev",
+                "relatime",
+                "ro"
+            ]
+        }
+    ],
+    "hooks": {
+
+    },
+    "linux": {
+        "resources": {
+            "devices": [
+                {
+                    "allow": false,
+                    "access": "rwm"
+                }
+            ]
+        },
+        "namespaces": [
+            {
+                "type": "pid"
+            },
+            {
+                "type": "mount"
+            }
+        ],
+        "maskedPaths": [
+            "/proc/kcore",
+            "/proc/latency_stats",
+            "/proc/timer_list",
+            "/proc/timer_stats",
+            "/proc/sched_debug",
+            "/sys/firmware"
+        ],
+        "readonlyPaths": [
+            "/proc/asound",
+            "/proc/bus",
+            "/proc/fs",
+            "/proc/irq",
+            "/proc/sys",
+            "/proc/sysrq-trigger"
+        ]
+    }
+}
diff --git a/images/installer/root/exports/manifest.json b/images/installer/root/exports/manifest.json
new file mode 100644
index 000000000..8b984d7a3
--- /dev/null
+++ b/images/installer/root/exports/manifest.json
@@ -0,0 +1,12 @@
+{
+    "version": "1.0",
+    "defaultValues": {
+        "OPTS": "",
+        "VAR_LIB_OPENSHIFT_INSTALLER" : "/var/lib/openshift-installer",
+        "VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
+        "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml",
+        "HOME_ROOT": "/root",
+        "ANSIBLE_CONFIG": "/usr/share/atomic-openshift-utils/ansible.cfg",
+        "INVENTORY_FILE": "/dev/null"
+    }
+}
diff --git a/images/installer/root/exports/service.template b/images/installer/root/exports/service.template
new file mode 100644
index 000000000..bf5316af6
--- /dev/null
+++ b/images/installer/root/exports/service.template
@@ -0,0 +1,6 @@
+[Service]
+ExecStart=$EXEC_START
+ExecStop=-$EXEC_STOP
+Restart=no
+WorkingDirectory=$DESTDIR
+Type=oneshot
diff --git a/images/installer/root/exports/tmpfiles.template b/images/installer/root/exports/tmpfiles.template
new file mode 100644
index 000000000..b1f6caf47
--- /dev/null
+++ b/images/installer/root/exports/tmpfiles.template
@@ -0,0 +1,2 @@
+d $VAR_LIB_OPENSHIFT_INSTALLER - - - - -
+f $VAR_LOG_OPENSHIFT_LOG - - - - -
diff --git a/images/installer/root/usr/local/bin/entrypoint b/images/installer/root/usr/local/bin/entrypoint
new file mode 100755
index 000000000..777bf3f11
--- /dev/null
+++ b/images/installer/root/usr/local/bin/entrypoint
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+#
+# This file serves as the main entrypoint to the openshift-ansible image.
+#
+# For more information see the documentation:
+# https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+
+# Patch /etc/passwd file with the current user info.
+# The current user's entry must be correctly defined in this file in order for
+# the `ssh` command to work within the created container.
+
+if ! whoami &>/dev/null; then
+  echo "${USER:-default}:x:$(id -u):$(id -g):Default User:$HOME:/sbin/nologin" >> /etc/passwd
+fi
+
+exec "$@"
diff --git a/images/installer/root/usr/local/bin/run b/images/installer/root/usr/local/bin/run
new file mode 100755
index 000000000..9401ea118
--- /dev/null
+++ b/images/installer/root/usr/local/bin/run
@@ -0,0 +1,46 @@
+#!/bin/bash -e
+#
+# This file serves as the default command to the openshift-ansible image.
+# Runs a playbook with inventory as specified by environment variables.
+#
+# For more information see the documentation:
+# https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+# SOURCE and HOME DIRECTORY: /opt/app-root/src
+
+if [[ -z "${PLAYBOOK_FILE}" ]]; then
+  echo
+  echo "PLAYBOOK_FILE must be provided."
+  exec /usr/local/bin/usage
+fi
+
+INVENTORY="$(mktemp)"
+if [[ -v INVENTORY_FILE ]]; then
+  # Make a copy so that ALLOW_ANSIBLE_CONNECTION_LOCAL below
+  # does not attempt to modify the original
+  cp -a ${INVENTORY_FILE} ${INVENTORY}
+elif [[ -v INVENTORY_URL ]]; then
+  curl -o ${INVENTORY} ${INVENTORY_URL}
+elif [[ -v DYNAMIC_SCRIPT_URL ]]; then
+  curl -o ${INVENTORY} ${DYNAMIC_SCRIPT_URL}
+  chmod 755 ${INVENTORY}
+else
+  echo
+  echo "One of INVENTORY_FILE, INVENTORY_URL or DYNAMIC_SCRIPT_URL must be provided."
+  exec /usr/local/bin/usage
+fi
+INVENTORY_ARG="-i ${INVENTORY}"
+
+if [[ "$ALLOW_ANSIBLE_CONNECTION_LOCAL" = false ]]; then
+  sed -i s/ansible_connection=local// ${INVENTORY}
+fi
+
+if [[ -v VAULT_PASS ]]; then
+  VAULT_PASS_FILE=.vaultpass
+  echo ${VAULT_PASS} > ${VAULT_PASS_FILE}
+  VAULT_PASS_ARG="--vault-password-file ${VAULT_PASS_FILE}"
+fi
+
+cd ${WORK_DIR}
+
+exec ansible-playbook ${INVENTORY_ARG} ${VAULT_PASS_ARG} ${OPTS} ${PLAYBOOK_FILE}
diff --git a/images/installer/root/usr/local/bin/run-system-container.sh b/images/installer/root/usr/local/bin/run-system-container.sh
new file mode 100755
index 000000000..9ce7c7328
--- /dev/null
+++ b/images/installer/root/usr/local/bin/run-system-container.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+export ANSIBLE_LOG_PATH=/var/log/ansible.log
+exec ansible-playbook -i /etc/ansible/hosts ${OPTS} ${PLAYBOOK_FILE}
diff --git a/images/installer/root/usr/local/bin/usage b/images/installer/root/usr/local/bin/usage
new file mode 100755
index 000000000..3518d7f19
--- /dev/null
+++ b/images/installer/root/usr/local/bin/usage
@@ -0,0 +1,33 @@
+#!/bin/bash -e
+cat <<"EOF"
+
+The origin-ansible image provides several options to control the behaviour of the containers.
+For more details on these options see the documentation:
+
+  https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+At a minimum, when running a container using this image you must provide:
+
+* ssh keys so that Ansible can reach your hosts. These should be mounted as a volume under
+  /opt/app-root/src/.ssh
+* An inventory file. This can be mounted inside the container as a volume and specified with the
+  INVENTORY_FILE environment variable. Alternatively you can serve the inventory file from a web
+  server and use the INVENTORY_URL environment variable to fetch it.
+* The playbook to run. This is set using the PLAYBOOK_FILE environment variable.
+
+Here is an example of how to run a containerized origin-ansible with
+the openshift_facts playbook, which collects and displays facts about your
+OpenShift environment. The inventory and ssh keys are mounted as volumes
+(the latter requires setting the uid in the container and SELinux label
+in the key file via :Z so they can be accessed) and the PLAYBOOK_FILE
+environment variable is set to point to the playbook within the image:
+
+docker run -tu `id -u` \
+       -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z,ro \
+       -v /etc/ansible/hosts:/tmp/inventory:Z,ro \
+       -e INVENTORY_FILE=/tmp/inventory \
+       -e OPTS="-v" \
+       -e PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
+       openshift/origin-ansible
+
+EOF
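Besides the volume-mounted inventory shown in the usage text, the run script above also accepts
an inventory fetched with curl via INVENTORY_URL (or DYNAMIC_SCRIPT_URL) and a vault password
passed through VAULT_PASS. A hedged variant of the usage example exercising those paths; the URL
and password below are placeholders:

    docker run -tu `id -u` \
           -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z,ro \
           -e INVENTORY_URL=https://example.com/ansible/hosts \
           -e VAULT_PASS=changeme \
           -e PLAYBOOK_FILE=playbooks/byo/config.yml \
           -e OPTS="-v" \
           openshift/origin-ansible
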
diff --git a/images/installer/root/usr/local/bin/usage.ocp b/images/installer/root/usr/local/bin/usage.ocp
new file mode 100755
index 000000000..50593af6e
--- /dev/null
+++ b/images/installer/root/usr/local/bin/usage.ocp
@@ -0,0 +1,33 @@
+#!/bin/bash -e
+cat <<"EOF"
+
+The ose-ansible image provides several options to control the behaviour of the containers.
+For more details on these options see the documentation:
+
+  https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+At a minimum, when running a container using this image you must provide:
+
+* ssh keys so that Ansible can reach your hosts. These should be mounted as a volume under
+  /opt/app-root/src/.ssh
+* An inventory file. This can be mounted inside the container as a volume and specified with the
+  INVENTORY_FILE environment variable. Alternatively you can serve the inventory file from a web
+  server and use the INVENTORY_URL environment variable to fetch it.
+* The playbook to run. This is set using the PLAYBOOK_FILE environment variable.
+
+Here is an example of how to run a containerized ose-ansible with
+the openshift_facts playbook, which collects and displays facts about your
+OpenShift environment. The inventory and ssh keys are mounted as volumes
+(the latter requires setting the uid in the container and SELinux label
+in the key file via :Z so they can be accessed) and the PLAYBOOK_FILE
+environment variable is set to point to the playbook within the image:
+
+docker run -tu `id -u` \
+       -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z,ro \
+       -v /etc/ansible/hosts:/tmp/inventory:Z,ro \
+       -e INVENTORY_FILE=/tmp/inventory \
+       -e OPTS="-v" \
+       -e PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
+       openshift3/ose-ansible
+
+EOF
diff --git a/images/installer/root/usr/local/bin/user_setup b/images/installer/root/usr/local/bin/user_setup
new file mode 100755
index 000000000..b76e60a4d
--- /dev/null
+++ b/images/installer/root/usr/local/bin/user_setup
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -x
+
+# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be)
+mkdir -p ${HOME}
+chown ${USER_UID}:0 ${HOME}
+chmod ug+rwx ${HOME}
+
+# runtime user will need to be able to self-insert in /etc/passwd
+chmod g+rw /etc/passwd
+
+# ensure that the ansible content is accessible
+chmod -R g+r ${WORK_DIR}
+find ${WORK_DIR} -type d -exec chmod g+x {} +
+
+# no need for this script to remain in the image after running
+rm $0
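user_setup is meant to run once at image build time (as root), so that an arbitrary runtime UID
in group 0 can still write to $HOME, self-register in /etc/passwd via the entrypoint script, and
read the playbooks under $WORK_DIR. A sketch of that build-time invocation; the variable values
are assumptions about how the Dockerfile is expected to set them, not part of this diff:

    # At image build time, before switching to a non-root USER:
    export HOME=/opt/app-root/src \
           USER_UID=1001 \
           WORK_DIR=/usr/share/ansible/openshift-ansible
    /usr/local/bin/user_setup   # prepares $HOME, /etc/passwd and $WORK_DIR, then removes itself

    # At run time, entrypoint appends a passwd entry for whatever UID the container was
    # started with, then execs the default command (run) or the arguments given to docker run.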