Diffstat (limited to 'roles')
86 files changed, 2783 insertions, 155 deletions
diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md index 18d182ec9..f8588c4bf 100644 --- a/roles/installer_checkpoint/README.md +++ b/roles/installer_checkpoint/README.md @@ -16,7 +16,7 @@ displaying and logging of the installer status at the end of a playbook run. To ensure the callback plugin is loaded, regardless of ansible.cfg file configuration, the plugin has been placed inside the installer_checkpoint role -which must be called early in playbook execution. The `std_include.yml` playbook +which must be called early in playbook execution. The `init/main.yml` playbook is run first for all entry point playbooks, therefore, the initialization of the checkpoint plugin has been placed at the beginning of that file. @@ -89,7 +89,7 @@ phase/component and then a final play for setting `installer_hase_initialize` to "Complete". ```yaml -# common/openshift-cluster/std_include.yml +# init/main.yml --- - name: Initialization Checkpoint Start hosts: all @@ -124,7 +124,7 @@ phase/component and then a final play for setting `installer_hase_initialize` to Each phase or component of the installer will follow a similar pattern, with the exception that the `installer_checkpoint` role does not need to be called since -it was already loaded by the play in `std_include.yml`. It is important to +it was already loaded by the play in `init/main.yml`. It is important to place the 'In Progress' and 'Complete' plays as the first and last plays of the phase or component. diff --git a/roles/kuryr/README.md b/roles/kuryr/README.md index 7b618f902..269788a11 100644 --- a/roles/kuryr/README.md +++ b/roles/kuryr/README.md @@ -31,6 +31,11 @@ pods. This allows to have interconnectivity between pods and OpenStack VMs. * ``kuryr_openstack_pod_service_id=service_subnet_uuid`` * ``kuryr_openstack_pod_project_id=pod_project_uuid`` * ``kuryr_openstack_worker_nodes_subnet_id=worker_nodes_subnet_uuid`` +* ``kuryr_openstack_enable_pools=True`` +* ``kuryr_openstack_pool_max=0`` +* ``kuryr_openstack_pool_min=1`` +* ``kuryr_openstack_pool_batch=5`` +* ``kuryr_openstack_pool_update_frequency=20`` ## Kuryr resources diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2 index e874d6c25..6bf6c1db2 100644 --- a/roles/kuryr/templates/configmap.yaml.j2 +++ b/roles/kuryr/templates/configmap.yaml.j2 @@ -161,6 +161,14 @@ data: # The driver that provides VIFs for Kubernetes Pods. (string value) pod_vif_driver = nested-vlan + # The driver that manages VIFs pools for Kubernetes Pods (string value) + vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }} + + [vif_pool] + ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }} + ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }} + ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }} + ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }} [neutron] # Configuration options for OpenStack Neutron diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index 1ceaf5d0d..92515889b 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -71,6 +71,12 @@ options: required: false default: None aliases: [] + role_namespace: + description: + - The namespace where to find the role + required: false + default: None + aliases: [] debug: description: - Turn on debug output. 
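The Kuryr ports-pool variables documented above feed the new `[vif_pool]` section of `configmap.yaml.j2`. As a hedged illustration only (values simply mirror the role defaults shown in the template, they are not a recommendation), a `group_vars` entry enabling the pool driver might look like:

```yaml
# Illustrative sketch of the new Kuryr ports-pool settings;
# values match the defaults referenced in configmap.yaml.j2 above.
kuryr_openstack_enable_pools: true        # renders vif_pool_driver = nested (noop when false)
kuryr_openstack_pool_min: 1
kuryr_openstack_pool_max: 0
kuryr_openstack_pool_batch: 5
kuryr_openstack_pool_update_frequency: 20
```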
@@ -122,6 +128,14 @@ EXAMPLES = ''' resource_kind: cluster-role resource_name: system:build-strategy-docker state: present + +- name: oc adm policy add-role-to-user system:build-strategy-docker ausername --role-namespace foo + oc_adm_policy_user: + user: ausername + resource_kind: cluster-role + resource_name: system:build-strategy-docker + state: present + role_namespace: foo ''' # -*- -*- -*- End included fragment: doc/policy_user -*- -*- -*- @@ -2065,6 +2079,9 @@ class PolicyUser(OpenShiftCLI): self.config.config_options['name']['value'], self.config.config_options['user']['value']] + if self.config.config_options['role_namespace']['value'] is not None: + cmd.extend(['--role-namespace', self.config.config_options['role_namespace']['value']]) + return self.openshift_cmd(cmd, oadm=True) @staticmethod @@ -2085,6 +2102,7 @@ class PolicyUser(OpenShiftCLI): 'user': {'value': params['user'], 'include': False}, 'resource_kind': {'value': params['resource_kind'], 'include': False}, 'name': {'value': params['resource_name'], 'include': False}, + 'role_namespace': {'value': params['role_namespace'], 'include': False}, }) policyuser = PolicyUser(nconfig, params['debug']) @@ -2149,6 +2167,7 @@ def main(): debug=dict(default=False, type='bool'), resource_name=dict(required=True, type='str'), namespace=dict(default='default', type='str'), + role_namespace=dict(default=None, type='str'), kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), user=dict(required=True, type='str'), diff --git a/roles/lib_openshift/src/ansible/oc_adm_policy_user.py b/roles/lib_openshift/src/ansible/oc_adm_policy_user.py index a22496866..642001021 100644 --- a/roles/lib_openshift/src/ansible/oc_adm_policy_user.py +++ b/roles/lib_openshift/src/ansible/oc_adm_policy_user.py @@ -14,6 +14,7 @@ def main(): debug=dict(default=False, type='bool'), resource_name=dict(required=True, type='str'), namespace=dict(default='default', type='str'), + role_namespace=dict(default=None, type='str'), kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), user=dict(required=True, type='str'), diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py index 6fc8145c8..481564c2d 100644 --- a/roles/lib_openshift/src/class/oc_adm_policy_user.py +++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py @@ -148,6 +148,9 @@ class PolicyUser(OpenShiftCLI): self.config.config_options['name']['value'], self.config.config_options['user']['value']] + if self.config.config_options['role_namespace']['value'] is not None: + cmd.extend(['--role-namespace', self.config.config_options['role_namespace']['value']]) + return self.openshift_cmd(cmd, oadm=True) @staticmethod @@ -168,6 +171,7 @@ class PolicyUser(OpenShiftCLI): 'user': {'value': params['user'], 'include': False}, 'resource_kind': {'value': params['resource_kind'], 'include': False}, 'name': {'value': params['resource_name'], 'include': False}, + 'role_namespace': {'value': params['role_namespace'], 'include': False}, }) policyuser = PolicyUser(nconfig, params['debug']) diff --git a/roles/lib_openshift/src/doc/policy_user b/roles/lib_openshift/src/doc/policy_user index 351c9af65..a98652571 100644 --- a/roles/lib_openshift/src/doc/policy_user +++ b/roles/lib_openshift/src/doc/policy_user @@ -20,6 +20,12 @@ options: required: false default: None aliases: [] + role_namespace: + description: + - The namespace where to find the role + required: false + default: None + aliases: [] debug: description: - Turn 
on debug output. @@ -71,4 +77,12 @@ EXAMPLES = ''' resource_kind: cluster-role resource_name: system:build-strategy-docker state: present + +- name: oc adm policy add-role-to-user system:build-strategy-docker ausername --role-namespace foo + oc_adm_policy_user: + user: ausername + resource_kind: cluster-role + resource_name: system:build-strategy-docker + state: present + role_namespace: foo ''' diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2 index a8c7f9a95..fe0fe83d4 100644 --- a/roles/openshift_aws/templates/user_data.j2 +++ b/roles/openshift_aws/templates/user_data.j2 @@ -20,6 +20,7 @@ runcmd: - [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml] {% endif %} {% if launch_config_item.key != 'master' %} +- [ systemctl, restart, NetworkManager] - [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] - [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] {% endif %} diff --git a/roles/openshift_cluster_autoscaler/README.md b/roles/openshift_cluster_autoscaler/README.md new file mode 100644 index 000000000..d775a8a71 --- /dev/null +++ b/roles/openshift_cluster_autoscaler/README.md @@ -0,0 +1,58 @@ +Openshift cluster autoscaler +================================ + +Install the cluster autoscaler + +Requirements +------------ + +* One or more Master servers +* A cloud provider that supports the cluster-autoscaler + +Role Variables +-------------- +Check defaults/main.yml + +Dependencies +------------ + + +Example Playbook +---------------- + +#!/usr/bin/ansible-playbook +``` +--- +- hosts: masters + gather_facts: no + remote_user: root + tasks: + - name: include role autoscaler + include_role: + name: openshift_cluster_autoscaler + vars: + openshift_clusterid: opstest + openshift_cluster_autoscaler_aws_key: <aws_key> + openshift_cluster_autoscaler_aws_secret_key: <aws_secret_key> +``` + + +Notes +----- + +This is currently experimental software. This role allows users to install the cluster-autoscaler and the necessary authorization pieces that allow the autoscaler to function. + + +This feature requires cloud provider credentials or a serviceaccount that has access to scale up/down nodes within the scaling groups. 
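Beyond the credentials shown in the example playbook, the groups the autoscaler manages and its scale-down behaviour come from role variables consumed by `defaults/main.yml` and `templates/cluster-autoscaler.j2` (shown further below). A minimal sketch, with purely illustrative values and an assumed scaling-group name derived from the cluster id:

```yaml
# Hypothetical overrides rendered into the cluster-autoscaler arguments:
openshift_clusterid: opstest
openshift_cluster_autoscaler_node_groups:
- min: 3
  max: 10
  name: opstest openshift compute           # rendered as --nodes=3:10:opstest openshift compute
openshift_cluster_autoscaler_scale_down_delay: 10m   # only added to the args when defined
```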
+ +https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_cluster_autoscaler/defaults/main.yml b/roles/openshift_cluster_autoscaler/defaults/main.yml new file mode 100644 index 000000000..707e3c79b --- /dev/null +++ b/roles/openshift_cluster_autoscaler/defaults/main.yml @@ -0,0 +1,40 @@ +--- +openshift_cluster_autoscaler_name: cluster-autoscaler +openshift_cluster_autoscaler_version: 4 + +openshift_cluster_autoscaler_template_location: /tmp +openshift_cluster_autoscaler_template_name: "{{ openshift_cluster_autoscaler_name }}" + +openshift_cluster_autoscaler_node_selector: + type: infra + +openshift_cluster_autoscaler_state: present +openshift_cluster_autoscaler_namespace: openshift-infra +openshift_cluster_autoscaler_delete_config: True +openshift_cluster_autoscaler_skip_nodes_local_storage: False +openshift_cluster_autoscaler_serviceaccount: cluster-autoscaler +openshift_cluster_autoscaler_region: us-east-1 + +openshift_cluster_autoscaler_image: docker.io/openshift/kubernetes-autoscaler:v0.6.1 + +openshift_cluster_autoscaler_cloud_provider: aws + +# AWS specific variables here +openshift_cluster_autoscaler_aws_creds_name: aws-creds +openshift_cluster_autoscaler_aws_secret_name: autoscaler-credentials +openshift_cluster_autoscaler_aws_creds_path: "/var/run/secrets/{{ openshift_cluster_autoscaler_aws_creds_name }}/creds" +openshift_cluster_autoscaler_aws_env_vars: +- name: AWS_REGION + value: "{{ openshift_cluster_autoscaler_region }}" +- name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ openshift_cluster_autoscaler_aws_creds_path }}" + +## End AWS vars ## + +openshift_cluster_autoscaler_env_vars: [] + + +openshift_cluster_autoscaler_node_groups: +- min: 3 + max: 10 + name: "{{ openshift_cluster_autoscaler_node_group_name | default(openshift_clusterid ~ ' openshift compute') }}" diff --git a/roles/openshift_cluster_autoscaler/files/clusterrole.yml b/roles/openshift_cluster_autoscaler/files/clusterrole.yml new file mode 100644 index 000000000..a4691888a --- /dev/null +++ b/roles/openshift_cluster_autoscaler/files/clusterrole.yml @@ -0,0 +1,41 @@ +--- +apiVersion: v1 +kind: ClusterRole +metadata: + name: system:cluster-autoscaler +rules: +- apiGroups: + - "" + resources: + - nodes + - persistentvolumeclaims + - persistentvolumes + - pods + - replicationcontrollers + - services + verbs: + - get + - list + - watch + attributeRestrictions: null +- apiGroups: + - extensions + - apps + resources: + - daemonsets + - replicasets + - statefulsets + verbs: + - get + - list + - watch + attributeRestrictions: null +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - list + - watch + attributeRestrictions: null diff --git a/roles/openshift_cluster_autoscaler/files/role.yml b/roles/openshift_cluster_autoscaler/files/role.yml new file mode 100644 index 000000000..c2abb5435 --- /dev/null +++ b/roles/openshift_cluster_autoscaler/files/role.yml @@ -0,0 +1,33 @@ +--- +apiVersion: v1 +kind: Role +metadata: + name: cluster-autoscaler +rules: +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - cluster-autoscaler + - cluster-autoscaler-status + verbs: + - create + - get + - patch + - update + attributeRestrictions: null +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + attributeRestrictions: null +- apiGroups: + - "" + resources: + - events + verbs: + - create + attributeRestrictions: null diff 
--git a/roles/openshift_cluster_autoscaler/meta/main.yml b/roles/openshift_cluster_autoscaler/meta/main.yml new file mode 100644 index 000000000..d2bbd2576 --- /dev/null +++ b/roles/openshift_cluster_autoscaler/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- lib_openshift diff --git a/roles/openshift_cluster_autoscaler/tasks/aws.yml b/roles/openshift_cluster_autoscaler/tasks/aws.yml new file mode 100644 index 000000000..f08e97ebc --- /dev/null +++ b/roles/openshift_cluster_autoscaler/tasks/aws.yml @@ -0,0 +1,17 @@ +--- +- when: openshift_cluster_autoscaler_cloud_provider == 'aws' + block: + - name: laydown the aws-credentials + template: + src: aws-creds.j2 + dest: "{{ openshift_cluster_autoscaler_template_location }}/{{ openshift_cluster_autoscaler_aws_creds_name }}" + when: openshift_cluster_autoscaler_state == 'present' + + - name: create the aws creds secret + oc_secret: + state: "{{ openshift_cluster_autoscaler_state }}" + name: "{{ openshift_cluster_autoscaler_aws_secret_name }}" + namespace: "{{ openshift_cluster_autoscaler_namespace }}" + files: + - name: creds + path: "{{ openshift_cluster_autoscaler_template_location }}/{{ openshift_cluster_autoscaler_aws_creds_name }}" diff --git a/roles/openshift_cluster_autoscaler/tasks/main.yml b/roles/openshift_cluster_autoscaler/tasks/main.yml new file mode 100644 index 000000000..173dcf044 --- /dev/null +++ b/roles/openshift_cluster_autoscaler/tasks/main.yml @@ -0,0 +1,72 @@ +--- +- name: create the cluster-autoscaler serviceaccount + oc_serviceaccount: + name: "{{ openshift_cluster_autoscaler_serviceaccount }}" + namespace: "{{ openshift_cluster_autoscaler_namespace }}" + state: "{{ openshift_cluster_autoscaler_state }}" + +- name: copy the cluster-autoscaler files + copy: + src: "{{ item }}" + dest: "{{ openshift_cluster_autoscaler_template_location }}/{{ item }}" + with_items: + - clusterrole.yml + - role.yml + when: openshift_cluster_autoscaler_state == 'present' + +- name: "Ensure the cluster-autoscaler roles are {{ openshift_cluster_autoscaler_state }}" + oc_obj: + namespace: "{{ openshift_cluster_autoscaler_namespace }}" + state: "{{ openshift_cluster_autoscaler_state }}" + kind: "{{ item.type }}" + name: "{{ item.name }}" + delete_after: "{{ openshift_cluster_autoscaler_delete_config }}" + files: + - "{{ openshift_cluster_autoscaler_template_location }}/{{ item.fname }}" + with_items: + - fname: clusterrole.yml + type: clusterrole + name: "system:{{ openshift_cluster_autoscaler_name }}" + - fname: role.yml + type: role + name: "{{ openshift_cluster_autoscaler_name }}" + +- include: aws.yml + when: openshift_cluster_autoscaler_cloud_provider == 'aws' + +- name: create the policies + oc_adm_policy_user: + state: "{{ openshift_cluster_autoscaler_state }}" + user: "system:serviceaccount:openshift-infra:{{ openshift_cluster_autoscaler_serviceaccount }}" + namespace: "{{ openshift_cluster_autoscaler_namespace }}" + resource_kind: "{{ item.name }}" + resource_name: cluster-autoscaler + role_namespace: "{{ item.role_namespace | default(omit) }}" + with_items: + - name: cluster-role + - name: role + role_namespace: "{{ openshift_cluster_autoscaler_namespace }}" + +- name: "grant cluster-reader to {{ openshift_cluster_autoscaler_serviceaccount }}" + oc_adm_policy_user: + state: "{{ openshift_cluster_autoscaler_state }}" + user: "system:serviceaccount:{{ openshift_cluster_autoscaler_namespace }}:{{ openshift_cluster_autoscaler_serviceaccount }}" + namespace: "{{ openshift_cluster_autoscaler_namespace }}" + resource_name: cluster-reader + 
resource_kind: cluster-role + +- name: laydown the deployment file for cluster-autoscaler + template: + src: cluster-autoscaler.j2 + dest: "{{ openshift_cluster_autoscaler_template_location }}/{{ openshift_cluster_autoscaler_template_name }}" + when: openshift_cluster_autoscaler_state == 'present' + +- name: "Ensure the cluster-autoscaler is {{ openshift_cluster_autoscaler_state }}" + oc_obj: + namespace: "{{ openshift_cluster_autoscaler_namespace }}" + state: "{{ openshift_cluster_autoscaler_state }}" + kind: deployment + name: "{{ openshift_cluster_autoscaler_name }}" + delete_after: "{{ openshift_cluster_autoscaler_delete_config }}" + files: + - "{{ openshift_cluster_autoscaler_template_location }}/{{ openshift_cluster_autoscaler_template_name }}" diff --git a/roles/openshift_cluster_autoscaler/templates/aws-creds.j2 b/roles/openshift_cluster_autoscaler/templates/aws-creds.j2 new file mode 100644 index 000000000..28399e652 --- /dev/null +++ b/roles/openshift_cluster_autoscaler/templates/aws-creds.j2 @@ -0,0 +1,3 @@ +[default] +aws_access_key_id = {{ openshift_cluster_autoscaler_aws_key }} +aws_secret_access_key = {{ openshift_cluster_autoscaler_aws_secret_key }} diff --git a/roles/openshift_cluster_autoscaler/templates/cluster-autoscaler.j2 b/roles/openshift_cluster_autoscaler/templates/cluster-autoscaler.j2 new file mode 100644 index 000000000..f76bce37b --- /dev/null +++ b/roles/openshift_cluster_autoscaler/templates/cluster-autoscaler.j2 @@ -0,0 +1,58 @@ +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: {{ openshift_cluster_autoscaler_name }} + name: {{ openshift_cluster_autoscaler_name }} + namespace: {{ openshift_cluster_autoscaler_namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ openshift_cluster_autoscaler_name }} + role: infra + template: + metadata: + labels: + app: {{ openshift_cluster_autoscaler_name }} + role: infra + spec: + containers: + - args: + - --alsologtostderr + - --v={{ openshift_cluster_autoscaler_version }} + - --skip-nodes-with-local-storage={{ openshift_cluster_autoscaler_skip_nodes_local_storage }} + - --leader-elect-resource-lock=configmaps + - --namespace={{ openshift_cluster_autoscaler_namespace }} +{% if openshift_cluster_autoscaler_scale_down_delay is defined %} + - --scale-down-delay={{ openshift_cluster_autoscaler_scale_down_delay }} +{% endif %} + - --cloud-provider={{ openshift_cluster_autoscaler_cloud_provider }} +{% for group in openshift_cluster_autoscaler_node_groups %} + - --nodes={{ group.min }}:{{ group.max }}:{{ group.name }} +{% endfor %} +{% if openshift_cluster_autoscaler_cloud_provider == 'aws' %} + env: {{ openshift_cluster_autoscaler_aws_env_vars | union(openshift_cluster_autoscaler_env_vars) | to_json }} +{% else %} + env: {{ openshift_cluster_autoscaler_env_vars }} +{% endif %} + image: {{ openshift_cluster_autoscaler_image }} + name: autoscaler +{% if openshift_cluster_autoscaler_cloud_provider == 'aws' %} + volumeMounts: + - mountPath: {{ openshift_cluster_autoscaler_aws_creds_path | dirname }} + name: {{ openshift_cluster_autoscaler_aws_creds_name }} + readOnly: true +{% endif %} + nodeSelector: {{ openshift_cluster_autoscaler_node_selector | to_json }} + dnsPolicy: ClusterFirst + serviceAccountName: {{ openshift_cluster_autoscaler_serviceaccount }} + terminationGracePeriodSeconds: 30 +{% if openshift_cluster_autoscaler_cloud_provider == 'aws' %} + volumes: + - name: {{ openshift_cluster_autoscaler_aws_creds_name }} + secret: + defaultMode: 420 + secretName: {{ 
openshift_cluster_autoscaler_aws_secret_name }} +{% endif %} diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 6c5bb8693..27cfc17d6 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -84,6 +84,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin - `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '8Gi'. - `openshift_logging_es_log_appenders`: The list of rootLogger appenders for ES logs which can be: 'file', 'console'. Defaults to 'file'. - `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'. +- `openshift_logging_es_pvc_storage_class_name`: The name of the storage class to use for a static PVC. Defaults to ''. - `openshift_logging_es_pvc_dynamic`: Whether or not to add the dynamic PVC annotation for any generated PVCs. Defaults to 'False'. - `openshift_logging_es_pvc_size`: The requested size for the ES PVCs, when not provided the role will not generate any PVCs. Defaults to '""'. - `openshift_logging_es_pvc_prefix`: The prefix for the generated PVCs. Defaults to 'logging-es'. diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 626732d16..497c6e0c5 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -99,7 +99,7 @@ openshift_logging_es_log_appenders: ['file'] openshift_logging_es_memory_limit: "8Gi" openshift_logging_es_pv_selector: "{{ openshift_logging_storage_labels | default('') }}" openshift_logging_es_pvc_dynamic: "{{ openshift_logging_elasticsearch_pvc_dynamic | default(False) }}" -openshift_logging_es_pvc_size: "{{ openshift_logging_elasticsearch_pvc_size | default('') }}" +openshift_logging_es_pvc_size: '' openshift_logging_es_pvc_prefix: "{{ openshift_logging_elasticsearch_pvc_prefix | default('logging-es') }}" openshift_logging_es_recover_after_time: 5m openshift_logging_es_storage_group: "65534" diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index cec295d65..2fefdc894 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -79,10 +79,12 @@ openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}" openshift_logging_elasticsearch_pvc_name: "{{ outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}" + openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" + openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}" openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}" openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if 
outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}" _es_containers: "{{ outer_item.0.containers}}" @@ -104,10 +106,12 @@ generated_certs_dir: "{{openshift.common.config_base}}/logging" openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ outer_item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}" + openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" + openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}" with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }} loop_control: @@ -144,6 +148,7 @@ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}" openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" + openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}" @@ -185,6 +190,7 @@ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}" openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" + openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}" diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 15f6a23e6..7f8e88036 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -3,17 +3,6 @@ msg: Only one Fluentd nodeselector key pair should be provided when: openshift_logging_fluentd_nodeselector.keys() | count > 1 -- name: Set default image variables based on deployment_type - include_vars: "{{ item }}" - with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" - - "default_images.yml" - -- name: Set logging image facts - set_fact: - openshift_logging_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" - openshift_logging_image_version: "{{ openshift_logging_image_version | default(__openshift_logging_image_version) }}" - - name: Create temp directory for doing work in command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX register: mktemp diff --git a/roles/openshift_logging/vars/default_images.yml b/roles/openshift_logging/vars/default_images.yml deleted file mode 100644 index 
1a77808f6..000000000 --- a/roles/openshift_logging/vars/default_images.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -__openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" -__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml deleted file mode 100644 index f60fa8d7d..000000000 --- a/roles/openshift_logging/vars/openshift-enterprise.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -__openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('v3.7') }}" diff --git a/roles/openshift_logging_curator/defaults/main.yml b/roles/openshift_logging_curator/defaults/main.yml index 9cae9f936..a0d221c32 100644 --- a/roles/openshift_logging_curator/defaults/main.yml +++ b/roles/openshift_logging_curator/defaults/main.yml @@ -1,7 +1,5 @@ --- ### General logging settings -openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" -openshift_logging_curator_image_version: "{{ openshift_logging_image_version | default('latest') }}" openshift_logging_curator_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" openshift_logging_curator_master_url: "https://kubernetes.default.svc.cluster.local" diff --git a/roles/openshift_logging_curator/tasks/determine_version.yaml b/roles/openshift_logging_curator/tasks/determine_version.yaml index 94f8b4a97..2013f4e38 100644 --- a/roles/openshift_logging_curator/tasks/determine_version.yaml +++ b/roles/openshift_logging_curator/tasks/determine_version.yaml @@ -1,16 +1,16 @@ --- # debating making this a module instead? - fail: - msg: Missing version to install provided by 'openshift_logging_image_version' - when: not openshift_logging_image_version or openshift_logging_image_version == '' + msg: Missing version to install provided by 'openshift_logging_curator_image_version' + when: not openshift_logging_curator_image_version or openshift_logging_curator_image_version == '' - set_fact: curator_version: "{{ __latest_curator_version }}" - when: openshift_logging_image_version == 'latest' + when: openshift_logging_curator_image_version == 'latest' # should we just assume that we will have the correct major version? 
-- set_fact: curator_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" - when: openshift_logging_image_version != 'latest' +- set_fact: curator_version="{{ openshift_logging_curator_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" + when: openshift_logging_curator_image_version != 'latest' - fail: msg: Invalid version specified for Curator diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index fcaf18ed4..7ddf57450 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -1,4 +1,17 @@ --- +- name: Set default image variables based on deployment_type + include_vars: "{{ var_file_name }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + loop_control: + loop_var: var_file_name + +- name: Set curator image facts + set_fact: + openshift_logging_curator_image_prefix: "{{ openshift_logging_curator_image_prefix | default(__openshift_logging_curator_image_prefix) }}" + openshift_logging_curator_image_version: "{{ openshift_logging_curator_image_version | default(__openshift_logging_curator_image_version) }}" + - include: determine_version.yaml # allow passing in a tempdir @@ -35,7 +48,7 @@ name: "aggregated-logging-curator" namespace: "{{ openshift_logging_namespace }}" when: - - openshift_logging_image_pull_secret == '' + - openshift_logging_image_pull_secret == '' # configmap - copy: @@ -65,12 +78,12 @@ name: "logging-curator" namespace: "{{ openshift_logging_namespace }}" files: - - name: ca - path: "{{ generated_certs_dir }}/ca.crt" - - name: key - path: "{{ generated_certs_dir }}/system.logging.curator.key" - - name: cert - path: "{{ generated_certs_dir }}/system.logging.curator.crt" + - name: ca + path: "{{ generated_certs_dir }}/ca.crt" + - name: key + path: "{{ generated_certs_dir }}/system.logging.curator.key" + - name: cert + path: "{{ generated_certs_dir }}/system.logging.curator.crt" - set_fact: curator_name: "{{ 'logging-curator' ~ ( (openshift_logging_curator_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}" @@ -104,7 +117,7 @@ namespace: "{{ openshift_logging_namespace }}" kind: dc files: - - "{{ tempdir }}/templates/curator-dc.yaml" + - "{{ tempdir }}/templates/curator-dc.yaml" delete_after: true - name: Delete temp directory diff --git a/roles/openshift_logging_curator/vars/default_images.yml b/roles/openshift_logging_curator/vars/default_images.yml new file mode 100644 index 000000000..208b41afa --- /dev/null +++ b/roles/openshift_logging_curator/vars/default_images.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix | default('docker.io/openshift/origin-') }}" +__openshift_logging_curator_image_version: "{{ openshift_logging_image_version | default('latest') }}" diff --git a/roles/openshift_logging_curator/vars/openshift-enterprise.yml b/roles/openshift_logging_curator/vars/openshift-enterprise.yml new file mode 100644 index 000000000..79cf131fd --- /dev/null +++ b/roles/openshift_logging_curator/vars/openshift-enterprise.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}" +__openshift_logging_curator_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}" diff --git 
a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml index 9fc6fd1d8..0ea913224 100644 --- a/roles/openshift_logging_elasticsearch/defaults/main.yml +++ b/roles/openshift_logging_elasticsearch/defaults/main.yml @@ -1,7 +1,5 @@ --- ### Common settings -openshift_logging_elasticsearch_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" -openshift_logging_elasticsearch_image_version: "{{ openshift_logging_image_version | default('latest') }}" openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" openshift_logging_elasticsearch_namespace: logging @@ -33,6 +31,7 @@ openshift_logging_elasticsearch_pvc_name: "" openshift_logging_elasticsearch_pvc_size: "" openshift_logging_elasticsearch_pvc_dynamic: false openshift_logging_elasticsearch_pvc_pv_selector: {} +openshift_logging_elasticsearch_pvc_storage_class_name: "" openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce'] openshift_logging_elasticsearch_storage_group: ['65534'] diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml index 1a952b5cf..c53a06019 100644 --- a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml @@ -1,18 +1,16 @@ --- # debating making this a module instead? - fail: - msg: Missing version to install provided by 'openshift_logging_image_version' - when: not openshift_logging_image_version or openshift_logging_image_version == '' + msg: Missing version to install provided by 'openshift_logging_elasticsearch_image_version' + when: not openshift_logging_elasticsearch_image_version or openshift_logging_elasticsearch_image_version == '' - set_fact: es_version: "{{ __latest_es_version }}" - when: openshift_logging_image_version == 'latest' - -- debug: var=openshift_logging_image_version + when: openshift_logging_elasticsearch_image_version == 'latest' # should we just assume that we will have the correct major version? 
-- set_fact: es_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" - when: openshift_logging_image_version != 'latest' +- set_fact: es_version="{{ openshift_logging_elasticsearch_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" + when: openshift_logging_elasticsearch_image_version != 'latest' - fail: msg: Invalid version specified for Elasticsearch diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index e7ef443bd..2bd02af60 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -15,18 +15,22 @@ elasticsearch_name: "{{ 'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}" es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}" -- include: determine_version.yaml - - name: Set default image variables based on deployment_type - include_vars: "{{ item }}" + include_vars: "{{ var_file_name }}" with_first_found: - "{{ openshift_deployment_type | default(deployment_type) }}.yml" - "default_images.yml" + loop_control: + loop_var: var_file_name -- name: Set elasticsearch_prefix image facts +- name: Set elasticsearch image facts set_fact: openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_elasticsearch_proxy_image_prefix | default(__openshift_logging_elasticsearch_proxy_image_prefix) }}" openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_elasticsearch_proxy_image_version | default(__openshift_logging_elasticsearch_proxy_image_version) }}" + openshift_logging_elasticsearch_image_prefix: "{{ openshift_logging_elasticsearch_image_prefix | default(__openshift_logging_elasticsearch_image_prefix) }}" + openshift_logging_elasticsearch_image_version: "{{ openshift_logging_elasticsearch_image_version | default(__openshift_logging_elasticsearch_image_version) }}" + +- include: determine_version.yaml # allow passing in a tempdir - name: Create temp directory for doing work in diff --git a/roles/openshift_logging_elasticsearch/vars/default_images.yml b/roles/openshift_logging_elasticsearch/vars/default_images.yml index b7d105caf..cef49dd92 100644 --- a/roles/openshift_logging_elasticsearch/vars/default_images.yml +++ b/roles/openshift_logging_elasticsearch/vars/default_images.yml @@ -1,3 +1,5 @@ --- -__openshift_logging_elasticsearch_proxy_image_prefix: "docker.io/openshift/" -__openshift_logging_elasticsearch_proxy_image_version: "v1.0.0" +__openshift_logging_elasticsearch_image_prefix: "{{ openshift_logging_image_prefix | default('docker.io/openshift/origin-') }}" +__openshift_logging_elasticsearch_image_version: "{{ openshift_logging_image_version | default('latest') }}" +__openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_image_prefix | default('docker.io/openshift/') }}" +__openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_image_version | default('v1.0.0') }}" diff --git a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml index 2fd960bb5..07d92896f 100644 --- a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml +++ b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml @@ -1,3 +1,5 @@ --- 
+__openshift_logging_elasticsearch_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}" +__openshift_logging_elasticsearch_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}" __openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_logging_elasticsearch_proxy_image_version: "v3.7" +__openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}" diff --git a/roles/openshift_logging_eventrouter/defaults/main.yaml b/roles/openshift_logging_eventrouter/defaults/main.yaml index 4c0350c98..62542f496 100644 --- a/roles/openshift_logging_eventrouter/defaults/main.yaml +++ b/roles/openshift_logging_eventrouter/defaults/main.yaml @@ -1,6 +1,4 @@ --- -openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" -openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version | default('latest') }}" openshift_logging_eventrouter_replicas: 1 openshift_logging_eventrouter_sink: stdout openshift_logging_eventrouter_nodeselector: "" diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml index 58e5a559f..b1f93eeb9 100644 --- a/roles/openshift_logging_eventrouter/tasks/main.yaml +++ b/roles/openshift_logging_eventrouter/tasks/main.yaml @@ -1,4 +1,17 @@ --- +- name: Set default image variables based on deployment_type + include_vars: "{{ var_file_name }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + loop_control: + loop_var: var_file_name + +- name: Set eventrouter image facts + set_fact: + openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_eventrouter_image_prefix | default(__openshift_logging_eventrouter_image_prefix) }}" + openshift_logging_eventrouter_image_version: "{{ openshift_logging_eventrouter_image_version | default(__openshift_logging_eventrouter_image_version) }}" + - include: "{{ role_path }}/tasks/install_eventrouter.yaml" when: openshift_logging_install_eventrouter | default(false) | bool diff --git a/roles/openshift_logging_eventrouter/vars/default_images.yml b/roles/openshift_logging_eventrouter/vars/default_images.yml new file mode 100644 index 000000000..dbfe2d697 --- /dev/null +++ b/roles/openshift_logging_eventrouter/vars/default_images.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_image_prefix | default('docker.io/openshift/origin-') }}" +__openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version | default('latest') }}" diff --git a/roles/openshift_logging_eventrouter/vars/openshift-enterprise.yml b/roles/openshift_logging_eventrouter/vars/openshift-enterprise.yml new file mode 100644 index 000000000..bb7dc6455 --- /dev/null +++ b/roles/openshift_logging_eventrouter/vars/openshift-enterprise.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}" +__openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}" diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml index 861935c99..9b58e4456 100644 --- a/roles/openshift_logging_fluentd/defaults/main.yml +++ 
b/roles/openshift_logging_fluentd/defaults/main.yml @@ -1,7 +1,5 @@ --- ### General logging settings -openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" -openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version | default('latest') }}" openshift_logging_fluentd_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" openshift_logging_fluentd_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" openshift_logging_fluentd_namespace: logging diff --git a/roles/openshift_logging_fluentd/tasks/determine_version.yaml b/roles/openshift_logging_fluentd/tasks/determine_version.yaml index a1ba71b1b..6848eb512 100644 --- a/roles/openshift_logging_fluentd/tasks/determine_version.yaml +++ b/roles/openshift_logging_fluentd/tasks/determine_version.yaml @@ -1,16 +1,16 @@ --- # debating making this a module instead? - fail: - msg: Missing version to install provided by 'openshift_logging_image_version' - when: not openshift_logging_image_version or openshift_logging_image_version == '' + msg: Missing version to install provided by 'openshift_logging_fluentd_image_version' + when: not openshift_logging_fluentd_image_version or openshift_logging_fluentd_image_version == '' - set_fact: fluentd_version: "{{ __latest_fluentd_version }}" - when: openshift_logging_image_version == 'latest' + when: openshift_logging_fluentd_image_version == 'latest' # should we just assume that we will have the correct major version? -- set_fact: fluentd_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" - when: openshift_logging_image_version != 'latest' +- set_fact: fluentd_version="{{ openshift_logging_fluentd_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" + when: openshift_logging_fluentd_image_version != 'latest' - fail: msg: Invalid version specified for Fluentd diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 2f89c3f9f..f8683ab75 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -34,6 +34,19 @@ msg: WARNING Use of openshift_logging_mux_client_mode=minimal is not recommended due to current scaling issues when: openshift_logging_mux_client_mode is defined and openshift_logging_mux_client_mode == 'minimal' +- name: Set default image variables based on deployment_type + include_vars: "{{ var_file_name }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + loop_control: + loop_var: var_file_name + +- name: Set fluentd image facts + set_fact: + openshift_logging_fluentd_image_prefix: "{{ openshift_logging_fluentd_image_prefix | default(__openshift_logging_fluentd_image_prefix) }}" + openshift_logging_fluentd_image_version: "{{ openshift_logging_fluentd_image_version | default(__openshift_logging_fluentd_image_version) }}" + - include: determine_version.yaml # allow passing in a tempdir @@ -69,7 +82,7 @@ name: "aggregated-logging-fluentd" namespace: "{{ openshift_logging_fluentd_namespace }}" when: - - openshift_logging_image_pull_secret == '' + - openshift_logging_image_pull_secret == '' # set service account scc - name: Set privileged permissions for Fluentd @@ -146,12 +159,12 @@ name: logging-fluentd namespace: "{{ openshift_logging_fluentd_namespace }}" files: - - name: ca - path: 
"{{ generated_certs_dir }}/ca.crt" - - name: key - path: "{{ generated_certs_dir }}/system.logging.fluentd.key" - - name: cert - path: "{{ generated_certs_dir }}/system.logging.fluentd.crt" + - name: ca + path: "{{ generated_certs_dir }}/ca.crt" + - name: key + path: "{{ generated_certs_dir }}/system.logging.fluentd.key" + - name: cert + path: "{{ generated_certs_dir }}/system.logging.fluentd.crt" # create Fluentd daemonset # this should change based on the type of fluentd deployment to be done... @@ -187,7 +200,7 @@ namespace: "{{ openshift_logging_fluentd_namespace }}" kind: daemonset files: - - "{{ tempdir }}/templates/logging-fluentd.yaml" + - "{{ tempdir }}/templates/logging-fluentd.yaml" delete_after: true # Scale up Fluentd diff --git a/roles/openshift_logging_fluentd/vars/default_images.yml b/roles/openshift_logging_fluentd/vars/default_images.yml new file mode 100644 index 000000000..6d127b730 --- /dev/null +++ b/roles/openshift_logging_fluentd/vars/default_images.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix | default('docker.io/openshift/origin-') }}" +__openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version | default('latest') }}" diff --git a/roles/openshift_logging_fluentd/vars/openshift-enterprise.yml b/roles/openshift_logging_fluentd/vars/openshift-enterprise.yml new file mode 100644 index 000000000..d0c74f1fb --- /dev/null +++ b/roles/openshift_logging_fluentd/vars/openshift-enterprise.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}" +__openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}" diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml index 1366e96cd..6cdf7c8f3 100644 --- a/roles/openshift_logging_kibana/defaults/main.yml +++ b/roles/openshift_logging_kibana/defaults/main.yml @@ -2,8 +2,6 @@ ### Common settings openshift_logging_kibana_master_url: "https://kubernetes.default.svc.cluster.local" openshift_logging_kibana_master_public_url: "https://kubernetes.default.svc.cluster.local" -openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" -openshift_logging_kibana_image_version: "{{ openshift_logging_image_version | default('latest') }}" openshift_logging_kibana_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" openshift_logging_kibana_namespace: logging @@ -25,8 +23,6 @@ openshift_logging_kibana_edge_term_policy: Redirect openshift_logging_kibana_ops_deployment: false # Proxy settings -openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" -openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_image_version | default('latest') }}" openshift_logging_kibana_proxy_debug: false openshift_logging_kibana_proxy_cpu_limit: null openshift_logging_kibana_proxy_cpu_request: 100m diff --git a/roles/openshift_logging_kibana/tasks/determine_version.yaml b/roles/openshift_logging_kibana/tasks/determine_version.yaml index 53e15af5f..63e5a89f1 100644 --- a/roles/openshift_logging_kibana/tasks/determine_version.yaml +++ b/roles/openshift_logging_kibana/tasks/determine_version.yaml @@ -1,16 +1,16 @@ --- # debating making this a module instead? 
- fail: - msg: Missing version to install provided by 'openshift_logging_image_version' - when: not openshift_logging_image_version or openshift_logging_image_version == '' + msg: Missing version to install provided by 'openshift_logging_kibana_image_version' + when: not openshift_logging_kibana_image_version or openshift_logging_kibana_image_version == '' - set_fact: kibana_version: "{{ __latest_kibana_version }}" - when: openshift_logging_image_version == 'latest' + when: openshift_logging_kibana_image_version == 'latest' # should we just assume that we will have the correct major version? -- set_fact: kibana_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" - when: openshift_logging_image_version != 'latest' +- set_fact: kibana_version="{{ openshift_logging_kibana_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" + when: openshift_logging_kibana_image_version != 'latest' - fail: msg: Invalid version specified for Kibana diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index 8ef8ede9a..9d99114c5 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -1,5 +1,19 @@ --- # fail is we don't have an endpoint for ES to connect to? +- name: Set default image variables based on deployment_type + include_vars: "{{ var_file_name }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + loop_control: + loop_var: var_file_name + +- name: Set kibana image facts + set_fact: + openshift_logging_kibana_image_prefix: "{{ openshift_logging_kibana_image_prefix | default(__openshift_logging_kibana_image_prefix) }}" + openshift_logging_kibana_image_version: "{{ openshift_logging_kibana_image_version | default(__openshift_logging_kibana_image_version) }}" + openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_kibana_proxy_image_prefix | default(__openshift_logging_kibana_proxy_image_prefix) }}" + openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_kibana_proxy_image_version | default(__openshift_logging_kibana_proxy_image_version) }}" - include: determine_version.yaml @@ -37,7 +51,7 @@ name: "aggregated-logging-kibana" namespace: "{{ openshift_logging_namespace }}" when: - - openshift_logging_image_pull_secret == '' + - openshift_logging_image_pull_secret == '' - set_fact: kibana_name: "{{ 'logging-kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}" @@ -58,7 +72,7 @@ content: "{{ 200 | oo_random_word }}" dest: "{{ generated_certs_dir }}/session_secret" when: - - not session_secret_file.stat.exists + - not session_secret_file.stat.exists # gen oauth_secret if necessary - name: Generate oauth secret @@ -66,19 +80,19 @@ content: "{{ 64 | oo_random_word }}" dest: "{{ generated_certs_dir }}/oauth_secret" when: - - not oauth_secret_file.stat.exists + - not oauth_secret_file.stat.exists - name: Retrieving the cert to use when generating secrets for the logging components slurp: src: "{{ generated_certs_dir }}/{{ item.file }}" register: key_pairs with_items: - - { name: "ca_file", file: "ca.crt" } - - { name: "kibana_internal_key", file: "kibana-internal.key"} - - { name: "kibana_internal_cert", file: "kibana-internal.crt"} - - { name: "server_tls", file: "server-tls.json"} - - { name: "session_secret", file: "session_secret" } - - { name: 
"oauth_secret", file: "oauth_secret" } + - { name: "ca_file", file: "ca.crt" } + - { name: "kibana_internal_key", file: "kibana-internal.key"} + - { name: "kibana_internal_cert", file: "kibana-internal.crt"} + - { name: "server_tls", file: "server-tls.json"} + - { name: "session_secret", file: "session_secret" } + - { name: "oauth_secret", file: "oauth_secret" } # services - name: Set {{ kibana_name }} service @@ -92,8 +106,8 @@ labels: logging-infra: 'support' ports: - - port: 443 - targetPort: "oaproxy" + - port: 443 + targetPort: "oaproxy" # create routes # TODO: set up these certs differently? @@ -144,7 +158,7 @@ namespace: "{{ openshift_logging_namespace }}" kind: route files: - - "{{ tempdir }}/templates/kibana-route.yaml" + - "{{ tempdir }}/templates/kibana-route.yaml" # preserve list of current hostnames - name: Get current oauthclient hostnames @@ -173,7 +187,7 @@ namespace: "{{ openshift_logging_namespace }}" kind: oauthclient files: - - "{{ tempdir }}/templates/oauth-client.yml" + - "{{ tempdir }}/templates/oauth-client.yml" delete_after: true # create Kibana secret @@ -183,12 +197,12 @@ name: "logging-kibana" namespace: "{{ openshift_logging_namespace }}" files: - - name: ca - path: "{{ generated_certs_dir }}/ca.crt" - - name: key - path: "{{ generated_certs_dir }}/system.logging.kibana.key" - - name: cert - path: "{{ generated_certs_dir }}/system.logging.kibana.crt" + - name: ca + path: "{{ generated_certs_dir }}/ca.crt" + - name: key + path: "{{ generated_certs_dir }}/system.logging.kibana.key" + - name: cert + path: "{{ generated_certs_dir }}/system.logging.kibana.crt" # create Kibana-proxy secret - name: Set Kibana Proxy secret @@ -205,16 +219,16 @@ #- name: server-tls.json # path: "{{ generated_certs_dir }}/server-tls.json" contents: - - path: oauth-secret - data: "{{ key_pairs | entry_from_named_pair('oauth_secret') | b64decode }}" - - path: session-secret - data: "{{ key_pairs | entry_from_named_pair('session_secret') | b64decode }}" - - path: server-key - data: "{{ key_pairs | entry_from_named_pair('kibana_internal_key') | b64decode }}" - - path: server-cert - data: "{{ key_pairs | entry_from_named_pair('kibana_internal_cert') | b64decode }}" - - path: server-tls.json - data: "{{ key_pairs | entry_from_named_pair('server_tls') | b64decode }}" + - path: oauth-secret + data: "{{ key_pairs | entry_from_named_pair('oauth_secret') | b64decode }}" + - path: session-secret + data: "{{ key_pairs | entry_from_named_pair('session_secret') | b64decode }}" + - path: server-key + data: "{{ key_pairs | entry_from_named_pair('kibana_internal_key') | b64decode }}" + - path: server-cert + data: "{{ key_pairs | entry_from_named_pair('kibana_internal_cert') | b64decode }}" + - path: server-tls.json + data: "{{ key_pairs | entry_from_named_pair('server_tls') | b64decode }}" # create Kibana DC - name: Generate Kibana DC template @@ -245,7 +259,7 @@ namespace: "{{ openshift_logging_namespace }}" kind: dc files: - - "{{ tempdir }}/templates/kibana-dc.yaml" + - "{{ tempdir }}/templates/kibana-dc.yaml" delete_after: true # update master configs? 
diff --git a/roles/openshift_logging_kibana/vars/default_images.yml b/roles/openshift_logging_kibana/vars/default_images.yml new file mode 100644 index 000000000..db0f9b622 --- /dev/null +++ b/roles/openshift_logging_kibana/vars/default_images.yml @@ -0,0 +1,5 @@ +--- +__openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix | default('docker.io/openshift/origin-') }}" +__openshift_logging_kibana_image_version: "{{ openshift_logging_image_version | default('latest') }}" +__openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_image_prefix | default('docker.io/openshift/origin-') }}" +__openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_image_version | default('latest') }}" diff --git a/roles/openshift_logging_kibana/vars/openshift-enterprise.yml b/roles/openshift_logging_kibana/vars/openshift-enterprise.yml new file mode 100644 index 000000000..0be2e7252 --- /dev/null +++ b/roles/openshift_logging_kibana/vars/openshift-enterprise.yml @@ -0,0 +1,5 @@ +--- +__openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}" +__openshift_logging_kibana_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}" +__openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}" +__openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}" diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index 9de686576..cd15da939 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -1,7 +1,5 @@ --- ### General logging settings -openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" -openshift_logging_mux_image_version: "{{ openshift_logging_image_version | default('latest') }}" openshift_logging_mux_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" openshift_logging_mux_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}" diff --git a/roles/openshift_logging_mux/tasks/determine_version.yaml b/roles/openshift_logging_mux/tasks/determine_version.yaml index 229bcf3d5..769475dd5 100644 --- a/roles/openshift_logging_mux/tasks/determine_version.yaml +++ b/roles/openshift_logging_mux/tasks/determine_version.yaml @@ -1,16 +1,16 @@ --- # debating making this a module instead? - fail: - msg: Missing version to install provided by 'openshift_logging_image_version' - when: not openshift_logging_image_version or openshift_logging_image_version == '' + msg: Missing version to install provided by 'openshift_logging_mux_image_version' + when: not openshift_logging_mux_image_version or openshift_logging_mux_image_version == '' - set_fact: mux_version: "{{ __latest_mux_version }}" - when: openshift_logging_image_version == 'latest' + when: openshift_logging_mux_image_version == 'latest' # should we just assume that we will have the correct major version? 
-- set_fact: mux_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" - when: openshift_logging_image_version != 'latest' +- set_fact: mux_version="{{ openshift_logging_mux_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" + when: openshift_logging_mux_image_version != 'latest' - fail: msg: Invalid version specified for mux diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 5b257139e..242d92188 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -7,6 +7,19 @@ msg: Operations logs destination is required when: not openshift_logging_mux_ops_host or openshift_logging_mux_ops_host == '' +- name: Set default image variables based on deployment_type + include_vars: "{{ var_file_name }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + loop_control: + loop_var: var_file_name + +- name: Set mux image facts + set_fact: + openshift_logging_mux_image_prefix: "{{ openshift_logging_mux_image_prefix | default(__openshift_logging_mux_image_prefix) }}" + openshift_logging_mux_image_version: "{{ openshift_logging_mux_image_version | default(__openshift_logging_mux_image_version) }}" + - include: determine_version.yaml # allow passing in a tempdir @@ -42,7 +55,7 @@ name: "aggregated-logging-mux" namespace: "{{ openshift_logging_mux_namespace }}" when: - - openshift_logging_image_pull_secret == '' + - openshift_logging_image_pull_secret == '' # set service account scc - name: Set privileged permissions for Mux @@ -112,14 +125,14 @@ name: logging-mux namespace: "{{ openshift_logging_mux_namespace }}" files: - - name: ca - path: "{{ generated_certs_dir }}/ca.crt" - - name: key - path: "{{ generated_certs_dir }}/system.logging.mux.key" - - name: cert - path: "{{ generated_certs_dir }}/system.logging.mux.crt" - - name: shared_key - path: "{{ generated_certs_dir }}/mux_shared_key" + - name: ca + path: "{{ generated_certs_dir }}/ca.crt" + - name: key + path: "{{ generated_certs_dir }}/system.logging.mux.key" + - name: cert + path: "{{ generated_certs_dir }}/system.logging.mux.crt" + - name: shared_key + path: "{{ generated_certs_dir }}/mux_shared_key" # services - name: Set logging-mux service for external communication @@ -133,11 +146,11 @@ labels: logging-infra: 'support' ports: - - name: mux-forward - port: "{{ openshift_logging_mux_port }}" - targetPort: "mux-forward" + - name: mux-forward + port: "{{ openshift_logging_mux_port }}" + targetPort: "mux-forward" external_ips: - - "{{ ansible_eth0.ipv4.address }}" + - "{{ ansible_eth0.ipv4.address }}" when: openshift_logging_mux_allow_external | bool - name: Set logging-mux service for internal communication @@ -151,9 +164,9 @@ labels: logging-infra: 'support' ports: - - name: mux-forward - port: "{{ openshift_logging_mux_port }}" - targetPort: "mux-forward" + - name: mux-forward + port: "{{ openshift_logging_mux_port }}" + targetPort: "mux-forward" when: not openshift_logging_mux_allow_external | bool # create Mux DC @@ -188,7 +201,7 @@ selector: "{{ openshift_logging_mux_file_buffer_pvc_pv_selector }}" storage_class_name: "{{ openshift_logging_mux_file_buffer_pvc_storage_class_name | default('', true) }}" when: - - openshift_logging_mux_file_buffer_storage_type == "pvc" + - openshift_logging_mux_file_buffer_storage_type == "pvc" - name: Set logging-mux DC oc_obj: @@ -197,7 +210,7 
@@ namespace: "{{ openshift_logging_mux_namespace }}" kind: dc files: - - "{{ tempdir }}/templates/logging-mux-dc.yaml" + - "{{ tempdir }}/templates/logging-mux-dc.yaml" delete_after: true - name: Add mux namespaces diff --git a/roles/openshift_logging_mux/vars/default_images.yml b/roles/openshift_logging_mux/vars/default_images.yml new file mode 100644 index 000000000..bd5dc4504 --- /dev/null +++ b/roles/openshift_logging_mux/vars/default_images.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix | default('docker.io/openshift/origin-') }}" +__openshift_logging_mux_image_version: "{{ openshift_logging_image_version | default('latest') }}" diff --git a/roles/openshift_logging_mux/vars/openshift-enterprise.yml b/roles/openshift_logging_mux/vars/openshift-enterprise.yml new file mode 100644 index 000000000..1e7eb9d8d --- /dev/null +++ b/roles/openshift_logging_mux/vars/openshift-enterprise.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}" +__openshift_logging_mux_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}" diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 97a8735ee..3fb94fff8 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -67,3 +67,6 @@ openshift_master_bootstrap_enabled: False openshift_master_csr_sa: node-bootstrapper openshift_master_csr_namespace: openshift-infra + +openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml" +openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json" diff --git a/roles/openshift_master/tasks/restart.yml b/roles/openshift_master/tasks/restart.yml new file mode 100644 index 000000000..4f8b758fd --- /dev/null +++ b/roles/openshift_master/tasks/restart.yml @@ -0,0 +1,22 @@ +--- +- name: Restart master API + service: + name: "{{ openshift.common.service_type }}-master-api" + state: restarted + when: openshift_master_ha | bool +- name: Wait for master API to come back online + wait_for: + host: "{{ openshift.common.hostname }}" + state: started + delay: 10 + port: "{{ openshift.master.api_port }}" + timeout: 600 + when: openshift_master_ha | bool +- name: Restart master controllers + service: + name: "{{ openshift.common.service_type }}-master-controllers" + state: restarted + # Ignore errors since it is possible that type != simple for + # pre-3.1.1 installations. + ignore_errors: true + when: openshift_master_ha | bool diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 8420dfb8c..b0fa72f19 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -2,9 +2,6 @@ # systemd_units.yml is included both in the openshift_master role and in the upgrade # playbooks.
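As an aside on the new `openshift_master/tasks/restart.yml` above: it only acts when `openshift_master_ha` is true and gates on the API port before moving on. A minimal sketch of how an upgrade play could call it; the `masters` host group and the `include_role` wiring here are assumptions for illustration, not taken from the playbooks in this diff:

```yaml
# Sketch only; host group and include mechanism are assumptions.
- name: Restart masters after a config change
  hosts: masters
  serial: 1
  tasks:
    - include_role:
        name: openshift_master
        tasks_from: restart.yml
```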
-- include: upgrade_facts.yml - when: openshift_master_defaults_in_use is not defined - - name: Set HA Service Info for containerized installs set_fact: containerized_svc_dir: "/etc/systemd/system" diff --git a/roles/openshift_master/tasks/upgrade.yml b/roles/openshift_master/tasks/upgrade.yml new file mode 100644 index 000000000..92371921d --- /dev/null +++ b/roles/openshift_master/tasks/upgrade.yml @@ -0,0 +1,45 @@ +--- +- include: upgrade/rpm_upgrade.yml + when: not openshift.common.is_containerized | bool + +- include: upgrade/upgrade_scheduler.yml + +# master_config_hook is passed in from the upgrade play. +- include: "upgrade/{{ master_config_hook }}" + when: master_config_hook is defined + +- include: journald.yml + +- include: systemd_units.yml + +- name: Check for ca-bundle.crt + stat: + path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" + register: ca_bundle_stat + failed_when: false + +- name: Check for ca.crt + stat: + path: "{{ openshift.common.config_base }}/master/ca.crt" + register: ca_crt_stat + failed_when: false + +- name: Migrate ca.crt to ca-bundle.crt + command: mv ca.crt ca-bundle.crt + args: + chdir: "{{ openshift.common.config_base }}/master" + when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists + +- name: Link ca.crt to ca-bundle.crt + file: + src: "{{ openshift.common.config_base }}/master/ca-bundle.crt" + path: "{{ openshift.common.config_base }}/master/ca.crt" + state: link + when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists + +- name: Update oreg value + yedit: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + key: 'imageConfig.format' + value: "{{ oreg_url | default(oreg_url_master) }}" + when: oreg_url is defined or oreg_url_master is defined diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml new file mode 100644 index 000000000..f914a9978 --- /dev/null +++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml @@ -0,0 +1,20 @@ +--- +# When we update package "a-${version}" and a requires b >= ${version}, if we +# don't specify the version of b, yum will choose the latest version of b +# available and the whole set of dependencies ends up at the latest version. +# Since the package module, unlike the yum module, doesn't flatten a list +# of packages into one transaction, we need to do that explicitly. The Ansible +# core team tells us not to rely on yum module transaction flattening anyway.
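To make that contrast concrete, here is a minimal sketch of the two shapes the comment above describes; the `foo*` package names and the looped variant are placeholders for illustration, summarizing the comment rather than this repository's playbooks:

```yaml
# Sketch only; placeholder package names.
# Looping the package module runs it once per item, so every package is its
# own transaction and unpinned dependencies can drift independently:
- package:
    name: "{{ item }}{{ openshift_pkg_version }}"
    state: present
  with_items:
    - foo
    - foo-master
    - foo-node

# Joining the names hands the package manager one flattened transaction,
# which is what the task below does with master_pkgs | join(','):
- package:
    name: "foo{{ openshift_pkg_version }},foo-master{{ openshift_pkg_version }},foo-node{{ openshift_pkg_version }}"
    state: present
```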
+ +# TODO: If the sdn package isn't already installed, this will install it; we +# should fix that +- name: Upgrade master packages + package: name={{ master_pkgs | join(',') }} state=present + vars: + master_pkgs: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" diff --git a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml new file mode 100644 index 000000000..8558bf3e9 --- /dev/null +++ b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml @@ -0,0 +1,173 @@ +--- +# Upgrade predicates +- vars: + prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" + prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}" + default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}" + # older_predicates are the set of predicates that have previously been + # hard-coded into openshift_facts + older_predicates: + - - name: MatchNodeSelector + - name: PodFitsResources + - name: PodFitsPorts + - name: NoDiskConflict + - name: NoVolumeZoneConflict + - name: MaxEBSVolumeCount + - name: MaxGCEPDVolumeCount + - name: Region + argument: + serviceAffinity: + labels: + - region + - - name: MatchNodeSelector + - name: PodFitsResources + - name: PodFitsPorts + - name: NoDiskConflict + - name: NoVolumeZoneConflict + - name: Region + argument: + serviceAffinity: + labels: + - region + - - name: MatchNodeSelector + - name: PodFitsResources + - name: PodFitsPorts + - name: NoDiskConflict + - name: Region + argument: + serviceAffinity: + labels: + - region + # older_predicates_no_region are the set of predicates that have previously + # been hard-coded into openshift_facts, with the Region predicate removed + older_predicates_no_region: + - - name: MatchNodeSelector + - name: PodFitsResources + - name: PodFitsPorts + - name: NoDiskConflict + - name: NoVolumeZoneConflict + - name: MaxEBSVolumeCount + - name: MaxGCEPDVolumeCount + - - name: MatchNodeSelector + - name: PodFitsResources + - name: PodFitsPorts + - name: NoDiskConflict + - name: NoVolumeZoneConflict + - - name: MatchNodeSelector + - name: PodFitsResources + - name: PodFitsPorts + - name: NoDiskConflict + block: + + # Handle the case where openshift_master_scheduler_predicates is defined + - block: + - debug: + msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift; current defaults are: {{ openshift_master_scheduler_default_predicates }}" + when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] + + - debug: + msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}" + when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates + when:
openshift_master_scheduler_predicates | default(none) is not none + + # Handle the case where openshift_master_scheduler_predicates is not defined + - block: + - debug: + msg: "WARNING: existing scheduler config does not match previous known defaults; automated upgrade of the scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}" + when: + - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates + - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] + + - set_fact: + openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}" + when: + - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates + - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] + + - set_fact: + openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}" + when: + - openshift_master_scheduler_current_predicates != default_predicates_no_region + - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] + + when: openshift_master_scheduler_predicates | default(none) is none + + +# Upgrade priorities +- vars: + prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" + prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}" + default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}" + # older_priorities are the set of priorities that have previously been + # hard-coded into openshift_facts + older_priorities: + - - name: LeastRequestedPriority + weight: 1 + - name: SelectorSpreadPriority + weight: 1 + - name: Zone + weight: 2 + argument: + serviceAntiAffinity: + label: zone + # older_priorities_no_zone are the set of priorities that have previously + # been hard-coded into openshift_facts, with the Zone priority removed + older_priorities_no_zone: + - - name: LeastRequestedPriority + weight: 1 + - name: SelectorSpreadPriority + weight: 1 + block: + + # Handle the case where openshift_master_scheduler_priorities is defined + - block: + - debug: + msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift; current defaults are: {{ openshift_master_scheduler_default_priorities }}" + when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] + + - debug: + msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}" + when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities + when: openshift_master_scheduler_priorities | default(none) is not none + + # Handle the case where openshift_master_scheduler_priorities is not defined + - block: + - debug: + msg: "WARNING: existing scheduler config does not match previous known defaults; automated upgrade of the scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities
}}" + when: + - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities + - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] + + - set_fact: + openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}" + when: + - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities + - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] + + - set_fact: + openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}" + when: + - openshift_master_scheduler_current_priorities != default_priorities_no_zone + - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] + + when: openshift_master_scheduler_priorities | default(none) is none + + +# Update scheduler +- vars: + scheduler_config: + kind: Policy + apiVersion: v1 + predicates: "{{ openshift_upgrade_scheduler_predicates + | default(openshift_master_scheduler_current_predicates) }}" + priorities: "{{ openshift_upgrade_scheduler_priorities + | default(openshift_master_scheduler_current_priorities) }}" + block: + - name: Update scheduler config + copy: + content: "{{ scheduler_config | to_nice_json }}" + dest: "{{ openshift_master_scheduler_conf }}" + backup: true + when: > + openshift_upgrade_scheduler_predicates is defined or + openshift_upgrade_scheduler_priorities is defined diff --git a/roles/openshift_master/tasks/upgrade/v3_6/master_config_upgrade.yml b/roles/openshift_master/tasks/upgrade/v3_6/master_config_upgrade.yml new file mode 100644 index 000000000..db0c8f886 --- /dev/null +++ b/roles/openshift_master/tasks/upgrade/v3_6/master_config_upgrade.yml @@ -0,0 +1,15 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' + yaml_value: service-signer.crt + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' + yaml_value: service-signer.key + +- modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: ca.crt diff --git a/roles/openshift_master/tasks/upgrade/v3_7/master_config_upgrade.yml b/roles/openshift_master/tasks/upgrade/v3_7/master_config_upgrade.yml new file mode 100644 index 000000000..1d4d1919c --- /dev/null +++ b/roles/openshift_master/tasks/upgrade/v3_7/master_config_upgrade.yml @@ -0,0 +1,20 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.election.lockName' + yaml_value: 'openshift-master-controllers' + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' + yaml_value: service-signer.crt + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' + yaml_value: service-signer.key + +- modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: ca.crt diff --git a/roles/openshift_node/files/bootstrap.yml b/roles/openshift_node/files/bootstrap.yml index ea280640f..a5545c81b 100644 --- a/roles/openshift_node/files/bootstrap.yml +++ b/roles/openshift_node/files/bootstrap.yml @@ -61,3 +61,11 @@ 
with_items: - line: "BOOTSTRAP_CONFIG_NAME=node-config-{{ openshift_group_type }}" regexp: "^BOOTSTRAP_CONFIG_NAME=.*" + + - name: "Start the {{ openshift_service_type }}-node service" + systemd: + daemon_reload: yes + state: restarted + enabled: True + name: "{{ openshift_service_type }}-node" + no_block: true diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml new file mode 100644 index 000000000..5f182e0d6 --- /dev/null +++ b/roles/openshift_openstack/defaults/main.yml @@ -0,0 +1,96 @@ +--- +openshift_openstack_stack_state: 'present' + +openshift_openstack_ssh_ingress_cidr: 0.0.0.0/0 +openshift_openstack_node_ingress_cidr: 0.0.0.0/0 +openshift_openstack_lb_ingress_cidr: 0.0.0.0/0 +openshift_openstack_bastion_ingress_cidr: 0.0.0.0/0 +openshift_openstack_num_etcd: 0 +openshift_openstack_num_masters: 1 +openshift_openstack_num_nodes: 1 +openshift_openstack_num_dns: 0 +openshift_openstack_num_infra: 1 +openshift_openstack_dns_nameservers: [] +openshift_openstack_nodes_to_remove: [] + + +openshift_openstack_cluster_node_labels: + app: + region: primary + infra: + region: infra + +openshift_openstack_install_debug_packages: false +openshift_openstack_required_packages: + - docker + - NetworkManager + - wget + - git + - net-tools + - bind-utils + - bridge-utils +openshift_openstack_debug_packages: + - bash-completion + - vim-enhanced + +# container-storage-setup +openshift_openstack_container_storage_setup: + docker_dev: "/dev/sdb" + docker_vg: "docker-vol" + docker_data_size: "95%VG" + docker_dm_basesize: "3G" + container_root_lv_name: "dockerlv" + container_root_lv_mount_path: "/var/lib/docker" + + +# populate-dns +openshift_openstack_dns_records_add: [] +openshift_openstack_external_nsupdate_keys: {} + +openshift_openstack_full_dns_domain: "{{ (openshift_openstack_clusterid|trim == '') | ternary(openshift_openstack_public_dns_domain, openshift_openstack_clusterid + '.' 
+ openshift_openstack_public_dns_domain) }}" +openshift_openstack_app_subdomain: "apps" + + +# heat vars +openshift_openstack_clusterid: openshift +openshift_openstack_stack_name: "{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" +openshift_openstack_subnet_prefix: "192.168.99" +openshift_openstack_master_hostname: master +openshift_openstack_infra_hostname: infra-node +openshift_openstack_node_hostname: app-node +openshift_openstack_lb_hostname: lb +openshift_openstack_etcd_hostname: etcd +openshift_openstack_dns_hostname: dns +openshift_openstack_keypair_name: openshift +openshift_openstack_lb_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_etcd_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_master_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_node_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_infra_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_dns_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_master_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_infra_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_node_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_lb_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_etcd_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_dns_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_provider_network_name: null +openshift_openstack_external_network_name: null +openshift_openstack_private_network: >- + {% if openshift_openstack_provider_network_name | default(None) -%} + {{ openshift_openstack_provider_network_name }} + {%- else -%} + {{ openshift_openstack_private_network_name | default ('openshift-ansible-' + openshift_openstack_stack_name + '-net') }} + {%- endif -%} +openshift_openstack_master_server_group_policies: [] +openshift_openstack_infra_server_group_policies: [] +openshift_openstack_docker_volume_size: 15 +openshift_openstack_master_volume_size: "{{ openshift_openstack_docker_volume_size }}" +openshift_openstack_infra_volume_size: "{{ openshift_openstack_docker_volume_size }}" +openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size }}" +openshift_openstack_etcd_volume_size: 2 +openshift_openstack_dns_volume_size: 1 +openshift_openstack_lb_volume_size: 5 +openshift_openstack_use_bastion: false +openshift_openstack_ui_ssh_tunnel: false +openshift_openstack_ephemeral_volumes: false diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml new file mode 100644 index 000000000..57c7238d1 --- /dev/null +++ b/roles/openshift_openstack/tasks/check-prerequisites.yml @@ -0,0 +1,105 @@ +--- +# Check ansible +- name: Check Ansible version + assert: + that: > + (ansible_version.major == 2 and ansible_version.minor >= 3) or + (ansible_version.major > 2) + msg: "Ansible version must be at least 2.3" + +# Check shade +- name: Try to import python module shade + command: python -c "import shade" + ignore_errors: yes + register: shade_result +- name: Check if shade is installed + assert: + that: 'shade_result.rc == 0' + msg: "Python module shade is not installed" + +# Check jmespath +- name: Try to import python module jmespath + command: python -c "import jmespath" + ignore_errors: yes + register: jmespath_result +- name: Check if jmespath
is installed + assert: + that: 'jmespath_result.rc == 0' + msg: "Python module jmespath is not installed" + +# Check python-dns +- name: Try to import python DNS module + command: python -c "import dns" + ignore_errors: yes + register: pythondns_result +- name: Check if python-dns is installed + assert: + that: 'pythondns_result.rc == 0' + msg: "Python module python-dns is not installed" + +# Check jinja2 +- name: Try to import jinja2 module + command: python -c "import jinja2" + ignore_errors: yes + register: jinja_result +- name: Check if jinja2 is installed + assert: + that: 'jinja_result.rc == 0' + msg: "Python module jinja2 is not installed" + +# Check Glance image +- name: Try to get image facts + os_image_facts: + image: "{{ openshift_openstack_default_image_name }}" + register: image_result +- name: Check that image is available + assert: + that: "image_result.ansible_facts.openstack_image" + msg: "Image {{ openshift_openstack_default_image_name }} is not available" + +# Check network name +- name: Try to get network facts + os_networks_facts: + name: "{{ openshift_openstack_external_network_name }}" + register: network_result + when: not openshift_openstack_provider_network_name|default(None) +- name: Check that network is available + assert: + that: "network_result.ansible_facts.openstack_networks" + msg: "Network {{ openshift_openstack_external_network_name }} is not available" + when: not openshift_openstack_provider_network_name|default(None) + +# Check keypair +# TODO kpilatov: there is no Ansible module for getting OS keypairs +# (os_keypair is not suitable for this) +# this method does not force python-openstackclient dependency +- name: Try to show keypair + command: > + python -c 'import shade; cloud = shade.openstack_cloud(); + exit(cloud.get_keypair("{{ openshift_openstack_keypair_name }}") is None)' + ignore_errors: yes + register: key_result +- name: Check that keypair is available + assert: + that: 'key_result.rc == 0' + msg: "Keypair {{ openshift_openstack_keypair_name }} is not available" + +# Check that custom images are available +- include: custom_image_check.yaml + with_items: + - "{{ openshift_openstack_master_image }}" + - "{{ openshift_openstack_infra_image }}" + - "{{ openshift_openstack_node_image }}" + - "{{ openshift_openstack_lb_image }}" + - "{{ openshift_openstack_etcd_image }}" + - "{{ openshift_openstack_dns_image }}" + +# Check that custom flavors are available +- include: custom_flavor_check.yaml + with_items: + - "{{ openshift_openstack_master_flavor }}" + - "{{ openshift_openstack_infra_flavor }}" + - "{{ openshift_openstack_node_flavor }}" + - "{{ openshift_openstack_lb_flavor }}" + - "{{ openshift_openstack_etcd_flavor }}" + - "{{ openshift_openstack_dns_flavor }}" diff --git a/roles/openshift_openstack/tasks/cleanup.yml b/roles/openshift_openstack/tasks/cleanup.yml new file mode 100644 index 000000000..258334a6b --- /dev/null +++ b/roles/openshift_openstack/tasks/cleanup.yml @@ -0,0 +1,6 @@ +--- + +- name: cleanup temp files + file: + path: "{{ stack_template_pre.path }}" + state: absent diff --git a/roles/openshift_openstack/tasks/container-storage-setup.yml b/roles/openshift_openstack/tasks/container-storage-setup.yml new file mode 100644 index 000000000..82307b208 --- /dev/null +++ b/roles/openshift_openstack/tasks/container-storage-setup.yml @@ -0,0 +1,37 @@ +--- +- block: + - name: create the docker-storage config file + template: + src: docker-storage-setup-overlayfs.j2 + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: 
root + mode: 0644 + when: + - ansible_distribution_version | version_compare('7.4', '>=') + - ansible_distribution == "RedHat" + +- block: + - name: create the docker-storage-setup config file + template: + src: docker-storage-setup-dm.j2 + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + when: + - ansible_distribution_version | version_compare('7.4', '<') + - ansible_distribution == "RedHat" + +- block: + - name: create the docker-storage-setup config file for CentOS + template: + src: docker-storage-setup-dm.j2 + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + + # TODO(shadower): Find out which CentOS version supports overlayfs2 + when: + - ansible_distribution == "CentOS" diff --git a/roles/openshift_openstack/tasks/custom_flavor_check.yaml b/roles/openshift_openstack/tasks/custom_flavor_check.yaml new file mode 100644 index 000000000..5fb7a76ff --- /dev/null +++ b/roles/openshift_openstack/tasks/custom_flavor_check.yaml @@ -0,0 +1,10 @@ +--- +- name: Try to get flavor facts + os_flavor_facts: + name: "{{ item }}" + register: flavor_result + +- name: Check that custom flavor is available + assert: + that: "flavor_result.ansible_facts.openstack_flavors" + msg: "Flavor {{ item }} is not available." diff --git a/roles/openshift_openstack/tasks/custom_image_check.yaml b/roles/openshift_openstack/tasks/custom_image_check.yaml new file mode 100644 index 000000000..4ae163406 --- /dev/null +++ b/roles/openshift_openstack/tasks/custom_image_check.yaml @@ -0,0 +1,10 @@ +--- +- name: Try to get image facts + os_image_facts: + image: "{{ item }}" + register: image_result + +- name: Check that custom image is available + assert: + that: "image_result.ansible_facts.openstack_image" + msg: "Image {{ item }} is not available." 
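For reference, the three container-storage-setup tasks above differ only in which template they render into /etc/sysconfig/docker-storage-setup. With the openshift_openstack_container_storage_setup defaults from this role's defaults/main.yml (earlier in this diff), the overlayfs variant added later in this diff would render roughly as:

```
# Approximate render of docker-storage-setup-overlayfs.j2 (the RHEL >= 7.4 path)
# using the role defaults shown earlier in this diff.
DEVS="/dev/sdb"
VG="docker-vol"
DATA_SIZE="95%VG"
STORAGE_DRIVER=overlay2
CONTAINER_ROOT_LV_NAME="dockerlv"
CONTAINER_ROOT_LV_MOUNT_PATH="/var/lib/docker"
CONTAINER_ROOT_LV_SIZE=100%FREE
```

The devicemapper variant (docker-storage-setup-dm.j2) keeps DEVS, VG and DATA_SIZE but drops the overlay2/LV settings and instead passes --storage-opt dm.basesize=3G via EXTRA_DOCKER_STORAGE_OPTIONS.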
diff --git a/roles/openshift_openstack/tasks/generate-templates.yml b/roles/openshift_openstack/tasks/generate-templates.yml new file mode 100644 index 000000000..3a8b588e9 --- /dev/null +++ b/roles/openshift_openstack/tasks/generate-templates.yml @@ -0,0 +1,29 @@ +--- +- name: create HOT stack template prefix + register: stack_template_pre + tempfile: + state: directory + prefix: openshift-ansible + +- name: set template paths + set_fact: + stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" + user_data_template_path: "{{ stack_template_pre.path }}/user-data" + +- name: Print out the Heat template directory + debug: var=stack_template_pre + +- name: generate HOT stack template from jinja2 template + template: + src: heat_stack.yaml.j2 + dest: "{{ stack_template_path }}" + +- name: generate HOT server template from jinja2 template + template: + src: heat_stack_server.yaml.j2 + dest: "{{ stack_template_pre.path }}/server.yaml" + +- name: generate user_data from jinja2 template + template: + src: user_data.j2 + dest: "{{ user_data_template_path }}" diff --git a/roles/openshift_openstack/tasks/hostname.yml b/roles/openshift_openstack/tasks/hostname.yml new file mode 100644 index 000000000..e1a18425f --- /dev/null +++ b/roles/openshift_openstack/tasks/hostname.yml @@ -0,0 +1,26 @@ +--- +- name: Setting Hostname Fact + set_fact: + new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" + +- name: Setting FQDN Fact + set_fact: + new_fqdn: "{{ new_hostname }}.{{ openshift_openstack_full_dns_domain }}" + +- name: Setting hostname and DNS domain + hostname: name="{{ new_fqdn }}" + +- name: Check for cloud.cfg + stat: path=/etc/cloud/cloud.cfg + register: cloud_cfg + +- name: Prevent cloud-init updates of hostname/fqdn (if applicable) + lineinfile: + dest: /etc/cloud/cloud.cfg + state: present + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^ - set_hostname', line: '# - set_hostname' } + - { regexp: '^ - update_hostname', line: '# - update_hostname' } + when: cloud_cfg.stat.exists == True diff --git a/roles/openshift_openstack/tasks/net_vars_check.yaml b/roles/openshift_openstack/tasks/net_vars_check.yaml new file mode 100644 index 000000000..18b9b21b9 --- /dev/null +++ b/roles/openshift_openstack/tasks/net_vars_check.yaml @@ -0,0 +1,14 @@ +--- +- name: Check the provider network configuration + fail: + msg: "Flannel SDN requires a dedicated containers data network and can not work over a provider network" + when: + - openshift_openstack_provider_network_name is defined + - openstack_private_data_network_name is defined + +- name: Check the flannel network configuration + fail: + msg: "A dedicated containers data network is only supported with Flannel SDN" + when: + - openstack_private_data_network_name is defined + - not openshift_use_flannel|default(False)|bool diff --git a/roles/openshift_openstack/tasks/node-configuration.yml b/roles/openshift_openstack/tasks/node-configuration.yml new file mode 100644 index 000000000..89e58d830 --- /dev/null +++ b/roles/openshift_openstack/tasks/node-configuration.yml @@ -0,0 +1,11 @@ +--- +- name: "Verify SELinux is enforcing" + fail: + msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" + when: ansible_selinux.config_mode != "enforcing" + +- include: hostname.yml + +- include: container-storage-setup.yml + +- include: node-network.yml diff --git a/roles/openshift_openstack/tasks/node-network.yml 
b/roles/openshift_openstack/tasks/node-network.yml new file mode 100644 index 000000000..f494e5158 --- /dev/null +++ b/roles/openshift_openstack/tasks/node-network.yml @@ -0,0 +1,19 @@ +--- +- name: configure NetworkManager + lineinfile: + dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" + regexp: '^{{ item }}=' + line: '{{ item }}=yes' + state: present + create: yes + with_items: + - 'USE_PEERDNS' + - 'NM_CONTROLLED' + +- name: enable and start NetworkManager + service: + name: NetworkManager + state: restarted + enabled: yes + +# TODO(shadower): add the flannel interface tasks from post-provision-openstack.yml diff --git a/roles/openshift_openstack/tasks/node-packages.yml b/roles/openshift_openstack/tasks/node-packages.yml new file mode 100644 index 000000000..7864f5269 --- /dev/null +++ b/roles/openshift_openstack/tasks/node-packages.yml @@ -0,0 +1,15 @@ +--- +# TODO: subscribe to RHEL and install docker and other packages here + +- name: Install required packages + yum: + name: "{{ item }}" + state: latest + with_items: "{{ openshift_openstack_required_packages }}" + +- name: Install debug packages (optional) + yum: + name: "{{ item }}" + state: latest + with_items: "{{ openshift_openstack_debug_packages }}" + when: openshift_openstack_install_debug_packages|bool diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml new file mode 100644 index 000000000..c03aceb94 --- /dev/null +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -0,0 +1,128 @@ +--- +- name: "Generate list of private A records" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + +- name: "Add wildcard records to the private A records for infrahosts" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_openstack_app_subdomain, 'ip': hostvars[item]['private_v4'] } ] }}" + with_items: "{{ groups['infra_hosts'] }}" + +- name: "Add public master cluster hostname records to the private A records (single master)" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openshift_openstack_num_masters == 1 + +- name: "Add public master cluster hostname records to the private A records (multi-master)" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openshift_openstack_num_masters > 1 + +- name: "Set the private DNS server to use the external value (if provided)" + set_fact: + nsupdate_server_private: "{{ openshift_openstack_external_nsupdate_keys['private']['server'] }}" + nsupdate_key_secret_private: "{{ openshift_openstack_external_nsupdate_keys['private']['key_secret'] }}" + nsupdate_key_algorithm_private: "{{ openshift_openstack_external_nsupdate_keys['private']['key_algorithm'] }}" + nsupdate_private_key_name: "{{ openshift_openstack_external_nsupdate_keys['private']['key_name']|default('private-' + openshift_openstack_full_dns_domain) }}" + when: + - openshift_openstack_external_nsupdate_keys is defined + - openshift_openstack_external_nsupdate_keys['private'] is defined + + +- name: "Generate the private Add section for DNS" + set_fact: + private_named_records: + - view: "private" + zone: "{{ openshift_openstack_full_dns_domain }}" + server: "{{ nsupdate_server_private }}" + key_name: "{{ nsupdate_private_key_name|default('private-' + openshift_openstack_full_dns_domain) }}" + key_secret: "{{ nsupdate_key_secret_private }}" + key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" + entries: "{{ private_records }}" + +- name: "Generate list of public A records" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + when: hostvars[item]['public_v4'] is defined + +- name: "Add wildcard records to the public A records" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_openstack_app_subdomain, 'ip': hostvars[item]['public_v4'] } ] }}" + with_items: "{{ groups['infra_hosts'] }}" + when: hostvars[item]['public_v4'] is defined + +- name: "Add public master cluster hostname records to the public A records (single master)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openshift_openstack_num_masters == 1 + - not openshift_openstack_use_bastion|bool + +- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openshift_openstack_num_masters == 1 + - openshift_openstack_use_bastion|bool + +- name: "Add public master cluster hostname records to the public A records (multi-master)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openshift_openstack_num_masters > 1 + +- name: "Set the public DNS server details to use the external value (if provided)" + set_fact: + nsupdate_server_public: "{{ openshift_openstack_external_nsupdate_keys['public']['server'] }}" + nsupdate_key_secret_public: "{{ openshift_openstack_external_nsupdate_keys['public']['key_secret'] }}" + nsupdate_key_algorithm_public: "{{ openshift_openstack_external_nsupdate_keys['public']['key_algorithm'] }}" + nsupdate_public_key_name: "{{ openshift_openstack_external_nsupdate_keys['public']['key_name']|default('public-' + openshift_openstack_full_dns_domain) }}" + when: + - openshift_openstack_external_nsupdate_keys is defined + - openshift_openstack_external_nsupdate_keys['public'] is defined + +- name: "Generate the public Add section for DNS" + set_fact: + public_named_records: + - view: "public" + zone: "{{ openshift_openstack_full_dns_domain }}" + server: "{{ nsupdate_server_public }}" + key_name: "{{ nsupdate_public_key_name|default('public-' + openshift_openstack_full_dns_domain) }}" + key_secret: "{{ nsupdate_key_secret_public }}" + key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" + entries: "{{ public_records }}" + + +- name: "Generate the final openshift_openstack_dns_records_add" + set_fact: + openshift_openstack_dns_records_add: "{{ private_named_records + public_named_records }}" + + +- name: "Add DNS A records" + nsupdate: + key_name: "{{ item.0.key_name }}" + key_secret: "{{ item.0.key_secret }}" + key_algorithm: "{{ item.0.key_algorithm }}" + server: "{{ item.0.server }}" + zone: "{{ item.0.zone }}" + record: "{{ item.1.hostname }}" + value: "{{ item.1.ip }}" + type: "{{ item.1.type }}" + # TODO(shadower): add a cleanup playbook that removes these records, too! 
+ state: present + with_subelements: + - "{{ openshift_openstack_dns_records_add | default({}) }}" + - entries + register: nsupdate_add_result + until: nsupdate_add_result|succeeded + retries: 10 + delay: 1 diff --git a/roles/openshift_openstack/tasks/prepare-and-format-cinder-volume.yaml b/roles/openshift_openstack/tasks/prepare-and-format-cinder-volume.yaml new file mode 100644 index 000000000..fc51f6dc2 --- /dev/null +++ b/roles/openshift_openstack/tasks/prepare-and-format-cinder-volume.yaml @@ -0,0 +1,59 @@ +--- +- name: Attach the volume to the VM + os_server_volume: + state: present + server: "{{ groups['masters'][0] }}" + volume: "{{ cinder_volume }}" + register: volume_attachment + +- set_fact: + attached_device: >- + {{ volume_attachment['attachments']|json_query("[?volume_id=='" + cinder_volume + "'].device | [0]") }} + +- delegate_to: "{{ groups['masters'][0] }}" + block: + - name: Wait for the device to appear + wait_for: path={{ attached_device }} + + - name: Create a temp directory for mounting the volume + tempfile: + prefix: cinder-volume + state: directory + register: cinder_mount_dir + + - name: Format the device + filesystem: + fstype: "{{ cinder_fs }}" + dev: "{{ attached_device }}" + + - name: Mount the device + mount: + name: "{{ cinder_mount_dir.path }}" + src: "{{ attached_device }}" + state: mounted + fstype: "{{ cinder_fs }}" + + - name: Change mode on the filesystem + file: + path: "{{ cinder_mount_dir.path }}" + state: directory + recurse: true + mode: 0777 + + - name: Unmount the device + mount: + name: "{{ cinder_mount_dir.path }}" + src: "{{ attached_device }}" + state: absent + fstype: "{{ cinder_fs }}" + + - name: Delete the temp directory + file: + name: "{{ cinder_mount_dir.path }}" + state: absent + +- name: Detach the volume from the VM + os_server_volume: + state: absent + server: "{{ groups['masters'][0] }}" + volume: "{{ cinder_volume }}" diff --git a/roles/openshift_openstack/tasks/provision.yml b/roles/openshift_openstack/tasks/provision.yml new file mode 100644 index 000000000..dccbe334c --- /dev/null +++ b/roles/openshift_openstack/tasks/provision.yml @@ -0,0 +1,25 @@ +--- +- name: Generate the templates + include: generate-templates.yml + when: + - openshift_openstack_stack_state == 'present' + +- name: Handle the Stack (create/delete) + ignore_errors: False + register: stack_create + os_stack: + name: "{{ openshift_openstack_stack_name }}" + state: "{{ openshift_openstack_stack_state }}" + template: "{{ stack_template_path | default(omit) }}" + wait: yes + +- name: Add the new nodes to the inventory + meta: refresh_inventory + +- name: CleanUp + include: cleanup.yml + when: + - openshift_openstack_stack_state == 'present' + +# TODO(shadower): create the registry and PV Cinder volumes if specified +# and include the `prepare-and-format-cinder-volume` tasks to set it up diff --git a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 new file mode 100644 index 000000000..32c6b5838 --- /dev/null +++ b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 @@ -0,0 +1,4 @@ +DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}" +VG="{{ openshift_openstack_container_storage_setup.docker_vg }}" +DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}" +EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ openshift_openstack_container_storage_setup.docker_dm_basesize }}" diff --git 
a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 new file mode 100644 index 000000000..1bf366bdc --- /dev/null +++ b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 @@ -0,0 +1,7 @@ +DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}" +VG="{{ openshift_openstack_container_storage_setup.docker_vg }}" +DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}" +STORAGE_DRIVER=overlay2 +CONTAINER_ROOT_LV_NAME="{{ openshift_openstack_container_storage_setup.container_root_lv_name }}" +CONTAINER_ROOT_LV_MOUNT_PATH="{{ openshift_openstack_container_storage_setup.container_root_lv_mount_path }}" +CONTAINER_ROOT_LV_SIZE=100%FREE diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2 new file mode 100644 index 000000000..bfa65b460 --- /dev/null +++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2 @@ -0,0 +1,888 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster + +parameters: + +outputs: + + etcd_names: + description: Name of the etcds + value: { get_attr: [ etcd, name ] } + + etcd_ips: + description: IPs of the etcds + value: { get_attr: [ etcd, private_ip ] } + + etcd_floating_ips: + description: Floating IPs of the etcds + value: { get_attr: [ etcd, floating_ip ] } + + master_names: + description: Name of the masters + value: { get_attr: [ masters, name ] } + + master_ips: + description: IPs of the masters + value: { get_attr: [ masters, private_ip ] } + + master_floating_ips: + description: Floating IPs of the masters + value: { get_attr: [ masters, floating_ip ] } + + node_names: + description: Name of the nodes + value: { get_attr: [ compute_nodes, name ] } + + node_ips: + description: IPs of the nodes + value: { get_attr: [ compute_nodes, private_ip ] } + + node_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ compute_nodes, floating_ip ] } + + infra_names: + description: Name of the nodes + value: { get_attr: [ infra_nodes, name ] } + + infra_ips: + description: IPs of the nodes + value: { get_attr: [ infra_nodes, private_ip ] } + + infra_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ infra_nodes, floating_ip ] } + +{% if openshift_openstack_num_dns|int > 0 %} + dns_name: + description: Name of the DNS + value: + get_attr: + - dns + - name + + dns_floating_ips: + description: Floating IPs of the DNS + value: { get_attr: [ dns, floating_ip ] } + + dns_private_ips: + description: Private IPs of the DNS + value: { get_attr: [ dns, private_ip ] } +{% endif %} + +conditions: + no_floating: {% if openshift_openstack_provider_network_name or openshift_openstack_use_bastion|bool %}true{% else %}false{% endif %} + +resources: + +{% if not openshift_openstack_provider_network_name %} + net: + type: OS::Neutron::Net + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ openshift_openstack_stack_name }} + + subnet: + type: OS::Neutron::Subnet + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-subnet + params: + cluster_id: {{ openshift_openstack_stack_name }} + network: { get_resource: net } + cidr: + str_replace: + template: subnet_24_prefix.0/24 + params: + subnet_24_prefix: {{ openshift_openstack_subnet_prefix }} + allocation_pools: + - start: + str_replace: + template: subnet_24_prefix.3 + params: + 
subnet_24_prefix: {{ openshift_openstack_subnet_prefix }} + end: + str_replace: + template: subnet_24_prefix.254 + params: + subnet_24_prefix: {{ openshift_openstack_subnet_prefix }} + dns_nameservers: +{% for nameserver in openshift_openstack_dns_nameservers %} + - {{ nameserver }} +{% endfor %} + +{% if openshift_use_flannel|default(False)|bool %} + data_net: + type: OS::Neutron::Net + properties: + name: openshift-ansible-{{ openshift_openstack_stack_name }}-data-net + port_security_enabled: false + + data_subnet: + type: OS::Neutron::Subnet + properties: + name: openshift-ansible-{{ openshift_openstack_stack_name }}-data-subnet + network: { get_resource: data_net } + cidr: {{ osm_cluster_network_cidr|default('10.128.0.0/14') }} + gateway_ip: null +{% endif %} + + router: + type: OS::Neutron::Router + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-router + params: + cluster_id: {{ openshift_openstack_stack_name }} + external_gateway_info: + network: {{ openshift_openstack_external_network_name }} + + interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: subnet } + +{% endif %} + +# keypair: +# type: OS::Nova::KeyPair +# properties: +# name: +# str_replace: +# template: openshift-ansible-cluster_id-keypair +# params: +# cluster_id: {{ openshift_openstack_stack_name }} +# public_key: {{ openshift_openstack_keypair_name }} + + common-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-common-secgrp + params: + cluster_id: {{ openshift_openstack_stack_name }} + description: + str_replace: + template: Basic ssh/icmp security group for cluster_id OpenShift cluster + params: + cluster_id: {{ openshift_openstack_stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }} +{% if openshift_openstack_use_bastion|bool %} + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ openshift_openstack_bastion_ingress_cidr }} +{% endif %} + - direction: ingress + protocol: icmp + remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }} + +{% if openshift_openstack_flat_secgrp|default(False)|bool %} + flat-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-flat-secgrp + params: + cluster_id: {{ openshift_openstack_stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster + params: + cluster_id: {{ openshift_openstack_stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port|default(8443) }} + port_range_max: {{ openshift_master_api_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port|default(8443) }} + port_range_max: {{ openshift_master_console_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + 
port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2380 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" +{% else %} + master-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-master-secgrp + params: + cluster_id: {{ openshift_openstack_stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster master + params: + cluster_id: {{ openshift_openstack_stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port|default(8443) }} + port_range_max: {{ openshift_master_api_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port|default(8443) }} + port_range_max: {{ openshift_master_console_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 +{% if openshift_use_flannel|default(False)|bool %} + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 +{% endif %} + + etcd-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-etcd-secgrp + params: + cluster_id: {{ openshift_openstack_stack_name }} + description: + str_replace: + template: Security group for cluster_id etcd cluster + params: + cluster_id: {{ openshift_openstack_stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 + remote_mode: remote_group_id + remote_group_id: { get_resource: master-secgrp } + - direction: ingress + protocol: tcp + port_range_min: 2380 + port_range_max: 2380 + remote_mode: remote_group_id + + node-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-node-secgrp + params: + 
cluster_id: {{ openshift_openstack_stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster nodes + params: + cluster_id: {{ openshift_openstack_stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" +{% endif %} + + infra-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-infra-secgrp + params: + cluster_id: {{ openshift_openstack_stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift infrastructure cluster nodes + params: + cluster_id: {{ openshift_openstack_stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 + +{% if openshift_openstack_num_dns|int > 0 %} + dns-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-dns-secgrp + params: + cluster_id: {{ openshift_openstack_stack_name }} + description: + str_replace: + template: Security group for cluster_id cluster DNS + params: + cluster_id: {{ openshift_openstack_stack_name }} + rules: + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" +{% endif %} + +{% if openshift_openstack_num_masters|int > 1 or openshift_openstack_ui_ssh_tunnel|bool %} + lb-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: openshift-ansible-{{ openshift_openstack_stack_name }}-lb-secgrp + description: Security group for {{ openshift_openstack_stack_name }} cluster Load Balancer + rules: + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port | default(8443) }} + port_range_max: {{ openshift_master_api_port | default(8443) }} + remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }} +{% if openshift_openstack_ui_ssh_tunnel|bool %} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port | default(8443) }} + port_range_max: {{ openshift_master_api_port | default(8443) }} + remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }} +{% endif %} +{% if openshift_master_console_port is defined and openshift_master_console_port != 
openshift_master_api_port %} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port | default(8443) }} + port_range_max: {{ openshift_master_console_port | default(8443) }} + remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }} +{% endif %} +{% endif %} + + etcd: + type: OS::Heat::ResourceGroup + properties: + count: {{ openshift_openstack_num_etcd }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ openshift_openstack_stack_name }} + k8s_type: {{ openshift_openstack_etcd_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: etcds + cluster_id: {{ openshift_openstack_stack_name }} + type: etcd + image: {{ openshift_openstack_etcd_image }} + flavor: {{ openshift_openstack_etcd_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ openshift_openstack_stack_name }} +{% endif %} + secgrp: + - { get_resource: {% if openshift_openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } + - { get_resource: common-secgrp } + floating_network: + if: + - no_floating + - null + - {{ openshift_openstack_external_network_name }} +{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %} + attach_float_net: false +{% endif %} + volume_size: {{ openshift_openstack_etcd_volume_size }} +{% if not openshift_openstack_provider_network_name %} + depends_on: + - interface +{% endif %} + +{% if openshift_openstack_master_server_group_policies|length > 0 %} + master_server_group: + type: OS::Nova::ServerGroup + properties: + name: master_server_group + policies: {{ openshift_openstack_master_server_group_policies }} +{% endif %} +{% if openshift_openstack_infra_server_group_policies|length > 0 %} + infra_server_group: + type: OS::Nova::ServerGroup + properties: + name: infra_server_group + policies: {{ openshift_openstack_infra_server_group_policies }} +{% endif %} +{% if openshift_openstack_num_masters|int > 1 %} + loadbalancer: + type: OS::Heat::ResourceGroup + properties: + count: 1 + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ openshift_openstack_stack_name }} + k8s_type: {{ openshift_openstack_lb_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: lb + cluster_id: {{ openshift_openstack_stack_name }} + type: lb + image: {{ openshift_openstack_lb_image }} + flavor: {{ openshift_openstack_lb_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: 
openshift-ansible-cluster_id-net + params: + cluster_id: {{ openshift_openstack_stack_name }} +{% endif %} + secgrp: + - { get_resource: lb-secgrp } + - { get_resource: common-secgrp } +{% if not openshift_openstack_provider_network_name %} + floating_network: {{ openshift_openstack_external_network_name }} +{% endif %} + volume_size: {{ openshift_openstack_lb_volume_size }} +{% if not openshift_openstack_provider_network_name %} + depends_on: + - interface +{% endif %} +{% endif %} + + masters: + type: OS::Heat::ResourceGroup + properties: + count: {{ openshift_openstack_num_masters }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ openshift_openstack_stack_name }} + k8s_type: {{ openshift_openstack_master_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: masters + cluster_id: {{ openshift_openstack_stack_name }} + type: master + image: {{ openshift_openstack_master_image }} + flavor: {{ openshift_openstack_master_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ openshift_openstack_stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} +{% endif %} + secgrp: +{% if openshift_openstack_flat_secgrp|default(False)|bool %} + - { get_resource: flat-secgrp } +{% else %} + - { get_resource: master-secgrp } + - { get_resource: node-secgrp } +{% if openshift_openstack_num_etcd|int == 0 %} + - { get_resource: etcd-secgrp } +{% endif %} +{% endif %} + - { get_resource: common-secgrp } + floating_network: + if: + - no_floating + - null + - {{ openshift_openstack_external_network_name }} +{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %} + attach_float_net: false +{% endif %} + volume_size: {{ openshift_openstack_master_volume_size }} +{% if openshift_openstack_master_server_group_policies|length > 0 %} + scheduler_hints: + group: { get_resource: master_server_group } +{% endif %} +{% if not openshift_openstack_provider_network_name %} + depends_on: + - interface +{% endif %} + + compute_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ openshift_openstack_num_nodes }} + removal_policies: + - resource_list: {{ openshift_openstack_nodes_to_remove }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: sub_type_k8s_type-%index%.cluster_id + params: + cluster_id: {{ openshift_openstack_stack_name }} + sub_type_k8s_type: {{ openshift_openstack_node_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: nodes + cluster_id: {{ openshift_openstack_stack_name }} + type: node + subtype: app + node_labels: +{% for k, v in openshift_openstack_cluster_node_labels.app.iteritems() %} + {{ k|e }}: {{ v|e }} +{% endfor %} + image: {{ openshift_openstack_node_image }} + 
flavor: {{ openshift_openstack_node_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ openshift_openstack_stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} +{% endif %} + secgrp: + - { get_resource: {% if openshift_openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } + - { get_resource: common-secgrp } + floating_network: + if: + - no_floating + - null + - {{ openshift_openstack_external_network_name }} +{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %} + attach_float_net: false +{% endif %} + volume_size: {{ openshift_openstack_node_volume_size }} +{% if not openshift_openstack_provider_network_name %} + depends_on: + - interface +{% endif %} + + infra_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ openshift_openstack_num_infra }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: sub_type_k8s_type-%index%.cluster_id + params: + cluster_id: {{ openshift_openstack_stack_name }} + sub_type_k8s_type: {{ openshift_openstack_infra_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: infra + cluster_id: {{ openshift_openstack_stack_name }} + type: node + subtype: infra + node_labels: +{% for k, v in openshift_openstack_cluster_node_labels.infra.iteritems() %} + {{ k|e }}: {{ v|e }} +{% endfor %} + image: {{ openshift_openstack_infra_image }} + flavor: {{ openshift_openstack_infra_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ openshift_openstack_stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} +{% endif %} + secgrp: +# TODO(bogdando) filter only required node rules into infra-secgrp +{% if openshift_openstack_flat_secgrp|default(False)|bool %} + - { get_resource: flat-secgrp } +{% else %} + - { get_resource: node-secgrp } +{% endif %} +{% if openshift_openstack_ui_ssh_tunnel|bool and openshift_openstack_num_masters|int < 2 %} + - { get_resource: lb-secgrp } +{% endif %} + - { get_resource: infra-secgrp } + - { get_resource: common-secgrp } +{% if not openshift_openstack_provider_network_name %} + floating_network: {{ openshift_openstack_external_network_name }} +{% endif %} + volume_size: {{ openshift_openstack_infra_volume_size }} +{% if openshift_openstack_infra_server_group_policies|length > 0 %} + scheduler_hints: + group: { get_resource: infra_server_group } +{% endif %} +{% if not openshift_openstack_provider_network_name %} + depends_on: + - interface 
+{% endif %} + +{% if openshift_openstack_num_dns|int > 0 %} + dns: + type: OS::Heat::ResourceGroup + properties: + count: {{ openshift_openstack_num_dns }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ openshift_openstack_stack_name }} + k8s_type: {{ openshift_openstack_dns_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: dns + cluster_id: {{ openshift_openstack_stack_name }} + type: dns + image: {{ openshift_openstack_dns_image }} + flavor: {{ openshift_openstack_dns_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ openshift_openstack_stack_name }} +{% endif %} + secgrp: + - { get_resource: dns-secgrp } + - { get_resource: common-secgrp } +{% if not openshift_openstack_provider_network_name %} + floating_network: {{ openshift_openstack_external_network_name }} +{% endif %} + volume_size: {{ openshift_openstack_dns_volume_size }} +{% if not openshift_openstack_provider_network_name %} + depends_on: + - interface +{% endif %} +{% endif %} diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 new file mode 100644 index 000000000..a829da34f --- /dev/null +++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 @@ -0,0 +1,270 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster server + +parameters: + + name: + type: string + label: Name + description: Name + + group: + type: string + label: Host Group + description: The Primary Ansible Host Group + default: host + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + type: + type: string + label: Type + description: Type master or node + + subtype: + type: string + label: Sub-type + description: Sub-type compute or infra for nodes, default otherwise + default: default + + key_name: + type: string + label: Key name + description: Key name of keypair + + image: + type: string + label: Image + description: Name of the image + + flavor: + type: string + label: Flavor + description: Name of the flavor + + net: + type: string + label: Net ID + description: Net resource + + net_name: + type: string + label: Net name + description: Net name + +{% if not openshift_openstack_provider_network_name %} + subnet: + type: string + label: Subnet ID + description: Subnet resource +{% endif %} + +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: + type: boolean + default: false + label: Attach-data-net + description: A switch for data port connection + + data_net: + type: string + default: '' + label: Net ID + description: Net resource + +{% if not openshift_openstack_provider_network_name %} + data_subnet: + type: string + default: '' + label: Subnet ID + description: Subnet resource +{% endif %} +{% endif %} + + secgrp: + type: comma_delimited_list + label: Security groups + description: Security 
group resources + + attach_float_net: + type: boolean + default: true + + label: Attach-float-net + description: A switch for floating network port connection + +{% if not openshift_openstack_provider_network_name %} + floating_network: + type: string + default: '' + label: Floating network + description: Network to allocate floating IP from +{% endif %} + + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + + volume_size: + type: number + description: Size of the volume to be created. + default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + node_labels: + type: json + description: OpenShift Node Labels + default: {"region": "default" } + + scheduler_hints: + type: json + description: Server scheduler hints. + default: {} + +outputs: + + name: + description: Name of the server + value: { get_attr: [ server, name ] } + + private_ip: + description: Private IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 0 + - addr + + floating_ip: + description: Floating IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } +{% if openshift_openstack_provider_network_name %} + - 0 +{% else %} + - 1 +{% endif %} + - addr + +conditions: + no_floating: {not: { get_param: attach_float_net} } +{% if openshift_use_flannel|default(False)|bool %} + no_data_subnet: {not: { get_param: attach_data_net} } +{% endif %} + +resources: + + server: + type: OS::Nova::Server + properties: + name: { get_param: name } + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: +{% if openshift_use_flannel|default(False)|bool %} + if: + - no_data_subnet +{% if use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } +{% endif %} +{% if use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } + - port: { get_resource: data_port } +{% endif %} + +{% else %} +{% if use_trunk_ports|default(false)|bool %} + - port: { get_attr: [trunk-port, port_id] } +{% else %} + - port: { get_resource: port } +{% endif %} +{% endif %} + user_data: + get_file: user-data + user_data_format: RAW + user_data_update_policy: IGNORE + metadata: + group: { get_param: group } + environment: { get_param: cluster_env } + clusterid: { get_param: cluster_id } + host-type: { get_param: type } + sub-host-type: { get_param: subtype } + node_labels: { get_param: node_labels } + scheduler_hints: { get_param: scheduler_hints } + +{% if use_trunk_ports|default(false)|bool %} + trunk-port: + type: OS::Neutron::Trunk + properties: + name: { get_param: name } + port: { get_resource: port } +{% endif %} + + port: + type: OS::Neutron::Port + properties: + network: { get_param: net } +{% if not openshift_openstack_provider_network_name %} + fixed_ips: + - subnet: { get_param: subnet } +{% endif %} + security_groups: { get_param: secgrp } + +{% if openshift_use_flannel|default(False)|bool %} + data_port: + type: OS::Neutron::Port + condition: { not: no_data_subnet } + properties: + network: { get_param: data_net } + port_security_enabled: false +{% if not openshift_openstack_provider_network_name %} + fixed_ips: + - subnet: { get_param: data_subnet } +{% endif %} +{% endif %} + +{% if not openshift_openstack_provider_network_name %} + floating-ip: + condition: { not: no_floating 
}
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: floating_network }
+      port_id: { get_resource: port }
+{% endif %}
+
+{% if not openshift_openstack_ephemeral_volumes|default(false)|bool %}
+  cinder_volume:
+    type: OS::Cinder::Volume
+    properties:
+      size: { get_param: volume_size }
+      availability_zone: { get_param: availability_zone }
+
+  volume_attachment:
+    type: OS::Cinder::VolumeAttachment
+    properties:
+      volume_id: { get_resource: cinder_volume }
+      instance_uuid: { get_resource: server }
+      mountpoint: /dev/sdb
+{% endif %}
diff --git a/roles/openshift_openstack/templates/user_data.j2 b/roles/openshift_openstack/templates/user_data.j2
new file mode 100644
index 000000000..eb65f7cec
--- /dev/null
+++ b/roles/openshift_openstack/templates/user_data.j2
@@ -0,0 +1,13 @@
+#cloud-config
+disable_root: true
+
+system_info:
+  default_user:
+    name: openshift
+    sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+write_files:
+  - path: /etc/sudoers.d/00-openshift-no-requiretty
+    permissions: '0440'
+    content: |
+      Defaults:openshift !requiretty
diff --git a/roles/openshift_project_request_template/README.md b/roles/openshift_project_request_template/README.md
new file mode 100644
index 000000000..81c3aca5c
--- /dev/null
+++ b/roles/openshift_project_request_template/README.md
@@ -0,0 +1,33 @@
+OpenShift Project Request Template
+==================================
+
+Configure the template used when creating new projects. This role manages only the template itself; the project request template must still be enabled via `projectRequestTemplate` in the OpenShift master configuration. The base template is created using `oc adm create-bootstrap-project-template` and can be modified by setting `openshift_project_request_template_edits`.
+
+
+Requirements
+------------
+
+
+Role Variables
+--------------
+
+From this role:
+
+| Name                                          | Default value   | Description                                     |
+|-----------------------------------------------|-----------------|-------------------------------------------------|
+| openshift_project_request_template_manage     | false           | Whether to manage the project request template  |
+| openshift_project_request_template_namespace  | default         | Namespace for the template                      |
+| openshift_project_request_template_name       | project-request | Template name                                   |
+| openshift_project_request_template_edits      | []              | List of edits applied to the template           |
+
+
+Dependencies
+------------
+
+* lib_utils
+
+
+License
+-------
+
+Apache License Version 2.0
diff --git a/roles/openshift_project_request_template/defaults/main.yml b/roles/openshift_project_request_template/defaults/main.yml
new file mode 100644
index 000000000..2dab6f99e
--- /dev/null
+++ b/roles/openshift_project_request_template/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+openshift_project_request_template_manage: false
+openshift_project_request_template_namespace: default
+openshift_project_request_template_name: project-request
+openshift_project_request_template_edits: []
diff --git a/roles/openshift_project_request_template/meta/main.yml b/roles/openshift_project_request_template/meta/main.yml
new file mode 100644
index 000000000..3bc6dfb45
--- /dev/null
+++ b/roles/openshift_project_request_template/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+  author: Michael Hanselmann
+  description: Configure project request template
+  company: VSHN AG
+  license: Apache License, Version 2.0
+  min_ansible_version: 2.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+dependencies:
+- role: lib_utils
diff --git a/roles/openshift_project_request_template/tasks/main.yml b/roles/openshift_project_request_template/tasks/main.yml
new file mode 100644
index 000000000..c31ee5795
--- /dev/null
+++ b/roles/openshift_project_request_template/tasks/main.yml
@@ -0,0 +1,40 @@
+---
+- name: Create temp file for template
+  command: mktemp /tmp/openshift-ansible-XXXXXX.yaml
+  register: mktemp
+  changed_when: False
+
+- name: Generate default project template
+  command: |
+    {{ openshift.common.client_binary | quote }} \
+    --config {{ openshift.common.config_base | quote }}/master/admin.kubeconfig \
+    --output yaml \
+    adm create-bootstrap-project-template \
+    --name {{ openshift_project_request_template_name | quote }}
+  register: default_project_template
+
+- name: Write default project template to file
+  copy:
+    mode: "0600"
+    content: "{{ default_project_template.stdout }}"
+    dest: "{{ mktemp.stdout }}"
+
+- name: Apply template modifications
+  yedit:
+    state: present
+    src: "{{ mktemp.stdout }}"
+    edits: "{{ openshift_project_request_template_edits }}"
+  when: "openshift_project_request_template_edits | length > 0"
+
+- name: Create or update project request template
+  command: |
+    {{ openshift.common.client_binary }} \
+    --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
+    --namespace {{ openshift_project_request_template_namespace | quote }} \
+    apply --filename {{ mktemp.stdout }}
+
+- name: Delete temp file
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index 00c3c1987..21da4bc9d 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -148,25 +148,6 @@
     selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}"
   when: openshift_prometheus_alertbuffer_storage_type == 'pvc'
 
-# create prometheus stateful set
-- name: Set prometheus template
-  template:
-    src: prometheus.j2
-    dest: "{{ tempdir }}/templates/prometheus.yaml"
-  vars:
-    namespace: "{{ openshift_prometheus_namespace }}"
-#    prom_replicas: "{{ openshift_prometheus_replicas }}"
-
-- name: Set prometheus stateful set
-  oc_obj:
-    state: "{{ state }}"
-    name: "prometheus"
-    namespace: "{{ openshift_prometheus_namespace }}"
-    kind: statefulset
-    files:
-    - "{{ tempdir }}/templates/prometheus.yaml"
-    delete_after: true
-
 # prometheus configmap
 # Copy the additional rules file if it is defined
 - name: Copy additional rules file to host
@@ -236,3 +217,22 @@
     namespace: "{{ openshift_prometheus_namespace }}"
     from_file:
       alertmanager.yml: "{{ tempdir }}/alertmanager.yml"
+
+# create prometheus stateful set
+- name: Set prometheus template
+  template:
+    src: prometheus.j2
+    dest: "{{ tempdir }}/templates/prometheus.yaml"
+  vars:
+    namespace: "{{ openshift_prometheus_namespace }}"
+#    prom_replicas: "{{ openshift_prometheus_replicas }}"
+
+- name: Set prometheus stateful set
+  oc_obj:
+    state: "{{ state }}"
+    name: "prometheus"
+    namespace: "{{ openshift_prometheus_namespace }}"
+    kind: statefulset
+    files:
+    - "{{ tempdir }}/templates/prometheus.yaml"
+    delete_after: true
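For the `openshift_project_request_template` role added above, a minimal inventory sketch may make the variables concrete. This is only an illustration: the variable values and the label key below are hypothetical, and the edit format assumes the yedit-style `key`/`value` entries accepted by the `lib_utils` yedit module invoked in `tasks/main.yml`.

```yaml
# Hypothetical group_vars snippet for the cluster inventory group.
# Assumes yedit takes a list of key/value edits; the label is purely
# illustrative and not part of the role defaults.
openshift_project_request_template_manage: true
openshift_project_request_template_namespace: default
openshift_project_request_template_name: project-request
openshift_project_request_template_edits:
- key: metadata.labels.custom
  value: "example"
```

With these values the role renders the bootstrap template via `oc adm create-bootstrap-project-template`, applies the listed edits with yedit, and runs `oc apply` against the configured namespace; the resulting template must still be referenced from the master configuration for new projects to use it.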