Diffstat (limited to 'roles')
41 files changed, 623 insertions, 346 deletions
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index e36dfa7b9..1c830cb4e 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -1,5 +1,6 @@ --- docker_cli_auth_config_path: '/root/.docker' +openshift_docker_signature_verification: False # oreg_url is defined by user input. oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}" diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml index 888ae40e7..7ccab37a5 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/docker/tasks/package_docker.yml @@ -115,11 +115,12 @@ dest: /etc/sysconfig/docker regexp: '^OPTIONS=.*$' line: "OPTIONS='\ - {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %}\ - {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\ - {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\ - {% if docker_options is defined %} {{ docker_options }}{% endif %}\ - {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'" + {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \ + {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %} \ + {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \ + {% if docker_options is defined %} {{ docker_options }}{% endif %} \ + {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %} \ + --signature-verification={{ openshift_docker_signature_verification | bool }}'" when: docker_check.stat.isreg is defined and docker_check.stat.isreg notify: - restart docker diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py index d1dc4caf8..324f52689 100644 --- a/roles/lib_openshift/library/oc_adm_csr.py +++ b/roles/lib_openshift/library/oc_adm_csr.py @@ -1478,11 +1478,23 @@ class OCcsr(OpenShiftCLI): return False + def get_csr_request(self, request): + '''base64 decode the request object and call openssl to determine the + subject and specifically the CN: from the request + + Output: + (0, '... 
+ Subject: O=system:nodes, CN=system:node:ip-172-31-54-54.ec2.internal + ...') + ''' + import base64 + return self._run(['openssl', 'req', '-noout', '-text'], base64.b64decode(request))[1] + def match_node(self, csr): '''match an inc csr to a node in self.nodes''' for node in self.nodes: - # we have a match - if node['name'] in csr['metadata']['name']: + # we need to match based upon the csr's request certificate's CN + if node['name'] in self.get_csr_request(csr['spec']['request']): node['csrs'][csr['metadata']['name']] = csr # check that the username is the node and type is 'Approved' diff --git a/roles/lib_openshift/src/class/oc_adm_csr.py b/roles/lib_openshift/src/class/oc_adm_csr.py index ea11c6ca9..22b8f9165 100644 --- a/roles/lib_openshift/src/class/oc_adm_csr.py +++ b/roles/lib_openshift/src/class/oc_adm_csr.py @@ -66,11 +66,23 @@ class OCcsr(OpenShiftCLI): return False + def get_csr_request(self, request): + '''base64 decode the request object and call openssl to determine the + subject and specifically the CN: from the request + + Output: + (0, '... + Subject: O=system:nodes, CN=system:node:ip-172-31-54-54.ec2.internal + ...') + ''' + import base64 + return self._run(['openssl', 'req', '-noout', '-text'], base64.b64decode(request))[1] + def match_node(self, csr): '''match an inc csr to a node in self.nodes''' for node in self.nodes: - # we have a match - if node['name'] in csr['metadata']['name']: + # we need to match based upon the csr's request certificate's CN + if node['name'] in self.get_csr_request(csr['spec']['request']): node['csrs'][csr['metadata']['name']] = csr # check that the username is the node and type is 'Approved' diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index ea09857b0..5371588cf 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -4,7 +4,6 @@ openshift_aws_create_iam_cert: True openshift_aws_create_security_groups: True openshift_aws_create_launch_config: True openshift_aws_create_scale_group: True -openshift_aws_kubernetes_cluster_status: owned # or shared openshift_aws_node_group_type: master openshift_aws_wait_for_ssh: True @@ -13,6 +12,7 @@ openshift_aws_clusterid: default openshift_aws_region: us-east-1 openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}" openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}" +openshift_aws_kubernetes_cluster_status: "{{ openshift_aws_clusterid }}" openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external" openshift_aws_iam_cert_path: '' @@ -89,6 +89,10 @@ openshift_aws_node_group_config_node_volumes: delete_on_termination: True openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}" +openshift_aws_node_group_termination_policy: Default +openshift_aws_node_group_replace_instances: [] +openshift_aws_node_group_replace_all_instances: False +openshift_aws_node_group_config_extra_labels: {} openshift_aws_node_group_config: tags: "{{ openshift_aws_node_group_config_tags }}" @@ -105,7 +109,11 @@ openshift_aws_node_group_config: tags: host-type: master sub-host-type: default + labels: + type: master wait_for_instances: True + termination_policy: "{{ openshift_aws_node_group_termination_policy }}" + replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" compute: instance_type: m4.xlarge ami: "{{ openshift_aws_ami }}" @@ -119,6 +127,10 @@ openshift_aws_node_group_config: tags: host-type: node sub-host-type: 
compute + labels: + type: compute + termination_policy: "{{ openshift_aws_node_group_termination_policy }}" + replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" infra: instance_type: m4.xlarge ami: "{{ openshift_aws_ami }}" @@ -132,6 +144,10 @@ openshift_aws_node_group_config: tags: host-type: node sub-host-type: infra + labels: + type: infra + termination_policy: "{{ openshift_aws_node_group_termination_policy }}" + replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" openshift_aws_elb_security_groups: - "{{ openshift_aws_clusterid }}" @@ -211,3 +227,7 @@ openshift_aws_vpc: az: "us-east-1e" - cidr: 172.31.16.0/20 az: "us-east-1a" + +openshift_aws_node_run_bootstrap_startup: True +openshift_aws_node_user_data: '' +openshift_aws_node_config_namespace: openshift-node diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml index e6be9969c..8b7b02a0e 100644 --- a/roles/openshift_aws/tasks/launch_config.yml +++ b/roles/openshift_aws/tasks/launch_config.yml @@ -4,6 +4,11 @@ when: - openshift_aws_ami is undefined +- fail: + msg: "Ensure that openshift_deployment_type is defined." + when: + - openshift_deployment_type is undefined + - name: query vpc ec2_vpc_net_facts: region: "{{ openshift_aws_region }}" @@ -27,23 +32,7 @@ image_id: "{{ openshift_aws_ami }}" instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}" security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}" - user_data: |- - #cloud-config - {% if openshift_aws_node_group_type != 'master' %} - write_files: - - path: /root/csr_kubeconfig - owner: root:root - permissions: '0640' - content: {{ openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }} - - path: /root/openshift_settings - owner: root:root - permissions: '0640' - content: - openshift_type: "{{ openshift_aws_node_group_type }}" - runcmd: - - [ systemctl, enable, atomic-openshift-node] - - [ systemctl, start, atomic-openshift-node] - {% endif %} + user_data: "{{ lookup('template', 'user_data.j2') }}" key_name: "{{ openshift_aws_ssh_key_name }}" ebs_optimized: False volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}" diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml index 1384bae59..25ae6ce1c 100644 --- a/roles/openshift_aws/tasks/provision_instance.yml +++ b/roles/openshift_aws/tasks/provision_instance.yml @@ -1,4 +1,8 @@ --- +- name: set openshift_node_bootstrap to True when building AMI + set_fact: + openshift_node_bootstrap: True + - name: query vpc ec2_vpc_net_facts: region: "{{ openshift_aws_region }}" @@ -53,10 +57,6 @@ timeout: 300 search_regex: OpenSSH -- name: Pause 10 seconds to ensure ssh actually accepts logins - pause: - seconds: 20 - - name: add host to nodes add_host: groups: nodes diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml index 3e969fc43..eb31636e7 100644 --- a/roles/openshift_aws/tasks/scale_group.yml +++ b/roles/openshift_aws/tasks/scale_group.yml @@ -28,5 +28,7 @@ load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}" wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}" 
vpc_zone_identifier: "{{ subnetout.subnets[0].id }}" + replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}" + replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (openshift_aws_node_group_config[openshift_aws_node_group_type].replace_all_instances | default(omit)) }}" tags: - "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}" diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml index 0cb749dcc..d319fdd1a 100644 --- a/roles/openshift_aws/tasks/seal_ami.yml +++ b/roles/openshift_aws/tasks/seal_ami.yml @@ -1,4 +1,11 @@ --- +- name: Remove any ansible facts created during AMI creation + file: + path: "/etc/ansible/facts.d/{{ item }}" + state: absent + with_items: + - openshift.fact + - name: fetch newly created instances ec2_remote_facts: region: "{{ openshift_aws_region }}" diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2 new file mode 100644 index 000000000..ed9c0ed0b --- /dev/null +++ b/roles/openshift_aws/templates/user_data.j2 @@ -0,0 +1,26 @@ +{% if openshift_aws_node_user_data is defined and openshift_aws_node_user_data != '' %} +{{ openshift_aws_node_user_data }} +{% else %} +#cloud-config +write_files: +- path: /root/openshift_bootstrap/openshift_settings.yaml + owner: 'root:root' + permissions: '0640' + content: | + openshift_group_type: {{ openshift_aws_node_group_type }} +{% if openshift_aws_node_group_type != 'master' %} +- path: /etc/origin/node/csr_kubeconfig + owner: 'root:root' + permissions: '0640' + encoding: b64 + content: {{ openshift_aws_launch_config_bootstrap_token | b64encode }} +{% endif %} +runcmd: +{% if openshift_aws_node_run_bootstrap_startup %} +- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml] +{% endif %} +{% if openshift_aws_node_group_type != 'master' %} +- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] +- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] +{% endif %} +{% endif %} diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index ba1d8f29d..33028fea4 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -498,6 +498,20 @@ def set_selectors(facts): facts['hosted']['etcd'] = {} if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']: facts['hosted']['etcd']['selector'] = None + if 'prometheus' not in facts: + facts['prometheus'] = {} + if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']: + facts['prometheus']['selector'] = None + if 'alertmanager' not in facts['prometheus']: + facts['prometheus']['alertmanager'] = {} + # pylint: disable=line-too-long + if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']: + facts['prometheus']['alertmanager']['selector'] = None + if 'alertbuffer' not in facts['prometheus']: + facts['prometheus']['alertbuffer'] = {} + # pylint: disable=line-too-long + if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']: + 
facts['prometheus']['alertbuffer']['selector'] = None return facts @@ -1779,7 +1793,8 @@ class OpenShiftFacts(object): 'node', 'logging', 'loggingops', - 'metrics'] + 'metrics', + 'prometheus'] # Disabling too-many-arguments, this should be cleaned up as a TODO item. # pylint: disable=too-many-arguments,no-value-for-parameter @@ -2068,6 +2083,66 @@ class OpenShiftFacts(object): ) ) + defaults['prometheus'] = dict( + storage=dict( + kind=None, + volume=dict( + name='prometheus', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + + defaults['prometheus']['alertmanager'] = dict( + storage=dict( + kind=None, + volume=dict( + name='prometheus-alertmanager', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + + defaults['prometheus']['alertbuffer'] = dict( + storage=dict( + kind=None, + volume=dict( + name='prometheus-alertbuffer', + size='10Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ) + return defaults def guess_host_provider(self): diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh index d72a11de1..64c7cd019 100644 --- a/roles/openshift_gcp/templates/provision.j2.sh +++ b/roles/openshift_gcp/templates/provision.j2.sh @@ -313,7 +313,7 @@ fi # wait until all node groups are stable {% for node_group in openshift_gcp_node_group_config %} # wait for stable {{ node_group.name }} -( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=300) & +( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=600 ) & {% endfor %} diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml index 47dc9171d..8fc70cecb 100644 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ b/roles/openshift_hosted_facts/tasks/main.yml @@ -16,4 +16,4 @@ | oo_openshift_env }}" openshift_env_structures: - 'openshift.hosted.router.*' - with_items: [hosted, logging, loggingops, metrics] + with_items: [hosted, logging, loggingops, metrics, prometheus] diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml index 554aa5bb2..fc48b7f71 100644 --- a/roles/openshift_logging_elasticsearch/defaults/main.yml +++ b/roles/openshift_logging_elasticsearch/defaults/main.yml @@ -40,8 +40,6 @@ openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_ # config the es plugin to write kibana index based on the index mode openshift_logging_elasticsearch_kibana_index_mode: 'unique' -openshift_logging_elasticsearch_proxy_image_prefix: "openshift/oauth-proxy" -openshift_logging_elasticsearch_proxy_image_version: "v1.0.0" openshift_logging_elasticsearch_proxy_cpu_limit: "100m" openshift_logging_elasticsearch_proxy_memory_limit: "64Mi" openshift_logging_elasticsearch_prometheus_sa: "system:serviceaccount:{{openshift_prometheus_namespace | 
default('prometheus')}}:prometheus" diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index df2c17aa0..aeff2d198 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -17,6 +17,17 @@ - include: determine_version.yaml +- name: Set default image variables based on deployment_type + include_vars: "{{ item }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + +- name: Set elasticsearch_prefix image facts + set_fact: + openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_elasticsearch_proxy_image_prefix | default(__openshift_logging_elasticsearch_proxy_image_prefix) }}" + openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_elasticsearch_proxy_image_version | default(__openshift_logging_elasticsearch_proxy_image_version) }}" + # allow passing in a tempdir - name: Create temp directory for doing work in command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX @@ -52,7 +63,7 @@ name: "aggregated-logging-elasticsearch" namespace: "{{ openshift_logging_elasticsearch_namespace }}" when: - - openshift_logging_image_pull_secret == '' + - openshift_logging_image_pull_secret == '' # rolebinding reader - copy: @@ -66,7 +77,7 @@ kind: clusterrole namespace: "{{ openshift_logging_elasticsearch_namespace }}" files: - - "{{ tempdir }}/rolebinding-reader.yml" + - "{{ tempdir }}/rolebinding-reader.yml" delete_after: true # SA roles @@ -107,8 +118,8 @@ - fail: msg: "There was an error creating the logging-metrics-role and binding: {{prometheus_out}}" when: - - "prometheus_out.stderr | length > 0" - - "'already exists' not in prometheus_out.stderr" + - "prometheus_out.stderr | length > 0" + - "'already exists' not in prometheus_out.stderr" # View role and binding - name: Generate logging-elasticsearch-view-role @@ -120,8 +131,8 @@ roleRef: name: view subjects: - - kind: ServiceAccount - name: aggregated-logging-elasticsearch + - kind: ServiceAccount + name: aggregated-logging-elasticsearch changed_when: no - name: Set logging-elasticsearch-view-role role @@ -131,18 +142,18 @@ kind: rolebinding namespace: "{{ openshift_logging_elasticsearch_namespace }}" files: - - "{{ tempdir }}/logging-elasticsearch-view-role.yaml" + - "{{ tempdir }}/logging-elasticsearch-view-role.yaml" delete_after: true # configmap - assert: that: - - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes + - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes msg: "The openshift_logging_elasticsearch_kibana_index_mode '{{ openshift_logging_elasticsearch_kibana_index_mode }}' only supports one of: {{ __kibana_index_modes | join(', ') }}" - assert: that: - - "{{ openshift_logging_es_log_appenders | length > 0 }}" + - "{{ openshift_logging_es_log_appenders | length > 0 }}" msg: "The openshift_logging_es_log_appenders '{{ openshift_logging_es_log_appenders }}' has an unrecognized option and only supports the following as a list: {{ __es_log_appenders | join(', ') }}" - template: @@ -194,22 +205,22 @@ name: "logging-elasticsearch" namespace: "{{ openshift_logging_elasticsearch_namespace }}" files: - - name: key - path: "{{ generated_certs_dir }}/logging-es.jks" - - name: truststore - path: "{{ generated_certs_dir }}/truststore.jks" - - name: searchguard.key - path: "{{ generated_certs_dir }}/elasticsearch.jks" - - name: searchguard.truststore - path: 
"{{ generated_certs_dir }}/truststore.jks" - - name: admin-key - path: "{{ generated_certs_dir }}/system.admin.key" - - name: admin-cert - path: "{{ generated_certs_dir }}/system.admin.crt" - - name: admin-ca - path: "{{ generated_certs_dir }}/ca.crt" - - name: admin.jks - path: "{{ generated_certs_dir }}/system.admin.jks" + - name: key + path: "{{ generated_certs_dir }}/logging-es.jks" + - name: truststore + path: "{{ generated_certs_dir }}/truststore.jks" + - name: searchguard.key + path: "{{ generated_certs_dir }}/elasticsearch.jks" + - name: searchguard.truststore + path: "{{ generated_certs_dir }}/truststore.jks" + - name: admin-key + path: "{{ generated_certs_dir }}/system.admin.key" + - name: admin-cert + path: "{{ generated_certs_dir }}/system.admin.crt" + - name: admin-ca + path: "{{ generated_certs_dir }}/ca.crt" + - name: admin.jks + path: "{{ generated_certs_dir }}/system.admin.jks" # services - name: Set logging-{{ es_component }}-cluster service @@ -223,7 +234,7 @@ labels: logging-infra: 'support' ports: - - port: 9300 + - port: 9300 - name: Set logging-{{ es_component }} service oc_service: @@ -236,8 +247,8 @@ labels: logging-infra: 'support' ports: - - port: 9200 - targetPort: "restapi" + - port: 9200 + targetPort: "restapi" - name: Set logging-{{ es_component}}-prometheus service oc_service: @@ -247,9 +258,9 @@ labels: logging-infra: 'support' ports: - - name: proxy - port: 443 - targetPort: 4443 + - name: proxy + port: 443 + targetPort: 4443 selector: component: "{{ es_component }}-prometheus" provider: openshift @@ -277,46 +288,46 @@ # so we check for the presence of 'stderr' to determine if the obj exists or not # the RC for existing and not existing is both 0 - when: - - logging_elasticsearch_pvc.results.stderr is defined - - openshift_logging_elasticsearch_storage_type == "pvc" + - logging_elasticsearch_pvc.results.stderr is defined + - openshift_logging_elasticsearch_storage_type == "pvc" block: - # storageclasses are used by default but if static then disable - # storageclasses with the storageClassName set to "" in pvc.j2 - - name: Creating ES storage template - static - template: - src: pvc.j2 - dest: "{{ tempdir }}/templates/logging-es-pvc.yml" - vars: - obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" - size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}" - access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" - pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" - storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}" - when: - - not openshift_logging_elasticsearch_pvc_dynamic | bool - - # Storageclasses are used by default if configured - - name: Creating ES storage template - dynamic - template: - src: pvc.j2 - dest: "{{ tempdir }}/templates/logging-es-pvc.yml" - vars: - obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" - size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}" - access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" - pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" - when: - - openshift_logging_elasticsearch_pvc_dynamic | bool - - - name: Set ES storage - oc_obj: - state: present - kind: pvc - name: "{{ openshift_logging_elasticsearch_pvc_name }}" - namespace: "{{ openshift_logging_elasticsearch_namespace }}" - files: - - "{{ 
tempdir }}/templates/logging-es-pvc.yml" - delete_after: true + # storageclasses are used by default but if static then disable + # storageclasses with the storageClassName set to "" in pvc.j2 + - name: Creating ES storage template - static + template: + src: pvc.j2 + dest: "{{ tempdir }}/templates/logging-es-pvc.yml" + vars: + obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" + size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}" + access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" + pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" + storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}" + when: + - not openshift_logging_elasticsearch_pvc_dynamic | bool + + # Storageclasses are used by default if configured + - name: Creating ES storage template - dynamic + template: + src: pvc.j2 + dest: "{{ tempdir }}/templates/logging-es-pvc.yml" + vars: + obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" + size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}" + access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" + pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" + when: + - openshift_logging_elasticsearch_pvc_dynamic | bool + + - name: Set ES storage + oc_obj: + state: present + kind: pvc + name: "{{ openshift_logging_elasticsearch_pvc_name }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + files: + - "{{ tempdir }}/templates/logging-es-pvc.yml" + delete_after: true - set_fact: es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}" @@ -337,6 +348,7 @@ logging_component: elasticsearch deploy_name: "{{ es_deploy_name }}" image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}" + proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}" es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}" es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}" es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}" @@ -352,7 +364,7 @@ namespace: "{{ openshift_logging_elasticsearch_namespace }}" kind: dc files: - - "{{ tempdir }}/templates/logging-es-dc.yml" + - "{{ tempdir }}/templates/logging-es-dc.yml" delete_after: true - name: Retrieving the cert to use when generating secrets for the {{ es_component }} component @@ -360,37 +372,37 @@ src: "{{ generated_certs_dir }}/{{ item.file }}" register: key_pairs with_items: - - { name: "ca_file", file: "ca.crt" } - - { name: "es_key", file: "system.logging.es.key" } - - { name: "es_cert", file: "system.logging.es.crt" } + - { name: "ca_file", file: "ca.crt" } + - { name: "es_key", file: "system.logging.es.key" } + - { name: "es_cert", file: "system.logging.es.crt" } when: openshift_logging_es_allow_external | bool - set_fact: es_key: "{{ lookup('file', openshift_logging_es_key) | b64encode }}" when: - - openshift_logging_es_key | trim | length > 0 - - openshift_logging_es_allow_external | bool + - openshift_logging_es_key | trim | length > 0 + - openshift_logging_es_allow_external | bool changed_when: false - set_fact: 
es_cert: "{{ lookup('file', openshift_logging_es_cert) | b64encode }}" when: - - openshift_logging_es_cert | trim | length > 0 - - openshift_logging_es_allow_external | bool + - openshift_logging_es_cert | trim | length > 0 + - openshift_logging_es_allow_external | bool changed_when: false - set_fact: es_ca: "{{ lookup('file', openshift_logging_es_ca_ext) | b64encode }}" when: - - openshift_logging_es_ca_ext | trim | length > 0 - - openshift_logging_es_allow_external | bool + - openshift_logging_es_ca_ext | trim | length > 0 + - openshift_logging_es_allow_external | bool changed_when: false - set_fact: es_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}" when: - - es_ca is not defined - - openshift_logging_es_allow_external | bool + - es_ca is not defined + - openshift_logging_es_allow_external | bool changed_when: false - name: Generating Elasticsearch {{ es_component }} route template @@ -421,7 +433,7 @@ namespace: "{{ openshift_logging_elasticsearch_namespace }}" kind: route files: - - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml" + - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml" when: openshift_logging_es_allow_external | bool ## Placeholder for migration when necessary ## diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index 1ed886627..ce3b2eb83 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -40,7 +40,7 @@ spec: {% endif %} containers: - name: proxy - image: {{openshift_logging_elasticsearch_proxy_image_prefix}}:{{openshift_logging_elasticsearch_proxy_image_version}} + image: {{ proxy_image }} imagePullPolicy: Always args: - --upstream-ca=/etc/elasticsearch/secret/admin-ca @@ -86,7 +86,7 @@ spec: requests: memory: "{{es_memory_limit}}" {% if es_container_security_context %} - securityContext: {{ es_container_security_context | to_yaml }} + securityContext: {{ es_container_security_context | to_yaml }} {% endif %} ports: - diff --git a/roles/openshift_logging_elasticsearch/vars/default_images.yml b/roles/openshift_logging_elasticsearch/vars/default_images.yml new file mode 100644 index 000000000..b7d105caf --- /dev/null +++ b/roles/openshift_logging_elasticsearch/vars/default_images.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_elasticsearch_proxy_image_prefix: "docker.io/openshift/" +__openshift_logging_elasticsearch_proxy_image_version: "v1.0.0" diff --git a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml new file mode 100644 index 000000000..c87d48e27 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_elasticsearch_proxy_image_prefix: "registry.access.redhat.com/openshift3/" +__openshift_logging_elasticsearch_proxy_image_version: "v3.7" diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index f861a8e4d..b6875ebd4 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -54,3 +54,88 @@ r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_p openshift_master_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}" openshift_master_image_config_latest: "{{ openshift_master_image_config_latest_default }}" + +openshift_master_config_dir_default: "{{ (openshift.common.config_base | 
default('/etc/origin/master')) ~ '/master' }}" +openshift_master_config_dir: "{{ openshift_master_config_dir_default }}" +openshift_master_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}" + +openshift_master_node_config_networkconfig_mtu: 1450 + +openshift_master_node_config_kubeletargs_cpu: 500m +openshift_master_node_config_kubeletargs_mem: 512M + +openshift_master_bootstrap_enabled: False + +openshift_master_client_binary: "{{ openshift.common.client_binary if openshift is defined else 'oc' }}" + +openshift_master_config_imageconfig_format: "{{ oreg_url if oreg_url != '' else 'registry.access.redhat.com/openshift3/ose-${component}:${version}' }}" + +# these are for the default settings in a generated node-config.yaml +openshift_master_node_config_default_edits: +- key: nodeName + state: absent +- key: dnsBindAddress + value: 127.0.0.1:53 +- key: dnsDomain + value: cluster.local +- key: dnsRecursiveResolvConf + value: /etc/origin/node/resolv.conf +- key: imageConfig.format + value: "{{ openshift_master_config_imageconfig_format }}" +- key: kubeletArguments.cloud-config + value: + - "/etc/origin/cloudprovider/{{ openshift_master_cloud_provider }}.conf" +- key: kubeletArguments.cloud-provider + value: + - "{{ openshift_master_cloud_provider }}" +- key: kubeletArguments.kube-reserved + value: + - "cpu={{ openshift_master_node_config_kubeletargs_cpu }},memory={{ openshift_master_node_config_kubeletargs_mem }}" +- key: kubeletArguments.system-reserved + value: + - "cpu={{ openshift_master_node_config_kubeletargs_cpu }},memory={{ openshift_master_node_config_kubeletargs_mem }}" +- key: enable-controller-attach-detach + value: + - 'true' +- key: networkConfig.mtu + value: 8951 +- key: networkConfig.networkPluginName + value: "{{ r_openshift_master_sdn_network_plugin_name }}" +- key: networkPluginName + value: "{{ r_openshift_master_sdn_network_plugin_name }}" + + +# We support labels for all nodes here +openshift_master_node_config_kubeletargs_default_labels: [] +# We do support overrides for node group labels +openshift_master_node_config_kubeletargs_master_labels: [] +openshift_master_node_config_kubeletargs_infra_labels: [] +openshift_master_node_config_kubeletargs_compute_labels: [] + +openshift_master_node_config_master: + type: master + edits: + - key: kubeletArguments.node-labels + value: "{{ openshift_master_node_config_kubeletargs_default_labels | + union(openshift_master_node_config_kubeletargs_master_labels) | + union(['type=master']) }}" +openshift_master_node_config_infra: + type: infra + edits: + - key: kubeletArguments.node-labels + value: "{{ openshift_master_node_config_kubeletargs_default_labels | + union(openshift_master_node_config_kubeletargs_infra_labels) | + union(['type=infra']) }}" +openshift_master_node_config_compute: + type: compute + edits: + - key: kubeletArguments.node-labels + value: "{{ openshift_master_node_config_kubeletargs_default_labels | + union(openshift_master_node_config_kubeletargs_compute_labels) | + union(['type=compute']) }}" + +openshift_master_node_configs: +- "{{ openshift_master_node_config_infra }}" +- "{{ openshift_master_node_config_compute }}" + +openshift_master_bootstrap_namespace: openshift-node diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index a657668a9..a1cda2ad4 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -13,4 +13,5 @@ galaxy_info: - cloud dependencies: - role: lib_openshift +- role: lib_utils - role: lib_os_firewall 
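The per-group defaults above feed the new bootstrap flow in the next file: a node-config.yaml is generated with `oc adm create-node-config`, the dotted-key edits are applied with the `yedit` module (hence the new `lib_utils` dependency in meta/main.yml), and each result is stored as a `node-config-<type>` configmap. A minimal sketch of what those dotted-key edits amount to — `apply_edit` is a hypothetical helper for illustration, not the role's code:

```python
import yaml

def apply_edit(config, key, value=None, state='present'):
    """Apply one yedit-style edit: a dot-separated key addresses nested
    mappings; state='absent' removes the key, otherwise the value is set."""
    parts = key.split('.')
    parent = config
    for part in parts[:-1]:
        parent = parent.setdefault(part, {})
    if state == 'absent':
        parent.pop(parts[-1], None)
    else:
        parent[parts[-1]] = value
    return config

# a trimmed stand-in for the generated node-config.yaml
node_config = {
    'nodeName': 'CONFIGMAP',
    'networkConfig': {'mtu': 1450},
    'kubeletArguments': {},
}

# a few of the edits defined in openshift_master_node_config_default_edits
# plus the per-group node-labels override
edits = [
    {'key': 'nodeName', 'state': 'absent'},
    {'key': 'networkConfig.mtu', 'value': 8951},
    {'key': 'kubeletArguments.node-labels', 'value': ['type=compute']},
]
for edit in edits:
    apply_edit(node_config, **edit)

print(yaml.safe_dump(node_config, default_flow_style=False))
```

Each group config then only needs to override `kubeletArguments.node-labels` before the file is pushed into its configmap.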
diff --git a/roles/openshift_master/tasks/bootstrap.yml b/roles/openshift_master/tasks/bootstrap.yml index 0013f5289..eee89743c 100644 --- a/roles/openshift_master/tasks/bootstrap.yml +++ b/roles/openshift_master/tasks/bootstrap.yml @@ -26,3 +26,66 @@ copy: content: "{{ kubeconfig_out.stdout }}" dest: "{{ openshift_master_config_dir }}/bootstrap.kubeconfig" + +- name: create a temp dir for this work + command: mktemp -d /tmp/openshift_node_config-XXXXXX + register: mktempout + run_once: true + +# This generate is so that we do not have to maintain +# our own copy of the template. This is generated by +# the product and the following settings will be +# generated by the master +- name: generate a node-config dynamically + command: > + {{ openshift_master_client_binary }} adm create-node-config + --node-dir={{ mktempout.stdout }}/ + --node=CONFIGMAP + --hostnames=test + --certificate-authority={{ openshift_master_config_dir }}/ca.crt + --signer-cert={{ openshift_master_config_dir }}/ca.crt + --signer-key={{ openshift_master_config_dir }}/ca.key + --signer-serial={{ openshift_master_config_dir }}/ca.serial.txt + --node-client-certificate-authority={{ openshift_master_config_dir }}/ca.crt + register: configgen + run_once: true + +- name: remove the default settings + yedit: + state: "{{ item.state | default('present') }}" + src: "{{ mktempout.stdout }}/node-config.yaml" + key: "{{ item.key }}" + value: "{{ item.value | default(omit) }}" + with_items: "{{ openshift_master_node_config_default_edits }}" + run_once: true + +- name: copy the generated config into each group + copy: + src: "{{ mktempout.stdout }}/node-config.yaml" + remote_src: true + dest: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml" + with_items: "{{ openshift_master_node_configs }}" + run_once: true + +- name: "specialize the generated configs for node-config-{{ item.type }}" + yedit: + src: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml" + edits: "{{ item.edits }}" + with_items: "{{ openshift_master_node_configs }}" + run_once: true + +- name: create node-config.yaml configmap + oc_configmap: + name: "node-config-{{ item.type }}" + namespace: "{{ openshift_master_bootstrap_namespace }}" + from_file: + node-config.yaml: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml" + with_items: "{{ openshift_master_node_configs }}" + run_once: true + +- name: remove templated files + file: + dest: "{{ mktempout.stdout }}/" + state: absent + with_items: "{{ openshift_master_node_configs }}" + run_once: true diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 739b0d968..b310a8f64 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -4,7 +4,8 @@ openshift_node_debug_level: "{{ debug_level | default(2) }}" r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" -openshift_service_type: "{{ openshift.common.service_type }}" +openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}" +openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}" openshift_image_tag: '' @@ -17,7 +18,6 @@ openshift_node_ami_prep_packages: - openvswitch - docker - etcd -#- pcs - haproxy - dnsmasq - ntp @@ -54,7 +54,6 @@ openshift_node_ami_prep_packages: # - container-selinux # - atomic # -openshift_deployment_type: origin openshift_node_bootstrap: False diff --git 
a/roles/openshift_node/files/bootstrap.yml b/roles/openshift_node/files/bootstrap.yml new file mode 100644 index 000000000..ea280640f --- /dev/null +++ b/roles/openshift_node/files/bootstrap.yml @@ -0,0 +1,63 @@ +#!/usr/bin/ansible-playbook +--- +- hosts: localhost + gather_facts: yes + vars: + origin_dns: + file: /etc/dnsmasq.d/origin-dns.conf + lines: + - regex: ^listen-address + state: present + line: "listen-address={{ ansible_default_ipv4.address }}" + node_dns: + file: /etc/dnsmasq.d/node-dnsmasq.conf + lines: + - regex: "^server=/in-addr.arpa/127.0.0.1$" + line: server=/in-addr.arpa/127.0.0.1 + - regex: "^server=/cluster.local/127.0.0.1$" + line: server=/cluster.local/127.0.0.1 + + tasks: + - include_vars: openshift_settings.yaml + + - name: set the data for node_dns + lineinfile: + create: yes + insertafter: EOF + path: "{{ node_dns.file }}" + regexp: "{{ item.regex }}" + line: "{{ item.line | default(omit) }}" + with_items: "{{ node_dns.lines }}" + + - name: set the data for origin_dns + lineinfile: + create: yes + state: "{{ item.state | default('present') }}" + insertafter: "{{ item.after | default(omit) }}" + path: "{{ origin_dns.file }}" + regexp: "{{ item.regex }}" + line: "{{ item.line | default(omit)}}" + with_items: "{{ origin_dns.lines }}" + + - when: + - openshift_group_type is defined + - openshift_group_type != '' + - openshift_group_type != 'master' + block: + - name: determine the openshift_service_type + stat: + path: /etc/sysconfig/atomic-openshift-node + register: service_type_results + + - name: set openshift_service_type fact based on stat results + set_fact: + openshift_service_type: "{{ service_type_results.stat.exists | ternary('atomic-openshift', 'origin') }}" + + - name: update the sysconfig to have necessary variables + lineinfile: + dest: "/etc/sysconfig/{{ openshift_service_type }}-node" + line: "{{ item.line }}" + regexp: "{{ item.regexp }}" + with_items: + - line: "BOOTSTRAP_CONFIG_NAME=node-config-{{ openshift_group_type }}" + regexp: "^BOOTSTRAP_CONFIG_NAME=.*" diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 25a6fc721..b102c1b18 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -3,7 +3,11 @@ systemd: name: openvswitch state: restarted - when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift_node_use_openshift_sdn | bool + when: + - (not skip_node_svc_handlers | default(False) | bool) + - not (ovs_service_status_changed | default(false) | bool) + - openshift_node_use_openshift_sdn | bool + - not openshift_node_bootstrap register: l_openshift_node_stop_openvswitch_result until: not l_openshift_node_stop_openvswitch_result | failed retries: 3 @@ -11,10 +15,11 @@ notify: - restart openvswitch pause - - name: restart openvswitch pause pause: seconds=15 - when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool + when: + - (not skip_node_svc_handlers | default(False) | bool) + - openshift.common.is_containerized | bool - name: restart node systemd: diff --git a/roles/openshift_node/tasks/aws.yml b/roles/openshift_node/tasks/aws.yml new file mode 100644 index 000000000..38c2b794d --- /dev/null +++ b/roles/openshift_node/tasks/aws.yml @@ -0,0 +1,21 @@ +--- +- name: Configure AWS Cloud Provider Settings + lineinfile: + dest: /etc/sysconfig/{{ openshift.common.service_type }}-node + regexp: "{{ item.regex }}" + line: "{{ item.line }}" + 
create: true + with_items: + - regex: '^AWS_ACCESS_KEY_ID=' + line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}" + - regex: '^AWS_SECRET_ACCESS_KEY=' + line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" + register: sys_env_update + no_log: True + when: + - openshift_cloudprovider_kind is defined + - openshift_cloudprovider_kind == 'aws' + - openshift_cloudprovider_aws_access_key is defined + - openshift_cloudprovider_aws_secret_key is defined + notify: + - restart node diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml index 6bd2df362..8c03f6c41 100644 --- a/roles/openshift_node/tasks/bootstrap.yml +++ b/roles/openshift_node/tasks/bootstrap.yml @@ -17,17 +17,29 @@ [Unit] After=cloud-init.service -- name: update the sysconfig to have KUBECONFIG +- name: update the sysconfig to have necessary variables lineinfile: dest: "/etc/sysconfig/{{ openshift_service_type }}-node" - line: "KUBECONFIG=/root/csr_kubeconfig" + line: "{{ item.line | default(omit) }}" + regexp: "{{ item.regexp }}" + state: "{{ item.state | default('present') }}" + with_items: + # add the kubeconfig + - line: "KUBECONFIG=/etc/origin/node/csr_kubeconfig" regexp: "^KUBECONFIG=.*" + # remove the config file. This comes from openshift_facts + - regexp: "^CONFIG_FILE=.*" + state: absent -- name: update the ExecStart to have bootstrap - lineinfile: - dest: "/usr/lib/systemd/system/{{ openshift_service_type }}-node.service" - line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}" - regexp: "^ExecStart=.*" +- name: include aws sysconfig credentials + include: aws.yml + static: yes + +#- name: update the ExecStart to have bootstrap +# lineinfile: +# dest: "/usr/lib/systemd/system/{{ openshift_service_type }}-node.service" +# line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}" +# regexp: "^ExecStart=.*" - name: "disable {{ openshift_service_type }}-node and {{ openshift_service_type }}-master services" systemd: @@ -42,6 +54,30 @@ path: /etc/origin/.config_managed register: rpmgenerated_config +- name: create directories for bootstrapping + file: + state: directory + dest: "{{ item }}" + with_items: + - /root/openshift_bootstrap + - /var/lib/origin/openshift.local.config + - /var/lib/origin/openshift.local.config/node + - "/etc/docker/certs.d/docker-registry.default.svc:5000" + +- name: laydown the bootstrap.yml file for on boot configuration + copy: + src: bootstrap.yml + dest: /root/openshift_bootstrap/bootstrap.yml + +- name: symlink master ca for docker-registry + file: + src: "{{ item }}" + dest: "/etc/docker/certs.d/docker-registry.default.svc:5000/{{ item | basename }}" + state: link + force: yes + with_items: + - /var/lib/origin/openshift.local.config/node/node-client-ca.crt + - when: rpmgenerated_config.stat.exists block: - name: Remove RPM generated config files if present @@ -50,6 +86,7 @@ state: absent with_items: - master + - .config_managed # with_fileglob doesn't work correctly due to a few issues. # Could change this to fileglob when it gets fixed. 
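The same idempotent sysconfig pattern recurs in `aws.yml`, in the bootstrap tasks above, and again in `files/bootstrap.yml`: match a `KEY=...` line by regexp and replace it, otherwise append it. A rough stand-in for the effect of `lineinfile` here (not its actual implementation; the path and group name below are just examples):

```python
import re

def set_sysconfig_line(path, regexp, line):
    """Replace the first line matching regexp, or append the line --
    roughly the effect of lineinfile with create: true for KEY=value entries."""
    try:
        with open(path) as f:
            lines = f.read().splitlines()
    except FileNotFoundError:
        lines = []
    pattern = re.compile(regexp)
    for i, existing in enumerate(lines):
        if pattern.search(existing):
            lines[i] = line
            break
    else:
        lines.append(line)
    with open(path, 'w') as f:
        f.write('\n'.join(lines) + '\n')

# e.g. what the on-boot bootstrap playbook writes once the group type is known
set_sysconfig_line('/etc/sysconfig/origin-node',
                   r'^BOOTSTRAP_CONFIG_NAME=.*',
                   'BOOTSTRAP_CONFIG_NAME=node-config-compute')
```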
@@ -62,5 +99,7 @@ file: path: "{{ item.path }}" state: absent - when: "'resolv.conf' not in item.path or 'node-dnsmasq.conf' not in item.path" + when: + - "'resolv.conf' not in item.path" + - "'node-dnsmasq.conf' not in item.path" with_items: "{{ find_results.files }}" diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index e5fcaf9af..c08f43118 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -46,26 +46,16 @@ notify: - restart node -- name: Configure AWS Cloud Provider Settings - lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-node - regexp: "{{ item.regex }}" - line: "{{ item.line }}" - create: true - with_items: - - regex: '^AWS_ACCESS_KEY_ID=' - line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}" - - regex: '^AWS_SECRET_ACCESS_KEY=' - line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" - no_log: True - when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined - notify: - - restart node +- name: include aws provider credentials + include: aws.yml + static: yes # Necessary because when you're on a node that's also a master the master will be # restarted after the node restarts docker and it will take up to 60 seconds for # systemd to start the master again -- when: openshift.common.is_containerized | bool +- when: + - openshift.common.is_containerized | bool + - not openshift_node_bootstrap block: - name: Wait for master API to become available before proceeding # Using curl here since the uri module requires python-httplib2 and @@ -90,26 +80,28 @@ enabled: yes state: started -- name: Start and enable node - systemd: - name: "{{ openshift.common.service_type }}-node" - enabled: yes - state: started - daemon_reload: yes - register: node_start_result - until: not node_start_result | failed - retries: 1 - delay: 30 - ignore_errors: true +- when: not openshift_node_bootstrap + block: + - name: Start and enable node + systemd: + name: "{{ openshift.common.service_type }}-node" + enabled: yes + state: started + daemon_reload: yes + register: node_start_result + until: not node_start_result | failed + retries: 1 + delay: 30 + ignore_errors: true -- name: Dump logs from node service if it failed - command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node - when: node_start_result | failed + - name: Dump logs from node service if it failed + command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node + when: node_start_result | failed -- name: Abort if node failed to start - fail: - msg: Node failed to start please inspect the logs and try again - when: node_start_result | failed + - name: Abort if node failed to start + fail: + msg: Node failed to start please inspect the logs and try again + when: node_start_result | failed -- set_fact: - node_service_status_changed: "{{ node_start_result | changed }}" + - set_fact: + node_service_status_changed: "{{ node_start_result | changed }}" diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml index 1539d6e3b..6b7e40491 100644 --- a/roles/openshift_node/tasks/install.yml +++ b/roles/openshift_node/tasks/install.yml @@ -3,12 +3,12 @@ block: - name: Install Node package package: - name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | 
default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" + name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}" state: present - name: Install sdn-ovs package package: - name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}" + name: "{{ openshift.common.service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}" state: present when: - openshift_node_use_openshift_sdn | bool diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 59b8bb76e..eae9ca7bc 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -66,15 +66,10 @@ sysctl_file: "/etc/sysctl.d/99-openshift.conf" reload: yes -- name: include bootstrap node config - include: bootstrap.yml - when: openshift_node_bootstrap - - include: registry_auth.yml - name: include standard node config include: config.yml - when: not openshift_node_bootstrap #### Storage class plugins here #### - name: NFS storage plugin configuration @@ -98,3 +93,7 @@ - include: config/workaround-bz1331590-ovs-oom-fix.yml when: openshift_node_use_openshift_sdn | default(true) | bool + +- name: include bootstrap node config + include: bootstrap.yml + when: openshift_node_bootstrap diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2 index 0856737f6..7602d8ee6 100644 --- a/roles/openshift_node/templates/node.service.j2 +++ b/roles/openshift_node/templates/node.service.j2 @@ -12,17 +12,17 @@ After=dnsmasq.service [Service] Type=notify -EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node +EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node Environment=GOTRACEBACK=crash ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/ ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1 ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string: -ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/openshift start node {% if openshift_node_bootstrap %} --kubeconfig=${KUBECONFIG} --bootstrap-config-name=${BOOTSTRAP_CONFIG_NAME}{% endif %} --config=${CONFIG_FILE} $OPTIONS LimitNOFILE=65536 LimitCORE=infinity WorkingDirectory=/var/lib/origin/ -SyslogIdentifier={{ openshift.common.service_type }}-node +SyslogIdentifier={{ openshift_service_type }}-node Restart=always RestartSec=5s TimeoutStartSec=300 diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml index 5aa8aecec..c08bec4cb 100644 --- a/roles/openshift_prometheus/defaults/main.yaml +++ b/roles/openshift_prometheus/defaults/main.yaml @@ -10,50 +10,30 @@ openshift_prometheus_node_selector: {"region":"infra"} # images openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0" openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev" -openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev" +openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:v0.9.1" 
openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1" # additional prometheus rules file openshift_prometheus_additional_rules_file: null -# All the required exports -openshift_prometheus_pv_exports: - - prometheus - - prometheus-alertmanager - - prometheus-alertbuffer -# PV template files and their created object names -openshift_prometheus_pv_data: - - pv_name: prometheus - pv_template: prom-pv-server.yml - pv_label: Prometheus Server PV - - pv_name: prometheus-alertmanager - pv_template: prom-pv-alertmanager.yml - pv_label: Prometheus Alertmanager PV - - pv_name: prometheus-alertbuffer - pv_template: prom-pv-alertbuffer.yml - pv_label: Prometheus Alert Buffer PV - -# Hostname/IP of the NFS server. Currently defaults to first master -openshift_prometheus_nfs_server: "{{ groups.nfs.0 }}" - # storage openshift_prometheus_storage_type: pvc openshift_prometheus_pvc_name: prometheus -openshift_prometheus_pvc_size: 10G +openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}" openshift_prometheus_pvc_access_modes: [ReadWriteOnce] -openshift_prometheus_pvc_pv_selector: {} +openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}" openshift_prometheus_alertmanager_storage_type: pvc openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager -openshift_prometheus_alertmanager_pvc_size: 10G +openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}" openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce] -openshift_prometheus_alertmanager_pvc_pv_selector: {} +openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}" openshift_prometheus_alertbuffer_storage_type: pvc openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer -openshift_prometheus_alertbuffer_pvc_size: 10G +openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}" openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce] -openshift_prometheus_alertbuffer_pvc_pv_selector: {} +openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default({}) }}" # container resources openshift_prometheus_cpu_limit: null diff --git a/roles/openshift_prometheus/files/openshift_prometheus.exports b/roles/openshift_prometheus/files/openshift_prometheus.exports deleted file mode 100644 index 3ccedb1fd..000000000 --- a/roles/openshift_prometheus/files/openshift_prometheus.exports +++ /dev/null @@ -1,3 +0,0 @@ -/exports/prometheus *(rw,no_root_squash,no_wdelay) -/exports/prometheus-alertmanager *(rw,no_root_squash,no_wdelay) -/exports/prometheus-alertbuffer *(rw,no_root_squash,no_wdelay) diff --git a/roles/openshift_prometheus/tasks/create_pvs.yaml b/roles/openshift_prometheus/tasks/create_pvs.yaml deleted file mode 100644 index 4e79da05f..000000000 --- a/roles/openshift_prometheus/tasks/create_pvs.yaml +++ /dev/null @@ -1,36 +0,0 @@ ---- -# Check for existance and then conditionally: -# - evaluate templates -# - PVs -# -# These tasks idempotently create required Prometheus PV objects. Do not -# call this file directly. This file is intended to be ran as an -# include that has a 'with_items' attached to it. 
Hence the use below -# of variables like "{{ item.pv_label }}" - -- name: "Check if the {{ item.pv_label }} template has been created already" - oc_obj: - namespace: "{{ openshift_prometheus_namespace }}" - state: list - kind: pv - name: "{{ item.pv_name }}" - register: prom_pv_check - -# Skip all of this if the PV already exists -- block: - - name: "Ensure the {{ item.pv_label }} template is evaluated" - template: - src: "{{ item.pv_template }}.j2" - dest: "{{ tempdir }}/templates/{{ item.pv_template }}" - - - name: "Ensure {{ item.pv_label }} is created" - oc_obj: - namespace: "{{ openshift_prometheus_namespace }}" - kind: pv - name: "{{ item.pv_name }}" - state: present - delete_after: True - files: - - "{{ tempdir }}/templates/{{ item.pv_template }}" - when: - - not prom_pv_check.results.results.0 diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml index a9bce2fb1..cb75eedca 100644 --- a/roles/openshift_prometheus/tasks/install_prometheus.yaml +++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml @@ -54,15 +54,6 @@ resource_name: cluster-reader user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus" - -###################################################################### -# NFS -# In the case that we are not running on a cloud provider, volumes must be statically provisioned - -- include: nfs.yaml - when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')) - - # create prometheus and alerts services # TODO join into 1 task with loop - name: Create prometheus service diff --git a/roles/openshift_prometheus/tasks/nfs.yaml b/roles/openshift_prometheus/tasks/nfs.yaml deleted file mode 100644 index 0b45f2cee..000000000 --- a/roles/openshift_prometheus/tasks/nfs.yaml +++ /dev/null @@ -1,44 +0,0 @@ ---- -# Tasks to statically provision NFS volumes -# Include if not using dynamic volume provisioning -- name: Ensure the /exports/ directory exists - file: - path: /exports/ - state: directory - mode: 0755 - owner: root - group: root - -- name: Ensure the prom-pv0X export directories exist - file: - path: "/exports/{{ item }}" - state: directory - mode: 0777 - owner: nfsnobody - group: nfsnobody - with_items: "{{ openshift_prometheus_pv_exports }}" - -- name: Ensure the NFS exports for Prometheus PVs exist - copy: - src: openshift_prometheus.exports - dest: /etc/exports.d/openshift_prometheus.exports - register: nfs_exports_updated - -- name: Ensure the NFS export table is refreshed if exports were added - command: exportfs -ar - when: - - nfs_exports_updated.changed - - -###################################################################### -# Create the required Prometheus PVs. Check out these online docs if you -# need a refresher on includes looping with items: -# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0 -# * http://stackoverflow.com/a/35128533 -# -# TODO: Handle the case where a PV template is updated in -# openshift-ansible and the change needs to be landed on the managed -# cluster. 
- -- include: create_pvs.yaml - with_items: "{{ openshift_prometheus_pv_data }}" diff --git a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 deleted file mode 100644 index 55a5e19c3..000000000 --- a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: prometheus-alertbuffer - labels: - storage: prometheus-alertbuffer -spec: - capacity: - storage: 15Gi - accessModes: - - ReadWriteOnce - nfs: - path: /exports/prometheus-alertbuffer - server: {{ openshift_prometheus_nfs_server }} - persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 deleted file mode 100644 index 4ee518735..000000000 --- a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: prometheus-alertmanager - labels: - storage: prometheus-alertmanager -spec: - capacity: - storage: 15Gi - accessModes: - - ReadWriteOnce - nfs: - path: /exports/prometheus-alertmanager - server: {{ openshift_prometheus_nfs_server }} - persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 deleted file mode 100644 index 933bf0f60..000000000 --- a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: prometheus - labels: - storage: prometheus -spec: - capacity: - storage: 15Gi - accessModes: - - ReadWriteOnce - nfs: - path: /exports/prometheus - server: {{ openshift_prometheus_nfs_server }} - persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_prometheus/templates/prometheus_deployment.j2 b/roles/openshift_prometheus/templates/prometheus_deployment.j2 index 98c117f19..66eab6df4 100644 --- a/roles/openshift_prometheus/templates/prometheus_deployment.j2 +++ b/roles/openshift_prometheus/templates/prometheus_deployment.j2 @@ -38,7 +38,7 @@ spec: cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}" {% endif %} limits: -{% if openshift_prometheus_memory_requests_limit_proxy is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %} +{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %} memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}" {% endif %} {% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %} diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml index 3047fbaf9..c4e023c1e 100644 --- a/roles/openshift_storage_nfs/tasks/main.yml +++ b/roles/openshift_storage_nfs/tasks/main.yml @@ -35,6 +35,9 @@ - "{{ openshift.logging }}" - "{{ openshift.loggingops }}" - "{{ openshift.hosted.etcd }}" + - "{{ openshift.prometheus }}" + - "{{ openshift.prometheus.alertmanager }}" + - "{{ openshift.prometheus.alertbuffer }}" - name: Configure exports template: diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2 index 0141e0d25..c2a741035 100644 --- a/roles/openshift_storage_nfs/templates/exports.j2 +++ b/roles/openshift_storage_nfs/templates/exports.j2 @@ -3,3 +3,6 @@ {{ 
openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }} {{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }} {{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }} +{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }} +{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }} +{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }}
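With the prometheus storage defaults added to openshift_facts.py earlier in this diff (directory `/exports`, volume names `prometheus`, `prometheus-alertmanager`, `prometheus-alertbuffer`, options `*(rw,root_squash)`), the three new exports.j2 lines render to the NFS export entries shown below. A quick standalone check, illustrative only, rendering the template lines directly with jinja2:

```python
import jinja2

# fact defaults as introduced in openshift_facts.py in this change
facts = {
    'prometheus': {
        'storage': {'nfs': {'directory': '/exports', 'options': '*(rw,root_squash)'},
                    'volume': {'name': 'prometheus'}},
        'alertmanager': {'storage': {'nfs': {'directory': '/exports', 'options': '*(rw,root_squash)'},
                                     'volume': {'name': 'prometheus-alertmanager'}}},
        'alertbuffer': {'storage': {'nfs': {'directory': '/exports', 'options': '*(rw,root_squash)'},
                                    'volume': {'name': 'prometheus-alertbuffer'}}},
    }
}

template = jinja2.Template(
    "{{ openshift.prometheus.storage.nfs.directory }}/"
    "{{ openshift.prometheus.storage.volume.name }} "
    "{{ openshift.prometheus.storage.nfs.options }}\n"
    "{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/"
    "{{ openshift.prometheus.alertmanager.storage.volume.name }} "
    "{{ openshift.prometheus.alertmanager.storage.nfs.options }}\n"
    "{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/"
    "{{ openshift.prometheus.alertbuffer.storage.volume.name }} "
    "{{ openshift.prometheus.alertbuffer.storage.nfs.options }}\n")

print(template.render(openshift=facts))
# -> /exports/prometheus *(rw,root_squash)
#    /exports/prometheus-alertmanager *(rw,root_squash)
#    /exports/prometheus-alertbuffer *(rw,root_squash)
```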