Diffstat (limited to 'roles')
50 files changed, 269 insertions, 160 deletions
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index dbe0b0d28..888ae40e7 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -139,6 +139,13 @@
   notify:
   - restart docker
 
+# The following task is needed as the systemd module may report a change in
+# state even though docker is already running.
+- name: Detect if docker is already started
+  command: "systemctl show docker -p ActiveState"
+  changed_when: False
+  register: r_docker_already_running_result
+
 - name: Start the Docker service
   systemd:
     name: docker
@@ -151,7 +158,7 @@
     delay: 30
 
 - set_fact:
-    docker_service_status_changed: "{{ r_docker_package_docker_start_result | changed }}"
+    docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active') }}"
 
 - name: Check for credentials file for registry auth
   stat:
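Note: the new guard keys off systemctl's property output. "systemctl show docker -p ActiveState" prints the literal string "ActiveState=active" for a running unit, so docker_service_status_changed now stays false when docker was already up before the play. A hypothetical downstream consumer of the fact (not part of this patch) would look like:

    # Hypothetical example only: restart a dependent service when docker
    # really transitioned to running during this run.
    - name: Restart node service after a real docker state change
      service:
        name: origin-node
        state: restarted
      when: docker_service_status_changed | bool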
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 807b9541a..78f231416 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -70,7 +70,8 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
 etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
 etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
 
-etcd_peer: 127.0.0.1
+# required role variable
+#etcd_peer: 127.0.0.1
 etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
 
 etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}"
diff --git a/roles/etcd/tasks/auxiliary/clean_data.yml b/roles/etcd/tasks/auxiliary/clean_data.yml
index 95a0e7c0a..1ed2db5bc 100644
--- a/roles/etcd/tasks/auxiliary/clean_data.yml
+++ b/roles/etcd/tasks/auxiliary/clean_data.yml
@@ -1,5 +1,5 @@
 ---
 - name: Remove member data
   file:
-    path: /var/lib/etcd/member
+    path: "{{ etcd_data_dir }}/member"
     state: absent
diff --git a/roles/etcd/tasks/auxiliary/disable_etcd.yml b/roles/etcd/tasks/auxiliary/disable_etcd.yml
new file mode 100644
index 000000000..7c6d0409d
--- /dev/null
+++ b/roles/etcd/tasks/auxiliary/disable_etcd.yml
@@ -0,0 +1,5 @@
+---
+- name: Disable etcd members
+  service:
+    name: "{{ etcd_service }}"
+    state: stopped
diff --git a/roles/etcd/tasks/auxiliary/force_new_cluster.yml b/roles/etcd/tasks/auxiliary/force_new_cluster.yml
new file mode 100644
index 000000000..ae8a36130
--- /dev/null
+++ b/roles/etcd/tasks/auxiliary/force_new_cluster.yml
@@ -0,0 +1,31 @@
+---
+- name: Set ETCD_FORCE_NEW_CLUSTER=true on first etcd host
+  lineinfile:
+    line: "ETCD_FORCE_NEW_CLUSTER=true"
+    dest: /etc/etcd/etcd.conf
+    backup: true
+
+- name: Start etcd
+  systemd:
+    name: "{{ etcd_service }}"
+    state: started
+
+- name: Wait for cluster to become healthy after bringing up first member
+  command: >
+    etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health
+  register: l_etcd_migrate_health
+  until: l_etcd_migrate_health.rc == 0
+  retries: 3
+  delay: 30
+
+- name: Unset ETCD_FORCE_NEW_CLUSTER=true on first etcd host
+  lineinfile:
+    line: "ETCD_FORCE_NEW_CLUSTER=true"
+    dest: /etc/etcd/etcd.conf
+    state: absent
+    backup: true
+
+- name: Restart first etcd host
+  systemd:
+    name: "{{ etcd_service }}"
+    state: restarted
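Note: ETCD_FORCE_NEW_CLUSTER=true makes the first member discard the old cluster membership and come up as a one-node cluster; the flag must be removed again (as the third task does) or every later restart would wipe membership. While recovery runs, /etc/etcd/etcd.conf contains roughly the following (values assumed for illustration):

    # /etc/etcd/etcd.conf (excerpt, during recovery only; values assumed)
    ETCD_NAME=master-0.example.com
    ETCD_DATA_DIR=/var/lib/etcd/
    ETCD_FORCE_NEW_CLUSTER=true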
diff --git a/roles/etcd/tasks/backup.archive.yml b/roles/etcd/tasks/backup.archive.yml
new file mode 100644
index 000000000..6daa6dc51
--- /dev/null
+++ b/roles/etcd/tasks/backup.archive.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/archive.yml
diff --git a/roles/etcd/tasks/backup.copy.yml b/roles/etcd/tasks/backup.copy.yml
new file mode 100644
index 000000000..cc540cbca
--- /dev/null
+++ b/roles/etcd/tasks/backup.copy.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/copy.yml
diff --git a/roles/etcd/tasks/backup.fetch.yml b/roles/etcd/tasks/backup.fetch.yml
new file mode 100644
index 000000000..26ec15043
--- /dev/null
+++ b/roles/etcd/tasks/backup.fetch.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/fetch.yml
diff --git a/roles/etcd/tasks/backup.force_new_cluster.yml b/roles/etcd/tasks/backup.force_new_cluster.yml
new file mode 100644
index 000000000..24bd0540d
--- /dev/null
+++ b/roles/etcd/tasks/backup.force_new_cluster.yml
@@ -0,0 +1,12 @@
+---
+- include: backup/vars.yml
+
+- name: Move content of etcd backup under the etcd data directory
+  command: >
+    mv "{{ l_etcd_backup_dir }}/member" "{{ l_etcd_data_dir }}"
+
+- name: Set etcd group for the etcd data directory
+  command: >
+    chown -R etcd:etcd "{{ l_etcd_data_dir }}"
+
+- include: auxiliary/force_new_cluster.yml
diff --git a/roles/etcd/tasks/backup.unarchive.yml b/roles/etcd/tasks/backup.unarchive.yml
new file mode 100644
index 000000000..77a637360
--- /dev/null
+++ b/roles/etcd/tasks/backup.unarchive.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/unarchive.yml
diff --git a/roles/etcd/tasks/backup/archive.yml b/roles/etcd/tasks/backup/archive.yml
new file mode 100644
index 000000000..f6aa68a6e
--- /dev/null
+++ b/roles/etcd/tasks/backup/archive.yml
@@ -0,0 +1,5 @@
+---
+- name: Archive backup
+  archive:
+    path: "{{ l_etcd_backup_dir }}"
+    dest: "{{ l_etcd_backup_dir }}.tgz"
diff --git a/roles/etcd/tasks/backup/backup.yml b/roles/etcd/tasks/backup/backup.yml
index 42d27c081..ec1a1989c 100644
--- a/roles/etcd/tasks/backup/backup.yml
+++ b/roles/etcd/tasks/backup/backup.yml
@@ -1,21 +1,5 @@
 ---
-# set the etcd backup directory name here in case the tag or sufix consists of dynamic value that changes over time
-# e.g. openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} value will change every second so if the date changes
-# right after setting l_etcd_incontainer_backup_dir and before l_etcd_backup_dir facts, the backup directory name is different
-- set_fact:
-    l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
-
-- set_fact:
-    l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}"
-
-- set_fact:
-    l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"
-
-- set_fact:
-    l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"
-
-- set_fact:
-    l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}"
+- include: vars.yml
 
 # TODO: replace shell module with command and update later checks
 - name: Check available disk space for etcd backup
diff --git a/roles/etcd/tasks/backup/copy.yml b/roles/etcd/tasks/backup/copy.yml
new file mode 100644
index 000000000..16604bae8
--- /dev/null
+++ b/roles/etcd/tasks/backup/copy.yml
@@ -0,0 +1,5 @@
+---
+- name: Copy etcd backup
+  copy:
+    src: "{{ etcd_backup_sync_directory }}/{{ l_backup_dir_name }}.tgz"
+    dest: "{{ l_etcd_data_dir }}"
diff --git a/roles/etcd/tasks/backup/fetch.yml b/roles/etcd/tasks/backup/fetch.yml
new file mode 100644
index 000000000..610ce1960
--- /dev/null
+++ b/roles/etcd/tasks/backup/fetch.yml
@@ -0,0 +1,8 @@
+---
+- name: Fetch etcd backup
+  fetch:
+    src: "{{ l_etcd_backup_dir }}.tgz"
+    dest: "{{ etcd_backup_sync_directory }}/"
+    flat: yes
+    fail_on_missing: yes
+    validate_checksum: yes
diff --git a/roles/etcd/tasks/backup/unarchive.yml b/roles/etcd/tasks/backup/unarchive.yml
new file mode 100644
index 000000000..6c75d00a7
--- /dev/null
+++ b/roles/etcd/tasks/backup/unarchive.yml
@@ -0,0 +1,14 @@
+---
+- shell: ls /var/lib/etcd
+  register: output
+
+- debug:
+    msg: "output: {{ output }}"
+
+- name: Unarchive backup
+  # can't use unarchive https://github.com/ansible/ansible/issues/30821
+  # unarchive:
+  #   src: "{{ l_etcd_backup_dir }}.tgz"
+  #   dest: "{{ l_etcd_backup_dir }}"
+  command: >
+    tar -xf "{{ l_etcd_backup_dir }}.tgz" -C "{{ l_etcd_data_dir }}"
diff --git a/roles/etcd/tasks/backup/vars.yml b/roles/etcd/tasks/backup/vars.yml
new file mode 100644
index 000000000..3c009f557
--- /dev/null
+++ b/roles/etcd/tasks/backup/vars.yml
@@ -0,0 +1,18 @@
+---
+# set the etcd backup directory name here in case the tag or sufix consists of dynamic value that changes over time
+# e.g. openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} value will change every second so if the date changes
+# right after setting l_etcd_incontainer_backup_dir and before l_etcd_backup_dir facts, the backup directory name is different
+- set_fact:
+    l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
+
+- set_fact:
+    l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}"
+
+- set_fact:
+    l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"
+
+- set_fact:
+    l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"
+
+- set_fact:
+    l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}"
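Note: computing these facts once in vars.yml means the in-container and on-host paths share a single l_backup_dir_name, which avoids the race described in the comment when the tag/suffix embeds a timestamp. For a hypothetical run the facts would resolve to something like:

    # Illustration only; tag and suffix values assumed.
    l_backup_dir_name: openshift-backup-pre-upgrade-20171101010101
    l_etcd_data_dir: /var/lib/etcd          # plus /etcd.etcd under runc
    l_etcd_backup_dir: /var/lib/etcd/openshift-backup-pre-upgrade-20171101010101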
diff --git a/roles/etcd/tasks/backup_master_etcd_certificates.yml b/roles/etcd/tasks/backup_master_etcd_certificates.yml
new file mode 100644
index 000000000..129e1831c
--- /dev/null
+++ b/roles/etcd/tasks/backup_master_etcd_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/backup_master_etcd_certificates.yml
diff --git a/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml b/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml
new file mode 100644
index 000000000..e65b3e5a2
--- /dev/null
+++ b/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml
@@ -0,0 +1,7 @@
+---
+- name: Backup master etcd certificates
+  shell: >
+    tar -czvf /etc/origin/master/master-etcd-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+    /etc/origin/master/master.etcd-*
+  args:
+    warn: no
diff --git a/roles/etcd/tasks/check_cluster_health.yml b/roles/etcd/tasks/check_cluster_health.yml
new file mode 100644
index 000000000..75c110972
--- /dev/null
+++ b/roles/etcd/tasks/check_cluster_health.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/check_cluster_health.yml
diff --git a/roles/etcd/tasks/disable_etcd.yml b/roles/etcd/tasks/disable_etcd.yml
new file mode 100644
index 000000000..9202e6e48
--- /dev/null
+++ b/roles/etcd/tasks/disable_etcd.yml
@@ -0,0 +1,2 @@
+---
+- include: auxiliary/disable_etcd.yml
diff --git a/roles/etcd/tasks/fetch_backup.yml b/roles/etcd/tasks/fetch_backup.yml
new file mode 100644
index 000000000..513eed17a
--- /dev/null
+++ b/roles/etcd/tasks/fetch_backup.yml
@@ -0,0 +1,8 @@
+---
+- include: backup/vars.yml
+
+- include: backup/archive.yml
+
+- include: backup/sync_backup.yml
+
+- include: backup/
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index e735bf50a..024479fb4 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -17,6 +17,7 @@
       {{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }},
       {%- endif -%}
       {% endfor -%}
+  when: etcd_initial_cluster is undefined
 
 - name: Check etcd system container package
   command: >
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 8462bb4c8..3027a9447 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -29,8 +29,8 @@ ETCD_INITIAL_CLUSTER={{ etcd_hostname}}={{ etcd_initial_advertise_peer_urls }}
 ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
 ETCD_INITIAL_CLUSTER_TOKEN=thirdparty-etcd-cluster-1
 {% else %}
-{% if initial_etcd_cluster is defined and initial_etcd_cluster %}
-ETCD_INITIAL_CLUSTER={{ initial_etcd_cluster }}
+{% if etcd_initial_cluster is defined and etcd_initial_cluster %}
+ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }}
{% else %}
 ETCD_INITIAL_CLUSTER={{ initial_cluster() }}
 {% endif %}
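Note: with this rename, an explicitly supplied etcd_initial_cluster now wins over the generated initial_cluster() value in the template, and also skips the fact-building loop in system_container.yml. A hypothetical inventory override (member names and IPs assumed):

    etcd_initial_cluster: "master-0=https://192.0.2.10:2380,master-1=https://192.0.2.11:2380"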
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index ac369b882..25f9405af 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -136,7 +136,7 @@ class CallbackModule(CallbackBase):
         },
         'installer_phase_management': {
             'title': 'Management Install',
-            'playbook': 'playbooks/common/openshift-cluster/openshift_management.yml'
+            'playbook': 'playbooks/byo/openshift-management/config.yml'
         },
     }
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
index ff96081fe..4aca5c7a8 100644
--- a/roles/openshift_aws/README.md
+++ b/roles/openshift_aws/README.md
@@ -1,7 +1,29 @@
 openshift_aws
 ==================================
 
-Provision AWS infrastructure helpers.
+Provision AWS infrastructure and instances.
+
+This role contains many task-areas to provision resources and perform actions
+against an AWS account for the purposes of dynamically building an openshift
+cluster.
+
+This role is primarily intended to be used with "include_role" and "tasks_from".
+
+include_role can be called from the tasks section in a play. See example
+playbook below for reference.
+
+These task-areas are:
+
+* provision a vpc: vpc.yml
+* provision elastic load balancers: elb.yml
+* upload IAM ssl certificates to use with load balancers: iam_cert.yml
+* provision an S3 bucket: s3.yml
+* provision an instance to build an AMI: provision_instance.yml
+* provision a security group in AWS: security_group.yml
+* provision ssh keys and users in AWS: ssh_keys.yml
+* provision an AMI in AWS: seal_ami.yml
+* provision scale groups: scale_group.yml
+* provision launch configs: launch_config.yml
 
 Requirements
 ------------
@@ -9,56 +31,9 @@ Requirements
 * Ansible 2.3
 * Boto
 
-Role Variables
---------------
-
-From this role:
-
-| Name | Default value
-|---------------------------------------------------|-----------------------
-| openshift_aws_clusterid | default
-| openshift_aws_elb_scheme | internet-facing
-| openshift_aws_launch_config_bootstrap_token | ''
-| openshift_aws_node_group_config | {'master': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_master_volumes }}', 'tags': {'host-type': 'master', 'sub-host-type': 'default'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'wait_for_instances': True, 'max_size': 3}, 'tags': '{{ openshift_aws_node_group_config_tags }}', 'compute': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'compute'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'max_size': 100}, 'infra': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'infra'}, 'min_size': 2, 'instance_type': 'm4.xlarge', 'desired_size': 2, 'max_size': 20}}
-| openshift_aws_ami_copy_wait | False
-| openshift_aws_users | []
-| openshift_aws_launch_config_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
-| openshift_aws_node_group_type | master
-| openshift_aws_elb_cert_arn | ''
-| openshift_aws_kubernetes_cluster_status | owned
-| openshift_aws_s3_mode | create
-| openshift_aws_vpc | {'subnets': {'us-east-1': [{'cidr': '172.31.48.0/20', 'az': 'us-east-1c'}, {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'}, {'cidr': '172.31.16.0/20', 'az': 'us-east-1a'}]}, 'cidr': '172.31.0.0/16', 'name': '{{ openshift_aws_vpc_name }}'}
-| openshift_aws_create_ssh_keys | False
-| openshift_aws_iam_kms_alias | alias/{{ openshift_aws_clusterid }}_kms
-| openshift_aws_use_custom_ami | False
-| openshift_aws_ami_copy_src_region | {{ openshift_aws_region }}
-| openshift_aws_s3_bucket_name | {{ openshift_aws_clusterid }}
-| openshift_aws_elb_health_check | {'response_timeout': 5, 'ping_port': 443, 'ping_protocol': 'tcp', 'interval': 30, 'healthy_threshold': 2, 'unhealthy_threshold': 2}
-| openshift_aws_node_security_groups | {'default': {'rules': [{'to_port': 22, 'from_port': 22, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 'all', 'from_port': 'all', 'proto': 'all', 'group_name': '{{ openshift_aws_clusterid }}'}], 'name': '{{ openshift_aws_clusterid }}', 'desc': '{{ openshift_aws_clusterid }} default'}, 'master': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_master', 'desc': '{{ openshift_aws_clusterid }} master instances'}, 'compute': {'name': '{{ openshift_aws_clusterid }}_compute', 'desc': '{{ openshift_aws_clusterid }} compute node instances'}, 'etcd': {'name': '{{ openshift_aws_clusterid }}_etcd', 'desc': '{{ openshift_aws_clusterid }} etcd instances'}, 'infra': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 32000, 'from_port': 30000, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_infra', 'desc': '{{ openshift_aws_clusterid }} infra node instances'}}
-| openshift_aws_elb_security_groups | ['{{ openshift_aws_clusterid }}', '{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}']
-| openshift_aws_vpc_tags | {'Name': '{{ openshift_aws_vpc_name }}'}
-| openshift_aws_create_security_groups | False
-| openshift_aws_create_iam_cert | False
-| openshift_aws_create_scale_group | True
-| openshift_aws_ami_encrypt | False
-| openshift_aws_node_group_config_node_volumes | [{'volume_size': 100, 'delete_on_termination': True, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
-| openshift_aws_elb_instance_filter | {'tag:host-type': '{{ openshift_aws_node_group_type }}', 'tag:clusterid': '{{ openshift_aws_clusterid }}', 'instance-state-name': 'running'}
-| openshift_aws_region | us-east-1
-| openshift_aws_elb_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
-| openshift_aws_elb_idle_timout | 400
-| openshift_aws_subnet_name | us-east-1c
-| openshift_aws_node_group_config_tags | {{ openshift_aws_clusterid | openshift_aws_build_instance_tags(openshift_aws_kubernetes_cluster_status) }}
-| openshift_aws_create_launch_config | True
-| openshift_aws_ami_tags | {'bootstrap': 'true', 'clusterid': '{{ openshift_aws_clusterid }}', 'openshift-created': 'true'}
-| openshift_aws_ami_name | openshift-gi
-| openshift_aws_node_group_config_master_volumes | [{'volume_size': 100, 'delete_on_termination': False, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
-| openshift_aws_vpc_name | {{ openshift_aws_clusterid }}
-| openshift_aws_elb_listeners | {'master': {'internal': [{'instance_port': 80, 'instance_protocol': 'tcp', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'tcp', 'load_balancer_port': 443, 'protocol': 'tcp'}], 'external': [{'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 443, 'ssl_certificate_id': '{{ openshift_aws_elb_cert_arn }}', 'protocol': 'ssl'}]}}
-|
-
-
-Dependencies
-------------
+Appropriate AWS credentials and permissions are required.
+
+
 
 Example Playbook
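Note: the README's "Example Playbook" section is truncated in this diff; a minimal sketch of the include_role/tasks_from pattern it describes (cluster id and chosen task file are assumed values, not from the patch):

    # Sketch only; mirrors the task-areas listed above.
    - hosts: localhost
      connection: local
      tasks:
      - name: provision a vpc
        include_role:
          name: openshift_aws
          tasks_from: vpc.yml
        vars:
          openshift_aws_clusterid: testcluster
          openshift_aws_region: us-east-1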
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 215ff4b72..ba1d8f29d 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1907,7 +1907,6 @@ class OpenShiftFacts(object):
                         portal_net='172.30.0.0/16',
                         client_binary='oc', admin_binary='oadm',
                         dns_domain='cluster.local',
-                        debug_level=2,
                         config_base='/etc/origin')
 
         if 'master' in roles:
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 73e935d3f..7e62a8c6d 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -1,4 +1,9 @@
 ---
+# openshift_master_defaults_in_use is a workaround to detect if we are consuming
+# the plays from the role or outside of the role.
+openshift_master_defaults_in_use: True
+openshift_master_debug_level: "{{ debug_level | default(2) }}"
+
 r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
 r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
@@ -26,6 +31,9 @@ oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
 oreg_auth_credentials_replace: False
 l_bind_docker_reg_auth: False
 
+containerized_svc_dir: "/usr/lib/systemd/system"
+ha_svc_template_path: "native-cluster"
+
 # NOTE
 # r_openshift_master_*_default may be defined external to this role.
 # openshift_use_*, if defined, may affect other roles or play behavior.
diff --git a/roles/openshift_master/tasks/check_master_api_is_ready.yml b/roles/openshift_master/tasks/check_master_api_is_ready.yml
new file mode 100644
index 000000000..7e8a7a596
--- /dev/null
+++ b/roles/openshift_master/tasks/check_master_api_is_ready.yml
@@ -0,0 +1,14 @@
+---
+- name: Wait for API to become available
+  # Using curl here since the uri module requires python-httplib2 and
+  # wait_for port doesn't provide health information.
+  command: >
+    curl --silent --tlsv1.2
+    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+    {{ openshift.master.api_url }}/healthz/ready
+  register: l_api_available_output
+  until: l_api_available_output.stdout == 'ok'
+  retries: 120
+  delay: 1
+  run_once: true
+  changed_when: false
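Note: /healthz/ready returns the bare string "ok" once the API server is up, which is exactly what the until condition matches. The same probe by hand (hostname and port assumed):

    $ curl --silent --tlsv1.2 \
        --cacert /etc/origin/master/ca-bundle.crt \
        https://master.example.com:8443/healthz/ready
    ok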
diff --git a/roles/openshift_master/tasks/configure_external_etcd.yml b/roles/openshift_master/tasks/configure_external_etcd.yml
new file mode 100644
index 000000000..b0590ac84
--- /dev/null
+++ b/roles/openshift_master/tasks/configure_external_etcd.yml
@@ -0,0 +1,17 @@
+---
+- name: Remove etcdConfig section
+  yedit:
+    src: /etc/origin/master/master-config.yaml
+    key: "etcdConfig"
+    state: absent
+- name: Set etcdClientInfo.ca to master.etcd-ca.crt
+  yedit:
+    src: /etc/origin/master/master-config.yaml
+    key: etcdClientInfo.ca
+    value: master.etcd-ca.crt
+- name: Set etcdClientInfo.urls to the external etcd
+  yedit:
+    src: /etc/origin/master/master-config.yaml
+    key: etcdClientInfo.urls
+    value:
+    - "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
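Note: after these yedit edits the relevant slice of master-config.yaml looks roughly like this (etcd address assumed), with the embedded-etcd etcdConfig block removed entirely:

    etcdClientInfo:
      ca: master.etcd-ca.crt
      urls:
      - https://192.0.2.10:2380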
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 82b4b420c..824a5886e 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -311,23 +311,7 @@
 
 # A separate wait is required here for native HA since notifies will
 # be resolved after all tasks in the role.
-- name: Wait for API to become available
-  # Using curl here since the uri module requires python-httplib2 and
-  # wait_for port doesn't provide health information.
-  command: >
-    curl --silent --tlsv1.2
-    {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
-    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
-    {% else %}
-    --cacert {{ openshift.common.config_base }}/master/ca.crt
-    {% endif %}
-    {{ openshift.master.api_url }}/healthz/ready
-  register: l_api_available_output
-  until: l_api_available_output.stdout == 'ok'
-  retries: 120
-  delay: 1
-  run_once: true
-  changed_when: false
+- include: check_master_api_is_ready.yml
   when:
   - openshift.master.cluster_method == 'native'
   - master_api_service_status_changed | bool
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index 2644f235e..63d483760 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -1,14 +1,4 @@
 ---
-# We need to setup some variables as this play might be called directly
-# from outside of the role.
-- set_fact:
-    oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
-  when: oreg_auth_credentials_path is not defined
-
-- set_fact:
-    oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
-  when: oreg_host is not defined
-
 - name: Check for credentials file for registry auth
   stat:
     path: "{{ oreg_auth_credentials_path }}"
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 8de62c59a..fcc66044b 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -1,31 +1,9 @@
 ---
-# This file is included both in the openshift_master role and in the upgrade
-# playbooks. For that reason the ha_svc variables are use set_fact instead of
-# the vars directory on the role.
+# systemd_units.yml is included both in the openshift_master role and in the upgrade
+# playbooks.
 
-# This play may be consumed outside the role, we need to ensure that
-# openshift_master_config_dir is set.
-- name: Set openshift_master_config_dir if unset
-  set_fact:
-    openshift_master_config_dir: '/etc/origin/master'
-  when: openshift_master_config_dir is not defined
-
-# This play may be consumed outside the role, we need to ensure that
-# r_openshift_master_data_dir is set.
-- name: Set r_openshift_master_data_dir if unset
-  set_fact:
-    r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
-  when: r_openshift_master_data_dir is not defined
-
-- include: registry_auth.yml
-
-- name: Remove the legacy master service if it exists
-  include: clean_systemd_units.yml
-
-- name: Init HA Service Info
-  set_fact:
-    containerized_svc_dir: "/usr/lib/systemd/system"
-    ha_svc_template_path: "native-cluster"
+- include: upgrade_facts.yml
+  when: openshift_master_defaults_in_use is not defined
 
 - name: Set HA Service Info for containerized installs
   set_fact:
@@ -34,6 +12,11 @@
   when:
   - openshift.common.is_containerized | bool
 
+- include: registry_auth.yml
+
+- name: Remove the legacy master service if it exists
+  include: clean_systemd_units.yml
+
 # This is the image used for both HA and non-HA clusters:
 - name: Pre-pull master image
   command: >
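Note: the openshift_master_defaults_in_use sentinel is only defined in the role's defaults, so it is present whenever systemd_units.yml runs via the role and absent when an upgrade play includes the task file directly, in which case upgrade_facts.yml (next file below) backfills the needed facts. A hypothetical direct include from an upgrade play (path assumed):

    # Role defaults/main.yml is not loaded this way, so
    # openshift_master_defaults_in_use stays undefined and
    # upgrade_facts.yml runs first.
    - include: roles/openshift_master/tasks/systemd_units.yml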
diff --git a/roles/openshift_master/tasks/upgrade_facts.yml b/roles/openshift_master/tasks/upgrade_facts.yml
new file mode 100644
index 000000000..f6ad438aa
--- /dev/null
+++ b/roles/openshift_master/tasks/upgrade_facts.yml
@@ -0,0 +1,33 @@
+---
+# This file exists because we call systemd_units.yml from outside of the role
+# during upgrades. When we remove this pattern, we can probably
+# eliminate most of these set_fact items.
+
+- name: Set openshift_master_config_dir if unset
+  set_fact:
+    openshift_master_config_dir: '/etc/origin/master'
+  when: openshift_master_config_dir is not defined
+
+- name: Set r_openshift_master_data_dir if unset
+  set_fact:
+    r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+  when: r_openshift_master_data_dir is not defined
+
+- set_fact:
+    oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
+  when: oreg_auth_credentials_path is not defined
+
+- set_fact:
+    oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+  when: oreg_host is not defined
+
+- name: Set openshift_master_debug_level
+  set_fact:
+    openshift_master_debug_level: "{{ debug_level | default(2) }}"
+  when:
+  - openshift_master_debug_level is not defined
+
+- name: Init HA Service Info
+  set_fact:
+    containerized_svc_dir: "{{ containerized_svc_dir | default('/usr/lib/systemd/system') }}"
+    ha_svc_template_path: "{{ ha_svc_template_path | default('native-cluster') }}"
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index b931f1414..7ec26ceb7 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level | default(2) }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }}
 CONFIG_FILE={{ openshift_master_config_file }}
 {# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
 {% if openshift_master_is_scaleup_host %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index 63eb3ea1b..cc21b37af 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
 CONFIG_FILE={{ openshift_master_config_file }}
 {# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
 {% if openshift_master_is_scaleup_host %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 0adfd05b6..493fc510e 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
 CONFIG_FILE={{ openshift_master_config_file }}
 {# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
 {% if openshift_master_is_scaleup_host %}
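Note: the templates now read openshift_master_debug_level, which defaults to the global debug_level (itself defaulting to 2), instead of the openshift.master.debug_level fact. A hypothetical inventory keeping per-component control:

    # Hypothetical group_vars; both knobs are optional.
    debug_level: 4                    # raise loglevel for all components
    openshift_master_debug_level: 2   # ...but keep the masters at the default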
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index a95570d38..501be148e 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -34,7 +34,6 @@
       cluster_method: "{{ openshift_master_cluster_method | default('native') }}"
       cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
       cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
-      debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
       api_port: "{{ openshift_master_api_port | default(None) }}"
       api_url: "{{ openshift_master_api_url | default(None) }}"
       api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 7928a0346..48584bd64 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -54,6 +54,7 @@
     access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
     size: "{{ openshift_metrics_cassandra_pvc_size }}"
     pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
+    storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
   with_sequence: count={{ openshift_metrics_cassandra_replicas }}
   when: openshift_metrics_cassandra_storage_type == 'dynamic'
   changed_when: false
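Note: the new storage-class pass-through only takes effect when openshift_metrics_cassandra_storage_type is 'dynamic'. A hypothetical inventory selecting a class (note the variable's spelling, "cassanda", exactly as the patch introduces it; class name assumed):

    openshift_metrics_cassandra_storage_type: dynamic
    openshift_metrics_cassanda_pvc_storage_class_name: glusterfs-storage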
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 1214c08e5..298d1013f 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,4 +1,6 @@
 ---
+openshift_node_debug_level: "{{ debug_level | default(2) }}"
+
 r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
 r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index b83b2c452..6bd2df362 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -29,7 +29,7 @@
     line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}"
     regexp: "^ExecStart=.*"
 
-- name: "systemctl enable {{ openshift_service_type }}-node"
+- name: "disable {{ openshift_service_type }}-node and {{ openshift_service_type }}-master services"
   systemd:
     name: "{{ item }}"
     enabled: no
diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml
index 1186062eb..527580481 100644
--- a/roles/openshift_node/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-node-settings.yml
@@ -7,7 +7,7 @@
     create: true
   with_items:
   - regex: '^OPTIONS='
-    line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+    line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
   - regex: '^CONFIG_FILE='
     line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
   - regex: '^IMAGE_VERSION='
diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
deleted file mode 100644
index f92ff79b5..000000000
--- a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Install Node docker service file
-  template:
-    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
-    src: openshift.docker.node.service
-  notify:
-  - reload systemd units
-  - restart node
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 265bf2c46..1539d6e3b 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -27,5 +27,3 @@
       docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
     register: pull_result
     changed_when: "'Downloaded newer image' in pull_result.stdout"
-
-- include: config/install-node-docker-service-file.yml
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 6b4490f61..9c182ade6 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,11 +1,9 @@
 ---
-# This file is included both in the openshift_master role and in the upgrade
-# playbooks.
 - name: Install Node service file
   template:
     dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
-    src: "node.service.j2"
-  when: not openshift.common.is_containerized | bool
+    src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
+  when: not openshift.common.is_node_system_container | bool
   notify:
   - reload systemd units
  - restart node
diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml
index 0d5fa664c..b45130400 100644
--- a/roles/openshift_node_facts/tasks/main.yml
+++ b/roles/openshift_node_facts/tasks/main.yml
@@ -11,7 +11,6 @@
     - role: node
       local_facts:
         annotations: "{{ openshift_node_annotations | default(none) }}"
-        debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
         iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
         kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
         labels: "{{ openshift_node_labels | default(None) }}"
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
index c7c0ff34a..73b98ad90 100644
--- a/roles/openshift_node_upgrade/README.md
+++ b/roles/openshift_node_upgrade/README.md
@@ -49,7 +49,6 @@ From openshift.node:
 
 | Name | Default Value | |
 |------------------------------------|---------------------|---------------------|
-| openshift.node.debug_level |---------------------|---------------------|
 | openshift.node.node_image |---------------------|---------------------|
 | openshift.node.ovs_image |---------------------|---------------------|
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
index 6507b015d..10b4c6977 100644
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ b/roles/openshift_node_upgrade/defaults/main.yml
@@ -1,4 +1,6 @@
 ---
+openshift_node_debug_level: "{{ debug_level | default(2) }}"
+
 openshift_use_openshift_sdn: True
 os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
index 1186062eb..527580481 100644
--- a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
@@ -7,7 +7,7 @@
     create: true
   with_items:
  - regex: '^OPTIONS='
-    line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+    line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
   - regex: '^CONFIG_FILE='
     line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
   - regex: '^IMAGE_VERSION='
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
index afff2f8ba..226f5290c 100644
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++
+++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml
@@ -6,7 +6,7 @@
 # - openshift.node.ovs_image
 # - openshift_use_openshift_sdn
 # - openshift.common.service_type
-# - openshift.node.debug_level
+# - openshift_node_debug_level
 # - openshift.common.config_base
 # - openshift.common.http_proxy
 # - openshift.common.portal_net
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
index 9ebb0d5ec..7b705c2d4 100644
--- a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
@@ -85,8 +85,6 @@ objects:
           volumeMounts:
           - name: db
             mountPath: /var/lib/heketi
-          - name: topology
-            mountPath: ${TOPOLOGY_PATH}
           - name: config
             mountPath: /etc/heketi
           readinessProbe:
@@ -103,9 +101,6 @@ objects:
               port: 8080
         volumes:
         - name: db
-        - name: topology
-          secret:
-            secretName: heketi-${CLUSTER_NAME}-topology-secret
         - name: config
           secret:
            secretName: heketi-${CLUSTER_NAME}-config-secret
@@ -138,6 +133,3 @@ parameters:
   displayName: GlusterFS cluster name
   description: A unique name to identify this heketi service, useful for running multiple heketi instances
   value: glusterfs
-- name: TOPOLOGY_PATH
-  displayName: heketi topology file location
-  required: True