Diffstat (limited to 'roles')
19 files changed, 238 insertions, 219 deletions
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index e14d57702..178e0849c 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -42,60 +42,77 @@ openshift_aws_ami_tags: openshift_aws_s3_mode: create openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry" -openshift_aws_elb_health_check: - ping_protocol: tcp - ping_port: 443 - response_timeout: 5 - interval: 30 - unhealthy_threshold: 2 - healthy_threshold: 2 - openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}" -openshift_aws_elb_name_dict: - master: - external: "{{ openshift_aws_elb_basename }}-master-external" - internal: "{{ openshift_aws_elb_basename }}-master-internal" - infra: - external: "{{ openshift_aws_elb_basename }}-infra" - -openshift_aws_elb_idle_timout: 400 - -openshift_aws_elb_cert_arn: '' openshift_aws_elb_dict: master: external: - - protocol: tcp - load_balancer_port: 80 - instance_protocol: ssl - instance_port: 443 - - protocol: ssl - load_balancer_port: 443 - instance_protocol: ssl - instance_port: 443 - # ssl certificate required for https or ssl - ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}" + cross_az_load_balancing: False + health_check: + ping_protocol: tcp + ping_port: "{{ openshift_master_api_port | default(8443) }}" + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 2 + idle_timout: 400 + listeners: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: ssl + instance_port: "{{ openshift_master_api_port | default(8443) }}" + - protocol: ssl + load_balancer_port: "{{ openshift_master_api_port | default(8443) }}" + instance_protocol: ssl + instance_port: "{{ openshift_master_api_port | default(8443) }}" + ssl_certificate_id: '' + name: "{{ openshift_aws_elb_basename }}-master-external" + tags: "{{ openshift_aws_kube_tags }}" internal: - - protocol: tcp - load_balancer_port: 80 - instance_protocol: tcp - instance_port: 80 - - protocol: tcp - load_balancer_port: 443 - instance_protocol: tcp - instance_port: 443 + cross_az_load_balancing: False + health_check: + ping_protocol: tcp + ping_port: "{{ openshift_master_api_port | default(8443) }}" + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 2 + idle_timout: 400 + listeners: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: tcp + instance_port: 80 + - protocol: tcp + load_balancer_port: "{{ openshift_master_api_port | default(8443) }}" + instance_protocol: tcp + instance_port: "{{ openshift_master_api_port | default(8443) }}" + name: "{{ openshift_aws_elb_basename }}-master-internal" + tags: "{{ openshift_aws_kube_tags }}" infra: external: - - protocol: tcp - load_balancer_port: 80 - instance_protocol: tcp - instance_port: 443 - proxy_protocol: True - - protocol: tcp - load_balancer_port: 443 - instance_protocol: tcp - instance_port: 443 - proxy_protocol: True + cross_az_load_balancing: False + health_check: + ping_protocol: tcp + ping_port: 443 + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 2 + idle_timout: 400 + listeners: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: tcp + instance_port: 443 + proxy_protocol: True + - protocol: tcp + load_balancer_port: 443 + instance_protocol: tcp + instance_port: 443 + proxy_protocol: True + name: "{{ openshift_aws_elb_basename }}-infra" + tags: "{{ openshift_aws_kube_tags }}" openshift_aws_node_group_config_master_volumes: - device_name: /dev/sda1 @@ -172,7 +189,7 
@@ openshift_aws_master_group_config: iam_role: "{{ openshift_aws_iam_role_name }}" policy_name: "{{ openshift_aws_iam_role_policy_name }}" policy_json: "{{ openshift_aws_iam_role_policy_json }}" - elbs: "{{ openshift_aws_elb_name_dict['master'].keys()| map('extract', openshift_aws_elb_name_dict['master']) | list }}" + elbs: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}" openshift_aws_node_group_config: # The 'compute' key is always required here. @@ -205,10 +222,7 @@ openshift_aws_node_group_config: iam_role: "{{ openshift_aws_iam_role_name }}" policy_name: "{{ openshift_aws_iam_role_policy_name }}" policy_json: "{{ openshift_aws_iam_role_policy_json }}" - elbs: "{{ openshift_aws_elb_name_dict['infra'].keys()| map('extract', openshift_aws_elb_name_dict['infra']) | list }}" - -openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}" -openshift_aws_elb_az_load_balancing: False + elbs: "{{ openshift_aws_elb_dict | json_query('infra.[*][0][*].name') }}" # build_instance_tags is a custom filter in role lib_utils openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" @@ -253,8 +267,8 @@ openshift_aws_node_security_groups: to_port: 80 cidr_ip: 0.0.0.0/0 - proto: tcp - from_port: 443 - to_port: 443 + from_port: "{{ openshift_master_api_port | default(8443) }}" + to_port: "{{ openshift_master_api_port | default(8443) }}" cidr_ip: 0.0.0.0/0 compute: name: "{{ openshift_aws_clusterid }}_compute" @@ -268,8 +282,8 @@ openshift_aws_node_security_groups: to_port: 80 cidr_ip: 0.0.0.0/0 - proto: tcp - from_port: 443 - to_port: 443 + from_port: "{{ openshift_master_api_port | default(8443) }}" + to_port: "{{ openshift_master_api_port | default(8443) }}" cidr_ip: 0.0.0.0/0 - proto: tcp from_port: 30000 diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml index 6f0028a3d..d8257cf31 100644 --- a/roles/openshift_aws/tasks/elb.yml +++ b/roles/openshift_aws/tasks/elb.yml @@ -5,18 +5,18 @@ - name: "Create ELB {{ l_elb_dict_item.key }}" ec2_elb_lb: - name: "{{ l_openshift_aws_elb_name_dict[l_elb_dict_item.key][item.key] }}" + name: "{{ item.value.name }}" state: present - cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}" + cross_az_load_balancing: "{{ item.value.cross_az_load_balancing }}" security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}" - idle_timeout: "{{ openshift_aws_elb_idle_timout }}" + idle_timeout: "{{ item.value.idle_timout }}" region: "{{ openshift_aws_region }}" subnets: - "{{ subnetout.subnets[0].id }}" - health_check: "{{ openshift_aws_elb_health_check }}" - listeners: "{{ item.value }}" + health_check: "{{ item.value.health_check }}" + listeners: "{{ item.value.listeners }}" scheme: "{{ (item.key == 'internal') | ternary('internal','internet-facing') }}" - tags: "{{ openshift_aws_elb_tags }}" + tags: "{{ item.value.tags }}" wait: True register: new_elb with_dict: "{{ l_elb_dict_item.value }}" diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml index 530b0134d..c2e362acd 100644 --- a/roles/openshift_aws/tasks/master_facts.yml +++ b/roles/openshift_aws/tasks/master_facts.yml @@ -3,7 +3,7 @@ ec2_elb_facts: region: "{{ openshift_aws_region }}" names: - - "{{ openshift_aws_elb_name_dict['master']['internal'] }}" + - "{{ openshift_aws_elb_dict['master']['internal']['name'] }}" delegate_to: localhost register: elbs diff --git a/roles/openshift_aws/tasks/provision_elb.yml b/roles/openshift_aws/tasks/provision_elb.yml index a52f63bd5..fcc49c3ea 
100644 --- a/roles/openshift_aws/tasks/provision_elb.yml +++ b/roles/openshift_aws/tasks/provision_elb.yml @@ -10,6 +10,5 @@ with_dict: "{{ openshift_aws_elb_dict }}" vars: l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}" - l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}" loop_control: loop_var: l_elb_dict_item diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index d6d31effd..452cc4ef6 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -15,8 +15,10 @@ import os import yaml import struct import socket +import ipaddress from distutils.util import strtobool from distutils.version import LooseVersion +from ansible.module_utils.six import u from ansible.module_utils.six import string_types from ansible.module_utils.six.moves import configparser @@ -1146,6 +1148,8 @@ def set_proxy_facts(facts): if 'no_proxy_internal_hostnames' in common: common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(',')) # We always add local dns domain and ourselves no matter what + kube_svc_ip = str(ipaddress.ip_network(u(common['portal_net']))[1]) + common['no_proxy'].append(kube_svc_ip) common['no_proxy'].append('.' + common['dns_domain']) common['no_proxy'].append('.svc') common['no_proxy'].append(common['hostname']) diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml index 22294e3d4..bc4d81eb7 100644 --- a/roles/openshift_hosted/tasks/registry.yml +++ b/roles/openshift_hosted/tasks/registry.yml @@ -43,7 +43,7 @@ - name: Update registry environment variables when pushing via dns set_fact: - openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}" + openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'REGISTRY_OPENSHIFT_SERVER_ADDR':'docker-registry.default.svc:5000'}) }}" when: openshift_push_via_dns | bool - name: Update registry proxy settings for dc/docker-registry diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml index b5e234b7f..57bc97e3e 100644 --- a/roles/openshift_management/defaults/main.yml +++ b/roles/openshift_management/defaults/main.yml @@ -15,6 +15,8 @@ openshift_management_pod_rollout_retries: 30 # # Choose 'miq-template' for a podified database install # Choose 'miq-template-ext-db' for an external database install +# TODO: Swap this var declaration once CFME is fully supported +#openshift_management_app_template: "{{ 'cfme-template' if openshift_deployment_type == 'openshift-enterprise' else 'miq-template' }}" openshift_management_app_template: miq-template # If you are using the miq-template-ext-db template then you must add # the required database parameters to the diff --git a/roles/openshift_management/tasks/accounts.yml b/roles/openshift_management/tasks/accounts.yml index e45ea8d43..80318fec0 100644 --- a/roles/openshift_management/tasks/accounts.yml +++ b/roles/openshift_management/tasks/accounts.yml @@ -5,14 +5,14 @@ oc_serviceaccount: namespace: "{{ openshift_management_project }}" state: present - name: "{{ openshift_management_flavor_short }}{{ item.name }}" + name: "{{ __openshift_management_flavor_short }}{{ item.name }}" with_items: - "{{ __openshift_system_account_sccs }}" - name: Ensure the CFME system accounts have all the required SCCs oc_adm_policy_user: namespace: "{{ 
openshift_management_project }}" - user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" + user: "system:serviceaccount:{{ openshift_management_project }}:{{ __openshift_management_flavor_short }}{{ item.name }}" resource_kind: scc resource_name: "{{ item.resource_name }}" with_items: @@ -21,7 +21,7 @@ - name: Ensure the CFME system accounts have the required roles oc_adm_policy_user: namespace: "{{ openshift_management_project }}" - user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" + user: "system:serviceaccount:{{ openshift_management_project }}:{{ __openshift_management_flavor_short }}{{ item.name }}" resource_kind: role resource_name: "{{ item.resource_name }}" with_items: diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml index c4b204b98..5209eba56 100644 --- a/roles/openshift_management/tasks/main.yml +++ b/roles/openshift_management/tasks/main.yml @@ -71,15 +71,15 @@ # CREATE APP - name: Note the correct ext-db template name set_fact: - openshift_management_template_name: "{{ openshift_management_flavor }}-ext-db" + openshift_management_template_name: "{{ __openshift_management_flavor }}-ext-db" when: - - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] + - __openshift_management_use_ext_db - name: Note the correct podified db template name set_fact: - openshift_management_template_name: "{{ openshift_management_flavor }}" + openshift_management_template_name: "{{ __openshift_management_flavor }}" when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db - name: Ensure the Management App is created oc_process: @@ -89,7 +89,7 @@ params: "{{ openshift_management_template_parameters }}" - name: Wait for the app to come up. 
May take several minutes, 30s check intervals, {{ openshift_management_pod_rollout_retries }} retries - command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}" + command: "oc logs {{ __openshift_management_flavor }}-0 -n {{ openshift_management_project }}" register: app_seeding_logs until: app_seeding_logs.stdout.find('Server starting complete') != -1 delay: 30 diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml index d1b9a8d5c..1f8cac6c6 100644 --- a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml +++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml @@ -12,7 +12,7 @@ when: - openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is not defined -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db block: - name: Note the DB PV Size from Template Parameters set_fact: @@ -31,7 +31,7 @@ namespace: "{{ openshift_management_project }}" state: list kind: pv - name: "{{ openshift_management_flavor_short }}-app" + name: "{{ __openshift_management_flavor_short }}-app" register: miq_app_pv_check - name: Check if the Management DB PV has been created @@ -39,15 +39,15 @@ namespace: "{{ openshift_management_project }}" state: list kind: pv - name: "{{ openshift_management_flavor_short }}-db" + name: "{{ __openshift_management_flavor_short }}-db" register: miq_db_pv_check when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db - name: Ensure the Management App PV is created oc_process: namespace: "{{ openshift_management_project }}" - template_name: "{{ openshift_management_flavor }}-app-pv" + template_name: "{{ __openshift_management_flavor }}-app-pv" create: True params: PV_SIZE: "{{ openshift_management_app_pv_size }}" @@ -58,12 +58,12 @@ - name: Ensure the Management DB PV is created oc_process: namespace: "{{ openshift_management_project }}" - template_name: "{{ openshift_management_flavor }}-db-pv" + template_name: "{{ __openshift_management_flavor }}-db-pv" create: True params: PV_SIZE: "{{ openshift_management_db_pv_size }}" BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}" NFS_HOST: "{{ openshift_management_nfs_server }}" when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db - miq_db_pv_check.results.results == [{}] diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml index 9e3a4d43a..4a00efb1d 100644 --- a/roles/openshift_management/tasks/storage/nfs.yml +++ b/roles/openshift_management/tasks/storage/nfs.yml @@ -17,8 +17,8 @@ tasks_from: create_export vars: l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" - l_nfs_export_config: "{{ openshift_management_flavor_short }}" - l_nfs_export_name: "{{ openshift_management_flavor_short }}-app" + l_nfs_export_config: "{{ __openshift_management_flavor_short }}" + l_nfs_export_name: "{{ __openshift_management_flavor_short }}-app" l_nfs_options: "*(rw,no_root_squash,no_wdelay)" - name: Create the DB export @@ -27,10 +27,10 @@ tasks_from: create_export vars: l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" - l_nfs_export_config: "{{ openshift_management_flavor_short }}" - l_nfs_export_name: "{{ openshift_management_flavor_short }}-db" + l_nfs_export_config: "{{ 
__openshift_management_flavor_short }}" + l_nfs_export_name: "{{ __openshift_management_flavor_short }}-db" l_nfs_options: "*(rw,no_root_squash,no_wdelay)" when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db delegate_to: "{{ openshift_management_nfs_server }}" diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml index 9f97cdcb9..f40af7349 100644 --- a/roles/openshift_management/tasks/template.yml +++ b/roles/openshift_management/tasks/template.yml @@ -13,59 +13,59 @@ ###################################################################### # STANDARD PODIFIED DATABASE TEMPLATE -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db block: - name: Check if the Management Server template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}" + name: "{{ __openshift_management_flavor }}" register: miq_server_check - when: miq_server_check.results.results == [{}] block: - name: Copy over Management Server template copy: - src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-template.yaml" dest: "{{ template_dir }}/" - name: Ensure Management Server Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}" + name: "{{ __openshift_management_flavor }}" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-template.yaml" ###################################################################### # EXTERNAL DATABASE TEMPLATE -- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] +- when: __openshift_management_use_ext_db block: - name: Check if the Management Ext-DB Server template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}-ext-db" + name: "{{ __openshift_management_flavor }}-ext-db" register: miq_ext_db_server_check - when: miq_ext_db_server_check.results.results == [{}] block: - name: Copy over Management Ext-DB Server template copy: - src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{__openshift_management_flavor_short}}-template-ext-db.yaml" dest: "{{ template_dir }}/" - name: Ensure Management Ext-DB Server Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}-ext-db" + name: "{{ __openshift_management_flavor }}-ext-db" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template-ext-db.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-template-ext-db.yaml" # End app template creation. 
###################################################################### @@ -79,50 +79,50 @@ namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}-app-pv" + name: "{{ __openshift_management_flavor }}-app-pv" register: miq_app_pv_check - when: miq_app_pv_check.results.results == [{}] block: - name: Copy over Management App PV template copy: - src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-pv-server-example.yaml" dest: "{{ template_dir }}/" - name: Ensure Management App PV Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}-app-pv" + name: "{{ __openshift_management_flavor }}-app-pv" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-pv-server-example.yaml" #--------------------------------------------------------------------- # Required for database if the installation is fully podified -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db block: - name: Check if the Management DB PV template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}-db-pv" + name: "{{ __openshift_management_flavor }}-db-pv" register: miq_db_pv_check - when: miq_db_pv_check.results.results == [{}] block: - name: Copy over Management DB PV template copy: - src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-pv-db-example.yaml" dest: "{{ template_dir }}/" - name: Ensure Management DB PV Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}-db-pv" + name: "{{ __openshift_management_flavor }}-db-pv" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-pv-db-example.yaml" diff --git a/roles/openshift_management/tasks/validate.yml b/roles/openshift_management/tasks/validate.yml index b22f36a4f..2dc895190 100644 --- a/roles/openshift_management/tasks/validate.yml +++ b/roles/openshift_management/tasks/validate.yml @@ -100,4 +100,4 @@ 'openshift_management_template_parameters'" with_items: "{{ __openshift_management_required_db_conn_params }}" when: - - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] + - __openshift_management_use_ext_db diff --git a/roles/openshift_management/vars/main.yml b/roles/openshift_management/vars/main.yml index da3ad0af7..d7b18df3a 100644 --- a/roles/openshift_management/vars/main.yml +++ b/roles/openshift_management/vars/main.yml @@ -30,14 +30,18 @@ __openshift_management_db_parameters: - DATABASE_PORT - DATABASE_NAME -# # Commented out until we can support both CFME and MIQ -# # openshift_management_flavor: "{{ 'cloudforms' if openshift_deployment_type == 'openshift-enterprise' else 'manageiq' }}" -#openshift_management_flavor: cloudforms -openshift_management_flavor: manageiq -# TODO: Make this conditional as well 
based on the prior variable -# # openshift_management_flavor_short: "{{ 'cfme' if openshift_deployment_type == 'openshift-enterprise' else 'miq' }}" -# openshift_management_flavor_short: cfme -openshift_management_flavor_short: miq +__openshift_management_flavors: + miq: + short: miq + long: manageiq + cfme: + short: cfme + long: cloudforms + +__openshift_management_flavor: "{{ __openshift_management_flavors[openshift_management_app_template.split('-')[0]]['long'] }}" +__openshift_management_flavor_short: "{{ __openshift_management_flavors[openshift_management_app_template.split('-')[0]]['short'] }}" + +__openshift_management_use_ext_db: "{{ true if 'ext-db' in openshift_management_app_template else false }}" ###################################################################### # ACCOUNTING diff --git a/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml b/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml new file mode 100644 index 000000000..28abcbcfc --- /dev/null +++ b/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml @@ -0,0 +1,86 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: system:service-catalog:aggregate-to-admin +rules: +- apiGroups: + - "servicecatalog.k8s.io" + attributeRestrictions: null + resources: + - serviceinstances + - servicebindings + verbs: + - create + - update + - delete + - get + - list + - watch + - patch +- apiGroups: + - "settings.k8s.io" + attributeRestrictions: null + resources: + - podpresets + verbs: + - create + - update + - delete + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: system:service-catalog:aggregate-to-edit +rules: +- apiGroups: + - "servicecatalog.k8s.io" + attributeRestrictions: null + resources: + - serviceinstances + - servicebindings + verbs: + - create + - update + - delete + - get + - list + - watch + - patch +- apiGroups: + - "settings.k8s.io" + attributeRestrictions: null + resources: + - podpresets + verbs: + - create + - update + - delete + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:service-catalog:aggregate-to-view +rules: +- apiGroups: + - "servicecatalog.k8s.io" + attributeRestrictions: null + resources: + - serviceinstances + - servicebindings + verbs: + - get + - list + - watch diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index 9b38a85c4..4d06c1872 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -74,74 +74,17 @@ template_name: kube-system-service-catalog-role-bindings namespace: kube-system -- oc_obj: - name: edit - kind: clusterrole - state: list - register: edit_yaml - -# only do this if we don't already have the updated role info -- name: Generate apply template for clusterrole/edit - template: - src: sc_admin_edit_role_patching.j2 - dest: "{{ mktemp.stdout }}/edit_sc_patch.yml" - vars: - original_content: "{{ edit_yaml.results.results[0] | to_yaml }}" - when: - - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 
'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -# only do this if we don't already have the updated role info -- name: update edit role for service catalog and pod preset access - command: > - {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml - when: - - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -- oc_obj: - name: admin - kind: clusterrole - state: list - register: admin_yaml - -# only do this if we don't already have the updated role info -- name: Generate apply template for clusterrole/admin - template: - src: sc_admin_edit_role_patching.j2 - dest: "{{ mktemp.stdout }}/admin_sc_patch.yml" - vars: - original_content: "{{ admin_yaml.results.results[0] | to_yaml }}" - when: - - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -# only do this if we don't already have the updated role info -- name: update admin role for service catalog and pod preset access - command: > - {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml - when: - - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -- oc_obj: - name: view - kind: clusterrole - state: list - register: view_yaml - -# only do this if we don't already have the updated role info -- name: Generate apply template for clusterrole/view - template: - src: sc_view_role_patching.j2 - dest: "{{ mktemp.stdout }}/view_sc_patch.yml" - vars: - original_content: "{{ view_yaml.results.results[0] | to_yaml }}" - when: - - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) - -# only do this if we don't already have the updated role info -- name: update view role for service catalog access - command: > - {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml - when: - - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) +- copy: + src: openshift_catalog_clusterroles.yml + dest: "{{ mktemp.stdout }}/openshift_catalog_clusterroles.yml" + +- name: Apply Service Catalog cluster roles + retries: 5 + delay: 2 + register: task_result + until: task_result.rc == 0 + shell: > + {{ openshift_client_binary }} auth reconcile --config={{ openshift.common.config_base }}/master/admin.kubeconfig -f {{ 
mktemp.stdout}}/openshift_catalog_clusterroles.yml - oc_adm_policy_user: namespace: kube-service-catalog diff --git a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 deleted file mode 100644 index 59cceafcf..000000000 --- a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 +++ /dev/null @@ -1,27 +0,0 @@ -{{ original_content }} -- apiGroups: - - "servicecatalog.k8s.io" - attributeRestrictions: null - resources: - - serviceinstances - - servicebindings - verbs: - - create - - update - - delete - - get - - list - - watch - - patch -- apiGroups: - - "settings.k8s.io" - attributeRestrictions: null - resources: - - podpresets - verbs: - - create - - update - - delete - - get - - list - - watch diff --git a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 deleted file mode 100644 index 838993854..000000000 --- a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 +++ /dev/null @@ -1,11 +0,0 @@ -{{ original_content }} -- apiGroups: - - "servicecatalog.k8s.io" - attributeRestrictions: null - resources: - - serviceinstances - - servicebindings - verbs: - - get - - list - - watch diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml index ff33338a6..f79a05c94 100644 --- a/roles/openshift_web_console/tasks/install.yml +++ b/roles/openshift_web_console/tasks/install.yml @@ -71,6 +71,9 @@ - set_fact: config_to_migrate: "{{ master_config_output.content | b64decode | from_yaml }}" + - set_fact: + cro_plugin_enabled: "{{ config_to_migrate.admissionConfig is defined and config_to_migrate.admissionConfig.pluginConfig is defined and config_to_migrate.admissionConfig.pluginConfig.ClusterResourceOverrides is defined }}" + # Update properties in the config template based on inventory vars when the # asset config does not exist. - name: Set web console config properties from inventory variables @@ -87,7 +90,7 @@ - key: features#inactivityTimeoutMinutes value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}" - key: features#clusterResourceOverridesEnabled - value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(false) }}" + value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}" - key: extensions#scriptURLs value: "{{ openshift_web_console_extension_script_urls | default([]) }}" - key: extensions#stylesheetURLs @@ -116,6 +119,8 @@ value: "{{ config_to_migrate.assetConfig.servingInfo.maxRequestsInFlight | default(0) }}" - key: servingInfo#requestTimeoutSeconds value: "{{ config_to_migrate.assetConfig.servingInfo.requestTimeoutSeconds | default(0) }}" + - key: features#clusterResourceOverridesEnabled + value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}" separator: '#' state: present when: config_to_migrate.assetConfig is defined |
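The openshift_aws change above folds the separate ELB name, health-check, idle-timeout and tag variables into a single openshift_aws_elb_dict, and the master and infra group configs now pull their ELB names out of that dict with json_query. Below is a minimal sketch (not part of the commit) of what that JMESPath expression does, using the jmespath library that backs Ansible's json_query filter; the "mycluster" names are placeholders for openshift_aws_elb_basename.

    # Sketch: how the new `elbs:` expressions extract ELB names from openshift_aws_elb_dict.
    import jmespath

    openshift_aws_elb_dict = {
        "master": {
            "external": {"name": "mycluster-master-external"},
            "internal": {"name": "mycluster-master-internal"},
        },
        "infra": {
            "external": {"name": "mycluster-infra"},
        },
    }

    # Same expressions the role defaults now use for the master and infra groups.
    print(jmespath.search("master.[*][0][*].name", openshift_aws_elb_dict))
    # ['mycluster-master-external', 'mycluster-master-internal']  (order follows dict insertion order)
    print(jmespath.search("infra.[*][0][*].name", openshift_aws_elb_dict))
    # ['mycluster-infra']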
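The openshift_facts.py hunk adds the cluster's kubernetes service VIP to no_proxy by taking the first host address of portal_net (the module wraps the value with six's u() for Python 2 compatibility). A quick sketch of the same calculation, assuming the common default portal_net of 172.30.0.0/16; substitute your own network.

    # Sketch: first host address of portal_net becomes the kubernetes service IP in no_proxy.
    import ipaddress

    portal_net = u'172.30.0.0/16'
    kube_svc_ip = str(ipaddress.ip_network(portal_net)[1])
    print(kube_svc_ip)  # 172.30.0.1

    no_proxy = ['.cluster.local', '.svc']
    no_proxy.append(kube_svc_ip)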
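The openshift_management vars change derives the flavor facts and the external-DB switch from openshift_management_app_template instead of hard-coding them. A plain-Python sketch of the same derivation as the new Jinja2 expressions:

    # Sketch: deriving __openshift_management_flavor, __openshift_management_flavor_short
    # and __openshift_management_use_ext_db from the app template name.
    flavors = {
        'miq':  {'short': 'miq',  'long': 'manageiq'},
        'cfme': {'short': 'cfme', 'long': 'cloudforms'},
    }

    def management_facts(app_template):
        key = app_template.split('-')[0]          # 'miq-template-ext-db' -> 'miq'
        return {
            'flavor': flavors[key]['long'],
            'flavor_short': flavors[key]['short'],
            'use_ext_db': 'ext-db' in app_template,
        }

    print(management_facts('miq-template'))          # manageiq / miq / ext_db False
    print(management_facts('cfme-template-ext-db'))  # cloudforms / cfme / ext_db True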
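The openshift_web_console hunk defaults features#clusterResourceOverridesEnabled from the migrated master config instead of a flat false: it is only enabled when an admissionConfig.pluginConfig.ClusterResourceOverrides entry exists in the old config. A small sketch of that presence check; the example config is illustrative only.

    # Sketch: the cro_plugin_enabled fact mirrors this "is defined" chain on the parsed master config.
    import yaml

    example_master_config = """
    admissionConfig:
      pluginConfig:
        ClusterResourceOverrides:
          location: /etc/origin/master/cro.yaml
    """

    def cro_plugin_enabled(cfg):
        return ('admissionConfig' in cfg
                and 'pluginConfig' in (cfg['admissionConfig'] or {})
                and 'ClusterResourceOverrides' in (cfg['admissionConfig']['pluginConfig'] or {}))

    print(cro_plugin_enabled(yaml.safe_load(example_master_config)))  # True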