From 3671e35eef5002d5ceb3e01962085ad7c61a0f5a Mon Sep 17 00:00:00 2001 From: Tristan Stevens Date: Mon, 23 Dec 2019 17:53:39 +0000 Subject: [PATCH 01/15] TLS, KMS, KTS configuration --- .gitignore | 3 +- README.md | 1 + action_plugins/scm_hosts.py | 21 +- cm_roles_test.yml | 8 + group_vars/all | 12 +- group_vars/cdh_servers.yml | 55 ++- group_vars/db_server.yml | 26 +- group_vars/db_server_mysql.yml | 75 ++++ group_vars/db_server_oracle.yml | 75 ++++ group_vars/encryption_zones.yml | 68 ++++ group_vars/kms_servers.yml | 5 + group_vars/krb5_server.yml | 6 +- group_vars/kts_servers.yml | 12 + group_vars/ldap_enc.yml | 32 ++ group_vars/scm_server.yml | 53 ++- group_vars/scm_server_enc.yml | 4 + group_vars/tls_enc.yml | 12 + hosts | 44 ++- inventory | 1 + roles/ca_server/tasks/main.yml | 169 +++++++++ .../templates/intermediate.openssl.cnf.j2 | 137 +++++++ roles/ca_server/templates/root.openssl.cnf.j2 | 132 +++++++ roles/ca_server_signing/tasks/main.yml | 42 +++ roles/cdh/files/hue-banner.txt | 1 + roles/cdh/tasks/main.yml | 113 +++++- roles/cdh/templates/cdsw.j2 | 67 ++++ roles/cdh/templates/hbase.j2 | 162 ++++++++ roles/cdh/templates/hdfs.j2 | 172 ++++++++- roles/cdh/templates/hive.j2 | 116 ++++-- roles/cdh/templates/host.j2 | 75 +++- roles/cdh/templates/hue.j2 | 172 +++++++-- roles/cdh/templates/impala.j2 | 214 ++++++++--- roles/cdh/templates/instantiator.j2 | 2 +- roles/cdh/templates/kafka.j2 | 46 +++ roles/cdh/templates/ks_indexer.j2 | 33 ++ roles/cdh/templates/oozie.j2 | 36 +- roles/cdh/templates/sentry.j2 | 26 +- roles/cdh/templates/solr.j2 | 98 +++++ roles/cdh/templates/spark.j2 | 43 ++- roles/cdh/templates/spark2.j2 | 58 +++ roles/cdh/templates/yarn.j2 | 102 ++++- roles/cdh/templates/zookeeper.j2 | 32 +- roles/cdh_teardown/tasks/main.yml | 92 +++++ roles/certs/tasks/main.yml | 303 +++++++++++++++ roles/certs_signed_install/tasks/main.yml | 61 +++ roles/cm_agents/tasks/36322.yml | 76 ++-- roles/cm_agents/tasks/main.yml | 21 +- roles/cm_agents/templates/config.ini.j2 | 229 ++++++++++++ roles/cm_agents_teardown/tasks/main.yml | 134 +++++++ roles/cm_agents_tls/tasks/36322.yml | 66 ++++ roles/cm_agents_tls/tasks/main.yml | 30 ++ roles/cm_agents_tls/templates/config.ini.j2 | 237 ++++++++++++ roles/cm_api/tasks/main.yml | 10 + roles/cm_repo/tasks/main.yml | 6 +- roles/cm_roles/tasks/main.yml | 186 ++++++++++ .../templates/externalUserMappings.j2 | 27 ++ .../templates/externalUserMappingsClu.j2 | 10 + .../templates/externalUserMappingsFull.j2 | 10 + .../templates/externalUserMappingsKey.j2 | 10 + .../templates/externalUserMappingsOp.j2 | 10 + .../templates/externalUserMappingsRO.j2 | 10 + .../templates/externalUserMappingsUser.j2 | 10 + roles/cm_server_teardown/tasks/main.yml | 48 +++ roles/db_connector/tasks/main.yml | 24 ++ roles/db_teardown/tasks/main.yml | 30 ++ roles/db_teardown_mysql_cdh/tasks/main.yml | 30 ++ roles/db_teardown_mysql_cm/tasks/main.yml | 6 + roles/db_teardown_oracle_cdh/files/drop.sql | 22 ++ roles/db_teardown_oracle_cdh/tasks/main.yml | 36 ++ roles/db_teardown_oracle_cm/files/drop.sql | 22 ++ roles/db_teardown_oracle_cm/tasks/main.yml | 15 + roles/dir_teardown/tasks/main.yml | 44 +++ roles/dn_dir_creation/tasks/main.yml | 37 ++ roles/dn_dir_teardown/tasks/main.yml | 4 + roles/haproxy/tasks/main.yml | 33 ++ roles/haproxy/templates/haproxy.j2 | 138 +++++++ roles/haproxy_teardown/tasks/main.yml | 21 ++ roles/java/tasks/main.yml | 84 +++-- roles/kafka_dir_teardown/tasks/main.yml | 6 + roles/kms_dir_teardown/tasks/main.yml | 4 + .../files/kms-default.json | 6 + 
roles/kms_encryption_zones/tasks/main.yml | 143 +++++++ .../templates/createZones.j2 | 32 ++ .../templates/kms-default.j2 | 6 + .../kms_encryption_zones/templates/kmsACLs.j2 | 7 + roles/kms_key_sync/tasks/main.yml | 126 +++++++ roles/kts/tasks/main.yml | 165 ++++++++ roles/kts/templates/kts.j2 | 115 ++++++ roles/kts_dir_teardown/tasks/main.yml | 4 + roles/kts_key_sync/tasks/main.yml | 351 ++++++++++++++++++ roles/kts_key_sync/templates/hdfs.j2 | 7 + roles/kts_key_sync/templates/kms.j2 | 33 ++ roles/kts_key_sync/templates/kmsRCG.j2 | 46 +++ roles/kts_key_sync/templates/kmshosts.j2 | 11 + roles/mariadb/tasks/databases.yml | 8 +- roles/mariadb/tasks/main.yml | 10 + .../tasks/mysql_secure_installation.yml | 6 +- roles/mariadb/templates/my.cnf.j2 | 6 +- roles/mn_dir_teardown/tasks/main.yml | 32 ++ roles/nn_dir_creation/tasks/main.yml | 52 +++ roles/pre_reqs/tasks/main.yml | 196 ++++++++++ roles/pre_reqs/templates/krb5.conf.j2 | 24 ++ roles/scm/tasks/api.yml | 2 +- roles/scm/tasks/cms.yml | 63 +++- roles/scm/tasks/license.yml | 15 +- roles/scm/tasks/main.yml | 49 ++- roles/scm/tasks/scm.yml | 40 +- roles/scm/templates/cms_base.j2 | 227 ++++++++++- roles/scm/templates/cms_service.j2 | 13 + roles/scm/templates/scm.j2 | 106 +++++- roles/yum_teardown_cm_agent/tasks/main.yml | 8 + roles/yum_teardown_cm_server/tasks/main.yml | 7 + site.yml | 119 +++++- teardown.yml | 92 +++++ teardown_restart.yml | 40 ++ 115 files changed, 6499 insertions(+), 371 deletions(-) create mode 100644 cm_roles_test.yml create mode 100644 group_vars/db_server_mysql.yml create mode 100644 group_vars/db_server_oracle.yml create mode 100644 group_vars/encryption_zones.yml create mode 100644 group_vars/kms_servers.yml create mode 100644 group_vars/kts_servers.yml create mode 100644 group_vars/ldap_enc.yml create mode 100644 group_vars/scm_server_enc.yml create mode 100644 group_vars/tls_enc.yml create mode 100644 inventory create mode 100644 roles/ca_server/tasks/main.yml create mode 100644 roles/ca_server/templates/intermediate.openssl.cnf.j2 create mode 100644 roles/ca_server/templates/root.openssl.cnf.j2 create mode 100644 roles/ca_server_signing/tasks/main.yml create mode 100644 roles/cdh/files/hue-banner.txt create mode 100644 roles/cdh/templates/cdsw.j2 create mode 100644 roles/cdh/templates/hbase.j2 create mode 100644 roles/cdh/templates/kafka.j2 create mode 100644 roles/cdh/templates/ks_indexer.j2 create mode 100644 roles/cdh/templates/solr.j2 create mode 100644 roles/cdh/templates/spark2.j2 create mode 100644 roles/cdh_teardown/tasks/main.yml create mode 100644 roles/certs/tasks/main.yml create mode 100644 roles/certs_signed_install/tasks/main.yml create mode 100644 roles/cm_agents/templates/config.ini.j2 create mode 100644 roles/cm_agents_teardown/tasks/main.yml create mode 100644 roles/cm_agents_tls/tasks/36322.yml create mode 100644 roles/cm_agents_tls/tasks/main.yml create mode 100644 roles/cm_agents_tls/templates/config.ini.j2 create mode 100644 roles/cm_api/tasks/main.yml create mode 100644 roles/cm_roles/tasks/main.yml create mode 100644 roles/cm_roles/templates/externalUserMappings.j2 create mode 100644 roles/cm_roles/templates/externalUserMappingsClu.j2 create mode 100644 roles/cm_roles/templates/externalUserMappingsFull.j2 create mode 100644 roles/cm_roles/templates/externalUserMappingsKey.j2 create mode 100644 roles/cm_roles/templates/externalUserMappingsOp.j2 create mode 100644 roles/cm_roles/templates/externalUserMappingsRO.j2 create mode 100644 roles/cm_roles/templates/externalUserMappingsUser.j2 create 
mode 100644 roles/cm_server_teardown/tasks/main.yml create mode 100644 roles/db_connector/tasks/main.yml create mode 100644 roles/db_teardown/tasks/main.yml create mode 100644 roles/db_teardown_mysql_cdh/tasks/main.yml create mode 100644 roles/db_teardown_mysql_cm/tasks/main.yml create mode 100644 roles/db_teardown_oracle_cdh/files/drop.sql create mode 100644 roles/db_teardown_oracle_cdh/tasks/main.yml create mode 100644 roles/db_teardown_oracle_cm/files/drop.sql create mode 100644 roles/db_teardown_oracle_cm/tasks/main.yml create mode 100644 roles/dir_teardown/tasks/main.yml create mode 100644 roles/dn_dir_creation/tasks/main.yml create mode 100644 roles/dn_dir_teardown/tasks/main.yml create mode 100644 roles/haproxy/tasks/main.yml create mode 100644 roles/haproxy/templates/haproxy.j2 create mode 100644 roles/haproxy_teardown/tasks/main.yml create mode 100644 roles/kafka_dir_teardown/tasks/main.yml create mode 100644 roles/kms_dir_teardown/tasks/main.yml create mode 100644 roles/kms_encryption_zones/files/kms-default.json create mode 100644 roles/kms_encryption_zones/tasks/main.yml create mode 100644 roles/kms_encryption_zones/templates/createZones.j2 create mode 100644 roles/kms_encryption_zones/templates/kms-default.j2 create mode 100644 roles/kms_encryption_zones/templates/kmsACLs.j2 create mode 100644 roles/kms_key_sync/tasks/main.yml create mode 100644 roles/kts/tasks/main.yml create mode 100644 roles/kts/templates/kts.j2 create mode 100644 roles/kts_dir_teardown/tasks/main.yml create mode 100644 roles/kts_key_sync/tasks/main.yml create mode 100644 roles/kts_key_sync/templates/hdfs.j2 create mode 100644 roles/kts_key_sync/templates/kms.j2 create mode 100644 roles/kts_key_sync/templates/kmsRCG.j2 create mode 100644 roles/kts_key_sync/templates/kmshosts.j2 create mode 100644 roles/mn_dir_teardown/tasks/main.yml create mode 100644 roles/nn_dir_creation/tasks/main.yml create mode 100644 roles/pre_reqs/tasks/main.yml create mode 100644 roles/pre_reqs/templates/krb5.conf.j2 create mode 100644 roles/scm/templates/cms_service.j2 create mode 100644 roles/yum_teardown_cm_agent/tasks/main.yml create mode 100644 roles/yum_teardown_cm_server/tasks/main.yml create mode 100644 teardown.yml create mode 100644 teardown_restart.yml diff --git a/.gitignore b/.gitignore index f5a0059..1ff6a72 100644 --- a/.gitignore +++ b/.gitignore @@ -3,5 +3,4 @@ *.iml .idea/ .DS_Store -site.retry -enable_kerberos.retry +*.retry diff --git a/README.md b/README.md index e205587..e9b5f8d 100644 --- a/README.md +++ b/README.md @@ -143,6 +143,7 @@ sudo ln -s /path/to/dynamic_inventory_cm hosts ``` **Set up SSH public key authentication for remote host(s)** + If you do not have ~/.ssh/id_rsa.pub and ~/.ssh/id_rsa files then you need to generate them with the ssh-keygen command before this: ``` ANSIBLE_HOST_KEY_CHECKING=False ansible all -m authorized_key -a key="{{ lookup('file', '~/.ssh/id_rsa.pub') }} user=$USER" -k diff --git a/action_plugins/scm_hosts.py b/action_plugins/scm_hosts.py index 21dc89a..b82f699 100755 --- a/action_plugins/scm_hosts.py +++ b/action_plugins/scm_hosts.py @@ -17,7 +17,7 @@ from ansible.plugins.action import ActionBase from cm_api.api_client import ApiException from cm_api.api_client import ApiResource - +import sys try: from __main__ import display except ImportError: @@ -41,15 +41,26 @@ def run(self, tmp=None, task_vars=None): # Get SCM host details from inventory try: scm_host = task_vars["groups"]["scm_server"][0] - scm_port = task_vars["hostvars"][scm_host]["scm_port"] - scm_user = 
task_vars["hostvars"][scm_host]["scm_default_user"] - scm_pass = task_vars["hostvars"][scm_host]["scm_default_pass"] + scm_port = task_vars["scm_port"] + scm_user = task_vars["scm_default_user"] + scm_pass = task_vars["scm_default_pass"] + scm_tls = task_vars["scm_web_tls"] + scm_port_tls = task_vars.get("scm_port_tls") + use_tls = task_vars.get("use_tls") + except KeyError as e: result['failed'] = True result['msg'] = e.message return result - api = self.get_api_handle(scm_host, scm_port, scm_user, scm_pass) + if use_tls is None or use_tls == False: + scm_port_in_use = scm_port + tls=False + else: + scm_port_in_use = scm_port_tls + tls=True + + api = self.get_api_handle(scm_host, scm_port_in_use, scm_user, scm_pass, tls) scm_host_list = api.get_all_hosts() display.vv("Retrieved %d host(s) from SCM" % len(scm_host_list)) diff --git a/cm_roles_test.yml b/cm_roles_test.yml new file mode 100644 index 0000000..e1e193b --- /dev/null +++ b/cm_roles_test.yml @@ -0,0 +1,8 @@ +--- +# Cloudera playbook + +- name: Install Cloudera Manager roles + hosts: scm_server + roles: + - cm_roles + tags: cm_roles_test diff --git a/group_vars/all b/group_vars/all index 261ae3b..d479093 100644 --- a/group_vars/all +++ b/group_vars/all @@ -2,9 +2,13 @@ ansible_become: true tmp_dir: /tmp -krb5_realm: AD.SEC.CLOUDERA.COM +agent_tls: true +krb5_realm: MIT.EXAMPLE.COM ad_domain: "{{ krb5_realm.lower() }}" -computer_ou: ou=Hosts,ou=morhidi,ou=HadoopClusters,ou=morhidi,dc=ad,dc=sec,dc=cloudera,dc=com -domain: vpc.cloudera.com -kdc: w2k8-1.ad.sec.cloudera.com +computer_ou: OU=computer_hosts,OU=hadoop_prd,DC=ad,DC=sec,DC=example,DC=com +domain: MIT.EXAMPLE.COM +kdc: mit.example.com admin_server: w2k8-1.ad.sec.cloudera.com +enc_types: rc4-hmac DES-CBC-MD5 DES-CBC-CRC +ad_account_prefix: prefix_ +kdc_account_creation_host_override: w2k8-1.ad.sec.cloudera.com diff --git a/group_vars/cdh_servers.yml b/group_vars/cdh_servers.yml index cfe2e4e..c6ec222 100644 --- a/group_vars/cdh_servers.yml +++ b/group_vars/cdh_servers.yml @@ -3,31 +3,66 @@ db_hostname: "{{ hostvars[groups['db_server'][0]]['inventory_hostname'] }}" scm_hostname: "{{ hostvars[groups['scm_server'][0]]['inventory_hostname'] }}" -cdh_version: 5.8.3 -cluster_display_name: cluster_1 +cdh_version: 6.2.0 +cluster_display_name: Cluster1 + +cdh_tls: true +log_base: /var/log cdh_services: + - type: hdfs - dfs_data_dir_list: /dfs/dn - fs_checkpoint_dir_list: /dfs/snn - dfs_name_dir_list: /dfs/nn - dfs_journalnode_edits_dir: /dfs/jn + nameservice: nameservice1 + dfs_data_dir_list: /data/1/dfs/dn,/data/2/dfs/dn,/data/3/dfs/dn,/data/4/dfs/dn,/data/5/dfs/dn,/data/6/dfs/dn,/data/7/dfs/dn,/data/8/dfs/dn + fs_checkpoint_dir_list: /data/1/dfs/snn + dfs_name_dir_list: /data/1/dfs/nn + dfs_journalnode_edits_dir: /data/2/dfs/jn + hdfs_supergroup: group_np_hdfs_super + +# - type: cdsw - type: hive + - type: hbase + hbase_superuser: "@group_np_hdfs_super" + - type: hue + hue_timezone: Europe/London + leaflet_tile_layer: "http://osm.org/osm_tiles/{z}/{x}/{y}.png" + mapping_attribution: "Custom mapping" + secure_content_security_policy: "script-src 'self' 'unsafe-inline' 'unsafe-eval' *.google-analytics.com *.doubleclick.net *.mathjax.org data:;img-src 'self' *.google-analytics.com *.doubleclick.net *.gstatic.com data:;style-src 'self' 'unsafe-inline';connect-src 'self';child-src 'self' data:;object-src 'none'" - type: impala - scratch_dirs: /tmp/impala + impala_scratch_dirs: 
/data/1/impala/impalad,/data/2/impala/impalad,/data/3/impala/impalad,/data/4/impala/impalad,/data/5/impala/impalad,/data/6/impala/impalad,/data/7/impala/impalad,/data/8/impala/impalad + + - type: kafka + kafka_super_users: kafka + +# - type: keytrustee + + - type: ks_indexer - type: oozie + oozie_from_email_address: noreply@example.com + oozie_email_smtp_host: mail.exampe.com + oozie_https_port: 11444 - type: sentry + sentry_admin_group: hive,impala,hue,solr,kafka,group_sentry_admin + sentry_allow_connect: hive,impala,hue,hdfs,solr + sentry_user: sentry + + - type: solr - type: spark - + - type: yarn - yarn_nodemanager_local_dirs: /tmp/nm - yarn_nodemanager_log_dirs: /var/log/nm + yarn_nodemanager_local_dirs: /data/1/yarn/nm,/data/2/yarn/nm,/data/3/yarn/nm,/data/4/yarn/nm,/data/5/yarn/nm,/data/6/yarn/nm,/data/7/yarn/nm,/data/8/yarn/nm + yarn_nodemanager_log_dirs: /data/1/yarn/nm/log,/data/2/yarn/nm/log,/data/3/yarn/nm/log,/data/4/yarn/nm/log,/data/5/yarn/nm/log,/data/6/yarn/nm/log,/data/7/yarn/nm/log,/data/8/yarn/nm/log + CMJOBUSER: user1 + YARN_ADMIN_ACL: nobody group_yarn_admin,hue - type: zookeeper + zookeeper_data_log_dir: /data/3/zookeeper + zookeeper_edits_dir: /data/4/zookeeper + diff --git a/group_vars/db_server.yml b/group_vars/db_server.yml index b999b0e..5040a13 100644 --- a/group_vars/db_server.yml +++ b/group_vars/db_server.yml @@ -1,9 +1,9 @@ --- -mysql_datadir: /var/lib/mysql -mysql_socket: /var/lib/mysql/mysql.sock +mysql_datadir: /logs/mysql +mysql_socket: /logs/mysql/mysql.sock mysql_port: 3306 -mysql_log_bin: /var/lib/mysql/mysql_binary_log +mysql_log_bin: /logs/mysql/mysql_binary_log mysql_log: /var/log/mysqld.log mysql_pid_dir: /var/run/mysqld mysql_pid_file: "{{ mysql_pid_dir }}/mysqld.pid" @@ -15,43 +15,61 @@ databases: user: 'scm' pass: 'scm_password' type: 'mysql' + host: '' + port: '3306' amon: name: 'amon' user: 'amon' pass: 'amon_password' type: 'mysql' + host: '' + port: '3306' rman: name: 'rman' user: 'rman' pass: 'rman_password' type: 'mysql' + host: '' + port: '3306' nav: name: 'nav' user: 'nav' pass: 'nav_password' type: 'mysql' + host: '' + port: '3306' navms: name: 'navms' user: 'navms' pass: 'navms_password' type: 'mysql' + host: '' + port: '3306' metastore: name: 'metastore' user: 'hive' pass: 'hive_password' type: 'mysql' + host: '' + port: '3306' sentry: name: 'sentry' user: 'sentry' pass: 'sentry_password' type: 'mysql' + host: '' + port: '3306' hue: name: 'hue' user: 'hue' pass: 'hue_password' type: 'mysql' + host: '' + port: '3306' oozie: name: 'oozie' user: 'oozie' pass: 'oozie_password' - type: 'mysql' \ No newline at end of file + type: 'mysql' + host: '' + port: '3306' diff --git a/group_vars/db_server_mysql.yml b/group_vars/db_server_mysql.yml new file mode 100644 index 0000000..bf83de6 --- /dev/null +++ b/group_vars/db_server_mysql.yml @@ -0,0 +1,75 @@ +--- + +mysql_datadir: /logs/mysql +mysql_socket: /logs/mysql/mysql.sock +mysql_port: 3306 +mysql_log_bin: /logs/mysql/mysql_binary_log +mysql_log: /var/log/mysqld.log +mysql_pid_dir: /var/run/mysqld +mysql_pid_file: "{{ mysql_pid_dir }}/mysqld.pid" +mysql_root_password: changeme + +databases: + scm: + name: 'scm' + user: 'scm' + pass: 'scm_password' + type: 'mysql' + host: 'localhost' + port: '3306' + amon: + name: 'amon' + user: 'amon' + pass: 'amon_password' + type: 'mysql' + host: 'localhost' + port: '3306' + rman: + name: 'rman' + user: 'rman' + pass: 'rman_password' + type: 'mysql' + host: 'localhost' + port: '3306' + nav: + name: 'nav' + user: 'nav' + pass: 'nav_password' + type: 'mysql' 
+ host: 'localhost' + port: '3306' + navms: + name: 'navms' + user: 'navms' + pass: 'navms_password' + type: 'mysql' + host: 'localhost' + port: '3306' + metastore: + name: 'metastore' + user: 'hive' + pass: 'hive_password' + type: 'mysql' + host: 'localhost' + port: '3306' + sentry: + name: 'sentry' + user: 'sentry' + pass: 'sentry_password' + type: 'mysql' + host: 'localhost' + port: '3306' + hue: + name: 'hue' + user: 'hue' + pass: 'hue_password' + type: 'mysql' + host: 'localhost' + port: '3306' + oozie: + name: 'oozie' + user: 'oozie' + pass: 'oozie_password' + type: 'mysql' + host: 'localhost' + port: '3306' diff --git a/group_vars/db_server_oracle.yml b/group_vars/db_server_oracle.yml new file mode 100644 index 0000000..7e5053d --- /dev/null +++ b/group_vars/db_server_oracle.yml @@ -0,0 +1,75 @@ +--- + +mysql_datadir: /logs/mysql +mysql_socket: /logs/mysql/mysql.sock +mysql_port: 1521 +mysql_log_bin: /logs/mysql/mysql_binary_log +mysql_log: /var/log/mysqld.log +mysql_pid_dir: /var/run/mysqld +mysql_pid_file: "{{ mysql_pid_dir }}/mysqld.pid" +mysql_root_password: changeme + +databases: + scm: + name: 'scm' + user: 'scm' + pass: changeme + type: 'oracle' + host: '' + port: '1521' + amon: + name: 'amon' + user: 'amon' + pass: changeme + type: 'oracle' + host: '' + port: '1521' + rman: + name: 'repman' + user: 'repman' + pass: changeme + type: 'oracle' + host: '' + port: '1521' + nav: + name: 'nav' + user: 'nav' + pass: changeme + type: 'oracle' + host: '' + port: '1521' + navms: + name: 'navms' + user: 'navms' + pass: changeme + type: 'oracle' + host: '' + port: '1521' + metastore: + name: 'metastore' + user: 'metastore' + pass: changeme + type: 'oracle' + host: '' + port: '1521' + sentry: + name: 'sentry' + user: 'sentry' + pass: changeme + type: 'oracle' + host: '' + port: '1521' + hue: + name: 'hue' + user: 'hue' + pass: changeme + type: 'oracle' + host: '' + port: '1521' + oozie: + name: 'oozie' + user: 'oozie' + pass: changeme + type: 'oracle' + host: '' + port: '1521' diff --git a/group_vars/encryption_zones.yml b/group_vars/encryption_zones.yml new file mode 100644 index 0000000..5850966 --- /dev/null +++ b/group_vars/encryption_zones.yml @@ -0,0 +1,68 @@ +--- + +keyAdminGroup: group_hdfs_key + +encryption_keys: + - key: + keyname: solr-key + acl: "solr solr,group_hdfs_super" + - key: + keyname: hue-key + acl: "hue,hive,oozie hue,oozie,hive,,group_yarn_admin" + - key: + keyname: hbase-key + acl: "hbase hbase,hive,impala" + - key: + keyname: spark-key + acl: "*" + - key: + keyname: mapred-key + acl: "*" + - key: + keyname: hive-key + acl: "hive hive" + +encryption_zones: + + - zone: + key: solr-key + path: /solr + user: solr + group: solr + mode: 0755 + + - zone: + key: hue-key + path: /user/hue + user: hue + group: hue + mode: 775 + + - zone: + key: hbase-key + path: /hbase + user: hbase + group: hbase + mode: 700 + + - zone: + key: spark-key + path: /user/spark + user: spark + group: spark + mode: 1777 + + - zone: + key: mapred-key + path: /user/history + user: mapred + group: hadoop + mode: 777 + + - zone: + key: hive-key + path: /user/hive/warehouse + user: hive + group: hive + mode: 1775 + diff --git a/group_vars/kms_servers.yml b/group_vars/kms_servers.yml new file mode 100644 index 0000000..7ea2020 --- /dev/null +++ b/group_vars/kms_servers.yml @@ -0,0 +1,5 @@ +--- + +kms_conf_dir: /var/opt/cloudera/kms-keytrustee/keytrustee +kms_key_dir: /var/opt/cloudera/kms-keytrustee + diff --git a/group_vars/krb5_server.yml b/group_vars/krb5_server.yml index fd3d791..57fae71 100644 
--- a/group_vars/krb5_server.yml +++ b/group_vars/krb5_server.yml @@ -1,10 +1,10 @@ --- # 'ad', 'mit', or 'none' to disable security -krb5_kdc_type: mit +krb5_kdc_type: 'ad' krb5_kdc_master_passwd: changeme -krb5_kdc_admin_user: "cloudera-scm/admin@{{ hostvars[groups['krb5_server'][0]]['default_realm'] }}" -krb5_kdc_admin_passwd: changeme +krb5_kdc_admin_user: "cloudera-scm@{{ hostvars[groups['krb5_server'][0]]['default_realm'] }}" +krb5_kdc_admin_passwd: "changeme" diff --git a/group_vars/kts_servers.yml b/group_vars/kts_servers.yml new file mode 100644 index 0000000..68143e7 --- /dev/null +++ b/group_vars/kts_servers.yml @@ -0,0 +1,12 @@ +--- + +kts_display_name: KeyTrusteeServerCluster +kts_org_name: OrgName1 + +kts_services: + + - type: kts + keytrustee_server_DB_ACTIVE_BASE_db_root: /var/opt/cloudera/keytrustee/db + keytrustee_server_DB_PASSIVE_BASE_db_root: /var/opt/cloudera/keytrustee/db + keytrustee_server_keytrustee_home: /var/opt/cloudera/keytrustee/.keytrustee + diff --git a/group_vars/ldap_enc.yml b/group_vars/ldap_enc.yml new file mode 100644 index 0000000..9f8ad77 --- /dev/null +++ b/group_vars/ldap_enc.yml @@ -0,0 +1,32 @@ +--- + +ldap_udom: + url: ldaps://mit.example.com + domain: mit.example.com + bind_dn: cloudera-scm@mit.example.com + bind_pass: password + base_dn: dc=mit,dc=example,dc=com + user_filter: (objectClass=user) + user_name_attr: sAMAccountName + group_filter: (objectClass=group) + group_name_attr: cn + group_member_attr: member + +ldap_rdom: + url: ldaps://mit.example.com + domain: mit.example.com + auth_type: ACTIVE_DIRECTORY + bind_dn: cloudera-scm@mit.example.com + bind_pass: password + base_dn: dc=mit,dc=example,dc=com + user_filter: (objectClass=user) + user_name_attr: sAMAccountName + group_filter: (objectClass=group) + group_name_attr: cn + group_member_attr: member + cm_clu_admin: GROUP_CDH_ADMIN + cm_full_admin: GROUP_CDH_ADMIN + cm_key_admin: GROUP_CDH_ADMIN + cm_operator: GROUP_CDH_ADMIN + cm_read_only: GROUP_CDH_ADMIN + cm_user_admin: GROUP_CDH_ADMIN diff --git a/group_vars/scm_server.yml b/group_vars/scm_server.yml index f3fa8cf..9da0420 100644 --- a/group_vars/scm_server.yml +++ b/group_vars/scm_server.yml @@ -1,14 +1,55 @@ --- -scm_version: 5.8.3 +scm_version: 6.3.0 scm_port: 7180 -scm_default_user: admin -scm_default_pass: admin +scm_port_tls: 7183 +scm_hostname: "{{ hostvars[groups['scm_server'][0]]['inventory_hostname'] }}" +scm_dir: /opt/cloudera/cm +scm_web_tls: True +banner_text: "Ansible Cluster Build" +banner_colour: RED + +yum_repo_base: http:///RPMS/ scm_repositories: - - http://archive.cloudera.com/cdh5/parcels/5.8.3/ - - https://archive.cloudera.com/cdh5/parcels/{latest_supported}/ + - http:///PARCELS/CDH6.2/ + - http:///PARCELS/KTS/6.1.0/ + - http:///PARCELS/KMS/6.1.0/ + - http:///PARCELS/ANACONDA/4.4.1/ +# - http:///PARCELS/Oracle/134/ + +scm_csd: + - http:///CSD/CLOUDERA_DATA_SCIENCE_WORKBENCH-CDH5-1.4.2.jar +# - http:///CSD/SPARK2_ON_YARN-2.3.0.cloudera4.jar scm_products: - product: CDH - version: 5.8.3-1.cdh5.8.3.p0.2 + version: 6.2.0-1.cdh6.2.0.p0.967373 + + # - product: KAFKA + # version: 4.0.0-1.4.0.0.p0.1 + + - product: KEYTRUSTEE + version: 6.1.0-1.KEYTRUSTEE6.1.0.p0.592714 + +# - product: ORACLE_INSTANT_CLIENT +# version: 11.2-1.oracleinstantclient1.0.0.p0.134 + + - product: KEYTRUSTEE + version: 6.1.0-1.KEYTRUSTEE6.1.0.p0.592714 + +kts_products: + - product: KEYTRUSTEE_SERVER + version: 6.1.0-1.keytrustee6.1.0.p0.592761 + + - product: CDH + version: 6.2.0-1.cdh6.2.0.p0.967373 + +oom_heap_dump_dir: /logs/heapdumps 
+eventserver_index_dir: /logs/cloudera-scm-eventserver +hmon_firehose_storage_dir: /logs/cloudera-host-monitor +navms_data_dir: /logs/cloudera-scm-navigator +headlamp_scratch_dir: /logs/cloudera-scm-headlamp +smon_firehose_storage_dir: /logs/cloudera-service-monitor +nav_auditstream_filename_pattern: /var/log/cloudera-audit/audit_%d{yyyyMMdd}.evt +nav_auditstream_directory: /var/log/cloudera-audit diff --git a/group_vars/scm_server_enc.yml b/group_vars/scm_server_enc.yml new file mode 100644 index 0000000..3ace14c --- /dev/null +++ b/group_vars/scm_server_enc.yml @@ -0,0 +1,4 @@ +--- + +scm_default_user: admin +scm_default_pass: admin diff --git a/group_vars/tls_enc.yml b/group_vars/tls_enc.yml new file mode 100644 index 0000000..16e723e --- /dev/null +++ b/group_vars/tls_enc.yml @@ -0,0 +1,12 @@ +--- + +tls: + keystore_path: /opt/cloudera/security/jks/localhost.jks + keystore_password: changeme + key_password_file: /opt/cloudera/security/x509/key.pw + tls_cert: /opt/cloudera/security/x509/localhost.pem + private_key: /opt/cloudera/security/x509/localhost.key + cert_dir: /opt/cloudera/security/CAcerts/ + cert_chain: /opt/cloudera/security/CAcerts/root.pem + truststore_path: /usr/java/latest/jre/lib/security/jssecacerts + truststore_password: changeit diff --git a/hosts b/hosts index c88665a..4d707a2 100644 --- a/hosts +++ b/hosts @@ -1,40 +1,68 @@ # Note for AWS: 'Public DNS' name is too long for ansible_host, use 'Public IP' (https://github.com/ansible/ansible/issues/11536) [scm_server] - license_file=/path/to/cloudera_license.txt + license_file=/opt/cloudera_license.txt [db_server] [krb5_server] - default_realm=CLOUDERA.COM + default_realm=MIT.EXAMPLE.COM [utility_servers:children] scm_server db_server krb5_server -[gateway_servers] - host_template=HostTemplate-Gateway role_ref_names=HDFS-HTTPFS-1 +[gateway_servers:children] +gatewayen_servers +gatewaylb_servers + +[gatewayen_servers] + host_template=HostTemplate-GatewayEdge + +[gatewaylb_servers] +# host_template=HostTemplate-GatewayLB + host_template=HostTemplate-GatewayLB [master_servers] - host_template=HostTemplate-Master1 - host_template=HostTemplate-Master2 - host_template=HostTemplate-Master3 + host_template=HostTemplate-Master1 + host_template=HostTemplate-Master2 + host_template=HostTemplate-Master3 [worker_servers] + + [worker_servers:vars] host_template=HostTemplate-Workers +#host_template=HostTemplate-Kafka + +[kms_servers] +# +# [cdh_servers:children] utility_servers gateway_servers master_servers worker_servers +kms_servers + +[kts_servers] +# host_template=KeyTrusteeActive +# host_template=KeyTrusteePassive + +[haproxy] + [all:vars] -ansible_user=ec2-user \ No newline at end of file +# 'ad', 'mit', or 'none' to disable security +# This value must match that in group_vars/krb5_server.yml +krb5_kdc_type='ad' +hdfs_tde_enabled='False' +database_type='mysql' +full_teardown='False' diff --git a/inventory b/inventory new file mode 100644 index 0000000..3c3489d --- /dev/null +++ b/inventory @@ -0,0 +1 @@ +Subproject commit f22a38889598fee47e9c1cefe7b90b3577f73033 diff --git a/roles/ca_server/tasks/main.yml b/roles/ca_server/tasks/main.yml new file mode 100644 index 0000000..f5bbca8 --- /dev/null +++ b/roles/ca_server/tasks/main.yml @@ -0,0 +1,169 @@ +--- +- include_vars: ../../../group_vars/ca.yml + +- name: Install openssl + yum: + name: openssl + state: latest + +- name: Install PyOpenSSL + yum: + name: pyOpenSSL + state: latest + +- name: Prepare Root CA directories + file: + state: directory + path: "{{ ca_root_location }}" 
+ mode: 0700 + owner: root + +- name: Prepare Root CA subdirs + file: + state: directory + path: "{{ ca_root_location }}/{{ item }}" + mode: 0700 + owner: root + with_items: + - "certs" + - "crl" + - "newcerts" + - "private" + +- name: Create index file + file: + state: touch + path: "{{ ca_root_location }}/index.txt" + mode: 0700 + +- name: Write serial + shell: "echo 1000 > {{ ca_root_location }}/serial" + args: + creates: "{{ ca_root_location }}/serial" + +- name: Install root ca openssl.cnf + template: + src: root.openssl.cnf.j2 + dest: "{{ ca_root_location }}/openssl.cnf" + owner: root + mode: '0644' + +- name: Genereate root private key + openssl_privatekey: + path: "{{ ca_root_location }}/private/ca.key.pem" + size: 4096 + cipher: aes256 + passphrase: "{{ ca_root_key_password }}" + mode: 0400 + +- name: "set CA_SUBJECT var" + set_fact: + ca_subject: '/C={{ ca_countryname_default }}/O={{ ca_org_name }}/OU={{ ca_ou }}/CN={{ ca_root_cn }}' + +- name: Generate root certificate + shell: "{{ openssl_path }} req -config {{ ca_root_location }}/openssl.cnf -new -key {{ ca_root_location }}/private/ca.key.pem -x509 -days 7300 -sha256 -extensions v3_ca -out {{ ca_root_location }}/certs/{{ root_ca_cert_name }} -passin pass:{{ ca_root_key_password }} -subj \"{{ ca_subject }}\"" + args: + creates: "{{ ca_root_location }}/certs/{{ root_ca_cert_name }}" + +- name: Intermediate CA directories + file: + state: directory + path: "{{ ca_intermediate_location }}" + mode: 0700 + owner: root + +- name: Prepare Intermediate CA subdirs + file: + state: directory + path: "{{ ca_intermediate_location }}/{{ item }}" + mode: 0700 + owner: root + with_items: + - "certs" + - "crl" + - "csr" + - "newcerts" + - "private" + +- name: Create index file + file: + state: touch + path: "{{ ca_intermediate_location }}/index.txt" + mode: 0700 + +- name: Write serial + shell: "echo 1000 > {{ ca_intermediate_location }}/serial" + args: + creates: "{{ ca_intermediate_location }}/serial" + +- name: Install intermediate ca openssl.cnf + template: + src: intermediate.openssl.cnf.j2 + dest: "{{ ca_intermediate_location }}/openssl.cnf" + owner: root + mode: '0644' + +- name: Genereate intermediate private key + openssl_privatekey: + path: "{{ ca_intermediate_location }}/private/intermediate.key.pem" + size: 4096 + cipher: aes256 + passphrase: "{{ ca_intermediate_key_password }}" + mode: 0400 + +- name: "set INT_CA_SUBJECT var" + set_fact: + intermediate_ca_subject: '/C={{ ca_countryname_default }}/O={{ ca_org_name }}/OU={{ ca_ou }}/CN={{ ca_intermediate_cn }}' + +- name: Generate intermediate CSR + shell: "{{ openssl_path }} req -config {{ ca_intermediate_location }}/openssl.cnf -new -sha256 -key {{ ca_intermediate_location }}/private/intermediate.key.pem -out {{ ca_intermediate_location }}/csr/intermediate.csr.pem -passin pass:{{ ca_intermediate_key_password }} -subj \"{{ intermediate_ca_subject }}\"" + args: + creates: "{{ ca_intermediate_location }}/csr/intermediate.csr.pem" + +- name: Sign intermediate certificate + shell: "{{ openssl_path }} ca -batch -config {{ ca_root_location }}/openssl.cnf -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in {{ ca_intermediate_location }}/csr/intermediate.csr.pem -out {{ ca_intermediate_location }}/certs/{{ intermediate_ca_cert_name }} -passin pass:{{ ca_root_key_password }}" + args: + creates: "{{ ca_intermediate_location }}/certs/{{ intermediate_ca_cert_name }}" + +- name: Generate certificate chain file + shell: "cat {{ ca_intermediate_location }}/certs/{{ 
intermediate_ca_cert_name }} {{ ca_root_location }}/certs/{{ root_ca_cert_name }} > {{ ca_intermediate_location }}/certs/{{ chain_cert_name }}" + args: + creates: "{{ ca_intermediate_location }}/certs/{{ chain_cert_name }}" + +- name: Chmod chain file + file: + state: file + mode: 444 + path: "{{ ca_intermediate_location }}/certs/{{ chain_cert_name }}" + +- name: Prepare directory for signed certs + local_action: + module: file + state: directory + mode: 0777 + owner: "{{ ansible_user_id }}" + path: "{{ signed_certificates_local_location }}" + +- name: Fetch ca files + fetch: + flat: yes + src: "{{ item }}" + dest: "{{ signed_certificates_local_location }}/" + with_items: + - "{{ ca_intermediate_location }}/certs/{{ chain_cert_name }}" + - "{{ ca_intermediate_location }}/certs/{{ intermediate_ca_cert_name }}" + - "{{ ca_root_location }}/certs/{{ root_ca_cert_name }}" + +- name: Identify signed certificates from CA + find: + paths: "{{ ca_intermediate_location }}/certs/" + patterns: ".pem$" + use_regex: True + register: file_2_fetch + +- name: Fetch signed signed_certificates + fetch: + flat: yes + src: "{{ item.path }}" + dest: "{{ signed_certificates_local_location }}/" + with_items: "{{ file_2_fetch.files }}" \ No newline at end of file diff --git a/roles/ca_server/templates/intermediate.openssl.cnf.j2 b/roles/ca_server/templates/intermediate.openssl.cnf.j2 new file mode 100644 index 0000000..7da486d --- /dev/null +++ b/roles/ca_server/templates/intermediate.openssl.cnf.j2 @@ -0,0 +1,137 @@ +# OpenSSL intermediate CA configuration file. +# Copy to `/root/ca/intermediate/openssl.cnf`. + +[ ca ] +# `man ca` +default_ca = CA_default + +[ CA_default ] +# Directory and file locations. +dir = {{ ca_root_location }}/intermediate +certs = $dir/certs +crl_dir = $dir/crl +new_certs_dir = $dir/newcerts +database = $dir/index.txt +serial = $dir/serial +RANDFILE = $dir/private/.rand + +# The root key and root certificate. +private_key = $dir/private/intermediate.key.pem +certificate = $dir/certs/intermediate.cert.pem + +# For certificate revocation lists. +crlnumber = $dir/crlnumber +crl = $dir/crl/intermediate.crl.pem +crl_extensions = crl_ext +default_crl_days = 30 + +# SHA-1 is deprecated, so use SHA-2 instead. +default_md = sha256 + +name_opt = ca_default +cert_opt = ca_default +default_days = 375 +preserve = no +policy = policy_loose + +[ policy_strict ] +# The root CA should only sign intermediate certificates that match. +# See the POLICY FORMAT section of `man ca`. +countryName = match +stateOrProvinceName = match +organizationName = match +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +[ policy_loose ] +# Allow the intermediate CA to sign a more diverse range of certificates. +# See the POLICY FORMAT section of the `ca` man page. +countryName = optional +stateOrProvinceName = optional +localityName = optional +organizationName = optional +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +[ req ] +# Options for the `req` tool (`man req`). +default_bits = 2048 +distinguished_name = req_distinguished_name +string_mask = utf8only + +# SHA-1 is deprecated, so use SHA-2 instead. +default_md = sha256 + +# Extension to add when the -x509 option is used. +x509_extensions = v3_ca + +[ req_distinguished_name ] +# See . 
+countryName = Country Name (2 letter code) +stateOrProvinceName = State or Province Name +localityName = Locality Name +0.organizationName = Organization Name +organizationalUnitName = Organizational Unit Name +commonName = Common Name +emailAddress = Email Address + +# Optionally, specify some defaults. +countryName_default = {{ ca_countryname_default }} +stateOrProvinceName_default = {{ ca_state_or_province }} +localityName_default = +0.organizationName_default = {{ ca_org_name }} +organizationalUnitName_default = {{ ca_ou }} +emailAddress_default = + +[ v3_ca ] +# Extensions for a typical CA (`man x509v3_config`). +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer +basicConstraints = critical, CA:true +keyUsage = critical, digitalSignature, cRLSign, keyCertSign + +[ v3_intermediate_ca ] +# Extensions for a typical intermediate CA (`man x509v3_config`). +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer +basicConstraints = critical, CA:true, pathlen:0 +keyUsage = critical, digitalSignature, cRLSign, keyCertSign + +[ usr_cert ] +# Extensions for client certificates (`man x509v3_config`). +basicConstraints = CA:FALSE +nsCertType = client, email +nsComment = "OpenSSL Generated Client Certificate" +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer +keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment +extendedKeyUsage = clientAuth, emailProtection + +[ server_cert ] +# Extensions for server certificates (`man x509v3_config`). +basicConstraints = CA:FALSE +nsCertType = server +nsComment = "OpenSSL Generated Server Certificate" +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer:always +keyUsage = critical, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth + +[ crl_ext ] +# Extension for CRLs (`man x509v3_config`). +authorityKeyIdentifier=keyid:always + +[ ocsp ] +# Extension for OCSP signing certificates (`man ocsp`). +basicConstraints = CA:FALSE +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer +keyUsage = critical, digitalSignature +extendedKeyUsage = critical, OCSPSigning + +[ cloudera_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth, clientAuth \ No newline at end of file diff --git a/roles/ca_server/templates/root.openssl.cnf.j2 b/roles/ca_server/templates/root.openssl.cnf.j2 new file mode 100644 index 0000000..cf396fa --- /dev/null +++ b/roles/ca_server/templates/root.openssl.cnf.j2 @@ -0,0 +1,132 @@ +# OpenSSL root CA configuration file. +# Copy to `/root/ca/openssl.cnf`. + +[ ca ] +# `man ca` +default_ca = CA_default + +[ CA_default ] +# Directory and file locations. +dir = {{ ca_root_location }} +certs = $dir/certs +crl_dir = $dir/crl +new_certs_dir = $dir/newcerts +database = $dir/index.txt +serial = $dir/serial +RANDFILE = $dir/private/.rand + +# The root key and root certificate. +private_key = $dir/private/ca.key.pem +certificate = $dir/certs/ca.cert.pem + +# For certificate revocation lists. +crlnumber = $dir/crlnumber +crl = $dir/crl/ca.crl.pem +crl_extensions = crl_ext +default_crl_days = 30 + +# SHA-1 is deprecated, so use SHA-2 instead. +default_md = sha256 + +name_opt = ca_default +cert_opt = ca_default +default_days = 375 +preserve = no +policy = policy_strict + +[ policy_strict ] +# The root CA should only sign intermediate certificates that match. +# See the POLICY FORMAT section of `man ca`. 
+countryName = match +stateOrProvinceName = optional +organizationName = match +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +[ policy_loose ] +# Allow the intermediate CA to sign a more diverse range of certificates. +# See the POLICY FORMAT section of the `ca` man page. +countryName = optional +stateOrProvinceName = optional +localityName = optional +organizationName = optional +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +[ req ] +# Options for the `req` tool (`man req`). +default_bits = 2048 +distinguished_name = req_distinguished_name +string_mask = utf8only + +# SHA-1 is deprecated, so use SHA-2 instead. +default_md = sha256 + +# Extension to add when the -x509 option is used. +x509_extensions = v3_ca + +[ req_distinguished_name ] +# See . +countryName = Country Name (2 letter code) +stateOrProvinceName = State or Province Name +localityName = Locality Name +0.organizationName = Organization Name +organizationalUnitName = Organizational Unit Name +commonName = Common Name +emailAddress = Email Address + +# Optionally, specify some defaults. +countryName_default = {{ ca_countryname_default }} +stateOrProvinceName_default = {{ ca_state_or_province }} +localityName_default = +0.organizationName_default = {{ ca_org_name }} +organizationalUnitName_default = {{ ca_ou }} +emailAddress_default = + +[ v3_ca ] +# Extensions for a typical CA (`man x509v3_config`). +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer +basicConstraints = critical, CA:true +keyUsage = critical, digitalSignature, cRLSign, keyCertSign + +[ v3_intermediate_ca ] +# Extensions for a typical intermediate CA (`man x509v3_config`). +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer +basicConstraints = critical, CA:true, pathlen:0 +keyUsage = critical, digitalSignature, cRLSign, keyCertSign + +[ usr_cert ] +# Extensions for client certificates (`man x509v3_config`). +basicConstraints = CA:FALSE +nsCertType = client, email +nsComment = "OpenSSL Generated Client Certificate" +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer +keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment +extendedKeyUsage = clientAuth, emailProtection + +[ server_cert ] +# Extensions for server certificates (`man x509v3_config`). +basicConstraints = CA:FALSE +nsCertType = server +nsComment = "OpenSSL Generated Server Certificate" +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer:always +keyUsage = critical, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth + +[ crl_ext ] +# Extension for CRLs (`man x509v3_config`). +authorityKeyIdentifier=keyid:always + +[ ocsp ] +# Extension for OCSP signing certificates (`man ocsp`). 
+basicConstraints = CA:FALSE +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer +keyUsage = critical, digitalSignature +extendedKeyUsage = critical, OCSPSigning diff --git a/roles/ca_server_signing/tasks/main.yml b/roles/ca_server_signing/tasks/main.yml new file mode 100644 index 0000000..9db43f8 --- /dev/null +++ b/roles/ca_server_signing/tasks/main.yml @@ -0,0 +1,42 @@ +--- +- include_vars: ../../../group_vars/ca.yml + +- name: Identify CSRs for signing + local_action: + module: find + paths: "{{ csr_certificates_local_location }}" + patterns: '*.csr' + register: csrs + +- name: Fetch CSRs + copy: + src: "{{ item.path }}" + dest: "{{ ca_intermediate_location }}/csrs/" + with_items: + - "{{ csrs.files }}" + register: csrs_remote + +- name: Sign certificate + shell: "{{ openssl_path }} ca -batch -config {{ ca_intermediate_location }}/openssl.cnf -extensions cloudera_req -days 730 -notext -md sha256 -in {{ item.dest }} -out {{ ca_intermediate_location }}/certs/$(basename /tmp/ca/intermediate/csrs/{{ item.dest }} | sed 's/csr/pem/') -passin pass:{{ ca_root_key_password }}" + with_items: + - "{{ csrs_remote.results }}" + ignore_errors: true + +- name: Identify signed certificates from CA + find: + paths: "{{ ca_intermediate_location }}/certs/" + patterns: "*.pem" + register: file_2_fetch + +- name: Fetch signed signed_certificates + fetch: + flat: yes + src: "{{ item.path }}" + dest: "{{ signed_certificates_local_location }}/" + with_items: "{{ file_2_fetch.files }}" + +- name: Remove CSRs from temp directory + local_action: + module: file + state: absent + path: "{{ csr_certificates_local_location }}" diff --git a/roles/cdh/files/hue-banner.txt b/roles/cdh/files/hue-banner.txt new file mode 100644 index 0000000..bd46953 --- /dev/null +++ b/roles/cdh/files/hue-banner.txt @@ -0,0 +1 @@ +Ansible Hue Build diff --git a/roles/cdh/tasks/main.yml b/roles/cdh/tasks/main.yml index 840abf9..00ff17c 100644 --- a/roles/cdh/tasks/main.yml +++ b/roles/cdh/tasks/main.yml @@ -1,11 +1,81 @@ --- -- include_vars: ../../../group_vars/cdh_servers.yml -- include_vars: ../../../group_vars/scm_server.yml -- include_vars: ../../../group_vars/db_server.yml +- include_vars: "{{ inventory_dir }}/group_vars/all" +- include_vars: "{{ inventory_dir }}/group_vars/cdh_servers.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/krb5_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/tls_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/ldap_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/kms_servers.yml" +- include_vars: "{{ inventory_dir }}/group_vars/kts_servers.yml" + +- set_fact: cm_api_url={{ "https://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port_tls }}" if scm_web_tls==True else "http://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port }}" }} + +- name: Get Cloudera Manager API version + uri: + url: "{{ cm_api_url }}/api/version" + method: GET + status_code: 200 + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + force_basic_auth: yes + return_content: yes + register: result + +- set_fact: cm_api_url="{{ cm_api_url }}/api/{{ result.content }}" + +- name: Start CMS cluster + uri: + url: "{{ cm_api_url }}/cm/service/commands/start" + method: POST + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + 
password: "{{ scm_default_pass }}" + return_content: yes + register: template_resp + failed_when: "'startTime' not in template_resp.content" + +- debug: var=template_resp + +- set_fact: command_id="{{ template_resp.json.id }}" + +# https://cloudera.github.io/cm_api/apidocs/v13/path__commands.html +- name: Wait for cluster to start + uri: + url: "{{ cm_api_url }}/commands/{{ command_id }}" + body_format: json + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: cluster_start_result + until: cluster_start_result.json.active == false + retries: 5 + delay: 30 + +# Install Cloudera Manager Python API +# - include: api.yml + +# Retrieve auto-generated host IDs from SCM +#- name: Get SCM hostIds for inventory hosts +# become: true +# action: scm_hosts +# register: scm_hosts_result + +#- set_fact: scm_host_ids="{{ scm_hosts_result.host_ids }}" +#- debug: var=scm_host_ids + +#- set_fact: scm_host_names="{{ scm_hosts_result.host_names }}" +#- debug: var=scm_host_names + # Check whether cluster already exists # https://cloudera.github.io/cm_api/apidocs/v13/path__clusters.html + - name: Check whether cluster exists uri: url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}" @@ -20,13 +90,16 @@ - set_fact: cluster_exists={{ 'True' if clusters_resp.status == 200 else 'False' }} - debug: msg="Cluster '{{ cluster_display_name }}' exists - {{ cluster_exists }}" + # https://www.cloudera.com/documentation/enterprise/latest/topics/install_cluster_template.html - name: Prepare cluster template template: src: "base.j2" dest: "{{ tmp_dir }}/cluster.json" + mode: 0777 when: cluster_exists == False - delegate_to: localhost + +#- debug: var= "{{ lookup('file', ''+ tmp_dir + '/cluster.json') }}" # https://cloudera.github.io/cm_api/apidocs/v13/path__cm_importClusterTemplate.html - name: Import cluster template @@ -42,7 +115,6 @@ return_content: yes register: template_resp when: cluster_exists == False - delegate_to: localhost - debug: var=template_resp when: cluster_exists == False @@ -56,7 +128,36 @@ - debug: msg="Login to Cloudera Manager to monitor import progress - http://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port }}/cmf/commands/commands" when: cluster_exists == False +- name: Save template to home dir + shell: cp "{{ tmp_dir }}"/cluster.json /tmp/cluster.json.backup + - file: path: "{{ tmp_dir }}/cluster.json" state: absent - delegate_to: localhost + +- name: Wait for first run wizard to complete + uri: + url: "{{ cm_api_url }}/commands/{{ command_id }}" + body_format: json + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: frw_result + until: frw_result.json.active == false + retries: 25 + delay: 60 + +- debug: msg="Cluster Kerberos Deployment Type - {{ krb5_kdc_type }}" + +- name: Restart CMS (when deploying a Kerberised Cluster) + uri: + url: "{{ cm_api_url }}/cm/service/commands/restart" + method: POST + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + when: krb5_kdc_type != 'none' diff --git a/roles/cdh/templates/cdsw.j2 b/roles/cdh/templates/cdsw.j2 new file mode 100644 index 0000000..c61268a --- /dev/null +++ b/roles/cdh/templates/cdsw.j2 @@ -0,0 +1,67 @@ +{ + "refName" : "CDSW-1", + "serviceType" : "CDSW", + "serviceConfigs" : [ { + "name" : "hive_service", + "ref" : "HIVE-1" + }, { + "name" : 
"cdsw.domain.config", + "variable" : "CDSW_DOMAIN" + }, { + "name" : "sentry_service", + "ref" : "SENTRY-1" + }, { + "name" : "yarn_service", + "ref" : "YARN-1" + }, { + "name" : "solr_service", + "ref" : "SOLR-1" + }, { + "name" : "spark_on_yarn_service", + "ref" : "SPARK_ON_YARN-1" + }, { + "name" : "hbase_service", + "ref" : "HBASE-1" + }, { + "name" : "hdfs_service", + "ref" : "HDFS-1" + }, { + "name" : "cdsw.master.ip.config", + "variable" : "CDSW_MASTER_IP" + } ], + "roleConfigGroups" : [ { + "refName" : "CDSW-1-CDSW_APPLICATION-BASE", + "roleType" : "CDSW_APPLICATION", + "configs" : [ + {% if (cdh_tls) %} + { + "name" : "cdsw.enable.tls.config", + "value" : "true" + }, { + "name" : "cdsw.tls.cert.config", + "value" : "/opt/cloudera/security/x509/localhost.pem" + }, { + "name" : "cdsw.tls.key.config", + "value" : "/opt/cloudera/security/x509/localhost.key.unenc" + } + {% endif %} + ], + "base" : true + }, { + "refName" : "CDSW-1-CDSW_MASTER-BASE", + "roleType" : "CDSW_MASTER", + "base" : true + }, { + "refName" : "CDSW-1-CDSW_WORKER-BASE", + "roleType" : "CDSW_WORKER", + "base" : true + }, { + "refName" : "CDSW-1-CDSW_DOCKER-BASE", + "roleType" : "CDSW_DOCKER", + "configs" : [ { + "name" : "cdsw.docker.devices.config", + "value" : "/dev/sdd" + } ], + "base" : true + } ] + } diff --git a/roles/cdh/templates/hbase.j2 b/roles/cdh/templates/hbase.j2 new file mode 100644 index 0000000..fa77f01 --- /dev/null +++ b/roles/cdh/templates/hbase.j2 @@ -0,0 +1,162 @@ +{ + "refName" : "HBASE-1", + "serviceType" : "HBASE", + "serviceConfigs" : [ { + "name" : "zookeeper_service", + "ref" : "ZOOKEEPER-1" + }, { + "name" : "hbase_superuser", + "value" : "hbase" + }, { + "name" : "hbase_enable_replication", + "value" : "true" + }, { + "name" : "hbase_thriftserver_support_proxyuser", + "value" : "true" + }, { + "name" : "hdfs_service", + "ref" : "HDFS-1" + }, { + "name" : "audit_event_log_dir", + "value" : "{{ log_base }}/hbase/audit" + }, { + "name" : "hbase_thriftserver_http", + "value" : "true" + }, { + "name" : "hbase_enable_indexing", + "value" : "true" + } , { + "name" : "hbase_superuser", + "variable" : "HBASE_SUPERUSER" + }, { + "name" : "hbase_enable_replication", + "value" : "true" + }, { + "name" : "hbase_thriftserver_http", + "value" : "true" + } + {% if (cdh_tls) %} + , { + "name" : "ssl_server_keystore_keypassword", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "ssl_server_keystore_location", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "ssl_server_keystore_password", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "hbase_hadoop_ssl_enabled", + "value" : "true" + } + {% endif %} + {% if (krb5_kdc_type == 'ad') or (krb5_kdc_type == 'mit') %} + , { + "name" : "hbase_thriftserver_support_proxyuser", + "value" : "true" + },{ + "name" : "hbase_restserver_security_authentication", + "value" : "kerberos" + }, { + "name" : "hbase_thriftserver_security_authentication", + "value" : "auth-conf" + }, { + "name" : "hbase_rpc_protection", + "value" : "privacy" + }, { + "name" : "hbase_security_authorization", + "value" : "true" + }, { + "name" : "hbase_security_authentication", + "value" : "kerberos" + }, { + "name" : "hbase_service_config_safety_valve", + "value" : "hbase.security.exec.permission.checkstrueWithout this option, all users will continue to have acess to execute endpoint coprocessors. This options is not enabled when you enable HBase Secure Authorization for backwards compatability." 
+ } + + {% endif %} + ], + "roleConfigGroups" : [ { + "refName" : "HBASE-1-REGIONSERVER-BASE", + "roleType" : "REGIONSERVER", + "configs" : [ { + "name" : "hbase_regionserver_log_dir", + "value" : "{{ log_base }}/hbase" + }, { + "name" : "oom_heap_dump_enabled", + "value" : "false" + }, { + "name" : "hbase_regionserver_java_heapsize", + "value" : "2631925760" + } ], + "base" : true + }, { + "refName" : "HBASE-1-HBASERESTSERVER-BASE", + "roleType" : "HBASERESTSERVER", + "configs" : [ { + "name" : "oom_heap_dump_enabled", + "value" : "false" + }, { + "name" : "hbase_restserver_log_dir", + "value" : "{{ log_base }}/hbase" + } + {% if (cdh_tls) %} + , { + "name" : "hbase_restserver_keystore_file", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "hbase_restserver_keystore_keypassword", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "hbase_restserver_ssl_enable", + "value" : "true" + }, { + "name" : "hbase_restserver_keystore_password", + "value" : "{{ tls.keystore_password }}" + } + {% endif %} + ], + "base" : true + }, { + "refName" : "HBASE-1-MASTER-BASE", + "roleType" : "MASTER", + "configs" : [ { + "name" : "oom_heap_dump_enabled", + "value" : "false" + }, { + "name" : "hbase_master_log_dir", + "value" : "{{ log_base }}/hbase" + } ], + "base" : true + }, { + "refName" : "HBASE-1-HBASETHRIFTSERVER-BASE", + "roleType" : "HBASETHRIFTSERVER", + "configs" : [ { + "name" : "oom_heap_dump_enabled", + "value" : "false" + }, { + "name" : "hbase_thriftserver_log_dir", + "value" : "{{ log_base }}/hbase" + } + {% if (cdh_tls) %} + , { + "name" : "hbase_thriftserver_http_keystore_password", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "hbase_thriftserver_http_keystore_keypassword", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "hbase_thriftserver_http_keystore_file", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "hbase_thriftserver_http_use_ssl", + "value" : "true" + } + + {% endif %} + ], + "base" : true + } ], + "roles" : [ + ] + } diff --git a/roles/cdh/templates/hdfs.j2 b/roles/cdh/templates/hdfs.j2 index d8b750b..6dde90a 100644 --- a/roles/cdh/templates/hdfs.j2 +++ b/roles/cdh/templates/hdfs.j2 @@ -12,25 +12,92 @@ "name": "hadoop_security_authentication", {% if (krb5_kdc_type == 'ad') or (krb5_kdc_type == 'mit') %} "value": "kerberos" + }, { + "name" : "trusted_realms", + "value" : "{{ trusted_realm|upper }},{{ default_realm|upper }}" + }, { + {% if cdh_tls == 'true' %} + "name" : "dfs_data_transfer_protection", + "value" : "privacy" + }, { + {% endif %} + "name" : "hadoop_rpc_protection", + "value" : "privacy" + }, { + "name" : "dfs_encrypt_data_transfer", + "value" : "true" + }, { + "name": "hadoop_secure_web_ui", + "value": "true" + } {% else %} "value": "simple" + } {% endif %} - }, { + , { "name": "dfs_ha_fencing_methods", "value": "shell(true)" + }, { + "name" : "hdfs_sentry_sync_path_prefixes", + "variable" : "SENTRY_SYNC_PATH_PREFIXES" }, { - "name": "hadoop_secure_web_ui", - "value": "true" + "name" : "dfs_umaskmode", + "value" : "022" + }, { + "name" : "dfs_permissions_supergroup", + "variable" : "HDFS_SUPERGROUP" + }, { + "name" : "dfs_namenode_acls_enabled", + "value" : "true" + }, { + "name" : "hdfs_sentry_sync_enable", + "value" : "true" + }, { + "name" : "dfs_block_local_path_access_user", + "value" : "impala" + }, { + "name" : "audit_event_log_dir", + "value" : "{{ log_base }}/hadoop-hdfs/audit" + }, { + "name" : "dfs_encrypt_data_transfer_algorithm", + "value" : "AES/CTR/NoPadding" }, { "name": "zookeeper_service", "ref": 
"ZOOKEEPER-1" } + {% if (cdh_tls) %} + , { + "name" : "hdfs_hadoop_ssl_enabled", + "value" : "true" + }, { + "name" : "ssl_server_keystore_location", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "ssl_client_truststore_location", + "value" : "{{ tls.truststore_path }}" + }, { + "name" : "ssl_client_truststore_password", + "value" : "{{ tls.truststore_password }}" + }, { + "name" : "ssl_server_keystore_keypassword", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "ssl_server_keystore_password", + "value" : "{{ tls.keystore_password }}" + } + {% endif %} ], "roleConfigGroups": [ { "refName": "HDFS-1-FAILOVERCONTROLLER-BASE", "roleType": "FAILOVERCONTROLLER", - "configs": [], + "configs": [ { + "name" : "failover_controller_log_dir", + "value" : "{{ log_base }}/hadoop-hdfs" + }, { + "name" : "oom_heap_dump_enabled", + "value" : "false" + } ], "base": true }, { "refName": "HDFS-1-DATANODE-BASE", @@ -40,7 +107,15 @@ "name": "dfs_data_dir_list", "variable": "DFS_DATA_DIR_LIST" } - {% if (krb5_kdc_type == 'ad') or (krb5_kdc_type == 'mit') %} + {% if ((krb5_kdc_type == 'ad') or (krb5_kdc_type == 'mit')) and (cdh_tls == 'true') %} + , { + "name": "dfs_datanode_port", + "value": "10040" + }, { + "name": "dfs_datanode_http_port", + "value": "10060" + } + {% else %} , { "name": "dfs_datanode_port", "value": "1004" @@ -48,7 +123,26 @@ "name": "dfs_datanode_http_port", "value": "1006" } - {% endif %} + {% endif %} + , { + "name" : "oom_heap_dump_enabled", + "value" : "false" + }, { + "name" : "dfs_datanode_max_locked_memory", + "value" : "2610954240" + }, { + "name" : "datanode_log_dir", + "value" : "{{ log_base }}/hadoop-hdfs" + }, { + "name" : "dfs_datanode_data_dir_perm", + "value" : "700" + }, { + "name" : "dfs_datanode_failed_volumes_tolerated", + "value" : "3" + }, { + "name" : "datanode_java_heapsize", + "value" : "2147483648" + } ], "base": true }, { @@ -67,16 +161,37 @@ "configs": [ { "name": "dfs_federation_namenode_nameservice", - "value": "nameservice1" + "variable": "NAMESERVICE" }, { "name": "dfs_namenode_quorum_journal_name", - "value": "nameservice1" + "variable": "NAMESERVICE" }, { "name": "autofailover_enabled", "value": "true" }, { "name": "dfs_name_dir_list", "variable": "DFS_NAME_DIR_LIST" + }, { + "name" : "dfs_namenode_servicerpc_address", + "value" : "8022" + }, { + "name" : "dfs_namenode_service_handler_count", + "value" : "63" + }, { + "name" : "dfs_namenode_handler_count", + "value" : "63" + }, { + "name" : "namenode_java_heapsize", + "value" : "21474836480" + }, { + "name" : "fs_trash_interval", + "value" : "5760" + }, { + "name" : "namenode_log_dir", + "value" : "{{ log_base }}/hadoop-hdfs" + }, { + "name" : "dfs_safemode_min_datanodes", + "value" : "0" } ], "base": true @@ -87,13 +202,41 @@ { "name": "dfs_journalnode_edits_dir", "variable": "DFS_JOURNALNODE_EDITS_DIR" + }, { + "name" : "journalnode_log_dir", + "value" : "{{ log_base }}/hadoop-hdfs" + }, { + "name" : "journalNode_java_heapsize", + "value" : "268435456" } ], "base": true }, { "refName": "HDFS-1-HTTPFS-BASE", "roleType": "HTTPFS", - "configs": [], + "configs": [{ + "name" : "httpfs_log_dir", + "value" : "{{ log_base }}/hadoop-httpfs" + } + {% if (cdh_tls) %} + , { + "name" : "httpfs_use_ssl", + "value" : "true" + }, { + "name" : "httpfs_https_keystore_file", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "httpfs_https_truststore_file", + "value" : "{{ tls.truststore_path }}" + }, { + "name" : "httpfs_https_keystore_password", + "value" : "{{ tls.keystore_password }}" + }, { + 
"name" : "httpfs_https_truststore_password", + "value" : "{{ tls.truststore_password }}" + } + {% endif %} + ], "base": true }, { "refName": "HDFS-1-NFSGATEWAY-BASE", @@ -103,7 +246,10 @@ }, { "refName": "HDFS-1-BALANCER-BASE", "roleType": "BALANCER", - "configs": [], + "configs": [{ + "name" : "balancer_log_dir", + "value" : "{{ log_base }}/hadoop-hdfs" + } ], "base": true }, { "refName": "HDFS-1-GATEWAY-BASE", @@ -116,11 +262,5 @@ ], "base": true } - ], - "roles": [ - { - "refName": "HDFS-HTTPFS-1", - "roleType": "HTTPFS" - } ] } diff --git a/roles/cdh/templates/hive.j2 b/roles/cdh/templates/hive.j2 index dd336cf..f364309 100644 --- a/roles/cdh/templates/hive.j2 +++ b/roles/cdh/templates/hive.j2 @@ -3,9 +3,6 @@ "serviceType": "HIVE", "serviceConfigs": [ { - "name": "hive_metastore_database_host", - "value": "{{ hostvars[db_hostname]['inventory_hostname'] }}" - }, { "name": "hive_metastore_database_type", "value": "{{ databases.metastore.type }}" }, { @@ -18,6 +15,37 @@ "name": "hive_metastore_database_password", "value": "{{ databases.metastore.pass }}" }, { + "name": "hive_metastore_database_host", + "value": "{{ databases.metastore.host }}" + }, { + "name": "hive_metastore_database_port", + "value": "{{ databases.metastore.port }}" + }, { + "name" : "audit_event_log_dir", + "value" : "{{ log_base }}/hive/audit" + }, { + "name" : "lineage_event_log_dir", + "value" : "{{ log_base }}/hive/lineage" + } + {% if (cdh_tls) %} + , { + "name" : "hiveserver2_enable_ssl", + "value" : "true" + }, { + "name" : "hiveserver2_keystore_path", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "hiveserver2_truststore_file", + "value" : "{{ tls.truststore_path }}" + }, { + "name" : "hiveserver2_keystore_password", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "hiveserver2_truststore_password", + "value" : "{{ tls.truststore_password }}" + } + {% endif %} + , { "name": "spark_on_yarn_service", "ref": "SPARK_ON_YARN-1" }, { @@ -29,6 +57,10 @@ }, { "name": "sentry_service", "ref": "SENTRY-1" + }, { + "name": "hbase_service", + "ref": "HBASE-1" + } ], "roleConfigGroups": [ @@ -37,31 +69,73 @@ "roleType": "GATEWAY", "configs": [], "base": true - }, { - "refName": "HIVE-1-WEBHCAT-BASE", - "roleType": "WEBHCAT", - "configs": [ - { - "name": "hive_webhcat_address_port", - "value": "7272" - } - ], - "base": true }, { "refName": "HIVE-1-HIVEMETASTORE-BASE", "roleType": "HIVEMETASTORE", - "configs": [], + "configs": [{ + "name" : "metastore_canary_health_enabled", + "value" : "false" + }, { + "name" : "hive_log_dir", + "value" : "{{ log_base }}/hive" + }, { + "name" : "hive_metastore_server_max_message_size", + "value" : "859832320" + }, { + "name" : "oom_heap_dump_enabled", + "value" : "false" + }, { + "name" : "hive_metastore_delegation_token_store", + "value" : "org.apache.hadoop.hive.thrift.DBTokenStore" + }, { + "name" : "hive_enable_db_notification", + "value" : "true" + } ], "base": true }, { "refName": "HIVE-1-HIVESERVER2-BASE", "roleType": "HIVESERVER2", - "configs": [ - { - "name": "hiveserver2_enable_impersonation", - "value": "false" - } - ], + "configs": [ { + "name" : "hiveserver2_spark_yarn_executor_memory_overhead", + "value" : "107" + }, { + "name" : "hiveserver2_webui_port", + "value" : "0" + }, { + "name" : "hive_server2_logging_operation_log_location", + "value" : "{{ log_base }}/hive/operation_logs" + }, { + "name" : "hive_log_dir", + "value" : "{{ log_base }}/hive" + } , { + "name" : "hiveserver2_load_balancer", + "value" : "{{ 
hostvars[groups['haproxy'][0]]['inventory_hostname'] }}:10000" + }, { + "name" : "hiveserver2_spark_driver_memory", + "value" : "966367641" + }, { + "name" : "hiveserver2_java_heapsize", + "value" : "8589934592" + }, { + "name" : "hiveserver2_enable_impersonation", + "value" : "false" + }, { + "name" : "oom_heap_dump_enabled", + "value" : "false" + }, { + "name" : "hiveserver2_idle_session_timeout", + "value" : "86400000" + }, { + "name" : "hiveserver2_spark_executor_cores", + "value" : "6" + }, { + "name" : "hiveserver2_spark_yarn_driver_memory_overhead", + "value" : "102" + }, { + "name" : "hiveserver2_spark_executor_memory", + "value" : "1016007228" + } ], "base": true } ] -} \ No newline at end of file +} diff --git a/roles/cdh/templates/host.j2 b/roles/cdh/templates/host.j2 index 003743a..9d662af 100644 --- a/roles/cdh/templates/host.j2 +++ b/roles/cdh/templates/host.j2 @@ -1,5 +1,5 @@ [ { - "refName": "HostTemplate-Gateway", + "refName": "HostTemplate-GatewayLB", "cardinality": 1, "roleConfigGroupsRefNames": [ "HDFS-1-GATEWAY-BASE", @@ -10,7 +10,30 @@ "HUE-1-KT_RENEWER-BASE", {% endif %} "OOZIE-1-OOZIE_SERVER-BASE", + "HIVE-1-HIVESERVER2-BASE", "SPARK_ON_YARN-1-GATEWAY-BASE", + "HBASE-1-HBASERESTSERVER-BASE", + "HBASE-1-HBASETHRIFTSERVER-BASE", + "HUE-1-HUE_LOAD_BALANCER-BASE", + "SENTRY-1-GATEWAY-BASE", + "SOLR-1-GATEWAY-BASE", + "YARN-1-GATEWAY-BASE" ] +}, { + "refName": "HostTemplate-GatewayEdge", + "cardinality": 1, + "roleConfigGroupsRefNames": [ + "HDFS-1-GATEWAY-BASE", + "HDFS-1-HTTPFS-BASE", + "HIVE-1-GATEWAY-BASE", + "HUE-1-HUE_SERVER-BASE", + {% if (krb5_kdc_type == 'ad') or (krb5_kdc_type == 'mit') %} + "HUE-1-KT_RENEWER-BASE", + {% endif %} + "SPARK_ON_YARN-1-GATEWAY-BASE", + "HBASE-1-HBASERESTSERVER-BASE", + "HBASE-1-HBASETHRIFTSERVER-BASE", + "SENTRY-1-GATEWAY-BASE", + "SOLR-1-GATEWAY-BASE", "YARN-1-GATEWAY-BASE" ] }, { "refName": "HostTemplate-Master1", @@ -19,11 +42,11 @@ "HDFS-1-NAMENODE-BASE", "HDFS-1-FAILOVERCONTROLLER-BASE", "HDFS-1-JOURNALNODE-BASE", - "HDFS-1-BALANCER-BASE", - "SENTRY-1-GATEWAY-BASE", "SENTRY-1-SENTRY_SERVER-BASE", "ZOOKEEPER-1-SERVER-BASE", - "HIVE-1-GATEWAY-BASE" ] + "YARN-1-RESOURCEMANAGER-BASE", + "HBASE-1-MASTER-BASE", + "HIVE-1-HIVEMETASTORE-BASE" ] }, { "refName": "HostTemplate-Master2", "cardinality": 1, @@ -31,28 +54,54 @@ "HDFS-1-NAMENODE-BASE", "HDFS-1-FAILOVERCONTROLLER-BASE", "HDFS-1-JOURNALNODE-BASE", + "SENTRY-1-SENTRY_SERVER-BASE", "ZOOKEEPER-1-SERVER-BASE", - "HIVE-1-HIVESERVER2-BASE", - "HIVE-1-HIVEMETASTORE-BASE", - "HIVE-1-GATEWAY-BASE", - "SPARK_ON_YARN-1-GATEWAY-BASE" ] + "YARN-1-RESOURCEMANAGER-BASE", + "HBASE-1-MASTER-BASE", + "HIVE-1-HIVEMETASTORE-BASE" ] }, { "refName": "HostTemplate-Master3", "cardinality": 1, "roleConfigGroupsRefNames": [ "HDFS-1-JOURNALNODE-BASE", + "HDFS-1-BALANCER-BASE", "ZOOKEEPER-1-SERVER-BASE", - "HIVE-1-GATEWAY-BASE", "IMPALA-1-CATALOGSERVER-BASE", "IMPALA-1-STATESTORE-BASE", "SPARK_ON_YARN-1-SPARK_YARN_HISTORY_SERVER-BASE", - "YARN-1-JOBHISTORY-BASE", - "YARN-1-RESOURCEMANAGER-BASE" ] + "KS_INDEXER-1-HBASE_INDEXER-BASE", + "HIVE-1-GATEWAY-BASE", + "YARN-1-JOBHISTORY-BASE" ] }, { "refName": "HostTemplate-Workers", "cardinality": 3, "roleConfigGroupsRefNames": [ "HDFS-1-DATANODE-BASE", "YARN-1-NODEMANAGER-BASE", - "IMPALA-1-IMPALAD-BASE" ] -} ] \ No newline at end of file + "IMPALA-1-IMPALAD-BASE", + "HBASE-1-REGIONSERVER-BASE", + "SOLR-1-SOLR_SERVER-BASE", + "HIVE-1-GATEWAY-BASE", + "SPARK_ON_YARN-1-GATEWAY-BASE" ] +} +{% if (krb5_kdc_type == 'neverever') %}, { + "refName" : 
"HostTemplate-CDSWMaster", + "cardinality" : 0, + "roleConfigGroupsRefNames" : [ + "CDSW-1-CDSW_APPLICATION-BASE", + "CDSW-1-CDSW_DOCKER-BASE", + "CDSW-1-CDSW_MASTER-BASE", + "HDFS-1-GATEWAY-BASE", + "HIVE-1-GATEWAY-BASE", + "YARN-1-GATEWAY-BASE" ] +}, { + "refName" : "HostTemplate-CDSWWorker", + "cardinality" : 0, + "roleConfigGroupsRefNames" : [ + "CDSW-1-CDSW_DOCKER-BASE", + "CDSW-1-CDSW_WORKER-BASE", + "HDFS-1-GATEWAY-BASE", + "HIVE-1-GATEWAY-BASE", + "YARN-1-GATEWAY-BASE" ] +}{% endif %} +] diff --git a/roles/cdh/templates/hue.j2 b/roles/cdh/templates/hue.j2 index 1a391ac..ca5f824 100644 --- a/roles/cdh/templates/hue.j2 +++ b/roles/cdh/templates/hue.j2 @@ -1,59 +1,179 @@ { - "refName": "HUE-1", - "serviceType": "HUE", - "serviceConfigs": [ - { - "name": "database_host", - "value": "{{ hostvars[db_hostname]['inventory_hostname'] }}" - }, { - "name": "database_type", + "refName" : "HUE-1", + "serviceType" : "HUE", + "serviceConfigs" : [ { + "name" : "database_type", "value": "{{ databases.hue.type }}" }, { - "name": "database_name", - "value": "{{ databases.hue.name }}" + "name" : "usage_data_collection_enable", + "value" : "false" + }, { + "name" : "database_host", + "value": "{{ databases.hue.host }}" + }, { + "name" : "database_port", + "value": "{{ databases.hue.port }}" + }, { + "name" : "cherrypy_server_threads", + "value" : "300" }, { "name": "database_user", "value": "{{ databases.hue.user }}" }, { - "name": "database_password", + "name" : "database_password", "value": "{{ databases.hue.pass }}" }, { "name": "oozie_service", "ref": "OOZIE-1" - }, { - "name": "impala_service", - "ref": "IMPALA-1" - }, { - "name": "hive_service", - "ref": "HIVE-1" }, { "name": "sentry_service", "ref": "SENTRY-1" - }, { - "name": "hue_service_safety_valve", - "value": "[impala]\nserver_port=21051\n\n[beeswax]\ndownload_cell_limit=10" }, { "name": "zookeeper_service", "ref": "ZOOKEEPER-1" }, { - "name": "hue_webhdfs", - "ref": "HDFS-HTTPFS-1" + "name" : "nt_domain", + "value" : "{{ ldap_udom.domain|upper }}" + }, { + "name" : "group_member_attr", + "value" : "{{ ldap_udom.group_member_attr }}" + }, { "name" : "hue_service_safety_valve", + "value" : "[hbase]\nhbase_conf_dir={% raw %}{{HBASE_CONF_DIR}}{% endraw %}\nthrift_transport=buffered\n[desktop]\nleaflet_tile_layer=\"{{ cdh_services | json_query('[?type==`hue`].leaflet_tile_layer') | first }}\"\nleaflet_tile_layer_attribution=\"{{ cdh_services | json_query('[?type==`hue`].mapping_attribution') | first }}\"\nsecure_content_security_policy=\"{{ cdh_services | json_query('[?type==`hue`].secure_content_security_policy') | first }}\"\ndisable_hue_3=true\n[[database]]\n{% if (database_type == 'oracle') %}options={\"threaded\":true}{% endif %}\n[[ldap]]\nsync_groups_on_login=true\n[[custom]]\nbanner_top_html='{{ lookup('file', 'hue-banner.txt') }}'\n[useradmin]\nhome_dir_permissions=0700" + }, { + "name" : "usage_data_collection_enable", + "value" : "false" + }, { + "name" : "search_bind_authentication", + "value" : "true" + }, { + "name" : "user_name_attr", + "value" : "{{ ldap_udom.user_name_attr }}" + }, { + "name" : "cherrypy_server_threads", + "value" : "300" + }, { + "name" : "group_name_attr", + "value" : "{{ ldap_udom.group_name_attr }}" + }, { + "name" : "bind_dn", + "value" : "{{ ldap_udom.bind_dn }}" + }, { + "name" : "ldap_cert", + "value" : "" + }, { + "name" : "base_dn", + "value" : "{{ ldap_udom.base_dn }}" + }, { + "name" : "time_zone", + "variable" : "HUE_TIMEZONE" + }, { + "name" : "audit_event_log_dir", + "value" : "{{ log_base 
}}/hue/audit" + }, { + "name" : "hive_service", + "ref" : "HIVE-1" + }, { + "name" : "group_filter", + "value" : "{{ ldap_udom.group_filter }}" + }, { + "name" : "user_filter", + "value" : "{{ ldap_udom.user_filter }}" + }, { + "name" : "impala_service", + "ref" : "IMPALA-1" + }, { + "name" : "enable_navmetadataserver", + "value" : "false" + }, { + "name" : "hue_service_env_safety_valve", + "value" : "KRB5_KTNAME=/var/lib/hue/hue.keytab" + }, { + "name" : "bind_password", + "value" : "{{ ldap_udom.bind_pass }}" + }, { + "name" : "ldap_url", + "value" : "{{ ldap_udom.url }}" + }, { + "name" : "auth_backend", + "value" : "desktop.auth.backend.LdapBackend,desktop.auth.backend.AllowFirstUserDjangoBackend" + }, { + "name" : "hbase_service", + "ref" : "HBASE-1" + }, { + "name" : "database_name", + "value": "{{ databases.hue.name }}" + }, { + "name" : "solr_service", + "ref" : "SOLR-1" } ], "roleConfigGroups": [ { "refName": "HUE-1-HUE_SERVER-BASE", "roleType": "HUE_SERVER", - "configs": [], + "configs": [ { + "name" : "hue_http_port", + "value" : "8889" + }, { + "name" : "hue_server_log_dir", + "value" : "{{ log_base }}/hue" + }, { + "name" : "banner_html", + "value" : "Overridden by safety valve" + {% if (cdh_tls) %} + }, { + "name" : "ssl_private_key", + "value" : "{{ tls.private_key }}" + }, { + "name" : "ssl_certificate", + "value" : "{{ tls.tls_cert }}" + }, { + "name" : "ssl_private_key_password", + "value" : "{{ tls.keystore_password}}" + }, { + "name" : "ssl_cacerts", + "value" : "{{ tls.cert_chain }}" + }, { + "name" : "ssl_enable", + "value" : "true" + {% endif %} + } ], "base": true } {% if (krb5_kdc_type == 'ad') or (krb5_kdc_type == 'mit') %} , { "refName": "HUE-1-KT_RENEWER-BASE", "roleType": "KT_RENEWER", - "configs": [], + "configs": [{ + "name" : "kt_renewer_log_dir", + "value" : "{{ log_base }}/hue" + }], "base": true } {% endif %} - ] -} \ No newline at end of file + , { + "refName" : "HUE-1-HUE_LOAD_BALANCER-BASE", + "roleType" : "HUE_LOAD_BALANCER", + "configs" : [ + {% if (cdh_tls) %} + { + "name" : "passphrasefile_location", + "value" : "{{ tls.key_password_file}}" + }, { + "name" : "ssl_certificate", + "value" : "{{ tls.tls_cert }}" + }, { + "name" : "ssl_certificate_key", + "value" : "{{ tls.private_key }}" + }, + {% endif %} + { + "name" : "listen", + "value" : "8888" + }, { + "name" : "hue_load_balancer_log_dir", + "value" : "{{ log_base }}/hue-httpd" + } ], + "base" : true + } ] +} diff --git a/roles/cdh/templates/impala.j2 b/roles/cdh/templates/impala.j2 index bed7296..717cba9 100644 --- a/roles/cdh/templates/impala.j2 +++ b/roles/cdh/templates/impala.j2 @@ -1,73 +1,167 @@ { - "refName": "IMPALA-1", - "serviceType": "IMPALA", - "serviceConfigs": [ - { - "name": "impala_cmd_args_safety_valve", - "value": "" + "refName" : "IMPALA-1", + "serviceType" : "IMPALA", + "serviceConfigs" : [ { + "name" : "hbase_service", + "ref" : "HBASE-1" }, { - "name": "hdfs_service", - "ref": "HDFS-1" + "name" : "admission_control_enabled", + "value" : "true" }, { - "name": "admission_control_enabled", - "value": "true" + "name" : "hdfs_service", + "ref" : "HDFS-1" }, { - "name": "sentry_service", - "ref": "SENTRY-1" + "name" : "hive_service", + "ref" : "HIVE-1" }, { - "name": "kerberos_reinit_interval", - "value": "10" + "name" : "all_admission_control_enabled", + "value" : "false" }, { - "name": "enable_core_dump", - "value": "true" + "name" : "sentry_service", + "ref" : "SENTRY-1" + } + {% if (cdh_tls) %} + ,{ + "name" : "ssl_client_ca_certificate", + "value" : "{{ tls.cert_chain }}" + 
}, { + "name" : "ssl_private_key", + "value" : "{{ tls.private_key }}" + }, { + "name" : "ssl_server_certificate", + "value" : "{{ tls.tls_cert }}" }, { - "name": "hive_service", - "ref": "HIVE-1" + "name" : "client_services_ssl_enabled", + "value" : "true" }, { - "name": "all_admission_control_enabled", - "value": "true" + "name" : "ssl_private_key_password", + "value" : "{{ tls.keystore_password }}" } - ], - "roleConfigGroups": [ - { - "refName": "IMPALA-1-IMPALAD-BASE", - "roleType": "IMPALAD", - "configs": [ - { - "name": "enable_audit_event_log", - "value": "true" - }, { - "name": "scratch_dirs", - "variable": "SCRATCH_DIRS" - }, { - "name": "logbuflevel", - "value": "-1" - } + {% endif %} + ], + "roleConfigGroups" : [ { + "refName" : "IMPALA-1-IMPALAD-BASE", + "roleType" : "IMPALAD", + "configs" : [ { + "name" : "disk_spill_encryption", + "value" : "true" + }, { + "name" : "log_dir", + "value" : "{{ log_base }}/impalad" + }, { + "name" : "audit_event_log_dir", + "value" : "{{ log_base }}/impalad/audit" + }, { + "name" : "lineage_event_log_dir", + "value" : "{{ log_base }}/impalad/lineage" + }, { + "name" : "core_dump_dir", + "value" : "{{ log_base }}/impalad" + }, { + "name" : "minidump_path", + "value" : "{{ log_base }}/impala-minidumps" + }, { + "name" : "default_query_options", + "value" : "query_timeout_s=0" + }, { + "name" : "impalad_memory_limit", + "value" : "75161927680" + }, { + "name" : "enable_audit_event_log", + "value" : "true" + }, { + "name" : "scratch_dirs", + "variable" : "IMPALA_SCRATCH_DIRS" + }, { + "name" : "impalad_load_balancer", + "value" : "{{ hostvars[groups['haproxy'][0]]['inventory_hostname'] }}:25004" + }, { + "name" : "impalad_cmd_args_safety_valve", + "value" : "-inc_stats_size_limit_bytes=800000000" + } + {% if (cdh_tls) %} + , { + "name" : "webserver_private_key_password_cmd", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "webserver_certificate_file", + "value" : "{{ tls.tls_cert }}" + }, { + "name" : "webserver_private_key_file", + "value" : "{{ tls.private_key }}" + } + {% endif %} ], - "base": true + "base" : true }, { - "refName": "IMPALA-1-CATALOGSERVER-BASE", - "roleType": "CATALOGSERVER", - "configs": [ - { - "name": "logbuflevel", - "value": "-1" - }, { - "name": "catalogd_embedded_jvm_heapsize", - "value": "603979776" - } + "refName" : "IMPALA-1-STATESTORE-BASE", + "roleType" : "STATESTORE", + "configs" : [ { + "name" : "statestore_cmd_args_safety_valve", + "value" : "--convert_legacy_hive_parquet_utc_timestamps=true" + }, { + "name" : "log_dir", + "value" : "{{ log_base }}/statestore" + }, { + "name" : "core_dump_dir", + "value" : "{{ log_base }}/statestore" + }, { + "name" : "minidump_path", + "value" : "{{ log_base }}/impala-minidumps" + } + {% if (cdh_tls) %} + , { + "name" : "webserver_private_key_password_cmd", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "webserver_private_key_file", + "value" : "{{ tls.private_key }}" + }, { + "name" : "webserver_certificate_file", + "value" : "{{ tls.tls_cert }}" + } + {% endif %} ], - "base": true + "base" : true }, { - "refName": "IMPALA-1-STATESTORE-BASE", - "roleType": "STATESTORE", - "configs": [ - { - "name": "logbuflevel", - "value": "-1" - } - ], - "base": true - } - ] -} \ No newline at end of file + "refName" : "IMPALA-1-CATALOGSERVER-BASE", + "roleType" : "CATALOGSERVER", + "configs" : [ { + "name" : "catalogd_embedded_jvm_heapsize", + "value" : "33800047616" + }, { + "name" : "log_dir", + "value" : "{{ log_base }}/catalogd" + }, { + "name" : "core_dump_dir", + 
"value" : "{{ log_base }}/catalogd" + }, { + "name" : "minidump_path", + "value" : "{{ log_base }}/impala-minidumps" + }, { + "name" : "oom_heap_dump_enabled", + "value" : "false" + }, { + "name" : "load_catalog_in_background", + "value" : "true" + } + , { + "name" : "catalogd_cmd_args_safety_valve", + "value" : "-inc_stats_size_limit_bytes=800000000" + } + {% if (cdh_tls) %} + ,{ + "name" : "webserver_private_key_password_cmd", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "webserver_private_key_file", + "value" : "{{ tls.private_key }}" + }, { + "name" : "webserver_certificate_file", + "value" : "{{ tls.tls_cert }}" + } + {% endif %} + ], + "base" : true + } ] + } diff --git a/roles/cdh/templates/instantiator.j2 b/roles/cdh/templates/instantiator.j2 index ee8ba75..cda3c0b 100644 --- a/roles/cdh/templates/instantiator.j2 +++ b/roles/cdh/templates/instantiator.j2 @@ -6,7 +6,7 @@ {% if 'host_template' in hostvars[host] %} {{ host_joiner() }} { - "hostName" : "{{ scm_host_names[host] }}", + "hostName" : "{{ host }}", "hostTemplateRefName" : "{{ hostvars[host]['host_template'] }}" {% if 'role_ref_names' in hostvars[host] %} ,"roleRefNames" : [ "{{ hostvars[host]['role_ref_names'] }}" ] diff --git a/roles/cdh/templates/kafka.j2 b/roles/cdh/templates/kafka.j2 new file mode 100644 index 0000000..6780aeb --- /dev/null +++ b/roles/cdh/templates/kafka.j2 @@ -0,0 +1,46 @@ +{ + "refName" : "KAFKA-1", + "serviceType" : "KAFKA", + "serviceConfigs" : [ { + "name" : "default.replication.factor", + "value" : "1" + }, { + "name" : "zookeeper.chroot", + "value" : "/kafka" + }, { + "name" : "sentry.kafka.caching.ttl.ms", + "value" : "1" + }, { + "name" : "sentry_service", + "value" : "SENTRY-1" + }, { + "name": "kerberos.auth.enable", + {% if (krb5_kdc_type == 'ad') or (krb5_kdc_type == 'mit') %} + "value": "true" + {% else %} + "value": "false" + {% endif %} + }, { + "name" : "zookeeper_service", + "ref" : "ZOOKEEPER-1" + } ], + "roleConfigGroups" : [ { + "refName" : "KAFKA-1-GATEWAY-BASE", + "roleType" : "GATEWAY", + "base" : true + }, { + "refName" : "KAFKA-1-KAFKA_BROKER-BASE", + "roleType" : "KAFKA_BROKER", + "configs" : [ { + "name" : "log.retention.ms", + "value" : "600000" + }, { + "name" : "kafka.properties_role_safety_valve", + "value" : "offsets.retention.minutes=40320\nsasl.kerberos.principal.to.local.rules=RULE:[1:$1@$0](.*@{{ trusted_realm|upper }})s/@{{ trusted_realm|upper }}// , RULE:[2:$1@$0](.*@{{ trusted_realm|upper }})s/@{{ trusted_realm|upper }}//,DEFAULT" + }, { + "name" : "super.users", + "value" : "kafka" + } ], + "base" : true + } ] + } diff --git a/roles/cdh/templates/ks_indexer.j2 b/roles/cdh/templates/ks_indexer.j2 new file mode 100644 index 0000000..c3cf530 --- /dev/null +++ b/roles/cdh/templates/ks_indexer.j2 @@ -0,0 +1,33 @@ +{ + "refName" : "KS_INDEXER-1", + "serviceType" : "KS_INDEXER", + "serviceConfigs" : [ { + "name" : "hbase_service", + "ref" : "HBASE-1" + }, { + "name" : "solr_service", + "ref" : "SOLR-1" + {% if (krb5_kdc_type == 'ad') or (krb5_kdc_type == 'mit') %} + }, { + "name" : "hbase_indexer_security_authentication", + "value" : "kerberos" + {% endif %} + {% if (cdh_tls) %} + }, { + "name" : "keystore_indexer_truststore_file", + "value" : "{{ tls.truststore_path }}" + {% endif %} + } ], + "roleConfigGroups" : [ { + "refName" : "KS_INDEXER-1-HBASE_INDEXER-BASE", + "roleType" : "HBASE_INDEXER", + "configs" : [ { + "name" : "hbase_indexer_log_dir", + "value" : "{{ log_base }}/hbase-solr" + }, { + "name" : "oom_heap_dump_enabled", + "value" : "false" + 
} ], + "base" : true + } ] + } diff --git a/roles/cdh/templates/oozie.j2 b/roles/cdh/templates/oozie.j2 index 29a0078..a8c0c5f 100644 --- a/roles/cdh/templates/oozie.j2 +++ b/roles/cdh/templates/oozie.j2 @@ -11,6 +11,11 @@ }, { "name": "mapreduce_yarn_service", "ref": "YARN-1" + {% if (cdh_tls) %} + }, { + "name" : "oozie_use_ssl", + "value" : "true" + {% endif %} }, { "name": "spark_on_yarn_service", "ref": "SPARK_ON_YARN-1" @@ -26,7 +31,7 @@ "configs": [ { "name": "oozie_database_host", - "value": "{{ hostvars[db_hostname]['inventory_hostname'] }}" + "value": "{{ databases.oozie.host }}:{{ databases.oozie.port }}" }, { "name": "oozie_database_type", "value": "{{ databases.oozie.type }}" @@ -42,12 +47,37 @@ }, { "name": "oozie_workflow_extension_schemas", "value": ",ssh-action-0.1.xsd,hive-action-0.3.xsd,sqoop-action-0.3.xsd,shell-action-0.2.xsd,shell-action-0.1.xsd" + }, { + "name" : "oozie_log_dir", + "value" : "{{ log_base }}/oozie" + }, { + "name" : "oozie_email_from_address", + "variable" : "OOZIE_FROM_EMAIL_ADDRESS" + }, { + "name" : "oozie_email_smtp_host", + "variable" : "OOZIE_EMAIL_SMTP_HOST" + }, { + "name" : "oozie_https_port", + "variable" : "OOZIE_HTTPS_PORT" + {% if (cdh_tls) %} + }, { + "name" : "oozie_https_keystore_file", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "oozie_https_truststore_file", + "value" : "{{ tls.truststore_path }}" + }, { + "name" : "oozie_https_keystore_password", + "value" : "{{ tls.keystore_password }}" + {% endif %} + {% if (krb5_kdc_type == 'ad') or (krb5_kdc_type == 'mit') %} }, { "name": "oozie_config_safety_valve", - "value": "\n\noozie.action.launcher.mapreduce.job.ubertask.enable\nfalse\n" + "value": "\n\noozie.service.AuthorizationService.security.enabled\ntrue\n" + {% endif %} } ], "base": true } ] -} \ No newline at end of file +} diff --git a/roles/cdh/templates/sentry.j2 b/roles/cdh/templates/sentry.j2 index 840bc69..80d0d08 100644 --- a/roles/cdh/templates/sentry.j2 +++ b/roles/cdh/templates/sentry.j2 @@ -2,9 +2,15 @@ "refName": "SENTRY-1", "serviceType": "SENTRY", "serviceConfigs": [ - { + { + "name" : "sentry_service_admin_group", + "variable" : "SENTRY_ADMIN_GROUP" + }, { + "name" : "sentry_service_allow_connect", + "variable" : "SENTRY_ALLOW_CONNECT" + }, { "name": "sentry_server_database_host", - "value": "{{ hostvars[db_hostname]['inventory_hostname'] }}" + "value": "{{ databases.sentry.host }}" }, { "name": "sentry_server_database_type", "value": "{{ databases.sentry.type }}" @@ -17,12 +23,21 @@ }, { "name": "sentry_server_database_password", "value": "{{ databases.sentry.pass }}" + }, { + "name": "sentry_server_database_port", + "value": "{{ databases.sentry.port }}" }, { "name": "zookeeper_service", "ref": "ZOOKEEPER-1" }, { "name": "hdfs_service", "ref": "HDFS-1" + }, { + "name" : "audit_event_log_dir", + "value" : "{{ log_base }}/sentry/audit" + }, { + "name" : "process_username", + "variable" : "SENTRY_USER" } ], "roleConfigGroups": [ @@ -33,8 +48,11 @@ }, { "refName": "SENTRY-1-SENTRY_SERVER-BASE", "roleType": "SENTRY_SERVER", - "configs": [], + "configs": [ { + "name" : "sentry_server_log_dir", + "value" : "{{ log_base }}/sentry" + } ], "base": true } ] -} \ No newline at end of file +} diff --git a/roles/cdh/templates/solr.j2 b/roles/cdh/templates/solr.j2 new file mode 100644 index 0000000..c11ce39 --- /dev/null +++ b/roles/cdh/templates/solr.j2 @@ -0,0 +1,98 @@ +{ + "refName" : "SOLR-1", + "serviceType" : "SOLR", + "serviceConfigs" : [ { + "name" : "hdfs_service", + "ref" : "HDFS-1" + }, { + "name" : 
"zookeeper_service", + "ref" : "ZOOKEEPER-1" + }, { + "name" : "audit_event_log_dir", + "value" : "{{ log_base }}/solr/audit" + }, { + "name" : "solr_env_safety_valve", + "value" : "HTTPFS_MAX_HTTP_HEADER_SIZE=500000000\nJAVA_NAMING_REFERRAL=follow\nGC_TUNE=-XX:+UseG1GC" + }, { + "name" : "solr_core_site_safety_valve", + "value" : "" + }, { + "name" : "solr_sentry_safety_valve", + "value" : "sentry.provider.backend.generic.cache.enabledtrueEnables caching so that each query isn't going back to Sentry all the timesentry.provider.backend.generic.cache.ttl.ms600000Set cache timeout to 10 minutes" + }, { + "name" : "navigator_audit_event_filter", + "value" : "{\n \"rules\": [\n {\n \"action\": \"discard\",\n \"fields\": [\n {\n \"name\": \"operation\",\n \"match\": \"add\"\n }\n ]\n },\n {\n \"action\": \"discard\",\n \"fields\": [\n {\n \"name\": \"operation\",\n \"match\": \"finish\"\n }\n ]\n }\n ],\n \"defaultAction\": \"accept\"\n}" + }, { + "name": "solr_security_authentication", + {% if (krb5_kdc_type == 'ad') or (krb5_kdc_type == 'mit') %} + "value": "kerberos" + }, { + "name" : "sentry_service", + "value" : "sentry" + }, { + "name" : "navigator_audit_enabled", + "value" : "true" + {% else %} + "value": "simple" + }, { + "name" : "sentry_service", + "value" : "none" + {% endif %} + } + {% if (cdh_tls) %} + , { + "name" : "solr_https_keystore_password", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "solr_https_keystore_file", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "solr_use_ssl", + "value" : "true" + }, { + "name" : "solr_https_truststore_file", + "value" : "{{ tls.truststore_path }}" + }, { + "name" : "solr_https_truststore_password", + "value" : "{{ tls.truststore_password }}" + } + {% endif %} + ], + "roleConfigGroups" : [ { + "refName" : "SOLR-1-SOLR_SERVER-BASE", + "roleType" : "SOLR_SERVER", + "configs" : [ { + "name" : "solr_java_heapsize", + "value" : "53687091200" + }, { + "name" : "solr_log_dir", + "value" : "{{ log_base }}/solr" + }, { + "name" : "log4j_safety_valve", + "value" : "" + }, { + "name" : "solr_java_direct_memory_size", + "value" : "17179869184" + }, { + "name" : "oom_heap_dump_enabled", + "value" : "false" + }, { + "name" : "solr_load_balancer", + "value" : "{{ hostvars[groups['haproxy'][0]]['inventory_hostname'] }}:8985" + }, { + "name" : "solr_java_opts", + "value" : "" + }, { + "name" : "stacks_collection_directory", + "value" : "{{ log_base }}/solr" + }, { + "name" : "stacks_collection_enabled", + "value" : "true" + } ], + "base" : true + }, { + "refName" : "SOLR-1-GATEWAY-BASE", + "roleType" : "GATEWAY", + "base" : true + } ] + } diff --git a/roles/cdh/templates/spark.j2 b/roles/cdh/templates/spark.j2 index ee6a581..bcf545d 100644 --- a/roles/cdh/templates/spark.j2 +++ b/roles/cdh/templates/spark.j2 @@ -5,18 +5,57 @@ { "name": "yarn_service", "ref": "YARN-1" + }, { + "name" : "spark_authenticate", + "value" : "true" + }, { + "name" : "spark-conf/spark-env.sh_service_safety_valve", + "value" : "export PYSPARK_PYTHON=/opt/cloudera/parcels/Anaconda/bin/python\nexport PYSPARK_DRIVER_PYTHON=/opt/cloudera/parcels/Anaconda/bin/python" } ], "roleConfigGroups": [ { "refName": "SPARK_ON_YARN-1-GATEWAY-BASE", "roleType": "GATEWAY", + "configs" : [ { + "name" : "lineage_event_log_dir", + "value" : "{{ log_base }}/spark/lineage" + }, { + "name" : "spark-conf/spark-defaults.conf_client_config_safety_valve", + "value" : 
"spark.shuffle.encryption.enabled=true\nspark.shuffle.encryption.keySizeBits=256\nspark.network.sasl.serverAlwaysEncrypt=true\nspark.authenticate.enableSaslEncryption=true" + }, { + "name" : "spark_network_encryption_enabled", + "value" : "true" + }, { + "name" : "spark-conf/spark-env.sh_client_config_safety_valve", + "value" : "export PYSPARK_PYTHON=/opt/cloudera/parcels/Anaconda/bin/python\nexport PYSPARK_DRIVER_PYTHON=/opt/cloudera/parcels/Anaconda/bin/python" + } ], "base": true }, { "refName": "SPARK_ON_YARN-1-SPARK_YARN_HISTORY_SERVER-BASE", "roleType": "SPARK_YARN_HISTORY_SERVER", - "configs": [], + "configs": [ + { + "name" : "log_dir", + "value" : "{{ log_base }}/spark" + }, { + "name" : "history_server_max_heapsize", + "value" : "3221225472" + } + {% if (cdh_tls) %} + , { + "name" : "ssl_server_keystore_location", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "ssl_enabled", + "value" : "true" + }, { + "name" : "ssl_server_keystore_password", + "value" : "{{ tls.keystore_password }}" + } + {% endif %} + ], "base": true } ] -} \ No newline at end of file +} diff --git a/roles/cdh/templates/spark2.j2 b/roles/cdh/templates/spark2.j2 new file mode 100644 index 0000000..dc668df --- /dev/null +++ b/roles/cdh/templates/spark2.j2 @@ -0,0 +1,58 @@ +{ + "refName": "SPARK2_ON_YARN-1", + "serviceType": "SPARK2_ON_YARN", + "serviceConfigs": [ + { + "name" : "spark2-conf/spark-env.sh_service_safety_valve", + "value" : "export PYSPARK_PYTHON=/opt/cloudera/parcels/Anaconda/bin/python\nexport PYSPARK_DRIVER_PYTHON=/opt/cloudera/parcels/Anaconda/bin/python" + }, { + "name": "yarn_service", + "ref": "YARN-1" + }, { + "name" : "hive_service", + "ref" : "HIVE-1" + }, { + "name" : "spark_authenticate", + "value" : "true" + } + ], + "roleConfigGroups": [ + { + "refName": "SPARK2_ON_YARN-1-GATEWAY-BASE", + "roleType": "GATEWAY", + "configs" : [ { + "name" : "spark2-conf/spark-env.sh_client_config_safety_valve", + "value" : "export PYSPARK_PYTHON=/opt/cloudera/parcels/Anaconda/bin/python\nexport PYSPARK_DRIVER_PYTHON=/opt/cloudera/parcels/Anaconda/bin/python" + }, { + "name" : "spark2-conf/spark-defaults.conf_client_config_safety_valve", + "value" : "spark.io.encryption.enabled=true\nspark.network.sasl.serverAlwaysEncrypt=true\nspark.authenticate.enableSaslEncryption=true" + } ], + "base": true + }, { + "refName": "SPARK2_ON_YARN-1-SPARK2_YARN_HISTORY_SERVER-BASE", + "roleType": "SPARK2_YARN_HISTORY_SERVER", + "configs": [ { + "name" : "event_log_cleaner_max_age", + "value" : "1209600" + }, { + "name" : "history_server_max_heapsize", + "value" : "4294967296" + } + {% if (cdh_tls) %} + { + "name" : "ssl_server_keystore_location", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "ssl_enabled", + "value" : "true" + }, { + "name" : "ssl_server_keystore_password", + "value" : "{{ tls.keystore_password }}" + } + + {% endif %} + ], + "base": true + } + ] +} diff --git a/roles/cdh/templates/yarn.j2 b/roles/cdh/templates/yarn.j2 index 31d18dd..fc4a828 100644 --- a/roles/cdh/templates/yarn.j2 +++ b/roles/cdh/templates/yarn.j2 @@ -4,38 +4,75 @@ "serviceConfigs": [ { "name": "cm_yarn_container_usage_job_user", - "value": "cmjobuser" + "variable": "CMJOBUSER" }, { "name": "zookeeper_service", "ref": "ZOOKEEPER-1" }, { "name": "hdfs_service", "ref": "HDFS-1" + }, { + "name" : "yarn_admin_acl", + "variable" : "YARN_ADMIN_ACL" + {% if (krb5_kdc_type == 'ad') or (krb5_kdc_type == 'mit') %} }, { "name": "hadoop_secure_web_ui", "value": "true" + {% endif %} }, { "name": 
"cm_yarn_enable_container_usage_aggregation", "value": "true" + }, { + "name" : "cm_yarn_container_usage_job_pool", + "value" : "root.default" + } + {% if (cdh_tls) %} + , { + "name" : "yarn_service_mapred_safety_valve", + "value" : "mapreduce.job.encrypted-intermediate-datatruetrueEnable encryption for intermediate MapreduceSpillsmapreduce.job.encrypted-intermediate-data-key-size-bits128trueThe key length used to encrypt data spilled to diskmapreduce.job.encrypted-intermediate-data.buffer.kb128trueThe buffer size in Kb for the stream written to disk after encryptionmapreduce.shuffle.ssl.enabledtruetrueEnable YARN encrypted shuffle" + }, { + "name" : "ssl_server_keystore_password", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "ssl_client_truststore_password", + "value" : "{{ tls.truststore_password }}" + }, { + "name" : "ssl_server_keystore_location", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "ssl_server_keystore_keypassword", + "value" : "{{ tls.keystore_password }}" } + {% endif %} ], "roleConfigGroups": [ { "refName": "YARN-1-JOBHISTORY-BASE", "roleType": "JOBHISTORY", - "configs": [], + "configs": [ { + "name" : "oom_heap_dump_enabled", + "value" : "false" + }, { + "name" : "mr2_jobhistory_log_dir", + "value" : "{{ log_base }}/hadoop-mapreduce" + } ], "base": true }, { "refName": "YARN-1-GATEWAY-BASE", "roleType": "GATEWAY", - "configs": [ - { - "name": "mapred_submit_replication", - "value": "1" - }, { - "name": "mapred_reduce_tasks", - "value": "6" - } + "configs": [ { + "name" : "mapreduce_client_config_safety_valve", + "value" : "" + }, { + "name" : "hadoop_job_history_dir", + "value" : "{{ log_base }}/hadoop-mapreduce/history" + }, { + "name" : "mapred_reduce_tasks", + "value" : "368" + }, { + "name" : "mapred_submit_replication", + "value" : "3" + } ], "base": true }, { @@ -48,14 +85,53 @@ }, { "name": "yarn_nodemanager_log_dirs", "variable": "YARN_NODEMANAGER_LOG_DIRS" - } + }, { + "name" : "node_manager_log_dir", + "value" : "{{ log_base }}/hadoop-yarn" + }, { + "name" : "yarn_nodemanager_resource_memory_mb", + "value" : "40960" + }, { + "name" : "yarn_nodemanager_heartbeat_interval_ms", + "value" : "240" + }, { + "name" : "yarn_nodemanager_resource_cpu_vcores", + "value" : "24" + }, { + "name" : "oom_heap_dump_enabled", + "value" : "false" + } ], "base": true }, { "refName": "YARN-1-RESOURCEMANAGER-BASE", "roleType": "RESOURCEMANAGER", - "configs": [], + "configs": [ { + "name" : "resource_manager_log_dir", + "value" : "{{ log_base }}/hadoop-yarn" + }, { + "name" : "yarn_scheduler_maximum_allocation_vcores", + "value" : "24" + }, { + "name" : "resourcemanager_fair_scheduler_preemption", + "value" : "true" + }, { + "name" : "yarn_scheduler_maximum_allocation_mb", + "value" : "32768" + }, { + "name" : "oom_heap_dump_enabled", + "value" : "false" + }, { + "name" : "yarn_scheduler_fair_continuous_scheduling_enabled", + "value" : "true" + }, { + "name" : "resourcemanager_fair_scheduler_assign_multiple", + "value" : "false" + }, { + "name" : "resource_manager_java_heapsize", + "value" : "4294967296" + } ], "base": true } ] -} \ No newline at end of file +} diff --git a/roles/cdh/templates/zookeeper.j2 b/roles/cdh/templates/zookeeper.j2 index f1d54b6..fff4e8f 100644 --- a/roles/cdh/templates/zookeeper.j2 +++ b/roles/cdh/templates/zookeeper.j2 @@ -4,15 +4,41 @@ "serviceConfigs": [ { "name": "zookeeper_datadir_autocreate", - "value": "true" + "value": "false" + {% if (krb5_kdc_type == 'ad') or (krb5_kdc_type == 'mit') %} + }, { + "name" : "enableSecurity", 
+ "value" : "true" + }, { + "name" : "quorum_auth_enable_sasl", + "value" : "true" + {% endif %} } ], "roleConfigGroups": [ { "refName": "ZOOKEEPER-1-SERVER-BASE", "roleType": "SERVER", - "configs": [], + "configs" : [ { + "name" : "zk_server_log_dir", + "value" : "{{ log_base }}/zookeeper" + }, { + "name" : "oom_heap_dump_enabled", + "value" : "false" + }, { + "name" : "maxSessionTimeout", + "value" : "60000" + }, { + "name" : "dataDir", + "variable" : "ZOOKEEPER_EDITS_DIR" + }, { + "name" : "dataLogDir", + "variable" : "ZOOKEEPER_DATA_LOG_DIR" + }, { + "name" : "maxClientCnxns", + "value" : "1200" + } ], "base": true } ] -} \ No newline at end of file +} diff --git a/roles/cdh_teardown/tasks/main.yml b/roles/cdh_teardown/tasks/main.yml new file mode 100644 index 0000000..425ecce --- /dev/null +++ b/roles/cdh_teardown/tasks/main.yml @@ -0,0 +1,92 @@ +--- + + + +- include_vars: "{{ inventory_dir }}/group_vars/cdh_servers.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server_enc.yml" + +- set_fact: cm_api_url={{ "https://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port_tls }}" if scm_web_tls==True else "http://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port }}" }} + +- name: Get Cloudera Manager API version + uri: + url: "{{ cm_api_url }}/api/version" + method: GET + status_code: 200 + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + force_basic_auth: yes + return_content: yes + register: result + +- set_fact: cm_api_url="{{ cm_api_url }}/api/{{ result.content }}" + +# Check whether cluster already exists +# https://cloudera.github.io/cm_api/apidocs/v13/path__clusters.html +- name: Check whether cluster exists + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}" + method: GET + status_code: 200,404 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: clusters_resp + ignore_errors: true + +- set_fact: cluster_exists={{ 'True' if clusters_resp.status == 200 else 'False' }} +- debug: msg="Cluster '{{ cluster_display_name }}' exists - {{ cluster_exists }}" + +# https://cloudera.github.io/cm_api/apidocs/v13/path__clusters-clusterName-commands-stop.html + +- name: Stop cluster + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/commands/stop" + method: POST + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: template_resp + when: cluster_exists == True + +- debug: var=template_resp + when: cluster_exists == True + +- set_fact: command_id="{{ template_resp.json.id }}" + when: cluster_exists == True + + +# https://cloudera.github.io/cm_api/apidocs/v13/path__commands.html +- name: Wait for cluster to stop + uri: + url: "{{ cm_api_url }}/commands/{{ command_id }}" + body_format: json + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: result + when: cluster_exists == True + until: result.json.active == false + retries: 10 + delay: 30 + +- debug: var=result + + +# https://cloudera.github.io/cm_api/apidocs/v13/path__clusters-clusterName-commands-stop.html +- name: Delete cluster + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}" + method: DELETE + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + 
return_content: yes + register: template_resp + when: cluster_exists == True diff --git a/roles/certs/tasks/main.yml b/roles/certs/tasks/main.yml new file mode 100644 index 0000000..5655c4a --- /dev/null +++ b/roles/certs/tasks/main.yml @@ -0,0 +1,303 @@ +--- +- include_vars: "{{ inventory_dir }}/group_vars/pki.yml" +- include_vars: "{{ inventory_dir }}/group_vars/ca.yml" +- include_vars: "{{ inventory_dir }}/group_vars/tls_enc.yml" + +#- name: Install {{ openssl_path }} +# yum: +# name: {{ openssl_path }} +# state: latest + +- name: Prepare security directories + file: + state: directory + path: "{{ security_root }}" + mode: 0755 + owner: root + +- name: Prepare security subdirs + file: + state: directory + path: "{{ security_root }}/{{ item }}" + mode: 0755 + owner: root + with_items: + - "x509" + - "jks" + - "CAcerts" + +- name: Create JKS file + shell: "{{ keytool_path }} -genkeypair -alias {{ inventory_hostname_short }} -keyalg RSA -keystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -keysize 2048 -dname \"CN={{ inventory_hostname }},OU={{ ca_ou }},O={{ ca_org_name }},ST={{ ca_state_or_province }},C={{ ca_countryname_default }}\" -ext san=dns:{{ inventory_hostname }} -storepass {{ keystore_password }} -keypass {{ keystore_password }} -storetype jks" + args: + creates: "{{ security_root }}/jks/{{ inventory_hostname_short }}.jks" + +- name: Change permisions on JKS file + file: + state: file + path: "{{ security_root }}/jks/{{ inventory_hostname_short }}.jks" + mode: 0644 + owner: root + +- name: Check JKS contains a private key + shell: "{{ keytool_path }} -list -keystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -alias {{ inventory_hostname_short }} -storepass {{ keystore_password }} | grep PrivateKeyEntry" + +- name: Create CSR + shell: "{{ keytool_path }} -certreq -alias {{ inventory_hostname_short }} -keystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -file {{ security_root }}/x509/{{ inventory_hostname_short }}.csr -ext san=dns:{{ inventory_hostname }} -ext EKU=serverAuth,clientAuth -storepass {{ keystore_password }} -keypass {{ keystore_password }}" + args: + creates: "{{ security_root }}/x509/{{ inventory_hostname_short }}.csr" + +- name: Prepare directory for csrs + local_action: + module: file + state: directory + mode: 0777 + owner: "{{ ansible_user_id }}" + path: "{{ csr_certificates_local_location }}" + +- name: Fetch certificate signing requests + fetch: + src: "{{ security_root }}/x509/{{ inventory_hostname_short }}.csr" + dest: "{{ csr_certificates_local_location }}/" + flat: yes + ignore_errors: yes + +- name: Push ca files + copy: + src: "{{ item }}" + dest: "{{ security_root }}/CAcerts" + mode: 0744 + with_items: + - "{{ signed_certificates_local_location }}/{{ chain_cert_name }}" + - "{{ signed_certificates_local_location }}/{{ intermediate_ca_cert_name }}" + - "{{ signed_certificates_local_location }}/{{ root_ca_cert_name }}" + +- set_fact: + root_ca_present: False + +- name: Check Root CA is present + shell: "{{ keytool_path }} -list -keystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -storepass {{ keystore_password }} -storetype jks -alias {{ root_ca_alias }}" + register: root_ca_check + ignore_errors: yes + +- set_fact: + root_ca_present: True + when: root_ca_check.rc == 0 + +- name: Check Root CA is correct + shell: "[[ $({{ openssl_path }} x509 -noout -modulus -in {{ security_root }}/CAcerts/{{ root_ca_cert_name }} | {{ openssl_path }} md5) == $({{ keytool_path }} -list -keystore {{ 
security_root }}/jks/{{ inventory_hostname_short }}.jks -storepass {{ keystore_password }} -storetype jks -alias {{ root_ca_alias }} -rfc | {{ openssl_path }} x509 -inform pem -modulus -noout | {{ openssl_path }} md5) ]]" + when: root_ca_present == True + ignore_errors: yes + register: root_ca_correctness + +- name: Remove incorrect Root CA + shell: "{{ keytool_path }} -delete -alias {{ root_ca_alias }} -keystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -storepass {{ keystore_password }}" + when: root_ca_present == True and root_ca_correctness.rc != 0 + +- set_fact: + root_ca_present: False + when: root_ca_present == True and root_ca_correctness.rc != 0 + +- name: Install Root CA cert into JKS + shell: "{{ keytool_path }} -importcert -alias {{ root_ca_alias }} -keystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -file {{ item }} -storepass {{ keystore_password }} -noprompt -trustcacerts" + with_items: + - "{{ security_root }}/CAcerts/{{ root_ca_cert_name }}" + when: root_ca_present == False + +- set_fact: + int_ca_present: False + +- name: Check Intermediate CA is present + shell: "{{ keytool_path }} -list -keystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -storepass {{ keystore_password }} -storetype jks -alias {{ intermediate_ca_alias }}" + register: int_ca_check + ignore_errors: yes + +- set_fact: + int_ca_present: True + when: int_ca_check.rc == 0 + +- name: Check Intermediate CA is correct + shell: "[[ $({{ openssl_path }} x509 -noout -modulus -in {{ security_root }}/CAcerts/{{ intermediate_ca_cert_name }} | {{ openssl_path }} md5) == $({{ keytool_path }} -list -keystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -storepass {{ keystore_password }} -storetype jks -alias {{ intermediate_ca_alias }} -rfc | {{ openssl_path }} x509 -inform pem -modulus -noout | {{ openssl_path }} md5) ]]" + when: int_ca_present == True + ignore_errors: yes + register: int_ca_correctness + +- name: Remove incorrect Intermediateoot CA + shell: "{{ keytool_path }} -delete -alias {{ intermediate_ca_alias }} -keystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -storepass {{ keystore_password }}" + when: int_ca_present == True and int_ca_correctness.rc != 0 + +- set_fact: + int_ca_present: False + when: int_ca_present == True and int_ca_correctness.rc != 0 + +- name: Install Intermediate CA cert into JKS + shell: "{{ keytool_path }} -importcert -alias {{ intermediate_ca_alias }} -keystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -file {{ item }} -storepass {{ keystore_password }} -noprompt -trustcacerts" + with_items: + - "{{ security_root }}/CAcerts/{{ intermediate_ca_cert_name }}" + when: int_ca_present == False + +- name: Check if the exported key exists + file: + path: "{{ security_root }}/x509/{{ inventory_hostname_short }}.key" + state: file + mode: 0644 + register: keystatus + ignore_errors: yes + +- name: Export PKCS12 + shell: "{{ keytool_path }} -importkeystore -srckeystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -destkeystore {{ security_root }}/x509/{{ inventory_hostname_short }}.p12 -deststoretype PKCS12 -srcstorepass {{ keystore_password }} -srckeypass {{ keystore_password }} -deststorepass {{ keystore_password }} -destkeypass {{ keystore_password }} -srcalias {{ inventory_hostname_short }} -destalias {{ inventory_hostname_short }}" + when: keystatus is failed + +- name: Export Key + shell: "{{ openssl_path }} pkcs12 -in {{ security_root }}/x509/{{ inventory_hostname_short }}.p12 
-nocerts -out {{ security_root }}/x509/{{ inventory_hostname_short }}.key -passout pass:{{ keystore_password }} -passin pass:{{ keystore_password }}" + when: keystatus is failed + +- name: Change permisions on Key + file: + state: file + path: "{{ security_root }}/x509/{{ inventory_hostname_short }}.key" + mode: 0644 + owner: root + +- name: Delete PKCS12 + file: + state: absent + path: "{{ security_root }}/x509/{{ inventory_hostname_short }}.p12" + +- name: Write key password + shell: "echo {{ keystore_password }} > {{ security_root }}/x509/key.pw" + args: + creates: "{{ security_root }}/x509/key.pw" + +- name: Change password permissions + file: + state: file + path: "{{ security_root }}/x509/key.pw" + mode: 0644 + owner: root + group: root + +- name: Check cacerts_file exists + local_action: + module: file + path: "{{ cacerts_file }}" + state: file + +- set_fact: + jssecacerts_local_location: "{{ signed_certificates_local_location }}/jssecacerts" + +- name: Prepare directory for signed certs + local_action: + module: file + state: directory + mode: 0777 + owner: "{{ ansible_user_id }}" + path: "{{ signed_certificates_local_location }}" + +- name: Copy cacerts to jssecacerts + local_action: shell cp {{ cacerts_file }} {{ jssecacerts_local_location }} + sudo: yes + run_once: true + args: + creates: "{{ jssecacerts_local_location }}" + +- name: Check cacerts_file exists + local_action: + module: file + path: "{{ jssecacerts_local_location }}" + mode: 0744 + state: file + run_once: true + +- name: Change jssecacerts away from default + local_action: shell {{ keytool_path }} -storepasswd -storepass changeit -keystore {{ jssecacerts_local_location }} -new {{ jssecacerts_pw }} + run_once: true + ignore_errors: yes + +- set_fact: + root_ca_present: False + run_once: true + +- name: Check Root CA is present in jssecacerts + local_action: shell {{ keytool_path }} -list -keystore {{ jssecacerts_local_location }} -storepass {{ jssecacerts_pw }} -storetype jks -alias {{ root_ca_alias }} + register: root_ca_check + run_once: true + ignore_errors: yes + +- set_fact: + root_ca_present: True + when: root_ca_check.rc == 0 + run_once: true + +- name: Check Root CA is correct in jssecacerts + local_action: shell [[ $({{ openssl_path }} x509 -noout -modulus -in {{ security_root }}/CAcerts/{{ root_ca_cert_name }} | {{ openssl_path }} md5) == $({{ keytool_path }} -list -keystore {{ jssecacerts_local_location }} -storepass {{ jssecacerts_pw }} -storetype jks -alias {{ root_ca_alias }} -rfc | {{ openssl_path }} x509 -inform pem -modulus -noout | {{ openssl_path }} md5) ]] + when: root_ca_present == True + ignore_errors: yes + register: root_ca_correctness + run_once: true + +- name: Remove incorrect Root CA from jssecacerts + local_action: shell {{ keytool_path }} -delete -alias {{ root_ca_alias }} -keystore {{ jssecacerts_local_location }} -storepass {{ jssecacerts_pw }} + when: root_ca_present == True and root_ca_correctness.rc != 0 + run_once: true + +- set_fact: + root_ca_present: False + when: root_ca_present == True and root_ca_correctness.rc != 0 + run_once: true + +- name: Install Root CA cert into jssecacerts + local_action: shell {{ keytool_path }} -importcert -alias {{ root_ca_alias }} -keystore {{ jssecacerts_local_location }} -file {{ item }} -storepass {{ jssecacerts_pw }} -noprompt -trustcacerts + with_items: + - "{{ security_root }}/CAcerts/{{ root_ca_cert_name }}" + when: root_ca_present == False + run_once: true + +- set_fact: + int_ca_present: False + run_once: true + +- name: Check 
Intermediate CA is present in jssecacerts + local_action: shell {{ keytool_path }} -list -keystore {{ jssecacerts_local_location }} -storepass {{ jssecacerts_pw }} -storetype jks -alias {{ intermediate_ca_alias }} + register: int_ca_check + ignore_errors: yes + run_once: true + +- set_fact: + int_ca_present: True + when: int_ca_check.rc == 0 + run_once: true + +- name: Check Intermediate CA is correct in jssecacerts + local_action: shell [[ $({{ openssl_path }} x509 -noout -modulus -in {{ security_root }}/CAcerts/{{ intermediate_ca_cert_name }} | {{ openssl_path }} md5) == $({{ keytool_path }} -list -keystore {{ jssecacerts_local_location }} -storepass {{ jssecacerts_pw }} -storetype jks -alias {{ intermediate_ca_alias }} -rfc | {{ openssl_path }} x509 -inform pem -modulus -noout | {{ openssl_path }} md5) ]] + when: int_ca_present == True + ignore_errors: yes + register: int_ca_correctness + run_once: true + +- name: Remove incorrect Intermediateoot CA from jssecacerts + local_action: shell {{ keytool_path }} -delete -alias {{ intermediate_ca_alias }} -keystore {{ jssecacerts_local_location }} -storepass {{ jssecacerts_pw }} + when: int_ca_present == True and int_ca_correctness.rc != 0 + run_once: true + +- set_fact: + int_ca_present: False + when: int_ca_present == True and int_ca_correctness.rc != 0 + run_once: true + +- name: Install Intermediate CA cert into jssecacerts + local_action: shell {{ keytool_path }} -importcert -alias {{ intermediate_ca_alias }} -keystore {{ jssecacerts_local_location }} -file {{ item }} -storepass {{ jssecacerts_pw }} -noprompt -trustcacerts + with_items: + - "{{ security_root }}/CAcerts/{{ intermediate_ca_cert_name }}" + when: int_ca_present == False + run_once: true + +- name: Distribute jssecacerts file + copy: + src: "{{ jssecacerts_local_location }}" + dest: "{{ jssecacerts_file }}" + mode: 0644 + +- name: Update CA Certs for Ansible + blockinfile: + path: /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + block: "{{ lookup('file', '{{ tls.cert_chain }}') }}" diff --git a/roles/certs_signed_install/tasks/main.yml b/roles/certs_signed_install/tasks/main.yml new file mode 100644 index 0000000..5978504 --- /dev/null +++ b/roles/certs_signed_install/tasks/main.yml @@ -0,0 +1,61 @@ +--- +- include_vars: "{{ inventory_dir }}/group_vars/pki.yml" +- include_vars: "{{ inventory_dir }}/group_vars/ca.yml" + +- name: Check if the signed certificate exists + file: + path: "{{ security_root }}/x509/{{ inventory_hostname_short }}.pem" + state: file + mode: 0644 + register: filestatus + ignore_errors: yes + +- name: Prepare local directory for CSRs + local_action: + module: file + state: directory + mode: 0777 + owner: "{{ ansible_user_id }}" + path: "{{ csr_certificates_local_location }}" + when: filestatus.failed + +- name: Copy signed signed_certificate + copy: + src: "{{ signed_certificates_local_location }}/{{ inventory_hostname_short }}.pem" + dest: "{{ security_root }}/x509/" + mode: 0644 + owner: root + when: filestatus.failed + +- name: Check if the signed certificate exists + file: + path: "{{ security_root }}/x509/{{ inventory_hostname_short }}.pem" + state: file + register: filestatus + +- name: Check Certificate Validates + shell: "openssl verify -verbose -CAfile {{ security_root }}/CAcerts/{{ chain_cert_name }} {{ security_root }}/x509/{{ inventory_hostname_short }}.pem" + +- name: Check New Certificate Matches + shell: "[[ $(keytool -list -keystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -storepass {{ keystore_password }} -storetype 
jks -alias {{ inventory_hostname_short }} -rfc | openssl x509 -inform pem -modulus -noout | openssl md5) == $(openssl x509 -noout -modulus -in {{ security_root }}/x509/{{ inventory_hostname_short }}.pem | openssl md5) ]]" + register: installed_cert + +- name: Check certificate not already present + shell: "keytool -list -keystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -storepass {{ keystore_password }} -storetype jks -alias {{ inventory_hostname_short }} -v | grep 'Certificate chain length: 1'" + register: installed_cert_present + ignore_errors: yes + +- name: Install certificate reply + shell: "keytool -importcert -alias {{ inventory_hostname_short }} -keystore {{ security_root }}/jks/{{ inventory_hostname_short }}.jks -file {{ security_root }}/x509/{{ inventory_hostname_short }}.pem -storepass {{ keystore_password }} -noprompt -trustcacerts" + when: installed_cert_present.rc == 0 + +- name: Create host agnostic links + file: + src: '{{ security_root }}/{{ item.src }}' + dest: '{{ security_root }}/{{ item.dest }}' + state: hard + with_items: + - { src: "jks/{{ inventory_hostname_short }}.jks", dest: "jks/localhost.jks" } + - { src: "x509/{{ inventory_hostname_short }}.key", dest: "x509/localhost.key" } + - { src: "x509/{{ inventory_hostname_short }}.pem", dest: "x509/localhost.pem" } + diff --git a/roles/cm_agents/tasks/36322.yml b/roles/cm_agents/tasks/36322.yml index cff46be..cc22d76 100644 --- a/roles/cm_agents/tasks/36322.yml +++ b/roles/cm_agents/tasks/36322.yml @@ -13,54 +13,48 @@ - group: name=kms - group: name=keytrustee - group: name=kudu -- group: name=llama - group: name=mapred - group: name=oozie - group: name=solr - group: name=spark - group: name=sentry - group: name=sqoop -- group: name=sqoop2 - group: name=yarn - group: name=zookeeper -- user: name=flume group=flume createhome=no system=yes -- user: name=hbase group=hbase createhome=no system=yes -- user: name=hdfs group=hdfs createhome=no system=yes groups=hadoop -- user: name=hive group=hive createhome=no system=yes -- user: name=httpfs group=httpfs createhome=no system=yes -- user: name=hue group=hue createhome=no system=yes -- user: name=impala group=impala createhome=no system=yes groups=hive -- user: name=kafka group=kafka createhome=no system=yes -- user: name=kms group=kms createhome=no system=yes -- user: name=keytrustee group=keytrustee createhome=no system=yes -- user: name=kudu group=kudu createhome=no system=yes -- user: name=llama group=llama createhome=no system=yes -- user: name=mapred group=mapred createhome=no system=yes groups=hadoop -- user: name=oozie group=oozie createhome=no system=yes -- user: name=solr group=solr createhome=no system=yes -- user: name=spark group=spark createhome=no system=yes -- user: name=sentry group=sentry createhome=no system=yes -- user: name=sqoop group=sqoop createhome=no system=yes -- user: name=sqoop2 group=sqoop createhome=no system=yes groups=sqoop2 -- user: name=yarn group=yarn createhome=no system=yes groups=hadoop -- user: name=zookeeper group=zookeeper createhome=no system=yes +- user: name=flume state=present group=flume createhome=no system=yes home=/var/lib/flume-ng +- user: name=hbase state=present group=hbase createhome=no system=yes home=/var/lib/hbase +- user: name=hdfs state=present group=hdfs createhome=no system=yes home=/var/lib/hadoop-hdfs groups=hadoop +- user: name=hive state=present group=hive createhome=no system=yes home=/var/lib/hive +- user: name=httpfs state=present group=httpfs createhome=no system=yes 
home=/var/lib/hadoop-httpfs +- user: name=hue state=present group=hue createhome=no system=yes home=/usr/lib/hue +- user: name=impala state=present group=impala createhome=no system=yes home=/var/lib/impala groups=hive +- user: name=kafka state=present group=kafka createhome=no system=yes home=/var/lib/kafka +- user: name=kms state=present group=kms createhome=no system=yes home=/var/lib/kms +- user: name=keytrustee state=present group=keytrustee createhome=no system=yes home=/var/lib/keytrustee +- user: name=kudu state=present group=kudu createhome=no system=yes home=/var/lib/kudu +- user: name=mapred state=present group=mapred createhome=no system=yes home=/var/lib/hadoop-mapreduce groups=hadoop +- user: name=oozie state=present group=oozie createhome=no system=yes home=/var/lib/oozie +- user: name=solr state=present group=solr createhome=no system=yes home=/var/lib/solr +- user: name=spark state=present group=spark createhome=no system=yes home=/var/lib/spark +- user: name=sentry state=present group=sentry createhome=no system=yes home=/var/lib/sentry +- user: name=sqoop state=present group=sqoop createhome=no system=yes home=/var/lib/sqoop +- user: name=yarn state=present group=yarn createhome=no system=yes home=/var/lib/hadoop-yarn groups=hadoop +- user: name=zookeeper state=present group=zookeeper createhome=no system=yes home=/var/lib/zookeeper -- file: path=/var/lib/flume-ng state=directory owner=flume group=flume mode=0755 -- file: path=/var/lib/hadoop-httpfs state=directory owner=httpfs group=httpfs mode=0755 -- file: path=/var/lib/hadoop-hdfs state=directory owner=hdfs group=hdfs mode=0755 -- file: path=/var/lib/hadoop-yarn state=directory owner=yarn group=yarn mode=0755 -- file: path=/var/lib/hadoop-mapreduce state=directory owner=mapred group=mapred mode=0755 -- file: path=/var/lib/hbase state=directory owner=hbase group=hbase mode=0755 -- file: path=/var/lib/hive state=directory owner=hive group=hive mode=0755 -- file: path=/usr/lib/hue state=directory owner=hue group=hue mode=0755 -- file: path=/var/lib/hadoop-kms state=directory owner=kms group=kms mode=0755 -- file: path=/var/lib/oozie state=directory owner=oozie group=oozie mode=0755 -- file: path=/var/lib/sqoop state=directory owner=sqoop group=sqoop mode=0755 -- file: path=/var/lib/zookeeper state=directory owner=zookeeper group=zookeeper mode=0755 -- file: path=/var/lib/sqoop2 state=directory owner=sqoop2 group=sqoop2 mode=0755 -- file: path=/var/lib/llama state=directory owner=llama group=llama mode=0755 -- file: path=/var/lib/impala state=directory owner=impala group=impala mode=0755 -- file: path=/var/lib/solr state=directory owner=solr group=solr mode=0755 -- file: path=/var/lib/spark state=directory owner=spark group=spark mode=0755 -- file: path=/var/lib/sentry state=directory owner=sentry group=sentry mode=0755 \ No newline at end of file +- file: path=/var/lib/flume-ng state=directory owner=flume group=flume mode=0755 +- file: path=/var/lib/hadoop-httpfs state=directory owner=httpfs group=httpfs mode=0755 +- file: path=/var/lib/hadoop-hdfs state=directory owner=hdfs group=hdfs mode=0755 +- file: path=/var/lib/hadoop-yarn state=directory owner=yarn group=yarn mode=0755 +- file: path=/var/lib/hadoop-mapreduce state=directory owner=mapred group=mapred mode=0755 +- file: path=/var/lib/hbase state=directory owner=hbase group=hbase mode=0755 +- file: path=/var/lib/hive state=directory owner=hive group=hive mode=0755 +- file: path=/usr/lib/hue state=directory owner=hue group=hue mode=0755 +- file: 
path=/var/lib/hadoop-kms state=directory owner=kms group=kms mode=0755 +- file: path=/var/lib/oozie state=directory owner=oozie group=oozie mode=0755 +- file: path=/var/lib/sqoop state=directory owner=sqoop group=sqoop mode=0755 +- file: path=/var/lib/zookeeper state=directory owner=zookeeper group=zookeeper mode=0755 +- file: path=/var/lib/impala state=directory owner=impala group=impala mode=0755 +- file: path=/var/lib/solr state=directory owner=solr group=solr mode=0755 +- file: path=/var/lib/spark state=directory owner=spark group=spark mode=0755 +- file: path=/var/lib/sentry state=directory owner=sentry group=sentry mode=0755 diff --git a/roles/cm_agents/tasks/main.yml b/roles/cm_agents/tasks/main.yml index 88919fe..3919748 100644 --- a/roles/cm_agents/tasks/main.yml +++ b/roles/cm_agents/tasks/main.yml @@ -1,15 +1,28 @@ --- +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" + - include: 36322.yml - name: Install Cloudera Manager Agents yum: name={{ item }} state=installed with_items: - - cloudera-manager-daemons - - cloudera-manager-agent + - "cloudera-manager-daemons-{{ scm_version }}" + - "cloudera-manager-agent-{{ scm_version }}" + +- name: Deploy CM Agent config ini + template: + src: config.ini.j2 + dest: /etc/cloudera-scm-agent/config.ini + group: root + owner: root + mode: '0644' + +#- name: Configure Cloudera Manager Agent 'server_host' +# lineinfile: dest=/etc/cloudera-scm-agent/config.ini regexp=^server_host line=server_host={{ hostvars[scm_hostname]['inventory_hostname'] }} -- name: Configure Cloudera Manager Agent 'server_host' - lineinfile: dest=/etc/cloudera-scm-agent/config.ini regexp=^server_host line=server_host={{ hostvars[scm_hostname]['inventory_hostname'] }} +#- name: Configure Clouder Manager Agent use_tls +# lineinfile: dest=/etc/cloudera-scm-agent/config.ini regexp=^use_tls line=use_tls={{ use_tls }} - name: Restart Cloudera Manager Agents service: name=cloudera-scm-agent state=restarted enabled=yes diff --git a/roles/cm_agents/templates/config.ini.j2 b/roles/cm_agents/templates/config.ini.j2 new file mode 100644 index 0000000..96e4219 --- /dev/null +++ b/roles/cm_agents/templates/config.ini.j2 @@ -0,0 +1,229 @@ +[General] +# Hostname of the CM server. +server_host={{ hostvars[groups['scm_server'][0]]['inventory_hostname'] }} + +# Port that the CM server is listening on. +server_port=7182 + +## It should not normally be necessary to modify these. +# Port that the CM agent should listen on. +# listening_port=9000 + +# IP Address that the CM agent should listen on. +# listening_ip= + +# Hostname that the CM agent reports as its hostname. If unset, will be +# obtained in code through something like this: +# +# python -c 'import socket; \ +# print socket.getfqdn(), \ +# socket.gethostbyname(socket.getfqdn())' +# +# listening_hostname= + +# An alternate hostname to report as the hostname for this host in CM. +# Useful when this agent is behind a load balancer or proxy and all +# inbound communication must connect through that proxy. +# reported_hostname= + +# Port that supervisord should listen on. +# NB: This only takes effect if supervisord is restarted. +# supervisord_port=19001 + +# Log file. The supervisord log file will be placed into +# the same directory. Note that if the agent is being started via the +# init.d script, /var/log/cloudera-scm-agent/cloudera-scm-agent.out will +# also have a small amount of output (from before logging is initialized). +# log_file=/var/log/cloudera-scm-agent/cloudera-scm-agent.log + +# Persistent state directory. 
Directory to store CM agent state that +# persists across instances of the agent process and system reboots. +# Particularly, the agent's UUID is stored here. +# lib_dir=/var/lib/cloudera-scm-agent + +# Parcel directory. Unpacked parcels will be stored in this directory. +# Downloaded parcels will be stored in /../parcel-cache +# parcel_dir=/opt/cloudera/parcels + +# Enable supervisord event monitoring. Used in eager heartbeating, amongst +# other things. +# enable_supervisord_events=true + +# Maximum time to wait (in seconds) for all metric collectors to finish +# collecting data. +max_collection_wait_seconds=10.0 + +# Maximum time to wait (in seconds) when connecting to a local role's +# webserver to fetch metrics. +metrics_url_timeout_seconds=30.0 + +# Maximum time to wait (in seconds) when connecting to a local TaskTracker +# to fetch task attempt data. +task_metrics_timeout_seconds=5.0 + +# The list of non-device (nodev) filesystem types which will be monitored. +monitored_nodev_filesystem_types=nfs,nfs4,tmpfs + +# The list of filesystem types which are considered local for monitoring purposes. +# These filesystems are combined with the other local filesystem types found in +# /proc/filesystems +local_filesystem_whitelist=ext2,ext3,ext4 + +# The largest size impala profile log bundle that this agent will serve to the +# CM server. If the CM server requests more than this amount, the bundle will +# be limited to this size. All instances of this limit being hit are logged to +# the agent log. +impala_profile_bundle_max_bytes=1073741824 + +# The largest size stacks log bundle that this agent will serve to the CM +# server. If the CM server requests more than this amount, the bundle will be +# limited to this size. All instances of this limit being hit are logged to the +# agent log. +stacks_log_bundle_max_bytes=1073741824 + +# The size to which the uncompressed portion of a stacks log can grow before it +# is rotated. The log will then be compressed during rotation. +stacks_log_max_uncompressed_file_size_bytes=5242880 + +# The orphan process directory staleness threshold. If a diretory is more stale +# than this amount of seconds, CM agent will remove it. +orphan_process_dir_staleness_threshold=5184000 + +# The orphan process directory refresh interval. The CM agent will check the +# staleness of the orphan processes config directory every this amount of +# seconds. +orphan_process_dir_refresh_interval=3600 + +# A knob to control the agent logging level. The options are listed as follows: +# 1) DEBUG (set the agent logging level to 'logging.DEBUG') +# 2) INFO (set the agent logging level to 'logging.INFO') +scm_debug=INFO + +# The DNS resolution collecion interval in seconds. A java base test program +# will be executed with at most this frequency to collect java DNS resolution +# metrics. The test program is only executed if the associated health test, +# Host DNS Resolution, is enabled. +dns_resolution_collection_interval_seconds=60 + +# The maximum time to wait (in seconds) for the java test program to collect +# java DNS resolution metrics. +dns_resolution_collection_timeout_seconds=30 + +# The directory location in which the agent-wide kerberos credential cache +# will be created. +# agent_wide_credential_cache_location=/var/run/cloudera-scm-agent + +[Security] + +#IF STATEMENT + +# Use TLS and certificate validation when connecting to the CM server. +use_tls=0 + +# The maximum allowed depth of the certificate chain returned by the peer. 
+# The default value of 9 matches the default specified in openssl's +# SSL_CTX_set_verify. +max_cert_depth=9 + +# A file of CA certificates in PEM format. The file can contain several CA +# certificates identified by +# +# -----BEGIN CERTIFICATE----- +# ... (CA certificate in base64 encoding) ... +# -----END CERTIFICATE----- +# +# sequences. Before, between, and after the certificates text is allowed which +# can be used e.g. for descriptions of the certificates. +# +# The file is loaded once, the first time an HTTPS connection is attempted. A +# restart of the agent is required to pick up changes to the file. +# +# Note that if neither verify_cert_file or verify_cert_dir is set, certificate +# verification will not be performed. +# verify_cert_file= + +# Directory containing CA certificates in PEM format. The files each contain one +# CA certificate. The files are looked up by the CA subject name hash value, +# which must hence be available. If more than one CA certificate with the same +# name hash value exist, the extension must be different (e.g. 9d66eef0.0, +# 9d66eef0.1 etc). The search is performed in the ordering of the extension +# number, regardless of other properties of the certificates. Use the c_rehash +# utility to create the necessary links. +# +# The certificates in the directory are only looked up when required, e.g. when +# building the certificate chain or when actually performing the verification +# of a peer certificate. The contents of the directory can thus be changed +# without an agent restart. +# +# When looking up CA certificates, the verify_cert_file is first searched, then +# those in the directory. Certificate matching is done based on the subject name, +# the key identifier (if present), and the serial number as taken from the +# certificate to be verified. If these data do not match, the next certificate +# will be tried. If a first certificate matching the parameters is found, the +# verification process will be performed; no other certificates for the same +# parameters will be searched in case of failure. +# +# Note that if neither verify_cert_file or verify_cert_dir is set, certificate +# verification will not be performed. + +# PEM file containing client private key. +# client_key_file= + +# A command to run which returns the client private key password on stdout +# client_keypw_cmd= + +# If client_keypw_cmd isn't specified, instead a text file containing +# the client private key password can be used. +# client_keypw_file= + +# PEM file containing client certificate. +# client_cert_file= + +#ELSE + + +#ENDIF STATEMENT + +## Location of Hadoop files. These are the CDH locations when installed by +## packages. Unused when CDH is installed by parcels. 
+[Hadoop] +#cdh_crunch_home=/usr/lib/crunch +#cdh_flume_home=/usr/lib/flume-ng +#cdh_hadoop_bin=/usr/bin/hadoop +#cdh_hadoop_home=/usr/lib/hadoop +#cdh_hbase_home=/usr/lib/hbase +#cdh_hbase_indexer_home=/usr/lib/hbase-solr +#cdh_hcat_home=/usr/lib/hive-hcatalog +#cdh_hdfs_home=/usr/lib/hadoop-hdfs +#cdh_hive_home=/usr/lib/hive +#cdh_httpfs_home=/usr/lib/hadoop-httpfs +#cdh_hue_home=/usr/share/hue +#cdh_hue_plugins_home=/usr/lib/hadoop +#cdh_impala_home=/usr/lib/impala +#cdh_llama_home=/usr/lib/llama +#cdh_mr1_home=/usr/lib/hadoop-0.20-mapreduce +#cdh_mr2_home=/usr/lib/hadoop-mapreduce +#cdh_oozie_home=/usr/lib/oozie +#cdh_parquet_home=/usr/lib/parquet +#cdh_pig_home=/usr/lib/pig +#cdh_solr_home=/usr/lib/solr +#cdh_spark_home=/usr/lib/spark +#cdh_sqoop_home=/usr/lib/sqoop +#cdh_sqoop2_home=/usr/lib/sqoop2 +#cdh_yarn_home=/usr/lib/hadoop-yarn +#cdh_zookeeper_home=/usr/lib/zookeeper +#hive_default_xml=/etc/hive/conf.dist/hive-default.xml +#webhcat_default_xml=/etc/hive-webhcat/conf.dist/webhcat-default.xml +#jsvc_home=/usr/libexec/bigtop-utils +#tomcat_home=/usr/lib/bigtop-tomcat + +## Location of Cloudera Management Services files. +[Cloudera] +#mgmt_home=/usr/share/cmf + +## Location of JDBC Drivers. +[JDBC] +#cloudera_mysql_connector_jar=/usr/share/java/mysql-connector-java.jar +#cloudera_oracle_connector_jar=/usr/share/java/oracle-connector-java.jar +#By default, postgres jar is found dynamically in $MGMT_HOME/lib +#cloudera_postgresql_jdbc_jar= diff --git a/roles/cm_agents_teardown/tasks/main.yml b/roles/cm_agents_teardown/tasks/main.yml new file mode 100644 index 0000000..4e602b2 --- /dev/null +++ b/roles/cm_agents_teardown/tasks/main.yml @@ -0,0 +1,134 @@ +--- +- include_vars: "{{ inventory_dir }}/group_vars/cdh_servers.yml" + +- name: Stop CM agents on all nodes + service: + name: cloudera-scm-agent + state: stopped + +- name: Hard Stop CM agents on all nodes + service: + name: supervisord + state: stopped + +- name: Delete CM agent run directories on all nodes + shell: "rm /var/run/cloudera-scm-agent/process/* -rf" + +- name: Delete CM agent directories on all nodes + shell: "rm /var/lib/cloudera-scm-agent/* -rf" + +- name: JN Edits Dir + file: + path: "{{ cdh_services | json_query (query) }}" + state: absent + ignore_errors: true + vars: + query: "[?type=='hdfs'].dfs_journalnode_edits_dir" + +- name: Impala Catalog Log Dir + file: + path: "{{ log_base }}/catalogd" + state: absent + ignore_errors: true + +- name: HDFS Log Dir + file: + path: "{{ log_base }}/hadoop-hdfs" + state: absent + ignore_errors: true + +- name: HTTPFS Log Dir + file: + path: "{{ log_base }}/hadoop-httpfs" + state: absent + ignore_errors: true + +- name: MR Log Dir + file: + path: "{{ log_base }}/hadoop-mapreduce" + state: absent + ignore_errors: true + +- name: YARN Log Dir + file: + path: "{{ log_base }}/hadoop-yarn" + state: absent + ignore_errors: true + +- name: HBase Log Dir + file: + path: "{{ log_base }}/hbase" + state: absent + ignore_errors: true + +- name: HBase Solr Log Dir + file: + path: "{{ log_base }}/hbase-solr" + state: absent + ignore_errors: true + +- name: Hive Log Dir + file: + path: "{{ log_base }}/hive" + state: absent + ignore_errors: true + +- name: Hue Log Dir + file: + path: "{{ log_base }}/hue" + state: absent + ignore_errors: true + +- name: Hue LB Log Dir + file: + path: "{{ log_base }}/hue-httpd" + state: absent + ignore_errors: true + +- name: ImpalaD Log Dir + file: + path: "{{ log_base }}/impalad" + state: absent + ignore_errors: true + +- name: Impala Minidumps Dir + file: + 
path: "{{ log_base }}/impala-minidumps" + state: absent + ignore_errors: true + +- name: Oozie Log Dir + file: + path: "{{ log_base }}/oozie" + state: absent + ignore_errors: true + +- name: Sentry Log Dir + file: + path: "{{ log_base }}/sentry" + state: absent + ignore_errors: true + +- name: Solr Log Dir + file: + path: "{{ log_base }}/solr" + state: absent + ignore_errors: true + +- name: Spark Log Dir + file: + path: "{{ log_base }}/spark" + state: absent + ignore_errors: true + +- name: Impala Statestore Log Dir + file: + path: "{{ log_base }}/statestore" + state: absent + ignore_errors: true + +- name: ZK Log Dir + file: + path: "{{ log_base }}/zookeeper" + state: absent + ignore_errors: true diff --git a/roles/cm_agents_tls/tasks/36322.yml b/roles/cm_agents_tls/tasks/36322.yml new file mode 100644 index 0000000..cff46be --- /dev/null +++ b/roles/cm_agents_tls/tasks/36322.yml @@ -0,0 +1,66 @@ +--- +# Temporary workaround for OPSAPS-36322 + +- group: name=flume +- group: name=hadoop +- group: name=hbase +- group: name=hdfs +- group: name=hive +- group: name=httpfs +- group: name=hue +- group: name=impala +- group: name=kafka +- group: name=kms +- group: name=keytrustee +- group: name=kudu +- group: name=llama +- group: name=mapred +- group: name=oozie +- group: name=solr +- group: name=spark +- group: name=sentry +- group: name=sqoop +- group: name=sqoop2 +- group: name=yarn +- group: name=zookeeper + +- user: name=flume group=flume createhome=no system=yes +- user: name=hbase group=hbase createhome=no system=yes +- user: name=hdfs group=hdfs createhome=no system=yes groups=hadoop +- user: name=hive group=hive createhome=no system=yes +- user: name=httpfs group=httpfs createhome=no system=yes +- user: name=hue group=hue createhome=no system=yes +- user: name=impala group=impala createhome=no system=yes groups=hive +- user: name=kafka group=kafka createhome=no system=yes +- user: name=kms group=kms createhome=no system=yes +- user: name=keytrustee group=keytrustee createhome=no system=yes +- user: name=kudu group=kudu createhome=no system=yes +- user: name=llama group=llama createhome=no system=yes +- user: name=mapred group=mapred createhome=no system=yes groups=hadoop +- user: name=oozie group=oozie createhome=no system=yes +- user: name=solr group=solr createhome=no system=yes +- user: name=spark group=spark createhome=no system=yes +- user: name=sentry group=sentry createhome=no system=yes +- user: name=sqoop group=sqoop createhome=no system=yes +- user: name=sqoop2 group=sqoop createhome=no system=yes groups=sqoop2 +- user: name=yarn group=yarn createhome=no system=yes groups=hadoop +- user: name=zookeeper group=zookeeper createhome=no system=yes + +- file: path=/var/lib/flume-ng state=directory owner=flume group=flume mode=0755 +- file: path=/var/lib/hadoop-httpfs state=directory owner=httpfs group=httpfs mode=0755 +- file: path=/var/lib/hadoop-hdfs state=directory owner=hdfs group=hdfs mode=0755 +- file: path=/var/lib/hadoop-yarn state=directory owner=yarn group=yarn mode=0755 +- file: path=/var/lib/hadoop-mapreduce state=directory owner=mapred group=mapred mode=0755 +- file: path=/var/lib/hbase state=directory owner=hbase group=hbase mode=0755 +- file: path=/var/lib/hive state=directory owner=hive group=hive mode=0755 +- file: path=/usr/lib/hue state=directory owner=hue group=hue mode=0755 +- file: path=/var/lib/hadoop-kms state=directory owner=kms group=kms mode=0755 +- file: path=/var/lib/oozie state=directory owner=oozie group=oozie mode=0755 +- file: path=/var/lib/sqoop 
state=directory owner=sqoop group=sqoop mode=0755 +- file: path=/var/lib/zookeeper state=directory owner=zookeeper group=zookeeper mode=0755 +- file: path=/var/lib/sqoop2 state=directory owner=sqoop2 group=sqoop2 mode=0755 +- file: path=/var/lib/llama state=directory owner=llama group=llama mode=0755 +- file: path=/var/lib/impala state=directory owner=impala group=impala mode=0755 +- file: path=/var/lib/solr state=directory owner=solr group=solr mode=0755 +- file: path=/var/lib/spark state=directory owner=spark group=spark mode=0755 +- file: path=/var/lib/sentry state=directory owner=sentry group=sentry mode=0755 \ No newline at end of file diff --git a/roles/cm_agents_tls/tasks/main.yml b/roles/cm_agents_tls/tasks/main.yml new file mode 100644 index 0000000..557d1f8 --- /dev/null +++ b/roles/cm_agents_tls/tasks/main.yml @@ -0,0 +1,30 @@ +--- + +- include_vars: "{{ inventory_dir }}/group_vars/tls_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server_enc.yml" + +#- include: 36322.yml + +- name: Install Cloudera Manager Agents + yum: name={{ item }} state=installed + with_items: + - "cloudera-manager-daemons-{{ scm_version }}" + - "cloudera-manager-agent-{{ scm_version }}" + +- name: Deploy CM Agent config ini + template: + src: config.ini.j2 + dest: /etc/cloudera-scm-agent/config.ini + group: root + owner: root + mode: '0644' + +#- name: Configure Cloudera Manager Agent 'server_host' +# lineinfile: dest=/etc/cloudera-scm-agent/config.ini regexp=^server_host line=server_host={{ hostvars[scm_hostname]['inventory_hostname'] }} + +#- name: Configure Clouder Manager Agent use_tls +# lineinfile: dest=/etc/cloudera-scm-agent/config.ini regexp=^use_tls line=use_tls={{ use_tls }} + +- name: Restart Cloudera Manager Agents + service: name=cloudera-scm-agent state=restarted enabled=yes diff --git a/roles/cm_agents_tls/templates/config.ini.j2 b/roles/cm_agents_tls/templates/config.ini.j2 new file mode 100644 index 0000000..4a6fa61 --- /dev/null +++ b/roles/cm_agents_tls/templates/config.ini.j2 @@ -0,0 +1,237 @@ +[General] +# Hostname of the CM server. +server_host={{ hostvars[groups['scm_server'][0]]['inventory_hostname'] }} + +# Port that the CM server is listening on. +server_port=7182 + +## It should not normally be necessary to modify these. +# Port that the CM agent should listen on. +# listening_port=9000 + +# IP Address that the CM agent should listen on. +# listening_ip= + +# Hostname that the CM agent reports as its hostname. If unset, will be +# obtained in code through something like this: +# +# python -c 'import socket; \ +# print socket.getfqdn(), \ +# socket.gethostbyname(socket.getfqdn())' +# +# listening_hostname= + +# An alternate hostname to report as the hostname for this host in CM. +# Useful when this agent is behind a load balancer or proxy and all +# inbound communication must connect through that proxy. +# reported_hostname= + +# Port that supervisord should listen on. +# NB: This only takes effect if supervisord is restarted. +# supervisord_port=19001 + +# Log file. The supervisord log file will be placed into +# the same directory. Note that if the agent is being started via the +# init.d script, /var/log/cloudera-scm-agent/cloudera-scm-agent.out will +# also have a small amount of output (from before logging is initialized). +# log_file=/var/log/cloudera-scm-agent/cloudera-scm-agent.log + +# Persistent state directory. 
Directory to store CM agent state that +# persists across instances of the agent process and system reboots. +# Particularly, the agent's UUID is stored here. +# lib_dir=/var/lib/cloudera-scm-agent + +# Parcel directory. Unpacked parcels will be stored in this directory. +# Downloaded parcels will be stored in /../parcel-cache +# parcel_dir=/opt/cloudera/parcels + +# Enable supervisord event monitoring. Used in eager heartbeating, amongst +# other things. +# enable_supervisord_events=true + +# Maximum time to wait (in seconds) for all metric collectors to finish +# collecting data. +max_collection_wait_seconds=10.0 + +# Maximum time to wait (in seconds) when connecting to a local role's +# webserver to fetch metrics. +metrics_url_timeout_seconds=30.0 + +# Maximum time to wait (in seconds) when connecting to a local TaskTracker +# to fetch task attempt data. +task_metrics_timeout_seconds=5.0 + +# The list of non-device (nodev) filesystem types which will be monitored. +monitored_nodev_filesystem_types=nfs,nfs4,tmpfs + +# The list of filesystem types which are considered local for monitoring purposes. +# These filesystems are combined with the other local filesystem types found in +# /proc/filesystems +local_filesystem_whitelist=ext2,ext3,ext4 + +# The largest size impala profile log bundle that this agent will serve to the +# CM server. If the CM server requests more than this amount, the bundle will +# be limited to this size. All instances of this limit being hit are logged to +# the agent log. +impala_profile_bundle_max_bytes=1073741824 + +# The largest size stacks log bundle that this agent will serve to the CM +# server. If the CM server requests more than this amount, the bundle will be +# limited to this size. All instances of this limit being hit are logged to the +# agent log. +stacks_log_bundle_max_bytes=1073741824 + +# The size to which the uncompressed portion of a stacks log can grow before it +# is rotated. The log will then be compressed during rotation. +stacks_log_max_uncompressed_file_size_bytes=5242880 + +# The orphan process directory staleness threshold. If a diretory is more stale +# than this amount of seconds, CM agent will remove it. +orphan_process_dir_staleness_threshold=5184000 + +# The orphan process directory refresh interval. The CM agent will check the +# staleness of the orphan processes config directory every this amount of +# seconds. +orphan_process_dir_refresh_interval=3600 + +# A knob to control the agent logging level. The options are listed as follows: +# 1) DEBUG (set the agent logging level to 'logging.DEBUG') +# 2) INFO (set the agent logging level to 'logging.INFO') +scm_debug=INFO + +# The DNS resolution collecion interval in seconds. A java base test program +# will be executed with at most this frequency to collect java DNS resolution +# metrics. The test program is only executed if the associated health test, +# Host DNS Resolution, is enabled. +dns_resolution_collection_interval_seconds=60 + +# The maximum time to wait (in seconds) for the java test program to collect +# java DNS resolution metrics. +dns_resolution_collection_timeout_seconds=30 + +# The directory location in which the agent-wide kerberos credential cache +# will be created. +# agent_wide_credential_cache_location=/var/run/cloudera-scm-agent + +[Security] + +#IF STATEMENT +{% if (agent_tls) %} + +# Use TLS and certificate validation when connecting to the CM server. +use_tls=1 + +# The maximum allowed depth of the certificate chain returned by the peer. 
+# The default value of 9 matches the default specified in openssl's +# SSL_CTX_set_verify. +max_cert_depth=9 + +# A file of CA certificates in PEM format. The file can contain several CA +# certificates identified by +# +# -----BEGIN CERTIFICATE----- +# ... (CA certificate in base64 encoding) ... +# -----END CERTIFICATE----- +# +# sequences. Before, between, and after the certificates text is allowed which +# can be used e.g. for descriptions of the certificates. +# +# The file is loaded once, the first time an HTTPS connection is attempted. A +# restart of the agent is required to pick up changes to the file. +# +# Note that if neither verify_cert_file or verify_cert_dir is set, certificate +# verification will not be performed. +verify_cert_file={{ tls.cert_chain }} + +# Directory containing CA certificates in PEM format. The files each contain one +# CA certificate. The files are looked up by the CA subject name hash value, +# which must hence be available. If more than one CA certificate with the same +# name hash value exist, the extension must be different (e.g. 9d66eef0.0, +# 9d66eef0.1 etc). The search is performed in the ordering of the extension +# number, regardless of other properties of the certificates. Use the c_rehash +# utility to create the necessary links. +# +# The certificates in the directory are only looked up when required, e.g. when +# building the certificate chain or when actually performing the verification +# of a peer certificate. The contents of the directory can thus be changed +# without an agent restart. +# +# When looking up CA certificates, the verify_cert_file is first searched, then +# those in the directory. Certificate matching is done based on the subject name, +# the key identifier (if present), and the serial number as taken from the +# certificate to be verified. If these data do not match, the next certificate +# will be tried. If a first certificate matching the parameters is found, the +# verification process will be performed; no other certificates for the same +# parameters will be searched in case of failure. +# +# Note that if neither verify_cert_file or verify_cert_dir is set, certificate +# verification will not be performed. + +# PEM file containing client private key. +# client_key_file= +client_key_file={{ tls.private_key }} + +# A command to run which returns the client private key password on stdout +# client_keypw_cmd= + +# If client_keypw_cmd isn't specified, instead a text file containing +# the client private key password can be used. +# client_keypw_file= +client_keypw_file={{ tls.key_password_file }} + +# PEM file containing client certificate. +# client_cert_file= +client_cert_file={{ tls.tls_cert }} + +#ELSE +{% else %} + +use_tls=0 +max_cert_depth=9 + +{% endif %} +#ENDIF STATEMENT + +## Location of Hadoop files. These are the CDH locations when installed by +## packages. Unused when CDH is installed by parcels. 
+[Hadoop] +#cdh_crunch_home=/usr/lib/crunch +#cdh_flume_home=/usr/lib/flume-ng +#cdh_hadoop_bin=/usr/bin/hadoop +#cdh_hadoop_home=/usr/lib/hadoop +#cdh_hbase_home=/usr/lib/hbase +#cdh_hbase_indexer_home=/usr/lib/hbase-solr +#cdh_hcat_home=/usr/lib/hive-hcatalog +#cdh_hdfs_home=/usr/lib/hadoop-hdfs +#cdh_hive_home=/usr/lib/hive +#cdh_httpfs_home=/usr/lib/hadoop-httpfs +#cdh_hue_home=/usr/share/hue +#cdh_hue_plugins_home=/usr/lib/hadoop +#cdh_impala_home=/usr/lib/impala +#cdh_llama_home=/usr/lib/llama +#cdh_mr1_home=/usr/lib/hadoop-0.20-mapreduce +#cdh_mr2_home=/usr/lib/hadoop-mapreduce +#cdh_oozie_home=/usr/lib/oozie +#cdh_parquet_home=/usr/lib/parquet +#cdh_pig_home=/usr/lib/pig +#cdh_solr_home=/usr/lib/solr +#cdh_spark_home=/usr/lib/spark +#cdh_sqoop_home=/usr/lib/sqoop +#cdh_sqoop2_home=/usr/lib/sqoop2 +#cdh_yarn_home=/usr/lib/hadoop-yarn +#cdh_zookeeper_home=/usr/lib/zookeeper +#hive_default_xml=/etc/hive/conf.dist/hive-default.xml +#webhcat_default_xml=/etc/hive-webhcat/conf.dist/webhcat-default.xml +#jsvc_home=/usr/libexec/bigtop-utils +#tomcat_home=/usr/lib/bigtop-tomcat + +## Location of Cloudera Management Services files. +[Cloudera] +#mgmt_home=/usr/share/cmf + +## Location of JDBC Drivers. +[JDBC] +#cloudera_mysql_connector_jar=/usr/share/java/mysql-connector-java.jar +#cloudera_oracle_connector_jar=/usr/share/java/oracle-connector-java.jar +#By default, postgres jar is found dynamically in $MGMT_HOME/lib +#cloudera_postgresql_jdbc_jar= diff --git a/roles/cm_api/tasks/main.yml b/roles/cm_api/tasks/main.yml new file mode 100644 index 0000000..d004905 --- /dev/null +++ b/roles/cm_api/tasks/main.yml @@ -0,0 +1,10 @@ +--- + +- name: Copy cm_api to Python lib directory + copy: + src: cm_api-19.0.0-py2.7.egg + dest: /usr/lib/python2.7/site-packages/cm_api-19.0.0-py2.7.egg + owner: root + group: root + mode: '0644' + diff --git a/roles/cm_repo/tasks/main.yml b/roles/cm_repo/tasks/main.yml index 2e5271f..e54555d 100644 --- a/roles/cm_repo/tasks/main.yml +++ b/roles/cm_repo/tasks/main.yml @@ -1,11 +1,11 @@ --- +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" - name: Add Cloudera Manager yum repository yum_repository: name: cloudera-manager description: Cloudera Manager - baseurl: http://archive.cloudera.com/cm5/redhat/{{ ansible_distribution_major_version }}/x86_64/cm/5/ - gpgkey: http://archive.cloudera.com/cm5/redhat/{{ ansible_distribution_major_version }}/x86_64/cm/RPM-GPG-KEY-cloudera - gpgcheck: yes + baseurl: "{{ yum_repo_base }}/el{{ ansible_distribution_major_version }}/" + gpgcheck: no enabled: yes when: (ansible_distribution|lower == "redhat") or (ansible_distribution|lower == "centos") diff --git a/roles/cm_roles/tasks/main.yml b/roles/cm_roles/tasks/main.yml new file mode 100644 index 0000000..b00570d --- /dev/null +++ b/roles/cm_roles/tasks/main.yml @@ -0,0 +1,186 @@ +--- + +- include_vars: "{{ inventory_dir }}/group_vars/cdh_servers.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/krb5_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/ldap_enc.yml" + +- set_fact: cm_api_url={{ "https://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port_tls }}" if scm_web_tls==True else "http://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port }}" }} + +- name: Get Cloudera Manager API version + uri: + url: "{{ cm_api_url }}/api/version" + method: GET + status_code: 200 + user: "{{ scm_default_user }}" + 
password: "{{ scm_default_pass }}" + force_basic_auth: yes + return_content: yes + register: result + +- set_fact: cm_api_url="{{ cm_api_url }}/api/{{ result.content }}" + +- name: Prepare Cloudera Manager settings - Cluster Admin + template: + src: "externalUserMappingsClu.j2" + dest: "{{ tmp_dir }}/externalUserMappingsClu.json" + mode: 0777 + delegate_to: localhost + + +- name: Prepare Cloudera Manager settings - Full Admin + template: + src: "externalUserMappingsFull.j2" + dest: "{{ tmp_dir }}/externalUserMappingsFull.json" + mode: 0777 + delegate_to: localhost + + +- name: Prepare Cloudera Manager settings - Key Admin + template: + src: "externalUserMappingsKey.j2" + dest: "{{ tmp_dir }}/externalUserMappingsKey.json" + mode: 0777 + delegate_to: localhost + + +- name: Prepare Cloudera Manager settings - Operator + template: + src: "externalUserMappingsOp.j2" + dest: "{{ tmp_dir }}/externalUserMappingsOp.json" + mode: 0777 + delegate_to: localhost + + +- name: Prepare Cloudera Manager settings - Read Only + template: + src: "externalUserMappingsRO.j2" + dest: "{{ tmp_dir }}/externalUserMappingsRO.json" + mode: 0777 + delegate_to: localhost + + +- name: Prepare Cloudera Manager settings - User Admin + template: + src: "externalUserMappingsUser.j2" + dest: "{{ tmp_dir }}/externalUserMappingsUser.json" + mode: 0777 + delegate_to: localhost + + +- name: Update Cloudera Manager settings - Cluster Admin + uri: + url: "{{ cm_api_url }}/externalUserMappings" + method: POST + body_format: json + body: "{{ lookup('file', ''+ tmp_dir + '/externalUserMappingsClu.json') }}" + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: scm_resp_map + delegate_to: localhost + + +- name: Update Cloudera Manager settings - Full Admin + uri: + url: "{{ cm_api_url }}/externalUserMappings" + method: POST + body_format: json + body: "{{ lookup('file', ''+ tmp_dir + '/externalUserMappingsFull.json') }}" + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: scm_resp_map + delegate_to: localhost + +- name: Update Cloudera Manager settings - Key Admin + uri: + url: "{{ cm_api_url }}/externalUserMappings" + method: POST + body_format: json + body: "{{ lookup('file', ''+ tmp_dir + '/externalUserMappingsKey.json') }}" + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: scm_resp_map + delegate_to: localhost + +- name: Update Cloudera Manager settings - Operator + uri: + url: "{{ cm_api_url }}/externalUserMappings" + method: POST + body_format: json + body: "{{ lookup('file', ''+ tmp_dir + '/externalUserMappingsOp.json') }}" + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: scm_resp_map + delegate_to: localhost + +- name: Update Cloudera Manager settings - Read Only + uri: + url: "{{ cm_api_url }}/externalUserMappings" + method: POST + body_format: json + body: "{{ lookup('file', ''+ tmp_dir + '/externalUserMappingsRO.json') }}" + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: scm_resp_map + delegate_to: localhost + +- name: Update Cloudera Manager settings - User Admin + uri: + url: "{{ cm_api_url }}/externalUserMappings" + method: POST + body_format: json 
+ body: "{{ lookup('file', ''+ tmp_dir + '/externalUserMappingsUser.json') }}" + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: scm_resp_map + delegate_to: localhost + + +- file: + path: "{{ tmp_dir }}/externalUserMappingsClu.json" + state: absent + delegate_to: localhost + +- file: + path: "{{ tmp_dir }}/externalUserMappingsFull.json" + state: absent + delegate_to: localhost + +- file: + path: "{{ tmp_dir }}/externalUserMappingsKey.json" + state: absent + delegate_to: localhost + +- file: + path: "{{ tmp_dir }}/externalUserMappingsOp.json" + state: absent + delegate_to: localhost + +- file: + path: "{{ tmp_dir }}/externalUserMappingsRO.json" + state: absent + delegate_to: localhost + +- file: + path: "{{ tmp_dir }}/externalUserMappingsUser.json" + state: absent + delegate_to: localhost diff --git a/roles/cm_roles/templates/externalUserMappings.j2 b/roles/cm_roles/templates/externalUserMappings.j2 new file mode 100644 index 0000000..bdf176d --- /dev/null +++ b/roles/cm_roles/templates/externalUserMappings.j2 @@ -0,0 +1,27 @@ +{ + "items" : [ { + "authRoles" : [], + "name" : "{{ ldap_rdom.cm_clu_admin }}", + "type" : "LDAP" + }, { + "authRoles" : [], + "name" : "{{ ldap_rdom.cm_full_admin }}", + "type" : "LDAP" + }, { + "authRoles" : [], + "name" : "{{ ldap_rdom.cm_key_admin }}", + "type" : "LDAP" + }, { + "authRoles" : [], + "name" : "{{ ldap_rdom.cm_operator }}", + "type" : "LDAP" + }, { + "authRoles" : [], + "name" : "{{ ldap_rdom.cm_read_only }}", + "type" : "LDAP" + }, { + "authRoles" : [], + "name" : "{{ ldap_rdom.cm_user_admin }}", + "type" : "LDAP" + } ] +} diff --git a/roles/cm_roles/templates/externalUserMappingsClu.j2 b/roles/cm_roles/templates/externalUserMappingsClu.j2 new file mode 100644 index 0000000..9b1672f --- /dev/null +++ b/roles/cm_roles/templates/externalUserMappingsClu.j2 @@ -0,0 +1,10 @@ +{ + "items" : [ { + "authRoles" : [ { + "displayName" : "Cluster Administrator", + "name" : "ROLE_CLUSTER_ADMIN", + } ], + "name" : "{{ ldap_rdom.cm_clu_admin}}", + "type" : "LDAP" + } ] +} diff --git a/roles/cm_roles/templates/externalUserMappingsFull.j2 b/roles/cm_roles/templates/externalUserMappingsFull.j2 new file mode 100644 index 0000000..a02e400 --- /dev/null +++ b/roles/cm_roles/templates/externalUserMappingsFull.j2 @@ -0,0 +1,10 @@ +{ + "items" : [ { + "authRoles" : [ { + "displayName" : "Full Administrator", + "name" : "ROLE_ADMIN", + } ], + "name" : "{{ ldap_rdom.cm_full_admin}}", + "type" : "LDAP" + } ] +} diff --git a/roles/cm_roles/templates/externalUserMappingsKey.j2 b/roles/cm_roles/templates/externalUserMappingsKey.j2 new file mode 100644 index 0000000..aeab46c --- /dev/null +++ b/roles/cm_roles/templates/externalUserMappingsKey.j2 @@ -0,0 +1,10 @@ +{ + "items" : [ { + "authRoles" : [ { + "displayName" : "Key Administrator", + "name" : "ROLE_KEY_ADMIN", + } ], + "name" : "{{ ldap_rdom.cm_key_admin}}", + "type" : "LDAP" + } ] +} diff --git a/roles/cm_roles/templates/externalUserMappingsOp.j2 b/roles/cm_roles/templates/externalUserMappingsOp.j2 new file mode 100644 index 0000000..bcc9b38 --- /dev/null +++ b/roles/cm_roles/templates/externalUserMappingsOp.j2 @@ -0,0 +1,10 @@ +{ + "items" : [ { + "authRoles" : [ { + "displayName" : "Operator", + "name" : "ROLE_OPERATOR", + } ], + "name" : "{{ ldap_rdom.cm_operator }}", + "type" : "LDAP" + } ] +} diff --git a/roles/cm_roles/templates/externalUserMappingsRO.j2 b/roles/cm_roles/templates/externalUserMappingsRO.j2 new file 
mode 100644 index 0000000..669df0e --- /dev/null +++ b/roles/cm_roles/templates/externalUserMappingsRO.j2 @@ -0,0 +1,10 @@ +{ + "items" : [ { + "authRoles" : [ { + "displayName" : "Read-Only", + "name" : "ROLE_USER", + } ], + "name" : "{{ ldap_rdom.cm_read_only }}", + "type" : "LDAP" + } ] +} diff --git a/roles/cm_roles/templates/externalUserMappingsUser.j2 b/roles/cm_roles/templates/externalUserMappingsUser.j2 new file mode 100644 index 0000000..50dd18e --- /dev/null +++ b/roles/cm_roles/templates/externalUserMappingsUser.j2 @@ -0,0 +1,10 @@ +{ + "items" : [ { + "authRoles" : [ { + "displayName" : "User Administrator", + "name" : "ROLE_USER_ADMIN", + } ], + "name" : "{{ ldap_rdom.cm_user_admin }}", + "type" : "LDAP" + } ] +} diff --git a/roles/cm_server_teardown/tasks/main.yml b/roles/cm_server_teardown/tasks/main.yml new file mode 100644 index 0000000..9ecce7a --- /dev/null +++ b/roles/cm_server_teardown/tasks/main.yml @@ -0,0 +1,48 @@ +--- + +- name: Stop SCM server + systemd: + name: cloudera-scm-server + state: stopped + +- name: Delete NAV MS Data Dir + file: + path: "{{ navms_data_dir }}" + state: absent + ignore_errors: true + +- name: Delete Eventserver Data Dir + file: + path: "{{ eventserver_index_dir }}" + state: absent + ignore_errors: true + +- name: Delete HMON Data Dir + file: + path: "{{ hmon_firehose_storage_dir }}" + state: absent + ignore_errors: true + +- name: Delete Headlamp Scratch Data Dir + file: + path: "{{ headlamp_scratch_dir }}" + state: absent + ignore_errors: true + +- name: Delete SMON Firehouse Dir + file: + path: "{{ smon_firehose_storage_dir }}" + state: absent + ignore_errors: true + +- name: Delete CM Server Dir + file: + path: "/var/lib/cloudera-scm-server" + state: absent + ignore_errors: true + +- name: Delete navigator audit stream directory + file: + path: "{{ nav_auditstream_directory }}" + state: absent + ignore_errors: true diff --git a/roles/db_connector/tasks/main.yml b/roles/db_connector/tasks/main.yml new file mode 100644 index 0000000..bc99acc --- /dev/null +++ b/roles/db_connector/tasks/main.yml @@ -0,0 +1,24 @@ +--- + +#- name: Install MySQL JDBC Connector +# yum: name=mysql-connector-java state=installed + +- name: Create usr share java + file: path=/usr/share/java state=directory owner=root group=root mode=0755 + + +- name: Copy MySQL connector + copy: + src: mysql-connector-java-8.0.13.jar + dest: /usr/share/java/mysql-connector-java.jar + owner: root + group: root + mode: '0755' + +- name: Copy Oracle connector + copy: + src: ojdbc8.jar + dest: /usr/share/java/oracle-connector-java.jar + owner: root + group: root + mode: '0755' diff --git a/roles/db_teardown/tasks/main.yml b/roles/db_teardown/tasks/main.yml new file mode 100644 index 0000000..90b7093 --- /dev/null +++ b/roles/db_teardown/tasks/main.yml @@ -0,0 +1,30 @@ +--- + +- include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" + +- name: Drop amon database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=amon state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop hue database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=hue state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop hive metastore database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=metastore state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop nav database + mysql_db: login_user=root login_password={{ mysql_root_password 
}} name=nav state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop navms database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=navms state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop oozie database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=oozie state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop rman database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=rman state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop sentry database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=sentry state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + + + diff --git a/roles/db_teardown_mysql_cdh/tasks/main.yml b/roles/db_teardown_mysql_cdh/tasks/main.yml new file mode 100644 index 0000000..90b7093 --- /dev/null +++ b/roles/db_teardown_mysql_cdh/tasks/main.yml @@ -0,0 +1,30 @@ +--- + +- include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" + +- name: Drop amon database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=amon state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop hue database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=hue state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop hive metastore database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=metastore state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop nav database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=nav state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop navms database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=navms state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop oozie database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=oozie state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop rman database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=rman state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + +- name: Drop sentry database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=sentry state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} + + + diff --git a/roles/db_teardown_mysql_cm/tasks/main.yml b/roles/db_teardown_mysql_cm/tasks/main.yml new file mode 100644 index 0000000..9f27725 --- /dev/null +++ b/roles/db_teardown_mysql_cm/tasks/main.yml @@ -0,0 +1,6 @@ +--- + +- include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" + +- name: Drop Cloudera Manager database + mysql_db: login_user=root login_password={{ mysql_root_password }} name=scm state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} diff --git a/roles/db_teardown_oracle_cdh/files/drop.sql b/roles/db_teardown_oracle_cdh/files/drop.sql new file mode 100644 index 0000000..c6e8199 --- /dev/null +++ b/roles/db_teardown_oracle_cdh/files/drop.sql @@ -0,0 +1,22 @@ +SET SERVEROUTPUT on +DECLARE + +BEGIN + FOR cur_rec IN (SELECT object_name, object_type + FROM user_objects + WHERE object_type IN ('TABLE', 'VIEW', 'PACKAGE', 'PROCEDURE', 'FUNCTION', 'SEQUENCE', 'SYNONYM', 'TRIGGER')) + 
LOOP + BEGIN + IF cur_rec.object_type = 'TABLE' THEN + EXECUTE IMMEDIATE 'DROP ' || cur_rec.object_type || ' "' || cur_rec.object_name || '" CASCADE CONSTRAINTS'; + ELSE + EXECUTE IMMEDIATE 'DROP ' || cur_rec.object_type || ' "' || cur_rec.object_name || '"'; + END IF; + EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.put_line('ERROR: DROP ' || cur_rec.object_type || ' ' || cur_rec.object_name ); + END; + END LOOP; +END; +/ +exit diff --git a/roles/db_teardown_oracle_cdh/tasks/main.yml b/roles/db_teardown_oracle_cdh/tasks/main.yml new file mode 100644 index 0000000..7e62d01 --- /dev/null +++ b/roles/db_teardown_oracle_cdh/tasks/main.yml @@ -0,0 +1,36 @@ +--- + +- include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" + +- name: Oracle drop object PL/SQL + copy: + backup: no + src: drop.sql + dest: "{{ tmp_dir }}/drop.sql" + owner: root + group: root + mode: '0600' + +- name: Drop amon database + shell: "export LD_LIBRARY_PATH=/usr/lib/oracle/12.2/client64/lib; /usr/lib/oracle/12.2/client64/bin/sqlplus {{ databases.amon.user }}/{{ databases.amon.pass }}@{{ databases.amon.host }}:{{ databases.amon.port }}/{{ databases.amon.name }} @{{ tmp_dir }}/drop.sql" + +- name: Drop hue database + shell: "export LD_LIBRARY_PATH=/usr/lib/oracle/12.2/client64/lib; /usr/lib/oracle/12.2/client64/bin/sqlplus {{ databases.hue.user }}/{{ databases.hue.pass }}@{{ databases.hue.host }}:{{ databases.hue.port }}/{{ databases.hue.name }} @{{ tmp_dir }}/drop.sql" + +- name: Drop hive metastore database + shell: "export LD_LIBRARY_PATH=/usr/lib/oracle/12.2/client64/lib; /usr/lib/oracle/12.2/client64/bin/sqlplus {{ databases.metastore.user }}/{{ databases.metastore.pass }}@{{ databases.metastore.host }}:{{ databases.metastore.port }}/{{ databases.metastore.name }} @{{ tmp_dir }}/drop.sql" + +- name: Drop nav database + shell: "export LD_LIBRARY_PATH=/usr/lib/oracle/12.2/client64/lib; /usr/lib/oracle/12.2/client64/bin/sqlplus {{ databases.nav.user }}/{{ databases.nav.pass }}@{{ databases.nav.host }}:{{ databases.nav.port }}/{{ databases.nav.name }} @{{ tmp_dir }}/drop.sql" + +- name: Drop navms database + shell: "export LD_LIBRARY_PATH=/usr/lib/oracle/12.2/client64/lib; /usr/lib/oracle/12.2/client64/bin/sqlplus {{ databases.navms.user }}/{{ databases.navms.pass }}@{{ databases.navms.host }}:{{ databases.navms.port }}/{{ databases.navms.name }} @{{ tmp_dir }}/drop.sql" + +- name: Drop oozie database + shell: "export LD_LIBRARY_PATH=/usr/lib/oracle/12.2/client64/lib; /usr/lib/oracle/12.2/client64/bin/sqlplus {{ databases.oozie.user }}/{{ databases.oozie.pass }}@{{ databases.oozie.host }}:{{ databases.oozie.port }}/{{ databases.oozie.name }} @{{ tmp_dir }}/drop.sql" + +- name: Drop rman database + shell: "export LD_LIBRARY_PATH=/usr/lib/oracle/12.2/client64/lib; /usr/lib/oracle/12.2/client64/bin/sqlplus {{ databases.rman.user }}/{{ databases.rman.pass }}@{{ databases.rman.host }}:{{ databases.rman.port }}/{{ databases.rman.name }} @{{ tmp_dir }}/drop.sql" + +- name: Drop sentry database + shell: "export LD_LIBRARY_PATH=/usr/lib/oracle/12.2/client64/lib; /usr/lib/oracle/12.2/client64/bin/sqlplus {{ databases.sentry.user }}/{{ databases.sentry.pass }}@{{ databases.sentry.host }}:{{ databases.sentry.port }}/{{ databases.sentry.name }} @{{ tmp_dir }}/drop.sql" diff --git a/roles/db_teardown_oracle_cm/files/drop.sql b/roles/db_teardown_oracle_cm/files/drop.sql new file mode 100644 index 0000000..9bcee91 --- /dev/null +++ b/roles/db_teardown_oracle_cm/files/drop.sql @@ -0,0 +1,22 @@ +SET SERVEROUTPUT on +DECLARE + +BEGIN + FOR 
cur_rec IN (SELECT object_name, object_type + FROM user_objects + WHERE object_type IN ('TABLE', 'VIEW', 'PACKAGE', 'PROCEDURE', 'FUNCTION', 'SEQUENCE', 'SYNONYM')) + LOOP + BEGIN + IF cur_rec.object_type = 'TABLE' THEN + EXECUTE IMMEDIATE 'DROP ' || cur_rec.object_type || ' "' || cur_rec.object_name || '" CASCADE CONSTRAINTS'; + ELSE + EXECUTE IMMEDIATE 'DROP ' || cur_rec.object_type || ' "' || cur_rec.object_name || '"'; + END IF; + EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.put_line('ERROR: DROP ' || cur_rec.object_type || ' ' || cur_rec.object_name ); + END; + END LOOP; +END; +/ +exit diff --git a/roles/db_teardown_oracle_cm/tasks/main.yml b/roles/db_teardown_oracle_cm/tasks/main.yml new file mode 100644 index 0000000..e633f98 --- /dev/null +++ b/roles/db_teardown_oracle_cm/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" + +- name: Oracle drop object PL/SQL + copy: + backup: no + src: drop.sql + dest: "{{ tmp_dir }}/drop.sql" + owner: root + group: root + mode: '0600' + +- name: Drop Cloudera Manager database + shell: "export LD_LIBRARY_PATH=/usr/lib/oracle/12.2/client64/lib; /usr/lib/oracle/12.2/client64/bin/sqlplus {{ databases.scm.user }}/{{ databases.scm.pass }}@{{ databases.scm.host }}:{{ databases.scm.port }}/{{ databases.scm.name }} @{{ tmp_dir }}/drop.sql" diff --git a/roles/dir_teardown/tasks/main.yml b/roles/dir_teardown/tasks/main.yml new file mode 100644 index 0000000..7bf2303 --- /dev/null +++ b/roles/dir_teardown/tasks/main.yml @@ -0,0 +1,44 @@ +--- + +- include_vars: "{{ inventory_dir }}/group_vars/cdh_servers.yml" + +#- debug: var=cdh_services +#- debug: var="{{ cdh_services[0].dfs_data_dir_list }}" + +- name: DFS Data Dir Delete + shell: rm /data/*/dfs/ -rf + + +- name: SNN Dir Delete + file: + path: "{{ cdh_services[0].fs_checkpoint_dir_list }}" + state: absent + ignore_errors: true + +- name: NN Dir Delete + file: + path: "{{ cdh_services[0].dfs_name_dir_list }}" + state: absent + ignore_errors: true + +- name: JN Edits Dir + file: + path: "{{ cdh_services[0].dfs_journalnode_edits_dir }}" + state: absent + ignore_errors: true + +- name: ZK Data Dirs + file: + path: "/data/3/zookeeper" + state: absent + + +- name: ZK Edits Dirs + file: + path: "/data/4/zookeeper" + state: absent + +- name: Kafka Dir + file: + path: "/var/local/kafka/data" + state: absent diff --git a/roles/dn_dir_creation/tasks/main.yml b/roles/dn_dir_creation/tasks/main.yml new file mode 100644 index 0000000..fe7cec3 --- /dev/null +++ b/roles/dn_dir_creation/tasks/main.yml @@ -0,0 +1,37 @@ +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" + +- name: Permission data dirs on Worker Nodes + file: + path: "{{ item }}" + owner: root + group: root + mode: '0755' + when: scm_version[0] == "5" + with_items: + - /data + - /data/1 + - /data/2 + - /data/3 + - /data/4 + - /data/5 + - /data/6 + - /data/7 + - /data/8 + +- name: Create DFS dirs + file: + path: "{{ item }}" + state: directory + owner: root + group: root + mode: '0755' + when: scm_version[0] == "5" + with_items: + - /data/1/dfs + - /data/2/dfs + - /data/3/dfs + - /data/4/dfs + - /data/5/dfs + - /data/6/dfs + - /data/7/dfs + - /data/8/dfs diff --git a/roles/dn_dir_teardown/tasks/main.yml b/roles/dn_dir_teardown/tasks/main.yml new file mode 100644 index 0000000..93121b5 --- /dev/null +++ b/roles/dn_dir_teardown/tasks/main.yml @@ -0,0 +1,4 @@ +--- + +- name: DFS Data Dir Delete + shell: rm /data/*/dfs/ -rf diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml new 
file mode 100644 index 0000000..cbc980b --- /dev/null +++ b/roles/haproxy/tasks/main.yml @@ -0,0 +1,33 @@ +--- +- include_vars: "{{ inventory_dir }}/group_vars/cdh_servers.yml" +- include_vars: "{{ inventory_dir }}/group_vars/tls_enc.yml" + +- name: Install HAProxy + yum: name=haproxy state=latest + delegate_to: "{{ item }}" + with_items: "{{ groups['haproxy'] }}" + +- name: Set haproxy.cfg + template: src=haproxy.j2 dest=/etc/haproxy/haproxy.cfg backup=yes + delegate_to: "{{ item }}" + with_items: "{{ groups['haproxy'] }}" + +- name: Create combined key and pem + shell: "cat /opt/cloudera/security/x509/localhost.pem /opt/cloudera/security/x509/localhost.key | sed -ne '/-BEGIN/,/-END/p' > /etc/haproxy/combinedKeyAndCert.pem" + +- name: Set combined key and pem permissions + file: + path: /etc/haproxy/combinedKeyAndCert.pem + owner: haproxy + group: haproxy + mode: 0400 + +- name: Enable HAProxy + service: + name: haproxy + enabled: yes + state: restarted + +- name: Save template to home dir + shell: "cp /etc/haproxy/haproxy.cfg {{ tmp_dir }}/haproxy.cfg.bak" + diff --git a/roles/haproxy/templates/haproxy.j2 b/roles/haproxy/templates/haproxy.j2 new file mode 100644 index 0000000..d26f524 --- /dev/null +++ b/roles/haproxy/templates/haproxy.j2 @@ -0,0 +1,138 @@ +#--------------------------------------------------------------------- +# Global settings +#--------------------------------------------------------------------- +global + # to have these messages end up in /var/log/haproxy.log you will + # need to: + # + # 1) configure syslog to accept network log events. This is done + # by adding the '-r' option to the SYSLOGD_OPTIONS in + # /etc/sysconfig/syslog + # + # 2) configure local2 events to go to the /var/log/haproxy.log + # file. A line like the following can be added to + # /etc/sysconfig/syslog + # + # local2.* /var/log/haproxy.log + # + log 127.0.0.1 local2 + + chroot /var/lib/haproxy + pidfile /var/run/haproxy.pid + maxconn 4000 + user haproxy + group haproxy + daemon + tune.ssl.default-dh-param 2048 + + # turn on stats unix socket + stats socket /var/lib/haproxy/stats + +#--------------------------------------------------------------------- +# common defaults that all the 'listen' and 'backend' sections will +# use if not designated in their block +# +# increased "timeout client" and "timeout server" from 5m to 30m, see: CLCL-1410 +# +#--------------------------------------------------------------------- +defaults + mode http + log global + option httplog + option dontlognull + option http-server-close + option redispatch + retries 3 + timeout http-request 10s + timeout queue 5m + timeout connect 10s + timeout client 30m + timeout server 30m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 + +frontend ft_solrsec + bind 0.0.0.0:8985 name https ssl crt /etc/haproxy/combinedKeyAndCert.pem no-sslv3 no-tlsv10 no-tlsv11 + mode http + option tcplog + log global + option forwardfor except 127.0.0.0/8 + default_backend bk_solr + +backend bk_solr + mode http + balance roundrobin + {% for host in groups['worker_servers'] %} + server solr{{ host }} {{ host }}:8985 ssl ca-file {{ tls.cert_chain }} + {% endfor %} + +# load balance the port shared by HiveServer2 and odbc version 2 and jdbc + +frontend ft_impala-thrift-sec + bind 0.0.0.0:25004 ssl crt /etc/haproxy/combinedKeyAndCert.pem no-sslv3 no-tlsv10 no-tlsv11 + mode tcp + option tcplog + timeout client 1h + log global + default_backend bk_impalathrift + +backend bk_impalathrift + mode tcp + balance source + timeout server 1h + 
{% for host in groups['worker_servers'] %} + server impala{{ host }} {{ host }}:21050 ssl ca-file {{ tls.cert_chain }} + {% endfor %} + +# load balance the port shared by impala-shell and odbc version 1 + +frontend ft_impala-shell-sec + bind 0.0.0.0:25003 ssl crt /etc/haproxy/combinedKeyAndCert.pem no-sslv3 + mode tcp + option tcplog + timeout client 1h + log global + default_backend bk_impalashell + +backend bk_impalashell + mode tcp + balance source + timeout server 1h + {% for host in groups['worker_servers'] %} + server impala2{{ host }} {{ host }}:21000 ssl ca-file {{ tls.cert_chain }} + {% endfor %} + +frontend ft_hs2-sec + bind 0.0.0.0:10000 ssl crt /etc/haproxy/combinedKeyAndCert.pem no-sslv3 no-tlsv10 no-tlsv11 + mode tcp + option tcplog + timeout client 12h + log global + default_backend bk_hs2 + +backend bk_hs2 + mode tcp + balance source + timeout server 12h + {% for host in groups['gatewaylb_servers'] %} + server hive{{ host }} {{ host }}:10000 ssl ca-file {{ tls.cert_chain }} + {% endfor %} + +# The Cloudera documentation recommends using pass-through TLS. We're not doing that though +# as we don't think it is neccesary. +frontend ft_oozie-sec + bind 0.0.0.0:11443 name https ssl crt /etc/haproxy/combinedKeyAndCert.pem no-sslv3 no-tlsv10 no-tlsv11 + mode http + option tcplog + log global + option forwardfor except 127.0.0.0/8 + default_backend bk_oozie + +backend bk_oozie + mode http + balance roundrobin + {% for host in groups['gatewaylb_servers'] %} + server oozie{{ host }} {{ host }}:{{ cdh_services | json_query('[?type==`oozie`].oozie_https_port') | first }} ssl ca-file {{ tls.cert_chain }} + {% endfor %} + diff --git a/roles/haproxy_teardown/tasks/main.yml b/roles/haproxy_teardown/tasks/main.yml new file mode 100644 index 0000000..269a6b1 --- /dev/null +++ b/roles/haproxy_teardown/tasks/main.yml @@ -0,0 +1,21 @@ +- name: Stop HAProxy + service: + name: haproxy + state: stopped + ignore_errors: true + +- name: Delete combined key and pem + file: + path: /etc/haproxy/combinedKeyAndCert.pem + state: absent + +- name: Delete HAProxy config + file: + path: /etc/haproxy/haproxy.cfg + state: absent + +- name: Install HAProxy + yum: + name: haproxy + state: absent + ignore_errors: true diff --git a/roles/java/tasks/main.yml b/roles/java/tasks/main.yml index c08ad99..6388215 100644 --- a/roles/java/tasks/main.yml +++ b/roles/java/tasks/main.yml @@ -3,44 +3,50 @@ - name: Install Oracle JDK yum: name={{ item }} state=latest update_cache=yes with_items: - - oracle-j2sdk1.7 + - jdk1.8 - unzip -- stat: path="{{ tmp_dir }}/UnlimitedJCEPolicyJDK7.zip" - register: jce_zip_exists - -- name: Download JCE unlimited policy - get_url: - url=http://download.oracle.com/otn-pub/java/jce/7/UnlimitedJCEPolicyJDK7.zip - dest="{{ tmp_dir }}/UnlimitedJCEPolicyJDK7.zip" - headers="Cookie:oraclelicense=accept-securebackup-cookie" - when: jce_zip_exists.stat.exists == False - -- name: Unzip JCE unlimited policy files - unarchive: - src: "{{ tmp_dir }}/UnlimitedJCEPolicyJDK7.zip" - dest: "{{ tmp_dir }}" - copy: no - -- name: Install local_policy.jar - copy: - src: "{{ tmp_dir }}/UnlimitedJCEPolicy/local_policy.jar" - dest: /usr/java/jdk1.7.0_67-cloudera/jre/lib/security/local_policy.jar - backup: yes - remote_src: True - -- name: Install US_export_policy.jar - copy: - src: "{{ tmp_dir }}/UnlimitedJCEPolicy/US_export_policy.jar" - dest: /usr/java/jdk1.7.0_67-cloudera/jre/lib/security/US_export_policy.jar - backup: yes - remote_src: True - -- name: Cleanup tmp files - file: - path="{{ tmp_dir }}/{{ item }}" 
- state=absent - with_items: - - UnlimitedJCEPolicy - - UnlimitedJCEPolicyJDK7.zip - ignore_errors: True \ No newline at end of file +- name: Set JCE unlimited + lineinfile: + path: /usr/java/default/jre/lib/security/java.security + regexp: '^#*\s*crypto\.policy.*limited$' + line: crypto.policy=unlimited + +#- stat: path="{{ tmp_dir }}/UnlimitedJCEPolicyJDK7.zip" +# register: jce_zip_exists + +#- name: Download JCE unlimited policy +# get_url: +# url=http://download.oracle.com/otn-pub/java/jce/7/UnlimitedJCEPolicyJDK7.zip +# dest="{{ tmp_dir }}/UnlimitedJCEPolicyJDK7.zip" +# headers="Cookie:oraclelicense=accept-securebackup-cookie" +# when: jce_zip_exists.stat.exists == False + +#- name: Unzip JCE unlimited policy files +# unarchive: +# src: "{{ tmp_dir }}/UnlimitedJCEPolicyJDK7.zip" +# dest: "{{ tmp_dir }}" +# copy: no + +#- name: Install local_policy.jar +# copy: +# src: "{{ tmp_dir }}/UnlimitedJCEPolicy/local_policy.jar" +# dest: /usr/java/jdk1.7.0_67-cloudera/jre/lib/security/local_policy.jar +# backup: yes +# remote_src: True + +#- name: Install US_export_policy.jar +# copy: +# src: "{{ tmp_dir }}/UnlimitedJCEPolicy/US_export_policy.jar" +# dest: /usr/java/jdk1.7.0_67-cloudera/jre/lib/security/US_export_policy.jar +# backup: yes +# remote_src: True + +#- name: Cleanup tmp files +# file: +# path="{{ tmp_dir }}/{{ item }}" +# state=absent +# with_items: +# - UnlimitedJCEPolicy +# - UnlimitedJCEPolicyJDK7.zip +# ignore_errors: True diff --git a/roles/kafka_dir_teardown/tasks/main.yml b/roles/kafka_dir_teardown/tasks/main.yml new file mode 100644 index 0000000..291dfaf --- /dev/null +++ b/roles/kafka_dir_teardown/tasks/main.yml @@ -0,0 +1,6 @@ +--- + +- name: Kafka Dir + file: + path: "/var/local/kafka/data" + state: absent diff --git a/roles/kms_dir_teardown/tasks/main.yml b/roles/kms_dir_teardown/tasks/main.yml new file mode 100644 index 0000000..818836f --- /dev/null +++ b/roles/kms_dir_teardown/tasks/main.yml @@ -0,0 +1,4 @@ +- name: KMS Data Dir + file: + path: "/var/opt/cloudera/kms-keytrustee" + state: absent diff --git a/roles/kms_encryption_zones/files/kms-default.json b/roles/kms_encryption_zones/files/kms-default.json new file mode 100644 index 0000000..99071fe --- /dev/null +++ b/roles/kms_encryption_zones/files/kms-default.json @@ -0,0 +1,6 @@ +{ + "items" : [ { + "name" : "kms-acls.xml_role_safety_valve", + "value" : "{{ acl_xml }}" + } ] +} diff --git a/roles/kms_encryption_zones/tasks/main.yml b/roles/kms_encryption_zones/tasks/main.yml new file mode 100644 index 0000000..2d1bc8f --- /dev/null +++ b/roles/kms_encryption_zones/tasks/main.yml @@ -0,0 +1,143 @@ +--- + +- include_vars: "{{ inventory_dir }}/group_vars/cdh_servers.yml" +- include_vars: "{{ inventory_dir }}/group_vars/encryption_zones.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" + +- set_fact: cm_api_url={{ "https://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port_tls }}" if scm_web_tls==True else "http://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port }}" }} + +- name: Get Cloudera Manager API version + uri: + url: "{{ cm_api_url }}/api/version" + method: GET + status_code: 200 + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + force_basic_auth: yes + return_content: yes + register: result + +- set_fact: cm_api_url="{{ cm_api_url }}/api/{{ result.content }}" + +- set_fact: acl_xml="hadoop.kms.acl.CREATEhdfs hdfshadoop.kms.acl.GET_KEYShdfs 
hdfswhitelist.key.acl.MANAGEMENThdfs hdfs" + +- name: Prepare default ACLs + local_action: + module: template + src: "kms-default.j2" + dest: "{{ tmp_dir }}/kms_default_configs.json" + mode: 0777 + +- name: Set KMS ACLs + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/services/keytrustee/roleConfigGroups/keytrustee-KMS_KEYTRUSTEE-BASE/config" + method: PUT + body_format: json + body: "{{ lookup('file', ''+ tmp_dir + '/kms_default_configs.json') }}" + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + run_once: true + +- name: Refresh KMS + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/services/keytrustee/commands/restart" + method: POST + status_code: 200 + body_format: json + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: refresh_resp + run_once: true + delegate_to: "{{ hostvars[groups['master_servers'][0]]['inventory_hostname'] }}" + +- debug: var=refresh_resp + +- set_fact: command_id="{{ refresh_resp.json.id }}" + +- name: Wait for refresh + uri: + url: "{{ cm_api_url }}/commands/{{ command_id }}" + body_format: json + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: refresh_cmd_result + until: refresh_cmd_result.json.active == false + retries: 5 + delay: 30 + run_once: true + +- name: Prepare encryption script + template: + src: "createZones.j2" + dest: "{{ tmp_dir }}/createZones.sh" + mode: 0777 + +- name: Run encryption script + command: "{{ tmp_dir }}/createZones.sh" + run_once: true + register: encryption_script_out + delegate_to: "{{ hostvars[groups['master_servers'][0]]['inventory_hostname'] }}" + +- debug: var=encryption_script_out + +- name: Create KMS ACLs + local_action: + module: template + src: "kmsACLs.j2" + dest: "{{ tmp_dir }}/kmsACLs.json" + mode: 0777 + +- name: Set KMS ACLs + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/services/keytrustee/roleConfigGroups/keytrustee-KMS_KEYTRUSTEE-BASE/config" + method: PUT + body_format: json + body: "{{ lookup('file', ''+ tmp_dir + '/kmsACLs.json') }}" + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + run_once: true + +- name: Restart Cluster + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/commands/restart" + method: POST + status_code: 200 + body_format: json + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: restart_resp + run_once: true + delegate_to: "{{ hostvars[groups['master_servers'][0]]['inventory_hostname'] }}" + + +- set_fact: command_id="{{ restart_resp.json.id }}" + +- name: Wait for refresh + uri: + url: "{{ cm_api_url }}/commands/{{ command_id }}" + body_format: json + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: restart_cmd_result + until: restart_cmd_result.json.active == false + retries: 25 + delay: 30 + run_once: true + diff --git a/roles/kms_encryption_zones/templates/createZones.j2 b/roles/kms_encryption_zones/templates/createZones.j2 new file mode 100644 index 0000000..8f9bcc0 --- /dev/null +++ b/roles/kms_encryption_zones/templates/createZones.j2 @@ -0,0 +1,32 @@ +#!/bin/bash + +KEYTAB=$(ls -1dt 
/var/run/cloudera-scm-agent/process/*-hdfs-NAMENODE | head -1)/hdfs.keytab +PRINC_NAME=$(klist -kt ${KEYTAB} | grep hdfs | awk '{print $4;}' | tail -1) + +kinit -kt ${KEYTAB} ${PRINC_NAME} + +{% for enc in encryption_keys %} +hadoop key create {{ enc.key.keyname }} -size 256 +{% endfor %} + +{% for enc in encryption_zones %} +if [[ ! $(hdfs crypto -listZones | grep {{ enc.zone.path }}) ]] +then + #hdfs dfs -rm -r {{ enc.zone.path }} + hdfs dfs -mv {{ enc.zone.path }} {{ enc.zone.path }}-tmp + hdfs dfs -mkdir {{ enc.zone.path }} + + hdfs dfs -chown {{ enc.zone.user }}:{{ enc.zone.group }} {{ enc.zone.path }} + hdfs dfs -chmod {{ enc.zone.mode }} {{ enc.zone.path }} + + hdfs crypto -createZone -keyName {{ enc.zone.key }} -path {{ enc.zone.path }} + + hdfs dfs -cp -p {{ enc.zone.path }}-tmp/* {{ enc.zone.path }} + hdfs dfs -rm -r {{ enc.zone.path }}-tmp +else + echo {{ enc.zone.path}} exists... skipping. +fi + +{% endfor %} + +kdestroy diff --git a/roles/kms_encryption_zones/templates/kms-default.j2 b/roles/kms_encryption_zones/templates/kms-default.j2 new file mode 100644 index 0000000..99071fe --- /dev/null +++ b/roles/kms_encryption_zones/templates/kms-default.j2 @@ -0,0 +1,6 @@ +{ + "items" : [ { + "name" : "kms-acls.xml_role_safety_valve", + "value" : "{{ acl_xml }}" + } ] +} diff --git a/roles/kms_encryption_zones/templates/kmsACLs.j2 b/roles/kms_encryption_zones/templates/kmsACLs.j2 new file mode 100644 index 0000000..307674a --- /dev/null +++ b/roles/kms_encryption_zones/templates/kmsACLs.j2 @@ -0,0 +1,7 @@ +{ + "items": [ { + "name" : "kms-acls.xml_role_safety_valve", + "value" : "hadoop.kms.acl.CREATEnobody {{ keyAdminGroup }}hadoop.kms.acl.DELETEnobody {{ keyAdminGroup }}hadoop.kms.acl.ROLLOVERnobody {{ keyAdminGroup }}hadoop.kms.acl.GEThadoop.kms.acl.GET_KEYSnobody {{ keyAdminGroup }}hadoop.kms.acl.GET_METADATA*hadoop.kms.acl.SET_KEY_MATERIALhadoop.kms.acl.GENERATE_EEKhdfs {{ cdh_services | json_query('[?type==`hdfs`].hdfs_supergroup | first') }}hadoop.kms.blacklist.CREATEhdfs {{ cdh_services | json_query('[?type==`hdfs`].hdfs_supergroup | first') }}hadoop.kms.blacklist.DELETEhdfs {{ cdh_services | json_query('[?type==`hdfs`].hdfs_supergroup | first') }}hadoop.kms.blacklist.ROLLOVERhdfs {{ cdh_services | json_query('[?type==`hdfs`].hdfs_supergroup | first') }}hadoop.kms.blacklist.GET*hadoop.kms.blacklist.GET_KEYShadoop.kms.blacklist.SET_KEY_MATERIAL*hadoop.kms.blacklist.DECRYPT_EEKhdfs keytrustee.kms.acl.UNDELETEkeytrustee.kms.acl.PURGEdefault.key.acl.MANAGEMENTdefault.key.acl.GENERATE_EEKdefault.key.acl.DECRYPT_EEKdefault.key.acl.READwhitelist.key.acl.MANAGEMENTnobody {{ keyAdminGroup }}whitelist.key.acl.READhdfs {{ cdh_services | json_query('[?type==`hdfs`].hdfs_supergroup | first') }}whitelist.key.acl.GENERATE_EEKhdfs {{ cdh_services | json_query('[?type==`hdfs`].hdfs_supergroup | first') }}whitelist.key.acl.DECRYPT_EEKnobody keytrustee.kms.acl.PURGEkeytrustee.kms.acl.UNDELETE{% for enc in encryption_keys %} +key.acl.{{ enc.key.keyname }}.DECRYPT_EEK{{ enc.key.acl }}{% endfor %}" + } ] +} diff --git a/roles/kms_key_sync/tasks/main.yml b/roles/kms_key_sync/tasks/main.yml new file mode 100644 index 0000000..11cbe5e --- /dev/null +++ b/roles/kms_key_sync/tasks/main.yml @@ -0,0 +1,126 @@ +--- + +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server_enc.yml" + +- name: Prepare local directories for kms files + local_action: + module: file + path: "/tmp/kms/" + owner: root + group: root + state: directory + mode: 
0777 + recurse: yes + + +- name: Fetch key files from Active KMS + fetch: + src: "{{ kms_key_dir }}/keytrustee/.keytrustee/{{ item }}" + dest: "/tmp/kms/" + fail_on_missing: no + delegate_to: "{{ hostvars[groups['kms_servers'][0]]['inventory_hostname'] }}" + with_items: + - gpg.conf + - keytrustee.conf + - logging.conf + - pubring.gpg + - pubring.gpg~ + - random_seed + - secring.gpg + - trustdb.gpg + +- name: Copy key files to passive KMS + copy: + src: "/tmp/kms/{{ hostvars[groups['kms_servers'][0]]['inventory_hostname'] }}{{ kms_key_dir }}/keytrustee/.keytrustee" + dest: "{{ kms_key_dir }}/keytrustee/" + owner: keytrustee-kms + group: keytrustee-kms + mode: '0600' + +- name: Delete local directories for kts files + local_action: + module: file + path: "/tmp/kms/" + owner: root + group: root + state: absent + run_once: true + +- set_fact: cm_api_url={{ "https://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port_tls }}" if scm_web_tls==True else "http://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port }}" }} + +- name: Get Cloudera Manager API version + uri: + url: "{{ cm_api_url }}/api/version" + method: GET + status_code: 200 + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + force_basic_auth: yes + return_content: yes + register: result + +- set_fact: cm_api_url="{{ cm_api_url }}/api/{{ result.content }}" + +- name: Restart Cluster to pick up KMS changes + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/commands/restart" + method: POST + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + body_format: "json" + return_content: yes + register: cluster_restart_resp + run_once: true + +- debug: var=cluster_restart_resp + +- set_fact: command_id="{{ cluster_restart_resp.json.id }}" + +- name: Wait for service to start + uri: + url: "{{ cm_api_url }}/commands/{{ command_id }}" + body_format: json + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: cluster_start_result + until: cluster_start_result.json.active == false + retries: 25 + delay: 30 + run_once: true + +- name: Deploy Cluster Client Config + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/commands/deployClientConfig" + method: POST + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + body_format: "json" + return_content: yes + register: cluster_conf_resp + run_once: true + +- set_fact: command_id="{{ cluster_conf_resp.json.id }}" + +- name: Wait for service to start + uri: + url: "{{ cm_api_url }}/commands/{{ command_id }}" + body_format: json + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: cluster_conf_result + until: cluster_conf_result.json.active == false + retries: 25 + delay: 30 + run_once: true + diff --git a/roles/kts/tasks/main.yml b/roles/kts/tasks/main.yml new file mode 100644 index 0000000..c72cac1 --- /dev/null +++ b/roles/kts/tasks/main.yml @@ -0,0 +1,165 @@ +--- + +- include_vars: "{{ inventory_dir }}/group_vars/all" +- include_vars: "{{ inventory_dir }}/group_vars/cdh_servers.yml" +- include_vars: "{{ inventory_dir }}/group_vars/kts_servers.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" +- include_vars: "{{ inventory_dir 
}}/group_vars/krb5_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/tls_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/ldap_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server_enc.yml" + +- set_fact: cm_api_url={{ "https://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port_tls }}" if scm_web_tls==True else "http://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port }}" }} + +- name: Get Cloudera Manager API version + uri: + url: "{{ cm_api_url }}/api/version" + method: GET + status_code: 200 + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + force_basic_auth: yes + return_content: yes + register: result + +- set_fact: cm_api_url="{{ cm_api_url }}/api/{{ result.content }}" + +- name: Check whether cluster exists + uri: + url: "{{ cm_api_url }}/cm/service" + method: GET + status_code: 200,404 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: clusters_resp + +- debug: var=clusters_resp + +- set_fact: cms_started={{ 'True' if clusters_resp.json.serviceState == "STARTED" else 'False' }} + +- name: Start CMS cluster + uri: + url: "{{ cm_api_url }}/cm/service/commands/start" + method: POST + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: template_resp + when: cms_started == False + +- debug: var=template_resp + +- set_fact: command_id="{{ template_resp.json.id }}" + when: cms_started == False + +# https://cloudera.github.io/cm_api/apidocs/v13/path__commands.html +- name: Wait for cluster to start + uri: + url: "{{ cm_api_url }}/commands/{{ command_id }}" + body_format: json + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: cluster_start_result + until: cluster_start_result.json.active == false + retries: 5 + delay: 30 + when: cms_started == False + +- name: Check whether cluster exists + uri: + url: "{{ cm_api_url }}/clusters/{{ kts_display_name }}" + method: GET + status_code: 200,404 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: clusters_resp + +- set_fact: cluster_exists={{ 'True' if clusters_resp.status == 200 else 'False' }} +- debug: msg="Cluster '{{ kts_display_name }}' exists - {{ cluster_exists }}" + + +# https://www.cloudera.com/documentation/enterprise/latest/topics/install_cluster_template.html +- name: Prepare cluster template + template: + src: "kts.j2" + dest: "{{ tmp_dir }}/kts.json" + mode: 0777 + when: + - cluster_exists == False + +#- debug: var= "{{ lookup('file', ''+ tmp_dir + '/cluster.json') }}" + +# https://cloudera.github.io/cm_api/apidocs/v13/path__cm_importClusterTemplate.html +- name: Import cluster template + uri: + url: "{{ cm_api_url }}/cm/importClusterTemplate?addRepositories=true" + method: POST + body_format: json + body: "{{ lookup('file', ''+ tmp_dir + '/kts.json') }}" + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: template_resp + when: + - cluster_exists == False + +- debug: var=template_resp + when: + - cluster_exists == False + +- set_fact: command_id="{{ template_resp.json.id }}" + when: + - cluster_exists == False + +- debug: msg="Import cluster template command ID - {{ command_id }}" + when: + - cluster_exists == False + +- 
debug: msg="Login to Cloudera Manager to monitor import progress - http://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port }}/cmf/commands/commands" + when: + - cluster_exists == False + +- name: Save template to home dir + shell: cp "{{ tmp_dir }}"/kts.json /tmp/kts.json.backup + ignore_errors: True + +- file: + path: "{{ tmp_dir }}/kts.json" + state: absent + ignore_errors: True + +- name: Wait for first run wizard to complete + uri: + url: "{{ cm_api_url }}/commands/{{ command_id }}" + body_format: json + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: frw_result + until: frw_result.json.active == false + retries: 25 + delay: 60 + when: + - cluster_exists == False + +- name: Create temp dir for Fetching Keys + file: + path: /tmp/kts + state: directory + owner: root + group: root + mode: '0644' diff --git a/roles/kts/templates/kts.j2 b/roles/kts/templates/kts.j2 new file mode 100644 index 0000000..0866b47 --- /dev/null +++ b/roles/kts/templates/kts.j2 @@ -0,0 +1,115 @@ +{ + "cdhVersion" : "6.0.0", + "displayName" : "{{ kts_display_name }}", + "cmVersion" : "{{ scm_version }}", + "products" : [ {% set prod_j = joiner(",") %} + {% for product in kts_products %} + {{ prod_j() }} + { + "version" : "{{ product['version'] }}", + "product" : "{{ product['product'] }}" + } + {% endfor %} + ], + "services" : [ { + "refName" : "keytrustee_server", + "serviceType" : "KEYTRUSTEE_SERVER", + "serviceConfigs" : [ { + "name" : "keytrustee.home", + "variable" : "KEYTRUSTEE_SERVER_KEYTRUSTEE_HOME" + } ], + "roleConfigGroups" : [ { + "refName" : "keytrustee_server-KEYTRUSTEE_ACTIVE_SERVER-BASE", + "roleType" : "KEYTRUSTEE_ACTIVE_SERVER", + "configs" : [ { + "name" : "ssl_server_ca_certificate_location", + "value" : "{{ tls.cert_chain }}" + }, { + "name" : "ssl_server_privatekey_location", + "value" : "{{ tls.private_key }}" + }, { + "name" : "ssl_server_privatekey_password", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "ssl_server_certificate_location", + "value" : "{{ tls.tls_cert }}" + } ], + "base" : true + }, { + "refName" : "keytrustee_server-KEYTRUSTEE_PASSIVE_SERVER-BASE", + "roleType" : "KEYTRUSTEE_PASSIVE_SERVER", + "configs" : [ { + "name" : "ssl_server_ca_certificate_location", + "value" : "{{ tls.cert_chain }}" + }, { + "name" : "ssl_server_privatekey_password", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "ssl_server_privatekey_location", + "value" : "{{ tls.private_key }}" + }, { + "name" : "ssl_server_certificate_location", + "value" : "{{ tls.tls_cert }}" + } ], + "base" : true + }, { + "refName" : "keytrustee_server-DB_ACTIVE-BASE", + "roleType" : "DB_ACTIVE", + "configs" : [ { + "name" : "db_root", + "variable" : "KEYTRUSTEE_SERVER_DB_ACTIVE_BASE_DB_ROOT" + } ], + "base" : true + }, { + "refName" : "keytrustee_server-DB_PASSIVE-BASE", + "roleType" : "DB_PASSIVE", + "configs" : [ { + "name" : "db_root", + "variable" : "KEYTRUSTEE_SERVER_DB_PASSIVE_BASE_DB_ROOT" + } ], + "base" : true + } ] + } ], + "hostTemplates" : [ { + "refName" : "KeyTrusteeActive", + "cardinality" : 0, + "roleConfigGroupsRefNames" : [ "keytrustee_server-DB_ACTIVE-BASE", "keytrustee_server-KEYTRUSTEE_ACTIVE_SERVER-BASE" ] + }, { + "refName" : "KeyTrusteePassive", + "cardinality" : 0, + "roleConfigGroupsRefNames" : [ "keytrustee_server-DB_PASSIVE-BASE", "keytrustee_server-KEYTRUSTEE_PASSIVE_SERVER-BASE" ] + } ], + "instantiator" : { + "clusterName" : "{{ kts_display_name }}", + 
"hosts" : [ + {% set host_joiner = joiner(",") %} + {% for host in groups['kts_servers'] %} + {% if 'host_template' in hostvars[host] %} + {{ host_joiner() }} + { + "hostName" : "{{ host }}", + "hostTemplateRefName" : "{{ hostvars[host]['host_template'] }}" + {% if 'role_ref_names' in hostvars[host] %} + ,"roleRefNames" : [ "{{ hostvars[host]['role_ref_names'] }}" ] + {% endif %} + } + {% endif %} + {% endfor %} + ], + "variables" : [ + {% set var_joiner = joiner(",") %} + {% for item in kts_services %} + {% for (k,v) in item.iteritems() %} + {% if not k|lower == 'type' %} + {{ var_joiner() }} + { + "name": "{{ k|upper }}", + "value": "{{ v }}" + } + {% endif %} + {% endfor %} + {% endfor %} + ] +} +} + diff --git a/roles/kts_dir_teardown/tasks/main.yml b/roles/kts_dir_teardown/tasks/main.yml new file mode 100644 index 0000000..c48535d --- /dev/null +++ b/roles/kts_dir_teardown/tasks/main.yml @@ -0,0 +1,4 @@ +- name: KTS Data Dir + file: + path: "/var/opt/cloudera/keytrustee" + state: absent diff --git a/roles/kts_key_sync/tasks/main.yml b/roles/kts_key_sync/tasks/main.yml new file mode 100644 index 0000000..96ce084 --- /dev/null +++ b/roles/kts_key_sync/tasks/main.yml @@ -0,0 +1,351 @@ +--- + +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/cdh_servers.yml" +- include_vars: "{{ inventory_dir }}/group_vars/all" +- include_vars: "{{ inventory_dir }}/group_vars/tls_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/kms_servers.yml" + +- name: Set Keytrustee directory permissions + file: + path: /var/opt/cloudera/keytrustee + mode: '0700' + owner: keytrustee + group: keytrustee + state: directory + +- name: Prepare local directories for kts files + local_action: + module: file + path: "/tmp/kts/" + owner: root + group: root + state: directory + mode: 0777 + recurse: yes + +- name: Fetch key files from Active KTS + fetch: + src: "{{ kts_services[0].keytrustee_server_keytrustee_home }}/{{ item }}" + dest: "/tmp/kts/" + fail_on_missing: no + delegate_to: "{{ hostvars[groups['kts_servers'][0]]['inventory_hostname'] }}" + with_items: + - gpg.conf + - keytrustee.conf + - logging.conf + - pubring.gpg + - pubring.gpg~ + - random_seed + - secring.gpg + - trustdb.gpg + +- name: Copy key files to passive KTS + copy: + src: "/tmp/kts/{{ hostvars[groups['kts_servers'][0]]['inventory_hostname'] }}{{ kts_services[0].keytrustee_server_keytrustee_home }}/" + dest: "{{ kts_services[0].keytrustee_server_keytrustee_home }}" + owner: keytrustee + group: keytrustee + mode: '0600' + +- name: Delete local directories for kts files + local_action: + module: file + path: "/tmp/kts/" + owner: root + group: root + state: absent + run_once: true + +- name: Permission key files + file: + path: "{{ kts_services[0].keytrustee_server_keytrustee_home }}/logging.conf" + mode: '0644' + +- name: Run ktadmin on Passive KTS + shell: "ktadmin init --confdir {{ kts_services[0].keytrustee_server_keytrustee_home }}/" + register: ktadmin_output + delegate_to: "{{ hostvars[groups['kts_servers'][1]]['inventory_hostname'] }}" + run_once: true + +- set_fact: cm_api_url={{ "https://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port_tls }}" if scm_web_tls==True else "http://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port }}" }} + +- name: Get Cloudera Manager API version + uri: + url: "{{ cm_api_url }}/api/version" + method: GET + status_code: 200 + user: "{{ scm_default_user 
}}" + password: "{{ scm_default_pass }}" + force_basic_auth: yes + return_content: yes + register: result + +- set_fact: cm_api_url="{{ cm_api_url }}/api/{{ result.content }}" + +- name: + uri: + url: "{{ cm_api_url }}/clusters/{{ kts_display_name }}/commands/start" + method: POST + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: kts_resp + run_once: true + +- set_fact: command_id="{{ kts_resp.json.id }}" + +- name: Wait for cluster to start + uri: + url: "{{ cm_api_url }}/commands/{{ command_id }}" + body_format: json + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: cluster_start_result + until: cluster_start_result.json.active == false + retries: 5 + delay: 30 + run_once: true + +- name: Setup KTS Sync Replication HA + uri: + url: "{{ cm_api_url }}/clusters/{{ kts_display_name }}/services/keytrustee_server/commands/SetupSyncReplicationCommand" + method: POST + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: kts_sync_resp + run_once: true + +- set_fact: command_id="{{ kts_sync_resp.json.id }}" + +- name: Wait for sync to complete + uri: + url: "{{ cm_api_url }}/commands/{{ command_id }}" + body_format: json + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: cluster_start_result + until: cluster_start_result.json.active == false + retries: 5 + delay: 30 + run_once: true + +- name: Restart KTS Cluster + uri: + url: "{{ cm_api_url }}/clusters/{{ kts_display_name }}/services/keytrustee_server/commands/restart" + method: POST + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: kts_resp + run_once: true + +- set_fact: command_id="{{ kts_resp.json.id }}" + +- name: Wait for cluster to start + uri: + url: "{{ cm_api_url }}/commands/{{ command_id }}" + body_format: json + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: cluster_start_result + until: cluster_start_result.json.active == false + retries: 5 + delay: 30 + run_once: true + +- name: Add KTS Org + shell: "keytrustee-orgtool add -n {{ kts_org_name }} -c admin@admin.com --confdir {{ kts_services[0].keytrustee_server_keytrustee_home }}/" + delegate_to: "{{ hostvars[groups['kts_servers'][0]]['inventory_hostname'] }}" + run_once: true + register: KTS_org + +- name: Get KTS auth_secret + shell: "keytrustee-orgtool --confdir {{ kts_services[0].keytrustee_server_keytrustee_home }}/ list" + delegate_to: "{{ hostvars[groups['kts_servers'][0]]['inventory_hostname'] }}" + run_once: true + register: KTS_auth + + +- set_fact: + kts_auth_json: "{{ KTS_auth.stdout | from_json }}" + +- debug: var=kts_auth_json +- debug: var=kts_org_name +- set_fact: + kts_auth_secret: "{{ kts_auth_json[kts_org_name].auth_secret }}" + +- debug: var=kts_auth_secret + +- name: Get SCM hostIds for inventory hosts + become: true + action: scm_hosts + register: scm_hosts_result + vars: + use_tls: True + # environment: + # PYTHONPATH: "{{ ansible_env.PYTHONPATH }}:/usr/lib/python2.7/site-packages/cm_api-19.0.0-py2.7.egg" + +- set_fact: scm_host_ids="{{ scm_hosts_result.host_ids }}" + +- set_fact: 
scm_host_names="{{ scm_hosts_result.host_names }}" + +- name: Prepare hosts template + local_action: + module: template + src: "kmshosts.j2" + dest: "{{ tmp_dir }}/kmshosts.json" + mode: 0777 + +- name: Add KMS Hosts to CDH Cluster + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/hosts/" + method: POST + body_format: json + body: "{{ lookup('file', ''+ tmp_dir + '/kmshosts.json') }}" + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + run_once: true + register: kmshosts_resp + +- name: Prepare KMS template + local_action: + module: template + src: "kms.j2" + dest: "{{ tmp_dir }}/kms.json" + mode: 0777 + +- name: Create KMS Service + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/services/" + method: POST + body_format: json + body: "{{ lookup('file', ''+ tmp_dir + '/kms.json') }}" + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + run_once: true + register: kms_resp + +- name: Prepare KMS Role Config Group template + local_action: + module: template + src: "kmsRCG.j2" + dest: "{{ tmp_dir }}/kmsRCG.json" + mode: 0777 + +- name: Create KMS Role Config Group + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/services/keytrustee/roleConfigGroups/keytrustee-KMS_KEYTRUSTEE-BASE" + method: PUT + body_format: json + body: "{{ lookup('file', ''+ tmp_dir + '/kmsRCG.json') }}" + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + run_once: true + register: kms2_resp + +- name: Prepare HDFS template + local_action: + module: template + src: "hdfs.j2" + dest: "{{ tmp_dir }}/hdfs.json" + mode: 0777 + +- name: Update HDFS settings + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/services/hdfs/config" + method: PUT + body_format: json + body: "{{ lookup('file', ''+ tmp_dir + '/hdfs.json') }}" + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + run_once: true + register: hdfs_resp + +- name: Check CDH parcel is activated on all hosts + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/parcels/products/CDH/versions/6.2.0-1.cdh6.2.0.p0.967373/" + method: GET + status_code: 200,404 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: cdh_parcel_result + until: cdh_parcel_result.json.stage == "ACTIVATED" + retries: 25 + delay: 30 + run_once: true + +- name: Check KEYTRUSTEE parcel is activated on all hosts + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/parcels/products/KEYTRUSTEE/versions/6.1.0-1.KEYTRUSTEE6.1.0.p0.592714/" + method: GET + status_code: 200,404 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: kms_parcel_result + until: kms_parcel_result.json.stage == "ACTIVATED" + retries: 25 + delay: 5 + run_once: true + +- name: Restart KMS Service + uri: + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/services/keytrustee/commands/start" + method: POST + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: kms_start_resp + run_once: true + +- set_fact: command_id="{{ kms_start_resp.json.id }}" + +- name: Wait for 
service to restart + uri: + url: "{{ cm_api_url }}/commands/{{ command_id }}" + body_format: json + status_code: 200 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: service_start_result + until: service_start_result.json.active == false + retries: 25 + delay: 30 + run_once: true diff --git a/roles/kts_key_sync/templates/hdfs.j2 b/roles/kts_key_sync/templates/hdfs.j2 new file mode 100644 index 0000000..c07b535 --- /dev/null +++ b/roles/kts_key_sync/templates/hdfs.j2 @@ -0,0 +1,7 @@ +{ + "items" : [ { + "name" : "kms_service", + "value" : "keytrustee" + } + ] +} diff --git a/roles/kts_key_sync/templates/kms.j2 b/roles/kts_key_sync/templates/kms.j2 new file mode 100644 index 0000000..eeb7e6d --- /dev/null +++ b/roles/kts_key_sync/templates/kms.j2 @@ -0,0 +1,33 @@ +{ + "items" : [ { + "name" : "keytrustee", + "displayName" : "Key Trustee KMS", + "type" : "KEYTRUSTEE", + "config" : { + "items" : [ { + "name" : "zookeeper_service", + "value" : "zookeeper" + {% if (krb5_kdc_type == 'ad') or (krb5_kdc_type == 'mit') %} + }, { + "name" : "hadoop_kms_authentication_type", + "value" : "kerberos" + {% endif %} + } ] + }, + "roles": [ + { + "type" : "KMS_KEYTRUSTEE", + "hostRef" : { + "hostId" : "{{ scm_host_ids[hostvars[groups['kms_servers'][0]]['inventory_hostname']] }}", + "hostname" : "{{ hostvars[groups['kms_servers'][0]]['inventory_hostname'] }}" + } + }, { + "type" : "KMS_KEYTRUSTEE", + "hostRef" : { + "hostId" : "{{ scm_host_ids[hostvars[groups['kms_servers'][1]]['inventory_hostname']] }}", + "hostname" : "{{ hostvars[groups['kms_servers'][1]]['inventory_hostname'] }}" + } + } + ] + } +] } diff --git a/roles/kts_key_sync/templates/kmsRCG.j2 b/roles/kts_key_sync/templates/kmsRCG.j2 new file mode 100644 index 0000000..c3f4973 --- /dev/null +++ b/roles/kts_key_sync/templates/kmsRCG.j2 @@ -0,0 +1,46 @@ +{ + "config" : { + "items": [ { + "name" : "cloudera_trustee_keyprovider_hostname-ACTIVE", + "value" : "{{ hostvars[groups['kts_servers'][0]]['inventory_hostname'] }}" + }, { + {% if (cdh_tls) %} + "name" : "ssl_server_keystore_location", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "ssl_client_truststore_password", + "value" : "{{ tls.truststore_password }}" + }, { + "name" : "cloudera_trustee_keyprovider_hostname-PASSIVE", + "value" : "{{ hostvars[groups['kts_servers'][1]]['inventory_hostname'] }}" + }, { + "name" : "ssl_server_keystore_password", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "ssl_server_keystore_keypassword", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "ssl_enabled", + "value" : "true" + }, { + "name" : "ssl_client_truststore_location", + "value" : "{{ tls.truststore_path }}" + }, { + {% endif %} + "name" : "cloudera_trustee_keyprovider_org", + "value" : "{{ kts_org_name }}" + }, { + "name" : "cloudera_trustee_keyprovider_auth", + "value" : "{{ kts_auth_secret }}" + }, { + "name" : "hadoop_security_key_provider_dir", + "value" : "{{ kms_key_dir }}" + }, { + "name" : "keytrustee_security_key_provider_conf_dir", + "value" : "{{ kms_conf_dir }}" + }, { + "name" : "kms-acls.xml_role_safety_valve", + "value" : "hadoop.kms.acl.CREATEnobody" + } ] + } +} diff --git a/roles/kts_key_sync/templates/kmshosts.j2 b/roles/kts_key_sync/templates/kmshosts.j2 new file mode 100644 index 0000000..d23da53 --- /dev/null +++ b/roles/kts_key_sync/templates/kmshosts.j2 @@ -0,0 +1,11 @@ +{ + "items" : [ + { + "hostId" : "{{ 
scm_host_ids[hostvars[groups['kms_servers'][0]]['inventory_hostname']] }}", + "hostname" : "{{ hostvars[groups['kms_servers'][0]]['inventory_hostname'] }}" + }, { + "hostId" : "{{ scm_host_ids[hostvars[groups['kms_servers'][1]]['inventory_hostname']] }}", + "hostname" : "{{ hostvars[groups['kms_servers'][1]]['inventory_hostname'] }}" + } + ] +} diff --git a/roles/mariadb/tasks/databases.yml b/roles/mariadb/tasks/databases.yml index dfc5f9b..a261893 100644 --- a/roles/mariadb/tasks/databases.yml +++ b/roles/mariadb/tasks/databases.yml @@ -1,12 +1,14 @@ --- +- include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" + - name: Create databases mysql_db: login_user=root login_password={{ mysql_root_password }} - name={{ item.value.name }} state=present + name={{ item.value.name }} state=present encoding=utf8 config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} with_dict: "{{ databases }}" - name: Create database users mysql_user: login_user=root login_password={{ mysql_root_password }} name={{ item.value.user }} password={{ item.value.pass }} update_password=always - priv={{ item.value.name }}.*:ALL host='%' state=present - with_dict: "{{ databases }}" \ No newline at end of file + priv={{ item.value.name }}.*:ALL host='%' config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} state=present + with_dict: "{{ databases }}" diff --git a/roles/mariadb/tasks/main.yml b/roles/mariadb/tasks/main.yml index c9f889c..ed41f49 100644 --- a/roles/mariadb/tasks/main.yml +++ b/roles/mariadb/tasks/main.yml @@ -1,11 +1,21 @@ --- +- include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" + - name: Install MariaDB packages yum: name={{ item }} state=installed with_items: - mariadb-server - MySQL-python +- name: Create Mysql Dir + file: + path: "{{ mysql_datadir }}" + state: directory + owner: mysql + group: mysql + mode: '0755' + - name: Create MariaDB configuration file template: src=my.cnf.j2 dest=/etc/my.cnf notify: diff --git a/roles/mariadb/tasks/mysql_secure_installation.yml b/roles/mariadb/tasks/mysql_secure_installation.yml index f0f1309..9c7d72e 100644 --- a/roles/mariadb/tasks/mysql_secure_installation.yml +++ b/roles/mariadb/tasks/mysql_secure_installation.yml @@ -1,7 +1,9 @@ --- +- include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" + - name: Set root password - mysql_user: name=root password={{ mysql_root_password }} state=present + mysql_user: name=root password={{ mysql_root_password }} state=present config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} ignore_errors: True - name: Remove anonymous users @@ -15,7 +17,7 @@ - DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('127.0.0.1', '::1', 'localhost') - name: Remove the test database - mysql_db: login_user=root login_password={{ mysql_root_password }} db=test state=absent + mysql_db: login_user=root login_password={{ mysql_root_password }} db=test config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} state=absent - name: Reload privilege tables command: 'mysql -uroot -p{{ mysql_root_password }} -ne "{{ item }}"' diff --git a/roles/mariadb/templates/my.cnf.j2 b/roles/mariadb/templates/my.cnf.j2 index 9b88b67..5cb62f9 100644 --- a/roles/mariadb/templates/my.cnf.j2 +++ b/roles/mariadb/templates/my.cnf.j2 @@ -40,6 +40,10 @@ innodb_thread_concurrency = 8 innodb_flush_method = O_DIRECT innodb_log_file_size = 512M +[mysql] +socket = {{ mysql_socket }} +port = {{ mysql_port }} + [mysqld_safe] log-error = {{ mysql_log }} -pid-file = {{ mysql_pid_file }} \ No newline at end of file +pid-file = {{ 
mysql_pid_file }} diff --git a/roles/mn_dir_teardown/tasks/main.yml b/roles/mn_dir_teardown/tasks/main.yml new file mode 100644 index 0000000..83c98c6 --- /dev/null +++ b/roles/mn_dir_teardown/tasks/main.yml @@ -0,0 +1,32 @@ +--- + +- include_vars: "{{ inventory_dir }}/group_vars/cdh_servers.yml" + +- name: SNN Dir Delete + file: + path: "{{ cdh_services[0].fs_checkpoint_dir_list }}" + state: absent + ignore_errors: true + +- name: NN Dir Delete + file: + path: "{{ cdh_services[0].dfs_name_dir_list }}" + state: absent + ignore_errors: true + +- name: JN Edits Dir + file: + path: "{{ cdh_services[0].dfs_journalnode_edits_dir }}" + state: absent + ignore_errors: true + +- name: ZK Data Dirs + file: + path: "/data/3/zookeeper" + state: absent + + +- name: ZK Edits Dirs + file: + path: "/data/4/zookeeper" + state: absent diff --git a/roles/nn_dir_creation/tasks/main.yml b/roles/nn_dir_creation/tasks/main.yml new file mode 100644 index 0000000..606f637 --- /dev/null +++ b/roles/nn_dir_creation/tasks/main.yml @@ -0,0 +1,52 @@ +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" + +- name: Permission data dirs on Master Nodes + file: + path: "{{ item }}" + owner: root + group: root + mode: '0755' + when: scm_version[0] == "5" + with_items: + - /data + - /data/1 + - /data/2 + +- name: Create DFS dirs + file: + path: "{{ item }}" + state: directory + owner: root + group: root + mode: '0755' + when: scm_version[0] == "5" + with_items: + - /data/1/dfs + - /data/2/dfs + +- name: Create NN dirs + file: + path: /data/1/dfs/nn + state: directory + owner: hdfs + group: hadoop + mode: '0700' + when: scm_version[0] == "5" + +- name: Create SNN dirs + file: + path: /data/1/dfs/snn + state: directory + owner: hdfs + group: hadoop + mode: '0700' + when: scm_version[0] == "5" + +- name: Create JN dirs + file: + path: /data/2/dfs/jn + state: directory + owner: hdfs + group: hadoop + mode: '0700' + when: scm_version[0] == "5" diff --git a/roles/pre_reqs/tasks/main.yml b/roles/pre_reqs/tasks/main.yml new file mode 100644 index 0000000..08717d0 --- /dev/null +++ b/roles/pre_reqs/tasks/main.yml @@ -0,0 +1,196 @@ +- include_vars: "{{ inventory_dir }}/group_vars/tls_enc.yml" + +- name: change swappiness value + sysctl: + name: vm.swappiness + value: 1 + state: present + reload: true + +- name: change overcommit value + sysctl: + name: vm.overcommit_memory + value: 1 + state: present + reload: true + +- name: disable tuned service + systemd: + name: tuned + state: stopped + enabled: no + +- name: install nscd service + yum: + name: nscd + state: present + +- name: enable nscd service + systemd: + name: nscd + state: started + enabled: yes + +- name: nscd config + copy: + backup: yes + src: nscd.conf + dest: /etc/nscd.conf + owner: root + group: root + mode: '0644' + register: nscd_config + +- name: Restart the nscd service + systemd: + name: nscd + state: restarted + enabled: yes + ignore_errors: true + when: nscd_config.changed + +- name: disable chronyd service + systemd: + name: chronyd + state: stopped + enabled: no + +- name: disable transparent huge pages until reboot - enabled conf + shell: 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' + ignore_errors: true + +- name: disable transparent huge pages until reboot - defrag conf + shell: 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' + ignore_errors: true + +- name: Check for THP in rc.local + shell: grep -c "enabled" /etc/rc.d/rc.local || true + register: thp_enabled + +- name: Check for THP in rc.local + shell: grep -c "defrag" 
/etc/rc.d/rc.local || true + register: thp_defrag + +- name: Disable THP in rc.local - enabled conf + lineinfile: + backup: yes + path: /etc/rc.d/rc.local + line: 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' + mode: '0755' + when: thp_enabled.stdout == "0" + +- name: Disable THP in rc.local - defrag conf + lineinfile: + backup: yes + path: /etc/rc.d/rc.local + line: 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' + mode: '0755' + when: thp_defrag.stdout == "0" + +- name: Disable THP in GRUB + lineinfile: + backup: yes + state: present + path: /etc/default/grub + backrefs: yes + regexp: '^(GRUB_CMDLINE_LINUX=(?!.*hugepage)\"[^\"]+)(\".*)' + line: '\1 transparent_hugepage=never\2' + +- name: Disable THP - rebuild GRUB + shell: "grub2-mkconfig -o /boot/grub2/grub.cfg" + +- name: data permissions + file: + path: /data + state: directory + owner: root + group: root + mode: '0755' + +- name: Set SELINUX to permissive - config + lineinfile: + dest: /etc/selinux/config + regexp: ^SELINUX= + line: SELINUX=permissive + state: present + +- name: Set SELINUX to permissive - setenforce + shell: "setenforce 0" + +#- name: Verify required SSSD packages have been yum installed +# yum: +# name: "{{ item }}" +# state: present +# with_items: +# - adcli +# - sssd +# - realmd +# - oddjob +# - oddjob-mkhomedir +# - samba-common +# - samba-common-tools + +- name: Yum Install required KRB5 packages + yum: + name: "{{ item }}" + state: present + with_items: + - krb5-workstation + - krb5-libs + - openldap + - openldap-clients + +- name: Deploy KRB5 config + template: + backup: yes + src: krb5.conf.j2 + dest: /etc/krb5.conf + owner: root + group: root + mode: '0644' + +- name: Remove conflicting krb5 config + file: + path: /var/lib/sss/pubconf/krb5.include.d/krb5_libdefaults + state: absent + +- name: + file: + path: /var/opt/cloudera/ + state: directory + owner: root + group: root + mode: '0777' + +#- name: Restart the messagebus service +# systemd: +# name: messagebus +# state: restarted +# enabled: yes + +#- name: Restart the realmd service +# systemd: +# name: realmd +# state: restarted +# enabled: yes + +#- name: Restart the sssd service +# systemd: +# name: sssd +# state: restarted +# enabled: yes + +#- name: Restart the nscd service +# systemd: +# name: nscd +# state: restarted +# enabled: yes +# + +- name: Set disk reserved block size to zero + shell: | + for disk in $(df -h --output=source --type=ext4 | grep /dev/sd); do + tune2fs -m 0 $disk + done + args: + executable: /bin/bash diff --git a/roles/pre_reqs/templates/krb5.conf.j2 b/roles/pre_reqs/templates/krb5.conf.j2 new file mode 100644 index 0000000..56294b4 --- /dev/null +++ b/roles/pre_reqs/templates/krb5.conf.j2 @@ -0,0 +1,24 @@ +# Configuration snippets may be placed in this directory as well +includedir /etc/krb5.conf.d/ + +includedir /var/lib/sss/pubconf/krb5.include.d/ + +[logging] + default = FILE:/var/log/krb5libs.log + kdc = FILE:/var/log/krb5kdc.log + admin_server = FILE:/var/log/kadmind.log + +[libdefaults] + dns_lookup_realm = false + ticket_lifetime = 24h + renew_lifetime = 7d + forwardable = true + rdns = false + default_realm = {{ krb5_realm }} + udp_preference_limit = 1 + +[realms] + {{ krb5_realm }} = { + } + +[domain_realm] diff --git a/roles/scm/tasks/api.yml b/roles/scm/tasks/api.yml index ec214f8..c7d62db 100644 --- a/roles/scm/tasks/api.yml +++ b/roles/scm/tasks/api.yml @@ -5,4 +5,4 @@ yum: name=python-pip state=latest update_cache=yes - name: Install CM Python API Client - pip: name=cm-api \ No newline at 
end of file + pip: name=cm-api diff --git a/roles/scm/tasks/cms.yml b/roles/scm/tasks/cms.yml index 4d76813..5379766 100644 --- a/roles/scm/tasks/cms.yml +++ b/roles/scm/tasks/cms.yml @@ -1,19 +1,41 @@ --- +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server_enc.yml" + # Wait for agents to send heartbeats in case SCM has just been restarted # Adding CMS will fail if host details haven't been reported in - name: Wait for agent heartbeats pause: seconds=30 # Prepare CMS template -- name: Prepare CMS template +- name: Prepare CMS role template template: src: "cms_base.j2" dest: "{{ tmp_dir }}/cms_base.json" - delegate_to: localhost + mode: '0777' + +- name: Prepare CMS service template + template: + src: "cms_service.j2" + dest: "{{ tmp_dir }}/cms_service.json" + mode: '0777' + +# https://cloudera.github.io/cm_api/apidocs/v12/path__cm_service.html#PUT +- name: Check whether the CMS exists + uri: + url: "{{ cm_api_url }}/cm/service" + method: GET + body_format: json + status_code: 200,404 + force_basic_auth: yes + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + return_content: yes + register: cms_resp1 # https://cloudera.github.io/cm_api/apidocs/v12/path__cm_service.html#PUT -- name: Setup the Cloudera Management Services (CMS) +- name: Setup the Cloudera Management Service Roles uri: url: "{{ cm_api_url }}/cm/service" method: PUT @@ -28,26 +50,35 @@ failed_when: - "'MGMT' not in cms_resp.content" - "'CMS instance already exists' not in cms_resp.content" - delegate_to: localhost - -- debug: var=cms_resp + when: "'MGMT' not in cms_resp1.content" -# https://cloudera.github.io/cm_api/apidocs/v12/path__cm_service_commands_start.html -- name: Start Cloudera Management Services (CMS) +- name: Setup the Cloudera Management Service (CMS) uri: - url: "{{ cm_api_url }}/cm/service/commands/start" - method: POST - status_code: 200 + url: "{{ cm_api_url }}/cm/service/config" + method: PUT + body_format: json + body: "{{ lookup('file', ''+ tmp_dir + '/cms_service.json') }}" + status_code: 200,400 force_basic_auth: yes user: "{{ scm_default_user }}" password: "{{ scm_default_pass }}" return_content: yes - register: start_resp - failed_when: "'startTime' not in start_resp.content" - -- debug: var=start_resp + register: cms_resp2 + when: "'MGMT' not in cms_resp1.content" - file: path: "{{ tmp_dir }}/cms_base.json" state: absent - delegate_to: localhost + +- file: + path: "{{ tmp_dir }}/cms_service.json" + state: absent + +- name: Create Navigator audit stream directory + file: + path: "{{ nav_auditstream_directory }}" + state: directory + owner: cloudera-scm + group: cloudera-scm + mode: '0700' + diff --git a/roles/scm/tasks/license.yml b/roles/scm/tasks/license.yml index 669511b..ecff73e 100644 --- a/roles/scm/tasks/license.yml +++ b/roles/scm/tasks/license.yml @@ -1,11 +1,20 @@ --- +- include_vars: "{{ inventory_dir }}/group_vars/scm_server_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" + +- name: Copy cm_api to Python lib directory + copy: + src: cloudera_license.txt + dest: "{{ hostvars[scm_hostname]['license_file'] }}" + owner: cloudera-scm + group: cloudera-scm + mode: '0644' + - name: Check license file exists stat: path={{ hostvars[scm_hostname]['license_file'] }} register: file -- debug: msg="License file '{{ hostvars[scm_hostname]['license_file'] }}' exists = {{ file.stat.exists }}" - # https://cloudera.github.io/cm_api/apidocs/v12/path__cm_license.html - name: Upload license file 
to Cloudera Manager shell: > @@ -22,8 +31,6 @@ - restart cloudera-scm-server when: file.stat.exists == True -- debug: var=resp - # URI module does not have the equivalent to cURL's -F/--form # uri: # url: {{ cm_api_url }}/cm/license diff --git a/roles/scm/tasks/main.yml b/roles/scm/tasks/main.yml index 99bdb3b..9babc2e 100644 --- a/roles/scm/tasks/main.yml +++ b/roles/scm/tasks/main.yml @@ -1,21 +1,46 @@ --- -- include_vars: ../../../group_vars/db_server.yml +- include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/ldap_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/tls_enc.yml" - name: Install the Cloudera Manager Server Packages yum: name={{ item }} state=installed with_items: - - cloudera-manager-daemons - - cloudera-manager-server - - cloudera-manager-agent + - "cloudera-manager-daemons-{{ scm_version }}" + - "cloudera-manager-server-{{ scm_version }}" + - "cloudera-manager-agent-{{ scm_version }}" + +#To Do +#Change scm_dir to use if statement - previously /usr/share/cmf/ +#Remove MySQL ref - name: Prepare Cloudera Manager Server External Database - command: /usr/share/cmf/schema/scm_prepare_database.sh + command: "{{ scm_dir }}/schema/scm_prepare_database.sh -f - --host {{ hostvars[db_hostname]['inventory_hostname'] }} - mysql {{ databases.scm.name }} {{ databases.scm.user }} {{ databases.scm.pass }} + --host {{ databases.scm.host }} + --port {{ databases.scm.port }} + {{ databases.scm.type }} {{ databases.scm.name }} {{ databases.scm.user }} {{ databases.scm.pass }}" changed_when: False +#- name: Download & Install Custom Service Descriptors - Spark2 +# get_url: +# url: http://archive.cloudera.com/spark2/CSD/SPARK2_ON_YARN-2.3.0.cloudera4.jar +# dest: /opt/cloudera/csd +# mode: 0600 +# owner: cloudera-scm +# group: cloudera-scm + +#- name: Download & Install Custom Service Descriptors - CDSW +# get_url: +# url: http://archive.cloudera.com/cdsw1/CSD/CLOUDERA_DATA_SCIENCE_WORKBENCH-CDH5-1.4.2.jar +# dest: /opt/cloudera/csd +# mode: 0600 +# owner: cloudera-scm +# group: cloudera-scm + - name: Start the Cloudera Manager Server service: name={{ item }} state=restarted enabled=yes notify: @@ -40,21 +65,23 @@ # Set base CM API URL - set_fact: cm_api_url="http://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port }}/api/{{ result.content }}" -- debug: var=cm_api_url # Install Cloudera Manager Python API -- include: api.yml +# - include: api.yml # Retrieve auto-generated host IDs from SCM - name: Get SCM hostIds for inventory hosts + become: true action: scm_hosts register: scm_hosts_result + vars: + use_tls: False +# environment: +# PYTHONPATH: "{{ ansible_env.PYTHONPATH }}:/usr/lib/python2.7/site-packages/cm_api-19.0.0-py2.7.egg" - set_fact: scm_host_ids="{{ scm_hosts_result.host_ids }}" -- debug: var=scm_host_ids - set_fact: scm_host_names="{{ scm_hosts_result.host_names }}" -- debug: var=scm_host_names - include: license.yml - include: scm.yml diff --git a/roles/scm/tasks/scm.yml b/roles/scm/tasks/scm.yml index 46244a3..ce1291b 100644 --- a/roles/scm/tasks/scm.yml +++ b/roles/scm/tasks/scm.yml @@ -1,10 +1,15 @@ --- +- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/scm_server_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/krb5_server.yml" +- include_vars: "{{ inventory_dir }}/group_vars/ldap_enc.yml" + - 
name: Prepare Cloudera Manager settings template: src: "scm.j2" - dest: "{{ tmp_dir }}/scm.json" - delegate_to: localhost + dest: "{{ tmp_dir }}/scm.json" + mode: 0777 # https://cloudera.github.io/cm_api/apidocs/v13/path__cm_config.html - name: Update Cloudera Manager settings @@ -19,13 +24,38 @@ password: "{{ scm_default_pass }}" return_content: yes register: scm_resp - delegate_to: localhost -- debug: var=scm_resp - file: path: "{{ tmp_dir }}/scm.json" state: absent - delegate_to: localhost + +- name: Restart SCM + service: name=cloudera-scm-server state=restarted enabled=yes + +- set_fact: scm_new_port={{ "{{ scm_port_tls }}" if scm_web_tls==True else "{{ scm_port }}" }} + +- name: wait cloudera-scm-server + wait_for: + host={{ hostvars[scm_hostname]['inventory_hostname'] }} + port={{ scm_new_port }} + delay=5 + state=started + timeout=300 + +- set_fact: cm_api_url={{ "https://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port_tls }}" if scm_web_tls==True else "http://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port }}" }} + +- name: Get Cloudera Manager API version + uri: + url: "{{ cm_api_url }}/api/version" + method: GET + status_code: 200 + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + force_basic_auth: yes + return_content: yes + register: result + +- set_fact: cm_api_url="{{ cm_api_url }}/api/{{ result.content }}" # https://cloudera.github.io/cm_api/apidocs/v13/path__cm_commands_importAdminCredentials.html - name: Import KDC admin credentials diff --git a/roles/scm/templates/cms_base.j2 b/roles/scm/templates/cms_base.j2 index 0b08ca6..9b941b5 100644 --- a/roles/scm/templates/cms_base.j2 +++ b/roles/scm/templates/cms_base.j2 @@ -1,6 +1,17 @@ { "name": "mgmt", "type": "MGMT", + "type" : "MGMT", + "config" : { + "items" : [ + {% if (scm_web_tls) %} + { + "name" : "ssl_client_truststore_location", + "value" : "{{ tls.truststore_path }}" + } + {% endif %} + ] + }, "roles": [ { "name": "mgmt-SERVICEMONITOR", @@ -77,7 +88,7 @@ "items": [ { "name": "firehose_database_host", - "value": "{{ hostvars[db_hostname]['inventory_hostname'] }}" + "value": "{{ databases.amon.host }}:{{ databases.amon.port }}" }, { "name": "firehose_database_name", "value": "{{ databases.amon.name }}" @@ -90,9 +101,35 @@ }, { "name": "firehose_database_user", "value": "{{ databases.amon.user }}" + }, { + "name" : "firehose_debug_port", + "value" : "-1" + }, { + "name" : "firehose_debug_tls_port", + "value" : "-1" + }, { + "name": "oom_heap_dump_dir", + "value": "{{ oom_heap_dump_dir }}" + }, { + "name": "oom_heap_dump_enabled", + "value": "false" } ] } + }, { + "name" : "mgmt-EVENTSERVER-BASE", + "displayName" : "Event Server Default Group", + "roleType" : "EVENTSERVER", + "base" : true, + "serviceRef" : { + "serviceName" : "mgmt" + }, + "config" : { + "items" : [ { + "name" : "eventserver_debug_port", + "value" : "-1", + } ] + } }, { "name": "mgmt-REPORTSMANAGER-BASE", "displayName": "Reports Manager Default Group", @@ -105,7 +142,7 @@ "items": [ { "name": "headlamp_database_host", - "value": "{{ hostvars[db_hostname]['inventory_hostname'] }}" + "value": "{{ databases.rman.host }}:{{ databases.rman.port }}" }, { "name": "headlamp_database_name", "value": "{{ databases.rman.name }}" @@ -118,6 +155,21 @@ }, { "name": "headlamp_database_user", "value": "{{ databases.rman.user }}" + }, { + "name": "headlamp_scratch_dir", + "value": "{{ headlamp_scratch_dir }}" + }, { + "name" : "headlamp_debug_port", + "value" : "-1", + }, { + "name": "oom_heap_dump_dir", + "value": "{{ 
oom_heap_dump_dir }}" + }, { + "name": "oom_heap_dump_enabled", + "value": "false" + }, { + "name": "headlamp_heapsize", + "value": "8589934592" } ] } @@ -132,8 +184,14 @@ "config": { "items": [ { + "name" : "NAVIGATOR_role_env_safety_valve", + "value" : "NAVIGATOR_EXTRA_CLASSPATH=/opt/cloudera/cm/cloudera-navigator-server/libs/cdh6/apache-log4j-extras-1.2.17.jar", + }, { + "name" : "log4j_safety_valve", + "value" : "log4j.logger.auditStreamEnVision = TRACE,RFA2\nlog4j.appender.RFA2 = org.apache.log4j.rolling.RollingFileAppender\nlog4j.appender.RFA2.rollingPolicy = org.apache.log4j.rolling.TimeBasedRollingPolicy\nlog4j.appender.RFA2.rollingPolicy.FileNamePattern = {{ nav_auditstream_filename_pattern }}\nlog4j.appender.RFA2.layout = org.apache.log4j.PatternLayout\nlog4j.appender.RFA2.layout.ConversionPattern = %d{ISO8601} %m%n\nlog4j.additivity.auditStreamEnVision = false\n", + }, { "name": "navigator_database_host", - "value": "{{ hostvars[db_hostname]['inventory_hostname'] }}" + "value": "{{ databases.nav.host }}:{{ databases.nav.port }}" }, { "name": "navigator_database_name", "value": "{{ databases.nav.name }}" @@ -146,6 +204,38 @@ }, { "name": "navigator_database_user", "value": "{{ databases.nav.user }}" + {% if (cdh_tls) %} + }, { + "name" : "ssl_enabled", + "value" : "true", + }, { + "name" : "ssl_server_keystore_keypassword", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "ssl_server_keystore_location", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "ssl_server_keystore_password", + "value" : "{{ tls.keystore_password }}" + }, { + "name" : "navigator_truststore_file", + "value" : "{{ tls.truststore_path }}" + }, { + "name" : "navigator_truststore_password", + "value" : "{{ tls.truststore_password }}" + {% endif %} + }, { + "name" : "navigator_debug_port", + "value" : "-1" + }, { + "name": "oom_heap_dump_dir", + "value": "{{ oom_heap_dump_dir }}" + }, { + "name": "oom_heap_dump_enabled", + "value": "false" + }, { + "name": "navigator_heapsize", + "value": "10737418240" } ] } @@ -161,7 +251,7 @@ "items": [ { "name": "nav_metaserver_database_host", - "value": "{{ hostvars[db_hostname]['inventory_hostname'] }}" + "value": "{{ databases.navms.host }}:{{ databases.navms.port }}" }, { "name": "nav_metaserver_database_name", "value": "{{ databases.navms.name }}" @@ -174,6 +264,135 @@ }, { "name": "nav_metaserver_database_user", "value": "{{ databases.navms.user }}" + }, { + "name": "data_dir", + "value": "{{ navms_data_dir }}" + }, { + "name": "oom_heap_dump_dir", + "value": "{{ oom_heap_dump_dir }}" + }, { + "name" : "allow_usage_data", + "value" : "false", + }, { + "name" : "auth_backend_order", + "value" : "CM_THEN_EXTERNAL", + }, { + "name" : "external_auth_type", + "value" : "{{ ldap_rdom.auth_type }}", + }, { + "name" : "nav_ldap_bind_dn", + "value" : "{{ ldap_rdom.bind_dn }}", + }, { + "name" : "nav_ldap_bind_pw", + "value" : "{{ ldap_rdom.bind_pass }}", + }, { + "name" : "nav_ldap_group_search_base", + "value" : "{{ ldap_rdom.base_dn }}", + }, { + "name" : "nav_ldap_group_search_filter", + "value" : "({{ ldap_rdom.group_member_attr }}={0})", + }, { + "name" : "nav_ldap_groups_search_filter", + "value" : "(&{{ ldap_rdom.group_filter }}({{ ldap_rdom.group_name_attr }}=*{0}*))", + }, { + "name" : "nav_ldap_url", + "value" : "{{ ldap_rdom.url }}", + }, { + "name" : "nav_ldap_user_search_base", + "value" : "{{ ldap_rdom.base_dn }}", + }, { + "name" : "nav_ldap_user_search_filter", + "value" : "({{ ldap_rdom.user_name_attr }}={0})", + }, { + "name" : 
"nav_nt_domain", + "value" : "{{ ldap_rdom.domain }}", + }, { + "name" : "navigator_heapsize", + "value" : "34359738368", + }, { + "name" : "oom_heap_dump_enabled", + "value" : "false", + } + {% if (scm_web_tls) %} + , { + "name" : "ssl_enabled", + "value" : "true", + }, { + "name" : "ssl_server_keystore_keypassword", + "value" : "{{ tls.keystore_password }}", + }, { + "name" : "ssl_server_keystore_location", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "ssl_server_keystore_password", + "value" : "{{ tls.keystore_password }}", + } + {% endif %} + , { + "name": "oom_heap_dump_enabled", + "value": "false" + } + ] + } + }, { + "name": "mgmt-SERVICEMONITOR-BASE", + "displayName": "Service Monitor Default Group", + "roleType": "SERVICEMONITOR", + "base": true, + "serviceRef": { + "serviceName": "mgmt" + }, + "config": { + "items": [ + { + "name" : "firehose_debug_port", + "value" : "-1", + }, { + "name": "firehose_heapsize", + "value": "2147483648" + }, { + "name": "firehose_non_java_memory_bytes", + "value": "12884901888" + }, { + "name": "firehose_storage_dir", + "value": "{{ smon_firehose_storage_dir }}" + }, { + "name": "oom_heap_dump_dir", + "value": "{{ oom_heap_dump_dir }}" + }, { + "name": "oom_heap_dump_enabled", + "value": "false" + } + ] + } + }, { + "name": "mgmt-HOSTMONITOR-BASE", + "displayName": "Host Monitor Default Group", + "roleType": "HOSTMONITOR", + "base": true, + "serviceRef": { + "serviceName": "mgmt" + }, + "config": { + "items": [ + { + "name" : "firehose_debug_port", + "value" : "-1", + }, { + "name" : "firehose_debug_tls_port", + "value" : "-1", + }, { + "name": "firehose_non_java_memory_bytes", + "value": "4294967296" + }, { + "name": "firehose_storage_dir", + "value": "{{ hmon_firehose_storage_dir }}" + }, { + "name": "oom_heap_dump_dir", + "value": "{{ oom_heap_dump_dir }}" + }, { + "name": "oom_heap_dump_enabled", + "value": "false" } ] } diff --git a/roles/scm/templates/cms_service.j2 b/roles/scm/templates/cms_service.j2 new file mode 100644 index 0000000..9097148 --- /dev/null +++ b/roles/scm/templates/cms_service.j2 @@ -0,0 +1,13 @@ +{ + "items" : [ + {% if (scm_web_tls) %} + { + "name": "ssl_client_truststore_location", + "value": "{{ tls.truststore_path }}" + }, { + "name": "ssl_client_truststore_password", + "value": "{{ tls.truststore_password }}" + } + {% endif %} + ] +} diff --git a/roles/scm/templates/scm.j2 b/roles/scm/templates/scm.j2 index a2b1c38..2309e5e 100644 --- a/roles/scm/templates/scm.j2 +++ b/roles/scm/templates/scm.j2 @@ -2,27 +2,119 @@ "items" : [ {% if krb5_kdc_type != 'none' %} { - "name" : "KDC_HOST", - "value" : "{{ hostvars[groups['krb5_server'][0]]['inventory_hostname'] }}" - }, { "name" : "KDC_TYPE", {% if krb5_kdc_type == 'ad' %} "value" : "Active Directory" + }, { + "name" : "KDC_HOST", + "value" : "{{ kdc }}" {% else %} "value" : "MIT KDC" + }, { + "name" : "KDC_HOST", + "value" : "{{ hostvars[groups['krb5_server'][0]]['inventory_hostname'] }}" {% endif %} }, { "name" : "SECURITY_REALM", - "value" : "{{ default_realm|upper }}" + "value" : "{{ krb5_realm }}" }, { "name" : "KRB_MANAGE_KRB5_CONF", "value" : "false" + }, { + "name" : "AD_ACCOUNT_PREFIX", + "value" : "{{ ad_account_prefix }}" + }, { + "name" : "AD_KDC_DOMAIN", + "value" : "{{ computer_ou }}" + }, { + "name" : "KRB_ENC_TYPES", + "value" : "{{ enc_types }}" + }, { + "name" : "NT_DOMAIN", + "value" : "{{ domain }}" + }, { + "name" : "KDC_ACCOUNT_CREATION_HOST_OVERRIDE", + "value" : "{{ kdc_account_creation_host_override }}" + }, { + "name" : 
"AD_DELETE_ON_REGENERATE", + "value" : "true", + }, + {% endif %} + {% if (agent_tls) %} + { + "name" : "AGENT_TLS", + "value" : "true", + }, { + "name" : "KEYSTORE_PASSWORD", + "value" : "{{ tls.keystore_password }}", + }, { + "name" : "KEYSTORE_PATH", + "value" : "{{ tls.keystore_path }}", + }, { + "name" : "TRUSTSTORE_PASSWORD", + "value" : "{{ tls.truststore_password }}", + }, { + "name" : "TRUSTSTORE_PATH", + "value" : "{{ tls.truststore_path }}", + }, { + "name" : "NEED_AGENT_VALIDATION", + "value" : "true" + }, + {% endif %} + {% if (scm_web_tls) %} + { + "name" : "WEB_TLS", + "value" : "true" + }, { + "name" : "HTTPS_PORT", + "value" : "{{ scm_port_tls }}" + }, + {% else %} + { + "name" : "WEB_TLS", + "value" : "false" }, {% endif %} { - "name" : "REMOTE_PARCEL_REPO_URLS", - "value" : {% set repo_j = joiner(",") %} - "{% for repo in scm_repositories %}{{ repo_j() }}{{ repo }}{% endfor %}" + "name" : "HTTP_PORT", + "value" : "{{ scm_port }}" + }, { + "name" : "CUSTOM_BANNER_HTML", + "value" : "{{ banner_text }}" + }, { + "name" : "CUSTOM_HEADER_COLOR", + "value" : "{{ banner_colour }}" + }, { + "name" : "ALLOW_USAGE_DATA", + "value" : "false" + }, { + "name" : "USING_HELP_FROM_CCP", + "value" : "false", + }, { + "name" : "PARCEL_DISTRIBUTE_RATE_LIMIT_KBS_PER_SECOND", + "value" : "512000", + }, { + "name" : "PHONE_HOME", + "value" : "false", + }, { + "name" : "LDAP_URL", + "value" : "{{ ldap_rdom.url }}", + }, { + "name" : "LDAP_BIND_DN", + "value" : "{{ ldap_rdom.bind_dn }}", + }, { + "name" : "AUTH_BACKEND_ORDER", + "value" : "DB_THEN_LDAP", + }, { + "name" : "LDAP_BIND_PW", + "value" : "{{ ldap_rdom.bind_pass }}", + }, { + "name" : "LDAP_CLUSTER_ADMIN_GROUPS", + "value" : "{{ ldap_rdom.cm_clu_admin }}", + }, { + "name" : "REMOTE_PARCEL_REPO_URLS", + "value" : {% set repo_j = joiner(",") %} + "{% for repo in scm_repositories %}{{ repo_j() }}{{ repo }}{% endfor %}" } ] } diff --git a/roles/yum_teardown_cm_agent/tasks/main.yml b/roles/yum_teardown_cm_agent/tasks/main.yml new file mode 100644 index 0000000..c3f8c16 --- /dev/null +++ b/roles/yum_teardown_cm_agent/tasks/main.yml @@ -0,0 +1,8 @@ +--- + +- name: Remove the Cloudera Manager Agent Packages + yum: name={{ item }} state=installed + with_items: + - "cloudera-manager-daemons-{{ scm_version }}" + - "cloudera-manager-agent-{{ scm_version }}" + diff --git a/roles/yum_teardown_cm_server/tasks/main.yml b/roles/yum_teardown_cm_server/tasks/main.yml new file mode 100644 index 0000000..830b4b0 --- /dev/null +++ b/roles/yum_teardown_cm_server/tasks/main.yml @@ -0,0 +1,7 @@ +--- + +- name: Remove the Cloudera Manager Server Packages + yum: name={{ item }} state=absent + with_items: + - "cloudera-manager-server-{{ scm_version }}" + diff --git a/site.yml b/site.yml index 326bbf9..f19ea8c 100644 --- a/site.yml +++ b/site.yml @@ -1,63 +1,142 @@ --- # Cloudera playbook +- name: Apply pre-reqs fix + hosts: cdh_servers:kts_servers:kms_servers + roles: + - pre_reqs + tags: [pre_reqs, pre_build] + - name: Configure Cloudera Manager Repository - hosts: cdh_servers + hosts: cdh_servers:kts_servers:kms_servers roles: - cm_repo - tags: cm_repo + tags: [cm_repo, pre_build] - name: Install rngd - hosts: cdh_servers + hosts: cdh_servers:kts_servers:kms_servers roles: - rngd - tags: rngd + tags: [rngd, pre_build] - name: Install Java - hosts: cdh_servers + hosts: cdh_servers:kts_servers:kms_servers roles: - java - tags: java + tags: [java, pre_build] -- name: Install MariaDB and create databases +- name: Install database server and create databases 
hosts: db_server roles: - - mariadb + - { role: mariadb, when: database_type == 'mysql' } + - { role: postgres, when: database_type == 'postgres' } tags: mysql -- name: Install MySQL Connector - hosts: utility_servers:master_servers +- name: Install DB Connectors + hosts: cdh_servers roles: - - mysql_connector - tags: mysql + - db_connector + tags: [db_connector, pre_build] - name: Install MIT KDC Server hosts: krb5_server roles: - { role: krb5_server, when: krb5_kdc_type == 'mit' } - tags: krb5 + tags: [krb5, pre_build] -- name: Configure EPEL Repository - hosts: scm_server +- name: Setup certificates + hosts: all roles: - - epel - tags: epel + - certs + tags: certs + +- name: Install certificates + hosts: all + roles: + - certs_signed_install + tags: certs_signed_install - name: Install Cloudera Manager Agents - hosts: cdh_servers + hosts: cdh_servers:kts_servers:kms_servers roles: - cm_agents tags: cm_agents +# Must be done after the cm_agents role otherwise the local users do not exist +- name: Create Data Node directories + hosts: worker_servers + roles: + - dn_dir_creation + tags: dn_dir_creation + +# Must be done after the cm_agents role otherwise the local users do not exist +- name: Create Master Node Directories + hosts: master_servers + roles: + - nn_dir_creation + tags: nn_dir_creation + +- name: Install Cloudera Manager Python API + hosts: scm_server + roles: + - cm_api + tags: [cm_install] + - name: Install Cloudera Manager Server hosts: scm_server roles: - scm - tags: cluster_template + tags: [cm_cluster_template, cm_install] + +- name: Map Cloudera Manager Roles to LDAP groups + hosts: scm_server + roles: + - cm_roles + tags: [cm_roles, cm_install] + +- name: Install Cloudera Manager Agents - tls + hosts: cdh_servers:kts_servers:kms_servers + roles: + - cm_agents_tls + tags: [cm_agents_tls, cm_install] - name: Install CDH hosts: scm_server roles: - cdh - tags: cluster_template + tags: + - cdh_cluster_install + +- name: Install KTS + hosts: scm_server + roles: + - { role: kts, when: hdfs_tde_enabled == 'True' } + tags: + - kts_cluster_template + +- name: Sync KTS keys + hosts: kts_servers + roles: + - { role: kts_key_sync, when: hdfs_tde_enabled == 'True' } + tags: + - kts_key_sync + +- name: Sync KMS keys + hosts: kms_servers + roles: + - { role: kms_key_sync, when: hdfs_tde_enabled == 'True' } + tags: + - kms_key_sync + +- name: Create Encryption Zones + hosts: master_servers + roles: + - { role: kms_encryption_zones, when: hdfs_tde_enabled == 'True' } + tags: + - enc_zones +- name: Install and Configure HAProxy + hosts: haproxy + roles: + - haproxy + tags: haproxy diff --git a/teardown.yml b/teardown.yml new file mode 100644 index 0000000..5d97b75 --- /dev/null +++ b/teardown.yml @@ -0,0 +1,92 @@ +--- +# Cloudera playbook teardown + + +#- name: CDH Teardown +# hosts: scm_server +# roles: +# - cdh_teardown +# tags: cdh_teardown + +- name: Datanode Dir teardown + hosts: cdh_servers + roles: + - dn_dir_teardown + tags: dn_dir_teardown + +- name: Masternode Dir teardown + hosts: master_servers + roles: + - mn_dir_teardown + tags: mn_dir_teardown + +- name: Kafka Dir teardown + hosts: cdh_servers + roles: + - kafka_dir_teardown + tags: kafka_dir_teardown + +- name: KTS Dir teardown + hosts: kts_servers + roles: + - kts_dir_teardown + tags: kts_dir_teardown + +- name: KMS Dir teardown + hosts: kms_servers + roles: + - kms_dir_teardown + tags: kms_dir_teardown + +- name: CDH DB teardown + hosts: scm_server + tasks: + - include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" 
+ roles: + - { role: db_teardown_mysql_cdh, when: database_type == 'mysql' } + - { role: db_teardown_postgres_cdh, when: database_type == 'postgres' } + - { role: db_teardown_oracle_cdh, when: database_type == 'oracle' } + tags: db_teardown_cdh + +- name: Remove HAProxy + hosts: haproxy + roles: + - haproxy_teardown + tags: haproxy_teardown + +- name: CM Agent teardown + hosts: cdh_servers:scm_server:kts_servers:kms_servers + roles: + - cm_agents_teardown + tags: cm_agents_teardown + +- name: CM Server teardown + hosts: scm_server + roles: + - cm_server_teardown + tags: cm_server_teardown + +- name: CM DB teardown + hosts: db_server + tasks: + - { include_vars: "{{ inventory_dir }}/group_vars/db_server_mysql.yml", when: database_type == 'mysql' } + - { include_vars: "{{ inventory_dir }}/group_vars/db_server_postgres.yml", when: database_type == 'postgres' } + - { include_vars: "{{ inventory_dir }}/group_vars/db_server_oracle.yml", when: database_type == 'oracle' } + roles: + - { role: db_teardown_mysql_cm, when: database_type == 'mysql' } + - { role: db_teardown_postgres_cm, when: database_type == 'postgres' } + - { role: db_teardown_oracle_cm, when: database_type == 'oracle' } + tags: db_teardown_cm + +- name: CM server yum package removal + hosts: scm_server + roles: + - { role: yum_teardown_cm_server, when: full_teardown == 'True' } + tags: yum_teardown_cm_server + +- name: CM agent yum package removal + hosts: cdh_servers:kts_servers:kms_servers + roles: + - { role: yum_teardown_cm_agent, when: full_teardown == 'True' } + tags: yum_teardown_cm_agent + diff --git a/teardown_restart.yml b/teardown_restart.yml new file mode 100644 index 0000000..da39c99 --- /dev/null +++ b/teardown_restart.yml @@ -0,0 +1,40 @@ +--- + +- name: Start CM agents on all nodes + hosts: cdh_servers:scm_server:kts_servers:kms_servers + tasks: + - shell: "service cloudera-scm-agent start" + +- name: Hard Stop CM agents on all nodes + hosts: cdh_servers:scm_server:kts_servers:kms_servers + tasks: + - shell: "systemctl stop supervisord" + +- name: Delete CM agent run directories on all nodes + hosts: cdh_servers:scm_server:kts_servers:kms_servers + tasks: + - shell: "rm /var/run/cloudera-scm-agent/process/* -rf" + +- name: Delete CM agent directories on all nodes + hosts: cdh_servers:scm_server:kts_servers:kms_servers + tasks: + - shell: "rm /var/lib/cloudera-scm-agent/* -rf" + +- name: Stop SCM server + hosts: scm_server + tasks: + - systemd: + name: cloudera-scm-server + state: stopped + +- name: Delete SCM and Navigator directories on SCM server + hosts: scm_server + tasks: + - shell: "rm /var/lib/cloudera-scm-server/* -rf" + - shell: "rm /var/lib/cloudera-scm-navigator/* -rf" + +- name: Drop databases + hosts: db_server + tasks: + - include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" + - mysql_db: login_user=root login_password={{ mysql_root_password }} name=scm state=absent config_file=/etc/my.cnf login_unix_socket={{ mysql_socket }} From 4e609861634b798cf5cbf5d26192db54e07dbfae Mon Sep 17 00:00:00 2001 From: root Date: Wed, 29 Jan 2020 04:11:36 -0800 Subject: [PATCH 02/15] Initial version working on Cloudcat --- action_plugins/scm_hosts.pyc | Bin 3219 -> 3411 bytes group_vars/all | 18 ++- group_vars/ca.yml | 17 +++ group_vars/cdh_servers.yml | 24 ++-- group_vars/db_server.yml | 18 +-- group_vars/db_server_mysql.yml | 6 +- group_vars/krb5_server.yml | 4 +- group_vars/ldap_enc.yml | 10 +- group_vars/pki.yml | 11 ++ group_vars/scm_server.yml | 39 +++--- group_vars/tls_enc.yml | 8 +- hosts | 39 +++--- 
roles/cdh/templates/hbase.j2 | 4 +- roles/cdh/templates/hdfs.j2 | 24 ++-- roles/cdh/templates/hive.j2 | 20 ++- roles/cdh/templates/impala.j2 | 4 +- roles/cdh/templates/solr.j2 | 12 +- roles/cdh/templates/spark.j2 | 4 +- roles/cdh/templates/spark2.j2 | 39 +++--- roles/cdh/templates/yarn.j2 | 36 +++-- roles/certs/tasks/main.yml | 3 +- roles/cm_agents/templates/config.ini.j2 | 7 + roles/cm_agents_tls/templates/config.ini.j2 | 7 + roles/cm_api/tasks/main.yml | 14 +- roles/cm_repo/tasks/main.yml | 2 +- roles/db_connector/tasks/main.yml | 28 ++-- roles/java/tasks/install_jce_from_config.yml | 6 + roles/java/tasks/install_jce_from_zip.yml | 37 +++++ roles/java/tasks/main.yml | 136 ++++++++++++------- roles/mn_dir_teardown/tasks/main.yml | 4 +- roles/mysql_connector/tasks/main.yml | 40 +++++- roles/pre_reqs/files/nscd.conf | 54 ++++++++ roles/pre_reqs/tasks/main.yml | 3 + roles/scm/tasks/license.yml | 4 +- site.yml | 25 +++- teardown.yml | 6 +- 36 files changed, 493 insertions(+), 220 deletions(-) create mode 100644 group_vars/ca.yml create mode 100644 group_vars/pki.yml create mode 100644 roles/java/tasks/install_jce_from_config.yml create mode 100644 roles/java/tasks/install_jce_from_zip.yml create mode 100644 roles/pre_reqs/files/nscd.conf diff --git a/action_plugins/scm_hosts.pyc b/action_plugins/scm_hosts.pyc index 0c5dd0700d2010f35a2151140beba17391e7de82..ff97588ccf52319ebd8e0dd065b34e4fe435d5e1 100644 GIT binary patch delta 1357 zcmZuwO>f*(6g}79V|y}_nMs<-h_+HC2vluFqpFJr)G`r^Afd{Ym=MuMA$HnM=7Wsg zw2^FO5!STPTXe&oAJ85A0VKqtuto@e0fHr)-upZeiNx5?bMEzd=bm@pvugjk*fIa9 z`tS5=54JJ=+5CT-NBZ)(vz)|0Y++zy!^S}i651Fz7`oVSLA}v?uo4fuJ|;PcNC*xF z6%2iB_@Knc5rjG-tj8`A!VV$!@HOPc=Nkd*AAk1=eR}-&>zO&O>+$GH{eRVK>K=Z9 z+(1ld#|94*?ZVL+i;=65&B)WpVN}t`W#kL$+fwX^Py{DoMTa4Jqsy=|M|=MfR8&D3 zE1HOXtmIYnS=2m49B^df-g+L&@^?L?e}HjT!LEVHU&vh)GZGdxj6&pr4mnvv4)oau zJDx$#a%`@KBO65>c^&;gT2Ra)3(>D4W=*EeioJOrB+;P`dEHVgK4mbjYO7Oo&+d}6 zX$I}p`>Rwmka6*bUT9NWYvEZ)ti5{QBJSY9PvWZ2Hp*Fflbc&ZTdMpZRf-lyo6I>x zfA;@Kzn_q9AwHwB>tM2Ii1^ug^-_G%D8=^{jS-NyFnOTK4dfQs#mdx{t%EG&`rH|o zYHGtw)3$favPE}Uq__7Co$TYWQSM{eP-d6g*u;)ePGBzI>pXf~JRQ@t_blI>GRCy0 z@1DH+h^x26Te%~p?i_M^nkWyul(^bDYZ_}Kz+^Y*MPDk=JWg;R)MJV#rntT4Hl#vvBh{Quw6(aE?;bU{k z)8o-5+sULJbJfsY^OhN3n~cZm!|>V%QJbTSJkouBfIDnY0wD=fSG(a(a-eUxLq0ulmk> SgT?A3S4bwccwKd)YQ6!^WF}rbU?PDSBF5+l8ITZzv;+1FS*wAh3kN)y z7!Tw-8;@R>aPbfD>cMy?-Zb%KG+y=n+KO<%wePp@dw)F7^S*6A4T*d~{gFy+e)v|| zK=jl2|2CWZ@~CiQWxpRbV8_tV(A2T2gDS17h&cx83ACSph-89>MjTBOt0u@XaR`~( zA}rfF972^~8+Z;GSXoWLjvpN_9R2>7&M=d~HY*vA#0kwF+=H*cCgg2}O+~(=2c8)6 zzRn(vJ;pf?dpq%S9)mq5Dy<2`JS|ZA4%A72JjPO)9@BmTHHP7X$p;mS%RW$xi*XM+~ zj&_%1UFNKtlY+W#RJYqDF73Q{iL&C0eob65M#a1_ zW6BTU&U(Xj#0z7LGNNlt4fe9HoaM!f%NKQ{`A8y_P4UYZ)`(q>Scu=v&G4?YDY;fhMhZ2+`O&JuXSvW45Cp6H< zxn)HzlBB51-IORKN2WZL_g=#-PsjjUxVRTf(1ti^O}t9ZUI}2OB7SXIlDF<{Z!{fm zYsW5MW=DBQd{6fMryDVCW%XMs-7hA^qBSSpSW6dYm&8OW+b=)#4DqF?)X!~Zl$81h Dk8#`| diff --git a/group_vars/all b/group_vars/all index d479093..e46004f 100644 --- a/group_vars/all +++ b/group_vars/all @@ -7,8 +7,20 @@ krb5_realm: MIT.EXAMPLE.COM ad_domain: "{{ krb5_realm.lower() }}" computer_ou: OU=computer_hosts,OU=hadoop_prd,DC=ad,DC=sec,DC=example,DC=com domain: MIT.EXAMPLE.COM -kdc: mit.example.com -admin_server: w2k8-1.ad.sec.cloudera.com +kdc: tristan-ansible-1.vpc.cloudera.com +admin_server: tristan-ansible-1.vpc.cloudera.com enc_types: rc4-hmac DES-CBC-MD5 DES-CBC-CRC ad_account_prefix: prefix_ -kdc_account_creation_host_override: w2k8-1.ad.sec.cloudera.com +kdc_account_creation_host_override: tristan-ansible-1.vpc.cloudera.com + +## 
------------------------------------------------------------------------------------------------------------ +## Java installation options +## ------------------------------------------------------------------------------------------------------------ + +java_installation_strategy: package # can be set to 'none', 'package' or 'rpm' + +java_package: java-1.8.0-openjdk-devel +java_rpm_location: /tmp/jdk-8u181-linux-x64.rpm +java_rpm_remote_src: no +java_jce_location: /tmp/jce_policy-8.zip +java_jce_remote_src: no diff --git a/group_vars/ca.yml b/group_vars/ca.yml new file mode 100644 index 0000000..948801f --- /dev/null +++ b/group_vars/ca.yml @@ -0,0 +1,17 @@ +--- +openssl_path: "openssl" +ca_root_location: "/ca" +ca_root_key_password: password +ca_countryname_default: GB +ca_state_or_province: England +ca_org_name: Cloudera Inc +ca_ou: PS +ca_root_cn: Root CA +ca_intermediate_location: "/ca/intermediate" +ca_intermediate_key_password: password +ca_intermediate_cn: Intermediate CA +root_ca_cert_name: ca.cert.pem +intermediate_ca_cert_name: intermediate.cert.pem +chain_cert_name: ca-chain.cert.pem +signed_certificates_local_location: "/tmp/ca/signedcerts" +csr_certificates_local_location: "/tmp/ca/csrs" diff --git a/group_vars/cdh_servers.yml b/group_vars/cdh_servers.yml index c6ec222..61a0773 100644 --- a/group_vars/cdh_servers.yml +++ b/group_vars/cdh_servers.yml @@ -8,23 +8,25 @@ cluster_display_name: Cluster1 cdh_tls: true log_base: /var/log +trusted_realm: EXAMPLE.COM cdh_services: - type: hdfs nameservice: nameservice1 - dfs_data_dir_list: /data/1/dfs/dn,/data/2/dfs/dn,/data/3/dfs/dn,/data/4/dfs/dn,/data/5/dfs/dn,/data/6/dfs/dn,/data/7/dfs/dn,/data/8/dfs/dn + dfs_data_dir_list: /data/1/dfs/dn fs_checkpoint_dir_list: /data/1/dfs/snn dfs_name_dir_list: /data/1/dfs/nn dfs_journalnode_edits_dir: /data/2/dfs/jn - hdfs_supergroup: group_np_hdfs_super + hdfs_supergroup: hdfs + sentry_sync_path_prefixes: /user/hive/warehouse # - type: cdsw - type: hive - type: hbase - hbase_superuser: "@group_np_hdfs_super" + hbase_superuser: "@hbase" - type: hue hue_timezone: Europe/London @@ -33,10 +35,10 @@ cdh_services: secure_content_security_policy: "script-src 'self' 'unsafe-inline' 'unsafe-eval' *.google-analytics.com *.doubleclick.net *.mathjax.org data:;img-src 'self' *.google-analytics.com *.doubleclick.net *.gstatic.com data:;style-src 'self' 'unsafe-inline';connect-src 'self';child-src 'self' data:;object-src 'none'" - type: impala - impala_scratch_dirs: /data/1/impala/impalad,/data/2/impala/impalad,/data/3/impala/impalad,/data/4/impala/impalad,/data/5/impala/impalad,/data/6/impala/impalad,/data/7/impala/impalad,/data/8/impala/impalad + impala_scratch_dirs: /data/1/impala/impalad - - type: kafka - kafka_super_users: kafka +# - type: kafka +# kafka_super_users: kafka # - type: keytrustee @@ -57,12 +59,12 @@ cdh_services: - type: spark - type: yarn - yarn_nodemanager_local_dirs: /data/1/yarn/nm,/data/2/yarn/nm,/data/3/yarn/nm,/data/4/yarn/nm,/data/5/yarn/nm,/data/6/yarn/nm,/data/7/yarn/nm,/data/8/yarn/nm - yarn_nodemanager_log_dirs: /data/1/yarn/nm/log,/data/2/yarn/nm/log,/data/3/yarn/nm/log,/data/4/yarn/nm/log,/data/5/yarn/nm/log,/data/6/yarn/nm/log,/data/7/yarn/nm/log,/data/8/yarn/nm/log + yarn_nodemanager_local_dirs: /data/1/yarn/nm + yarn_nodemanager_log_dirs: /data/1/yarn/nm/log CMJOBUSER: user1 - YARN_ADMIN_ACL: nobody group_yarn_admin,hue + YARN_ADMIN_ACL: nobody mapred,hue - type: zookeeper - zookeeper_data_log_dir: /data/3/zookeeper - zookeeper_edits_dir: /data/4/zookeeper + 
zookeeper_data_log_dir: /data/1/zookeeper + zookeeper_edits_dir: /data/1/zookeeper diff --git a/group_vars/db_server.yml b/group_vars/db_server.yml index 5040a13..7be71d6 100644 --- a/group_vars/db_server.yml +++ b/group_vars/db_server.yml @@ -15,61 +15,61 @@ databases: user: 'scm' pass: 'scm_password' type: 'mysql' - host: '' + host: 'tristan-ansible-1.vpc.cloudera.com' port: '3306' amon: name: 'amon' user: 'amon' pass: 'amon_password' type: 'mysql' - host: '' + host: 'tristan-ansible-1.vpc.cloudera.com' port: '3306' rman: name: 'rman' user: 'rman' pass: 'rman_password' type: 'mysql' - host: '' + host: 'tristan-ansible-1.vpc.cloudera.com' port: '3306' nav: name: 'nav' user: 'nav' pass: 'nav_password' type: 'mysql' - host: '' + host: 'tristan-ansible-1.vpc.cloudera.com' port: '3306' navms: name: 'navms' user: 'navms' pass: 'navms_password' type: 'mysql' - host: '' + host: 'tristan-ansible-1.vpc.cloudera.com' port: '3306' metastore: name: 'metastore' user: 'hive' pass: 'hive_password' type: 'mysql' - host: '' + host: 'tristan-ansible-1.vpc.cloudera.com' port: '3306' sentry: name: 'sentry' user: 'sentry' pass: 'sentry_password' type: 'mysql' - host: '' + host: 'tristan-ansible-1.vpc.cloudera.com' port: '3306' hue: name: 'hue' user: 'hue' pass: 'hue_password' type: 'mysql' - host: '' + host: 'tristan-ansible-1.vpc.cloudera.com' port: '3306' oozie: name: 'oozie' user: 'oozie' pass: 'oozie_password' type: 'mysql' - host: '' + host: 'tristan-ansible-1.vpc.cloudera.com' port: '3306' diff --git a/group_vars/db_server_mysql.yml b/group_vars/db_server_mysql.yml index bf83de6..9a2126d 100644 --- a/group_vars/db_server_mysql.yml +++ b/group_vars/db_server_mysql.yml @@ -1,9 +1,9 @@ --- -mysql_datadir: /logs/mysql -mysql_socket: /logs/mysql/mysql.sock +mysql_datadir: /var/log/mysql +mysql_socket: /var/log/mysql/mysql.sock mysql_port: 3306 -mysql_log_bin: /logs/mysql/mysql_binary_log +mysql_log_bin: /var/log/mysql/mysql_binary_log mysql_log: /var/log/mysqld.log mysql_pid_dir: /var/run/mysqld mysql_pid_file: "{{ mysql_pid_dir }}/mysqld.pid" diff --git a/group_vars/krb5_server.yml b/group_vars/krb5_server.yml index 57fae71..84cc14a 100644 --- a/group_vars/krb5_server.yml +++ b/group_vars/krb5_server.yml @@ -1,10 +1,10 @@ --- # 'ad', 'mit', or 'none' to disable security -krb5_kdc_type: 'ad' +krb5_kdc_type: 'mit' krb5_kdc_master_passwd: changeme -krb5_kdc_admin_user: "cloudera-scm@{{ hostvars[groups['krb5_server'][0]]['default_realm'] }}" +krb5_kdc_admin_user: "cloudera-scm/admin@{{ hostvars[groups['krb5_server'][0]]['default_realm'] }}" krb5_kdc_admin_passwd: "changeme" diff --git a/group_vars/ldap_enc.yml b/group_vars/ldap_enc.yml index 9f8ad77..d2c5fbd 100644 --- a/group_vars/ldap_enc.yml +++ b/group_vars/ldap_enc.yml @@ -25,8 +25,8 @@ ldap_rdom: group_name_attr: cn group_member_attr: member cm_clu_admin: GROUP_CDH_ADMIN - cm_full_admin: GROUP_CDH_ADMIN - cm_key_admin: GROUP_CDH_ADMIN - cm_operator: GROUP_CDH_ADMIN - cm_read_only: GROUP_CDH_ADMIN - cm_user_admin: GROUP_CDH_ADMIN + cm_full_admin: GROUP_CDH_ADMIN2 + cm_key_admin: GROUP_CDH_ADMIN3 + cm_operator: GROUP_CDH_ADMIN4 + cm_read_only: GROUP_CDH_ADMIN5 + cm_user_admin: GROUP_CDH_ADMIN6 diff --git a/group_vars/pki.yml b/group_vars/pki.yml new file mode 100644 index 0000000..b5c94b3 --- /dev/null +++ b/group_vars/pki.yml @@ -0,0 +1,11 @@ +--- + +security_root: "/opt/cloudera/security" +keystore_name: "server.jks" +root_ca_alias: RootCA +intermediate_ca_alias: IntermediateCA +keystore_password: password +cacerts_file: 
/etc/pki/ca-trust/extracted/java/cacerts +jssecacerts_file: /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.232.b09-0.el7_7.x86_64/jre/lib/security/jssecacerts +jssecacerts_pw: password +keytool_path: keytool diff --git a/group_vars/scm_server.yml b/group_vars/scm_server.yml index 9da0420..04322c3 100644 --- a/group_vars/scm_server.yml +++ b/group_vars/scm_server.yml @@ -1,6 +1,6 @@ --- -scm_version: 6.3.0 +scm_version: 6.3.x scm_port: 7180 scm_port_tls: 7183 scm_hostname: "{{ hostvars[groups['scm_server'][0]]['inventory_hostname'] }}" @@ -9,13 +9,15 @@ scm_web_tls: True banner_text: "Ansible Cluster Build" banner_colour: RED -yum_repo_base: http:///RPMS/ +yum_repo_base: http://cloudera-build-3-us-west-2.vpc.cloudera.com/s3/build/1712953/cm6/6.3.x scm_repositories: - - http:///PARCELS/CDH6.2/ - - http:///PARCELS/KTS/6.1.0/ - - http:///PARCELS/KMS/6.1.0/ - - http:///PARCELS/ANACONDA/4.4.1/ + - http://cloudera-build-3-us-west-2.vpc.cloudera.com/s3/build/1746013/cdh6/6.3.x/parcels/ + - http://tristan-ansible-1.vpc.cloudera.com/parcels/keytrustee-kms-6.1.0-parcels/6.1.0/parcels/ + - http://tristan-ansible-1.vpc.cloudera.com/parcels/keytrustee-server-6.1.0-parcels/6.1.0/parcels/ +# - http:///PARCELS/KTS/6.1.0/ +# - http:///PARCELS/KMS/6.1.0/ +# - http:///PARCELS/ANACONDA/4.4.1/ # - http:///PARCELS/Oracle/134/ scm_csd: @@ -24,32 +26,29 @@ scm_csd: scm_products: - product: CDH - version: 6.2.0-1.cdh6.2.0.p0.967373 + version: 6.3.x-1.cdh6.3.x.p0.1746013 # - product: KAFKA # version: 4.0.0-1.4.0.0.p0.1 - - product: KEYTRUSTEE - version: 6.1.0-1.KEYTRUSTEE6.1.0.p0.592714 + # - product: KEYTRUSTEE + # version: 6.1.0-1.KEYTRUSTEE6.1.0.p0.592714 # - product: ORACLE_INSTANT_CLIENT # version: 11.2-1.oracleinstantclient1.0.0.p0.134 - - product: KEYTRUSTEE - version: 6.1.0-1.KEYTRUSTEE6.1.0.p0.592714 + # - product: KEYTRUSTEE + # version: 6.1.0.p0.592714 kts_products: - product: KEYTRUSTEE_SERVER version: 6.1.0-1.keytrustee6.1.0.p0.592761 - - product: CDH - version: 6.2.0-1.cdh6.2.0.p0.967373 - -oom_heap_dump_dir: /logs/heapdumps -eventserver_index_dir: /logs/cloudera-scm-eventserver -hmon_firehose_storage_dir: /logs/cloudera-host-monitor -navms_data_dir: /logs/cloudera-scm-navigator -headlamp_scratch_dir: /logs/cloudera-scm-headlamp -smon_firehose_storage_dir: /logs/cloudera-service-monitor +oom_heap_dump_dir: /var/log/heapdumps +eventserver_index_dir: /var/log/cloudera-scm-eventserver +hmon_firehose_storage_dir: /var/log/cloudera-host-monitor +navms_data_dir: /var/log/cloudera-scm-navigator +headlamp_scratch_dir: /var/log/cloudera-scm-headlamp +smon_firehose_storage_dir: /var/log/cloudera-service-monitor nav_auditstream_filename_pattern: /var/log/cloudera-audit/audit_%d{yyyyMMdd}.evt nav_auditstream_directory: /var/log/cloudera-audit diff --git a/group_vars/tls_enc.yml b/group_vars/tls_enc.yml index 16e723e..e6b29d4 100644 --- a/group_vars/tls_enc.yml +++ b/group_vars/tls_enc.yml @@ -2,11 +2,11 @@ tls: keystore_path: /opt/cloudera/security/jks/localhost.jks - keystore_password: changeme + keystore_password: password key_password_file: /opt/cloudera/security/x509/key.pw tls_cert: /opt/cloudera/security/x509/localhost.pem private_key: /opt/cloudera/security/x509/localhost.key cert_dir: /opt/cloudera/security/CAcerts/ - cert_chain: /opt/cloudera/security/CAcerts/root.pem - truststore_path: /usr/java/latest/jre/lib/security/jssecacerts - truststore_password: changeit + cert_chain: /opt/cloudera/security/CAcerts/ca-chain.cert.pem + truststore_path: 
/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.232.b09-0.el7_7.x86_64/jre/lib/security/jssecacerts + truststore_password: password diff --git a/hosts b/hosts index 4d707a2..1712849 100644 --- a/hosts +++ b/hosts @@ -1,13 +1,16 @@ # Note for AWS: 'Public DNS' name is too long for ansible_host, use 'Public IP' (https://github.com/ansible/ansible/issues/11536) [scm_server] - license_file=/opt/cloudera_license.txt +tristan-ansible-1.vpc.cloudera.com license_file=/root/tristan_stevens_2019_2020_cloudera_license.txt [db_server] - +tristan-ansible-1.vpc.cloudera.com [krb5_server] - default_realm=MIT.EXAMPLE.COM +tristan-ansible-1.vpc.cloudera.com default_realm=MIT.EXAMPLE.COM + +[ca_server] +tristan-ansible-1.vpc.cloudera.com [utility_servers:children] scm_server @@ -19,31 +22,27 @@ gatewayen_servers gatewaylb_servers [gatewayen_servers] - host_template=HostTemplate-GatewayEdge +#tristan-ansible-1.vpc.cloudera.com host_template=HostTemplate-GatewayEdge [gatewaylb_servers] -# host_template=HostTemplate-GatewayLB - host_template=HostTemplate-GatewayLB +tristan-ansible-4.vpc.cloudera.com host_template=HostTemplate-GatewayLB [master_servers] - host_template=HostTemplate-Master1 - host_template=HostTemplate-Master2 - host_template=HostTemplate-Master3 +tristan-ansible-1.vpc.cloudera.com host_template=HostTemplate-Master1 +tristan-ansible-2.vpc.cloudera.com host_template=HostTemplate-Master2 +tristan-ansible-3.vpc.cloudera.com host_template=HostTemplate-Master3 [worker_servers] - - - - - +tristan-ansible-5.vpc.cloudera.com +tristan-ansible-6.vpc.cloudera.com [worker_servers:vars] host_template=HostTemplate-Workers #host_template=HostTemplate-Kafka [kms_servers] -# -# +#tristan-ansible-2.vpc.cloudera.com +#tristan-ansible-3.vpc.cloudera.com [cdh_servers:children] utility_servers @@ -53,16 +52,16 @@ worker_servers kms_servers [kts_servers] -# host_template=KeyTrusteeActive -# host_template=KeyTrusteePassive +#tristan-ansible-1.vpc.cloudera.com host_template=KeyTrusteeActive +#tristan-ansible-1.vpc.cloudera.com host_template=KeyTrusteePassive [haproxy] - +tristan-ansible-1.vpc.cloudera.com [all:vars] # 'ad', 'mit', or 'none' to disable security # This value must match that in group_vars/krb5_server.yml -krb5_kdc_type='ad' +krb5_kdc_type='mit' hdfs_tde_enabled='False' database_type='mysql' full_teardown='False' diff --git a/roles/cdh/templates/hbase.j2 b/roles/cdh/templates/hbase.j2 index fa77f01..e4239e0 100644 --- a/roles/cdh/templates/hbase.j2 +++ b/roles/cdh/templates/hbase.j2 @@ -85,9 +85,11 @@ }, { "name" : "oom_heap_dump_enabled", "value" : "false" + {% if (cdh_services | json_query('[?type==`hbase`].hbase_regionserver_java_heapsize') | length > 0 ) %} }, { "name" : "hbase_regionserver_java_heapsize", - "value" : "2631925760" + "variable" : "HBASE_REGIONSERVER_JAVA_HEAPSIZE" + {% endif %} } ], "base" : true }, { diff --git a/roles/cdh/templates/hdfs.j2 b/roles/cdh/templates/hdfs.j2 index 6dde90a..6d77dc7 100644 --- a/roles/cdh/templates/hdfs.j2 +++ b/roles/cdh/templates/hdfs.j2 @@ -127,9 +127,6 @@ , { "name" : "oom_heap_dump_enabled", "value" : "false" - }, { - "name" : "dfs_datanode_max_locked_memory", - "value" : "2610954240" }, { "name" : "datanode_log_dir", "value" : "{{ log_base }}/hadoop-hdfs" @@ -138,10 +135,17 @@ "value" : "700" }, { "name" : "dfs_datanode_failed_volumes_tolerated", - "value" : "3" + "value" : "{{ (((cdh_services | json_query('[?type==`hdfs`].dfs_data_dir_list') | first).split(',') | count ) / 2 ) | int }}" + {% if (cdh_services | 
json_query('[?type==`hdfs`].datanode_max_locked_memory') | length > 0 ) %} + }, { + "name" : "dfs_datanode_max_locked_memory", + "variable" : "DATANODE_MAX_LOCKED_MEMORY" + {% endif %} + {% if (cdh_services | json_query('[?type==`hdfs`].datanode_java_heapsize') | length > 0 ) %} }, { "name" : "datanode_java_heapsize", - "value" : "2147483648" + "variable" : "DATANODE_JAVA_HEAPSIZE" + {% endif %} } ], "base": true @@ -180,9 +184,11 @@ }, { "name" : "dfs_namenode_handler_count", "value" : "63" + {% if (cdh_services | json_query('[?type==`hdfs`].namenode_java_heapsize') | length > 0 ) %} }, { "name" : "namenode_java_heapsize", - "value" : "21474836480" + "variable" : "NAMENODE_JAVA_HEAPSIZE" + {% endif %} }, { "name" : "fs_trash_interval", "value" : "5760" @@ -205,9 +211,11 @@ }, { "name" : "journalnode_log_dir", "value" : "{{ log_base }}/hadoop-hdfs" + {% if (cdh_services | json_query('[?type==`hdfs`].jn_java_heapsize') | length > 0 ) %} }, { - "name" : "journalNode_java_heapsize", - "value" : "268435456" + "name" : "journalNode_java_heapsize", + "variable" : "JN_JAVA_HEAPSIZE" + {% endif %} } ], "base": true diff --git a/roles/cdh/templates/hive.j2 b/roles/cdh/templates/hive.j2 index f364309..216b26e 100644 --- a/roles/cdh/templates/hive.j2 +++ b/roles/cdh/templates/hive.j2 @@ -110,12 +110,21 @@ } , { "name" : "hiveserver2_load_balancer", "value" : "{{ hostvars[groups['haproxy'][0]]['inventory_hostname'] }}:10000" - }, { - "name" : "hiveserver2_spark_driver_memory", - "value" : "966367641" + {% if (cdh_services | json_query('[?type==`hive`].hiveserver2_java_heapsize') | length > 0 ) %} }, { "name" : "hiveserver2_java_heapsize", - "value" : "8589934592" + "variable" : "HIVESERVER2_JAVA_HEAPSIZE" + {% endif %} + {% if (cdh_services | json_query('[?type==`hive`].hiveserver2_spark_executor_memory') | length > 0 ) %} + }, { + "name" : "hiveserver2_spark_executor_memory", + "variable" : "HIVESERVER2_SPARK_EXECUTOR_MEMORY" + {% endif %} + {% if (cdh_services | json_query('[?type==`hive`].hiveserver2_spark_driver_memory') | length > 0 ) %} + }, { + "name" : "hiveserver2_spark_driver_memory", + "variable" : "HIVESERVER2_SPARK_DRIVER_MEMORY" + {% endif %} }, { "name" : "hiveserver2_enable_impersonation", "value" : "false" @@ -131,9 +140,6 @@ }, { "name" : "hiveserver2_spark_yarn_driver_memory_overhead", "value" : "102" - }, { - "name" : "hiveserver2_spark_executor_memory", - "value" : "1016007228" } ], "base": true } diff --git a/roles/cdh/templates/impala.j2 b/roles/cdh/templates/impala.j2 index 717cba9..77a4938 100644 --- a/roles/cdh/templates/impala.j2 +++ b/roles/cdh/templates/impala.j2 @@ -63,9 +63,11 @@ }, { "name" : "default_query_options", "value" : "query_timeout_s=0" + {% if (cdh_services | json_query('[?type==`impala`].impalad_memory_limit') | length > 0 ) %} }, { "name" : "impalad_memory_limit", - "value" : "75161927680" + "variable" : "IMPALAD_MEMORY_LIMIT" + {% endif %} }, { "name" : "enable_audit_event_log", "value" : "true" diff --git a/roles/cdh/templates/solr.j2 b/roles/cdh/templates/solr.j2 index c11ce39..9719314 100644 --- a/roles/cdh/templates/solr.j2 +++ b/roles/cdh/templates/solr.j2 @@ -62,17 +62,21 @@ "refName" : "SOLR-1-SOLR_SERVER-BASE", "roleType" : "SOLR_SERVER", "configs" : [ { + {% if (cdh_services | json_query('[?type==`solr`].solr_java_heapsize') | length > 0 ) %} "name" : "solr_java_heapsize", - "value" : "53687091200" + "variable" : "SOLR_JAVA_HEAPSIZE" }, { + {% endif %} + {% if (cdh_services | json_query('[?type==`solr`].solr_java_direct_memory_size') | length > 0 ) 
%} + "name" : "solr_java_direct_memory_size", + "variable" : "SOLR_JAVA_DIRECT_MEMORY_SIZE" + }, { + {% endif %} "name" : "solr_log_dir", "value" : "{{ log_base }}/solr" }, { "name" : "log4j_safety_valve", "value" : "" - }, { - "name" : "solr_java_direct_memory_size", - "value" : "17179869184" }, { "name" : "oom_heap_dump_enabled", "value" : "false" diff --git a/roles/cdh/templates/spark.j2 b/roles/cdh/templates/spark.j2 index bcf545d..16d3472 100644 --- a/roles/cdh/templates/spark.j2 +++ b/roles/cdh/templates/spark.j2 @@ -38,9 +38,11 @@ { "name" : "log_dir", "value" : "{{ log_base }}/spark" + {% if (cdh_services | json_query('[?type==`spark`].history_server_max_heapsize') | length > 0 ) %} }, { "name" : "history_server_max_heapsize", - "value" : "3221225472" + "variable" : "HISTORY_SERVER_MAX_HEAPSIZE" + {% endif %} } {% if (cdh_tls) %} , { diff --git a/roles/cdh/templates/spark2.j2 b/roles/cdh/templates/spark2.j2 index dc668df..95a8b21 100644 --- a/roles/cdh/templates/spark2.j2 +++ b/roles/cdh/templates/spark2.j2 @@ -32,25 +32,26 @@ "refName": "SPARK2_ON_YARN-1-SPARK2_YARN_HISTORY_SERVER-BASE", "roleType": "SPARK2_YARN_HISTORY_SERVER", "configs": [ { - "name" : "event_log_cleaner_max_age", - "value" : "1209600" - }, { - "name" : "history_server_max_heapsize", - "value" : "4294967296" - } - {% if (cdh_tls) %} - { - "name" : "ssl_server_keystore_location", - "value" : "{{ tls.keystore_path }}" - }, { - "name" : "ssl_enabled", - "value" : "true" - }, { - "name" : "ssl_server_keystore_password", - "value" : "{{ tls.keystore_password }}" - } - - {% endif %} + "name" : "event_log_cleaner_max_age", + "value" : "1209600" + {% if (cdh_services | json_query('[?type==`spark2`].history_server_max_heapsize') | length > 0 ) %} + }, { + "name" : "history_server_max_heapsize", + "variable" : "HISTORY_SERVER_MAX_HEAPSIZE" + {% endif %} + } + {% if (cdh_tls) %} + , { + "name" : "ssl_server_keystore_location", + "value" : "{{ tls.keystore_path }}" + }, { + "name" : "ssl_enabled", + "value" : "true" + }, { + "name" : "ssl_server_keystore_password", + "value" : "{{ tls.keystore_password }}" + } + {% endif %} ], "base": true } diff --git a/roles/cdh/templates/yarn.j2 b/roles/cdh/templates/yarn.j2 index fc4a828..0124cd2 100644 --- a/roles/cdh/templates/yarn.j2 +++ b/roles/cdh/templates/yarn.j2 @@ -88,15 +88,19 @@ }, { "name" : "node_manager_log_dir", "value" : "{{ log_base }}/hadoop-yarn" + {% if (cdh_services | json_query('[?type==`yarn`].yarn_nodemanager_resource_memory_mb') | length > 0 ) %} + }, { + "name" : "yarn_nodemanager_resource_memory_mb", + "variable" : "YARN_NODEMANAGER_RESOURCE_MEMORY_MB" + {% endif %} + {% if (cdh_services | json_query('[?type==`yarn`].yarn_nodemanager_resource_cpu_vcores') | length > 0 ) %} + }, { + "name" : "yarn_nodemanager_resource_cpu_vcores", + "variable" : "YARN_NODEMANAGER_RESOURCE_CPU_VCORES" + {% endif %} }, { - "name" : "yarn_nodemanager_resource_memory_mb", - "value" : "40960" - }, { "name" : "yarn_nodemanager_heartbeat_interval_ms", "value" : "240" - }, { - "name" : "yarn_nodemanager_resource_cpu_vcores", - "value" : "24" }, { "name" : "oom_heap_dump_enabled", "value" : "false" @@ -109,15 +113,24 @@ "configs": [ { "name" : "resource_manager_log_dir", "value" : "{{ log_base }}/hadoop-yarn" + {% if (cdh_services | json_query('[?type==`yarn`].yarn_scheduler_maximum_allocation_vcores') | length > 0 ) %} }, { "name" : "yarn_scheduler_maximum_allocation_vcores", - "value" : "24" + "variable" : "YARN_SCHEDULER_MAXIMUM_ALLOCATION_VCORES" + {% endif %} + {% if (cdh_services 
| json_query('[?type==`yarn`].yarn_scheduler_maximum_allocation_mb') | length > 0 ) %} + }, { + "name" : "yarn_scheduler_maximum_allocation_mb", + "variable" : "YARN_SCHEDULER_MAXIMUM_ALLOCATION_MB" + {% endif %} + {% if (cdh_services | json_query('[?type==`yarn`].resource_manager_java_heapsize') | length > 0 ) %} + }, { + "name" : "resource_manager_java_heapsize", + "variable" : "RESOURCE_MANAGER_JAVA_HEAPSIZE" + {% endif %} }, { "name" : "resourcemanager_fair_scheduler_preemption", "value" : "true" - }, { - "name" : "yarn_scheduler_maximum_allocation_mb", - "value" : "32768" }, { "name" : "oom_heap_dump_enabled", "value" : "false" @@ -127,9 +140,6 @@ }, { "name" : "resourcemanager_fair_scheduler_assign_multiple", "value" : "false" - }, { - "name" : "resource_manager_java_heapsize", - "value" : "4294967296" } ], "base": true } diff --git a/roles/certs/tasks/main.yml b/roles/certs/tasks/main.yml index 5655c4a..1196341 100644 --- a/roles/certs/tasks/main.yml +++ b/roles/certs/tasks/main.yml @@ -194,8 +194,7 @@ path: "{{ signed_certificates_local_location }}" - name: Copy cacerts to jssecacerts - local_action: shell cp {{ cacerts_file }} {{ jssecacerts_local_location }} - sudo: yes + local_action: "shell cp {{ cacerts_file }} {{ jssecacerts_local_location }}" run_once: true args: creates: "{{ jssecacerts_local_location }}" diff --git a/roles/cm_agents/templates/config.ini.j2 b/roles/cm_agents/templates/config.ini.j2 index 96e4219..7ed919e 100644 --- a/roles/cm_agents/templates/config.ini.j2 +++ b/roles/cm_agents/templates/config.ini.j2 @@ -120,6 +120,13 @@ dns_resolution_collection_timeout_seconds=30 # Use TLS and certificate validation when connecting to the CM server. use_tls=0 +# Minimum TLS or SSL protocol. +# Allowed values: SSLv2, SSLv3, TLSv1.0, TLSv1.1, TLSv1.2 +minimum_tls_protocol=TLSv1.2 + +# List of allowed ciphers for TLS +cipher_list=HIGH:!DSS:!DH:!ADH:!DES:!3DES:!SHA1:!aNULL:!eNULL:!EXPORT:!SSLv2:!SSLv3:!TLSv1 + # The maximum allowed depth of the certificate chain returned by the peer. # The default value of 9 matches the default specified in openssl's # SSL_CTX_set_verify. diff --git a/roles/cm_agents_tls/templates/config.ini.j2 b/roles/cm_agents_tls/templates/config.ini.j2 index 4a6fa61..e31957d 100644 --- a/roles/cm_agents_tls/templates/config.ini.j2 +++ b/roles/cm_agents_tls/templates/config.ini.j2 @@ -121,6 +121,13 @@ dns_resolution_collection_timeout_seconds=30 # Use TLS and certificate validation when connecting to the CM server. use_tls=1 +# Minimum TLS or SSL protocol. +# Allowed values: SSLv2, SSLv3, TLSv1.0, TLSv1.1, TLSv1.2 +minimum_tls_protocol=TLSv1.2 + +# List of allowed ciphers for TLS +cipher_list=HIGH:!DSS:!DH:!ADH:!DES:!3DES:!SHA1:!aNULL:!eNULL:!EXPORT:!SSLv2:!SSLv3:!TLSv1 + # The maximum allowed depth of the certificate chain returned by the peer. # The default value of 9 matches the default specified in openssl's # SSL_CTX_set_verify. 
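The two config.ini.j2 hunks above are identical apart from the use_tls flag (0 in roles/cm_agents, 1 in roles/cm_agents_tls); both now pin the agent to TLSv1.2 and a restricted cipher list. A minimal verification sketch follows. It is not part of this patch series: the play and task names are illustrative assumptions, and only the config path and the two expected settings are taken from the hunks above.

---
# Hypothetical post-deploy check, assuming the agent config has been
# rendered to its standard location by the cm_agents or cm_agents_tls role.
- name: Verify CM agent TLS hardening
  hosts: cdh_servers:kts_servers:kms_servers
  tasks:
    - name: Read the deployed agent config
      slurp:
        src: /etc/cloudera-scm-agent/config.ini
      register: agent_config

    - name: Assert the minimum protocol and cipher list survived templating
      assert:
        that:
          - "'minimum_tls_protocol=TLSv1.2' in agent_config.content | b64decode"
          - "'cipher_list=HIGH:' in agent_config.content | b64decode"
        fail_msg: config.ini is missing the hardened TLS settings

Running it against the same host pattern the site.yml plays use keeps the check aligned with every host on which an agent is installed.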
diff --git a/roles/cm_api/tasks/main.yml b/roles/cm_api/tasks/main.yml index d004905..97867bf 100644 --- a/roles/cm_api/tasks/main.yml +++ b/roles/cm_api/tasks/main.yml @@ -1,10 +1,10 @@ --- -- name: Copy cm_api to Python lib directory - copy: - src: cm_api-19.0.0-py2.7.egg - dest: /usr/lib/python2.7/site-packages/cm_api-19.0.0-py2.7.egg - owner: root - group: root - mode: '0644' +#- name: Copy cm_api to Python lib directory +# copy: +# src: /usr/lib/python2.7/site-packages/cm_api-19.0.0-py2.7.egg +# dest: /usr/lib/python2.7/site-packages/cm_api-19.0.0-py2.7.egg +# owner: root +# group: root +# mode: '0644' diff --git a/roles/cm_repo/tasks/main.yml b/roles/cm_repo/tasks/main.yml index e54555d..39ff8e8 100644 --- a/roles/cm_repo/tasks/main.yml +++ b/roles/cm_repo/tasks/main.yml @@ -5,7 +5,7 @@ yum_repository: name: cloudera-manager description: Cloudera Manager - baseurl: "{{ yum_repo_base }}/el{{ ansible_distribution_major_version }}/" + baseurl: "{{ yum_repo_base }}/redhat{{ ansible_distribution_major_version }}/yum" gpgcheck: no enabled: yes when: (ansible_distribution|lower == "redhat") or (ansible_distribution|lower == "centos") diff --git a/roles/db_connector/tasks/main.yml b/roles/db_connector/tasks/main.yml index bc99acc..7c31431 100644 --- a/roles/db_connector/tasks/main.yml +++ b/roles/db_connector/tasks/main.yml @@ -7,18 +7,18 @@ file: path=/usr/share/java state=directory owner=root group=root mode=0755 -- name: Copy MySQL connector - copy: - src: mysql-connector-java-8.0.13.jar - dest: /usr/share/java/mysql-connector-java.jar - owner: root - group: root - mode: '0755' +#- name: Copy MySQL connector +# copy: +# src: mysql-connector-java-8.0.13.jar +# dest: /usr/share/java/mysql-connector-java.jar +# owner: root +# group: root +# mode: '0755' -- name: Copy Oracle connector - copy: - src: ojdbc8.jar - dest: /usr/share/java/oracle-connector-java.jar - owner: root - group: root - mode: '0755' +#- name: Copy Oracle connector +# copy: +# src: ojdbc8.jar +# dest: /usr/share/java/oracle-connector-java.jar +# owner: root +# group: root +# mode: '0755' diff --git a/roles/java/tasks/install_jce_from_config.yml b/roles/java/tasks/install_jce_from_config.yml new file mode 100644 index 0000000..a63a31c --- /dev/null +++ b/roles/java/tasks/install_jce_from_config.yml @@ -0,0 +1,6 @@ +--- + +- lineinfile: + path: "{{ java_home }}/jre/lib/security/java.security" + regexp: '#?crypto.policy=' + line: crypto.policy=unlimited diff --git a/roles/java/tasks/install_jce_from_zip.yml b/roles/java/tasks/install_jce_from_zip.yml new file mode 100644 index 0000000..df4d2d6 --- /dev/null +++ b/roles/java/tasks/install_jce_from_zip.yml @@ -0,0 +1,37 @@ +--- + +- name: Install unzip package + package: + name: + - unzip + state: installed + +- name: Copy JCE policy zip to temp directory + copy: + src: "{{java_jce_location}}" + dest: "{{ tmp_dir }}/jce.zip" + remote_src: "{{java_jce_remote_src}}" + +- name: Extract JCE policy zip + unarchive: + src: "{{ tmp_dir }}/jce.zip" + dest: "{{ tmp_dir }}" + copy: no + +- name: Copy JCE policy jars into correct location + copy: + src: "{{ item }}" + dest: "{{ java_home }}/jre/lib/security/" + backup: yes + remote_src: True + with_fileglob: + - "{{ tmp_dir }}/{{ unarchived_directory }}/*.jar" + +- name: Cleanup tmp files + file: + path: "{{ tmp_dir }}/{{ item }}" + state: absent + with_items: + - jce.zip + - "{{ unarchived_directory }}" + ignore_errors: True diff --git a/roles/java/tasks/main.yml b/roles/java/tasks/main.yml index 6388215..a095642 100644 --- 
a/roles/java/tasks/main.yml +++ b/roles/java/tasks/main.yml @@ -1,52 +1,88 @@ --- -- name: Install Oracle JDK - yum: name={{ item }} state=latest update_cache=yes - with_items: - - jdk1.8 - - unzip - -- name: Set JCE unlimited - lineinfile: - path: /usr/java/default/jre/lib/security/java.security - regexp: '^#*\s*crypto\.policy.*limited$' - line: crypto.policy=unlimited - -#- stat: path="{{ tmp_dir }}/UnlimitedJCEPolicyJDK7.zip" -# register: jce_zip_exists - -#- name: Download JCE unlimited policy -# get_url: -# url=http://download.oracle.com/otn-pub/java/jce/7/UnlimitedJCEPolicyJDK7.zip -# dest="{{ tmp_dir }}/UnlimitedJCEPolicyJDK7.zip" -# headers="Cookie:oraclelicense=accept-securebackup-cookie" -# when: jce_zip_exists.stat.exists == False - -#- name: Unzip JCE unlimited policy files -# unarchive: -# src: "{{ tmp_dir }}/UnlimitedJCEPolicyJDK7.zip" -# dest: "{{ tmp_dir }}" -# copy: no - -#- name: Install local_policy.jar -# copy: -# src: "{{ tmp_dir }}/UnlimitedJCEPolicy/local_policy.jar" -# dest: /usr/java/jdk1.7.0_67-cloudera/jre/lib/security/local_policy.jar -# backup: yes -# remote_src: True - -#- name: Install US_export_policy.jar -# copy: -# src: "{{ tmp_dir }}/UnlimitedJCEPolicy/US_export_policy.jar" -# dest: /usr/java/jdk1.7.0_67-cloudera/jre/lib/security/US_export_policy.jar -# backup: yes -# remote_src: True - -#- name: Cleanup tmp files -# file: -# path="{{ tmp_dir }}/{{ item }}" -# state=absent -# with_items: -# - UnlimitedJCEPolicy -# - UnlimitedJCEPolicyJDK7.zip -# ignore_errors: True +- name: Copy Java RPM file to temp directory + copy: + src: "{{ java_rpm_location }}" + dest: "{{ tmp_dir }}/jdk.rpm" + remote_src: "{{ java_rpm_remote_src }}" + when: java_installation_strategy == 'rpm' + +- name: Install Java from RPM + package: + name: + - "{{ tmp_dir }}/jdk.rpm" + state: installed + when: java_installation_strategy == 'rpm' + +- name: Install Java from package repository + package: + name: + - "{{ java_package }}" + state: installed + update_cache: yes + when: java_installation_strategy == 'package' + +- name: Add missing symlinks (if installed from Cloudera repo) + block: + - name: Find Java home directory + find: + paths: /usr/java + patterns: 'jdk*-cloudera' + file_type: directory + recurse: no + register: cloudera_jdk_home + - name: Create alternatives symlink for java + alternatives: + name: java + link: /usr/bin/java + path: "{{ cloudera_jdk_home.files[0].path}}/bin/java" + when: cloudera_jdk_home.matched + - name: Create default symlink for Java home directory + file: + src: "{{ cloudera_jdk_home.files[0].path}}" + dest: /usr/java/default + state: link + when: cloudera_jdk_home.matched + when: java_installation_strategy != 'none' + +- name: Capture installed Java provider + raw: /usr/bin/java -version 2>&1 | egrep -o 'Java\(TM\)|OpenJDK' | sed 's/Java(TM)/Oracle/' | tr '[A-Z]' '[a-z]' | head -1 + register: provider + when: java_installation_strategy != 'none' + +- name: Capture installed Java version + raw: /usr/bin/java -version 2>&1 | grep version | tr -d '"' | tr "_" " " | awk '{print $3"\n"$4}' + register: version + when: java_installation_strategy != 'none' + +- set_fact: + installed_jdk_provider: "{{ provider.stdout_lines[0] }}" + installed_jdk_version: "{{ version.stdout_lines[0] }}" + installed_jdk_update: "{{ version.stdout_lines[1] }}" + when: java_installation_strategy != 'none' + +- name: Enable Unlimited Strength Policy (Oracle JDK7) + include_tasks: install_jce_from_zip.yml + vars: + java_home: /usr/java/default + unarchived_directory: 
UnlimitedJCEPolicy + when: java_installation_strategy != 'none' and installed_jdk_provider == 'oracle' and installed_jdk_version == '1.7.0' + +- name: Enable Unlimited Strength Policy (Oracle JDK8 before u151) + include_tasks: install_jce_from_zip.yml + vars: + java_home: /usr/java/default + unarchived_directory: UnlimitedJCEPolicyJDK8 + when: java_installation_strategy != 'none' and installed_jdk_provider == 'oracle' and installed_jdk_version == '1.8.0' and installed_jdk_update|int < 151 + +- name: Enable Unlimited Strength Policy (Oracle JDK8 after u151) + include_tasks: install_jce_from_config.yml + vars: + java_home: /usr/java/default + when: java_installation_strategy != 'none' and installed_jdk_provider == 'oracle' and installed_jdk_version == '1.8.0' and installed_jdk_update|int >= 151 + +- name: Enable Unlimited Strength Policy (OpenJDK) + include_tasks: install_jce_from_config.yml + vars: + java_home: /usr/lib/jvm + when: java_installation_strategy != 'none' and installed_jdk_provider == 'openjdk' and installed_jdk_version is not match("11.*") diff --git a/roles/mn_dir_teardown/tasks/main.yml b/roles/mn_dir_teardown/tasks/main.yml index 83c98c6..a0fa025 100644 --- a/roles/mn_dir_teardown/tasks/main.yml +++ b/roles/mn_dir_teardown/tasks/main.yml @@ -22,11 +22,11 @@ - name: ZK Data Dirs file: - path: "/data/3/zookeeper" + path: "/data/1/zookeeper" state: absent - name: ZK Edits Dirs file: - path: "/data/4/zookeeper" + path: "/data/1/zookeeper" state: absent diff --git a/roles/mysql_connector/tasks/main.yml b/roles/mysql_connector/tasks/main.yml index d209053..0873016 100644 --- a/roles/mysql_connector/tasks/main.yml +++ b/roles/mysql_connector/tasks/main.yml @@ -1,4 +1,40 @@ --- -- name: Install MySQL JDBC Connector - yum: name=mysql-connector-java state=installed +- name: Create /usr/share/java directory + file: + path: /usr/share/java + state: directory + mode: 0755 + +- name: Install unzip package + package: + name: + - unzip + state: installed + +- stat: + path: /usr/share/java/mysql-connector-java.jar + register: mysql_connector_java + +- name: Download MySQL Connector/J + get_url: + url: https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-5.1.48.zip + dest: "{{ tmp_dir }}/mysql-connector-java.zip" + checksum: "md5:5da24facd99964f296ecde32abcd2384" + mode: 0644 + when: not mysql_connector_java.stat.exists + +- name: Extract MySQL Connector/J zip file + unarchive: + src: "{{ tmp_dir }}/mysql-connector-java.zip" + dest: "{{ tmp_dir }}" + copy: no + when: not mysql_connector_java.stat.exists + +- name: Copy MySQL Connector/J jar file to correct location + copy: + src: "{{ tmp_dir }}/mysql-connector-java-5.1.48/mysql-connector-java-5.1.48.jar" + dest: /usr/share/java/mysql-connector-java.jar + mode: 0644 + remote_src: yes + when: not mysql_connector_java.stat.exists diff --git a/roles/pre_reqs/files/nscd.conf b/roles/pre_reqs/files/nscd.conf new file mode 100644 index 0000000..8e5700f --- /dev/null +++ b/roles/pre_reqs/files/nscd.conf @@ -0,0 +1,54 @@ +# max-threads 32 + server-user nscd +# stat-user somebody + debug-level 0 +# reload-count 5 + paranoia no +# restart-interval 3600 + + enable-cache passwd no + positive-time-to-live passwd 600 + negative-time-to-live passwd 20 + suggested-size passwd 211 + check-files passwd yes + persistent passwd yes + shared passwd yes + max-db-size passwd 33554432 + auto-propagate passwd yes + + enable-cache group no + positive-time-to-live group 3600 + negative-time-to-live group 60 + suggested-size group 211 + check-files group 
yes + persistent group yes + shared group yes + max-db-size group 33554432 + auto-propagate group yes + + enable-cache hosts yes + positive-time-to-live hosts 3600 + negative-time-to-live hosts 20 + suggested-size hosts 211 + check-files hosts yes + persistent hosts yes + shared hosts yes + max-db-size hosts 33554432 + + enable-cache services no + positive-time-to-live services 28800 + negative-time-to-live services 20 + suggested-size services 211 + check-files services yes + persistent services yes + shared services yes + max-db-size services 33554432 + + enable-cache netgroup no + positive-time-to-live netgroup 28800 + negative-time-to-live netgroup 20 + suggested-size netgroup 211 + check-files netgroup yes + persistent netgroup yes + shared netgroup yes + max-db-size netgroup 33554432 diff --git a/roles/pre_reqs/tasks/main.yml b/roles/pre_reqs/tasks/main.yml index 08717d0..41e0a76 100644 --- a/roles/pre_reqs/tasks/main.yml +++ b/roles/pre_reqs/tasks/main.yml @@ -116,6 +116,9 @@ - name: Set SELINUX to permissive - setenforce shell: "setenforce 0" + register: disable_selinux + ignore_errors: True + when: ansible_os_family == 'RedHat' #- name: Verify required SSSD packages have been yum installed # yum: diff --git a/roles/scm/tasks/license.yml b/roles/scm/tasks/license.yml index ecff73e..124602a 100644 --- a/roles/scm/tasks/license.yml +++ b/roles/scm/tasks/license.yml @@ -3,9 +3,9 @@ - include_vars: "{{ inventory_dir }}/group_vars/scm_server_enc.yml" - include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" -- name: Copy cm_api to Python lib directory +- name: Copy license file to scm_server copy: - src: cloudera_license.txt + src: "{{ hostvars[scm_hostname]['license_file'] }}" dest: "{{ hostvars[scm_hostname]['license_file'] }}" owner: cloudera-scm group: cloudera-scm diff --git a/site.yml b/site.yml index f19ea8c..35ba57a 100644 --- a/site.yml +++ b/site.yml @@ -29,13 +29,14 @@ hosts: db_server roles: - { role: mariadb, when: database_type == 'mysql' } - - { role: postgres, when: database_type == 'postgres' } +# - { role: postgres, when: database_type == 'postgres' } tags: mysql - name: Install DB Connectors hosts: cdh_servers roles: - db_connector + - mysql_connector tags: [db_connector, pre_build] - name: Install MIT KDC Server @@ -44,12 +45,24 @@ - { role: krb5_server, when: krb5_kdc_type == 'mit' } tags: [krb5, pre_build] +- name: Configure CA + hosts: ca_server + roles: + - ca_server + tags: ca_server + - name: Setup certificates hosts: all roles: - certs tags: certs +- name: Sign Certs + hosts: ca_server + roles: + - ca_server_signing + tags: ca_server_signing + - name: Install certificates hosts: all roles: @@ -88,11 +101,11 @@ - scm tags: [cm_cluster_template, cm_install] -- name: Map Cloudera Manager Roles to LDAP groups - hosts: scm_server - roles: - - cm_roles - tags: [cm_roles, cm_install] +#- name: Map Cloudera Manager Roles to LDAP groups +# hosts: scm_server +# roles: +# - cm_roles +# tags: [cm_roles, cm_install] - name: Install Cloudera Manager Agents - tls hosts: cdh_servers:kts_servers:kms_servers diff --git a/teardown.yml b/teardown.yml index 5d97b75..1d1fd2b 100644 --- a/teardown.yml +++ b/teardown.yml @@ -44,7 +44,7 @@ - include_vars: "{{ inventory_dir }}/group_vars/db_server.yml" roles: - { role: db_teardown_mysql_cdh, when: database_type == 'mysql' } - - { role: db_teardown_postgres_cdh, when: database_type == 'postgres' } + #- { role: db_teardown_postgres_cdh, when: database_type == 'postgres' } - { role: db_teardown_oracle_cdh, when: database_type == 
'oracle' } tags: db_teardown_cdh @@ -70,11 +70,11 @@ hosts: db_server tasks: - { include_vars: "{{ inventory_dir }}/group_vars/db_server_mysql.yml", when: database_type == 'mysql' } - - { include_vars: "{{ inventory_dir }}/group_vars/db_server_postgres.yml", when: database_type == 'postgres' } + #- { include_vars: "{{ inventory_dir }}/group_vars/db_server_postgres.yml", when: database_type == 'postgres' } - { include_vars: "{{ inventory_dir }}/group_vars/db_server_oracle.yml", when: database_type == 'oracle' } roles: - { role: db_teardown_mysql_cm, when: database_type == 'mysql' } - - { role: db_teardown_postgres_cm, when: database_type == 'postgres' } + #- { role: db_teardown_postgres_cm, when: database_type == 'postgres' } - { role: db_teardown_oracle_cm, when: database_type == 'oracle' } tags: db_teardown_cm From 255b63402cc1c77ec002d6695e010d48145eac4b Mon Sep 17 00:00:00 2001 From: Tristan Stevens Date: Wed, 29 Jan 2020 13:12:23 +0000 Subject: [PATCH 03/15] Teardown consolidation --- roles/dir_teardown/tasks/main.yml | 52 +++++++++++++++++++------ roles/dn_dir_teardown/tasks/main.yml | 4 -- roles/haproxy_teardown/tasks/main.yml | 21 ---------- roles/kafka_dir_teardown/tasks/main.yml | 6 --- roles/kms_dir_teardown/tasks/main.yml | 4 -- roles/kts_dir_teardown/tasks/main.yml | 4 -- roles/mn_dir_teardown/tasks/main.yml | 32 --------------- teardown.yml | 39 ++----------------- 8 files changed, 45 insertions(+), 117 deletions(-) delete mode 100644 roles/dn_dir_teardown/tasks/main.yml delete mode 100644 roles/haproxy_teardown/tasks/main.yml delete mode 100644 roles/kafka_dir_teardown/tasks/main.yml delete mode 100644 roles/kms_dir_teardown/tasks/main.yml delete mode 100644 roles/kts_dir_teardown/tasks/main.yml delete mode 100644 roles/mn_dir_teardown/tasks/main.yml diff --git a/roles/dir_teardown/tasks/main.yml b/roles/dir_teardown/tasks/main.yml index 7bf2303..83eaa4e 100644 --- a/roles/dir_teardown/tasks/main.yml +++ b/roles/dir_teardown/tasks/main.yml @@ -2,43 +2,73 @@ - include_vars: "{{ inventory_dir }}/group_vars/cdh_servers.yml" -#- debug: var=cdh_services -#- debug: var="{{ cdh_services[0].dfs_data_dir_list }}" - -- name: DFS Data Dir Delete - shell: rm /data/*/dfs/ -rf - +- debug: + var=group_names - name: SNN Dir Delete file: - path: "{{ cdh_services[0].fs_checkpoint_dir_list }}" + path: "{{ cdh_services | json_query('[?type==`hdfs`].fs_checkpoint_dir_list') | first }}" state: absent ignore_errors: true + when: "'master_servers' in group_names" - name: NN Dir Delete file: - path: "{{ cdh_services[0].dfs_name_dir_list }}" + path: "{{ item }}" state: absent ignore_errors: true + with_items: "{{ cdh_services | json_query('[?type==`hdfs`].dfs_name_dir_list') }}" + when: "'master_servers' in group_names" - name: JN Edits Dir file: - path: "{{ cdh_services[0].dfs_journalnode_edits_dir }}" + path: "{{ cdh_services | json_query('[?type==`hdfs`].dfs_journalnode_edits_dir') | first }}" state: absent ignore_errors: true + when: "'master_servers' in group_names" - name: ZK Data Dirs file: - path: "/data/3/zookeeper" + path: "{{ cdh_services | json_query('[?type==`zookeeper`].zookeeper_data_log_dir') | first }}" state: absent + when: "'master_servers' in group_names" - name: ZK Edits Dirs file: - path: "/data/4/zookeeper" + path: "{{ cdh_services | json_query('[?type==`zookeeper`].zookeeper_edits_dir') | first }}" state: absent + when: "'master_servers' in group_names" + +- name: DFS Data Dir Delete + file: + path: "{{ item }}" + state: absent + ignore_errors: true + with_items: "{{ 
cdh_services | json_query('[?type==`hdfs`].dfs_data_dir_list') }}" + when: "'worker_servers' in group_names" - name: Kafka Dir file: path: "/var/local/kafka/data" state: absent + when: "'kafka_brokers' in group_names" + +- name: KMS Data Dir + file: + path: "{{ kms_conf_dir }}" + state: absent + when: "'kms_servers' in group_names" + +- name: KMS Key Dir + file: + path: "{{ kms_key_dir }}" + state: absent + when: "'kms_servers' in group_names" + +- name: KTS Data Dir + file: + path: "{{ item }}" + state: absent + with_items: "{{ kts_services[0] }}" + when: "'kts_servers' in group_names" diff --git a/roles/dn_dir_teardown/tasks/main.yml b/roles/dn_dir_teardown/tasks/main.yml deleted file mode 100644 index 93121b5..0000000 --- a/roles/dn_dir_teardown/tasks/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- - -- name: DFS Data Dir Delete - shell: rm /data/*/dfs/ -rf diff --git a/roles/haproxy_teardown/tasks/main.yml b/roles/haproxy_teardown/tasks/main.yml deleted file mode 100644 index 269a6b1..0000000 --- a/roles/haproxy_teardown/tasks/main.yml +++ /dev/null @@ -1,21 +0,0 @@ -- name: Stop HAProxy - service: - name: haproxy - state: stopped - ignore_errors: true - -- name: Delete combined key and pem - file: - path: /etc/haproxy/combinedKeyAndCert.pem - state: absent - -- name: Delete HAProxy config - file: - path: /etc/haproxy/haproxy.cfg - state: absent - -- name: Install HAProxy - yum: - name: haproxy - state: absent - ignore_errors: true diff --git a/roles/kafka_dir_teardown/tasks/main.yml b/roles/kafka_dir_teardown/tasks/main.yml deleted file mode 100644 index 291dfaf..0000000 --- a/roles/kafka_dir_teardown/tasks/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- - -- name: Kafka Dir - file: - path: "/var/local/kafka/data" - state: absent diff --git a/roles/kms_dir_teardown/tasks/main.yml b/roles/kms_dir_teardown/tasks/main.yml deleted file mode 100644 index 818836f..0000000 --- a/roles/kms_dir_teardown/tasks/main.yml +++ /dev/null @@ -1,4 +0,0 @@ -- name: KMS Data Dir - file: - path: "/var/opt/cloudera/kms-keytrustee" - state: absent diff --git a/roles/kts_dir_teardown/tasks/main.yml b/roles/kts_dir_teardown/tasks/main.yml deleted file mode 100644 index c48535d..0000000 --- a/roles/kts_dir_teardown/tasks/main.yml +++ /dev/null @@ -1,4 +0,0 @@ -- name: KTS Data Dir - file: - path: "/var/opt/cloudera/keytrustee" - state: absent diff --git a/roles/mn_dir_teardown/tasks/main.yml b/roles/mn_dir_teardown/tasks/main.yml deleted file mode 100644 index a0fa025..0000000 --- a/roles/mn_dir_teardown/tasks/main.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- - -- include_vars: "{{ inventory_dir }}/group_vars/cdh_servers.yml" - -- name: SNN Dir Delete - file: - path: "{{ cdh_services[0].fs_checkpoint_dir_list }}" - state: absent - ignore_errors: true - -- name: NN Dir Delete - file: - path: "{{ cdh_services[0].dfs_name_dir_list }}" - state: absent - ignore_errors: true - -- name: JN Edits Dir - file: - path: "{{ cdh_services[0].dfs_journalnode_edits_dir }}" - state: absent - ignore_errors: true - -- name: ZK Data Dirs - file: - path: "/data/1/zookeeper" - state: absent - - -- name: ZK Edits Dirs - file: - path: "/data/1/zookeeper" - state: absent diff --git a/teardown.yml b/teardown.yml index 1d1fd2b..fc6e881 100644 --- a/teardown.yml +++ b/teardown.yml @@ -1,42 +1,11 @@ --- # Cloudera playbook teardown - -#- name: CDH Teardown -# hosts: scm_server -# roles: -# - cdh_teardown -# tags: cdh_teardown - -- name: Datanode Dir teardown - hosts: cdh_servers - roles: - - dn_dir_teardown - tags: dn_dir_teardown - -- name: 
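# The consolidated dir_teardown tasks above drive every path from cdh_services
# via json_query, which requires the jmespath library on the control node.
# A self-contained sketch of the filter against the same structure (the two
# service entries here are illustrative values, not this cluster's real dirs):
- hosts: localhost
  gather_facts: false
  vars:
    cdh_services:
      - { type: hdfs, dfs_name_dir_list: /data/1/dfs/nn }
      - { type: zookeeper, zookeeper_data_log_dir: /data/3/zookeeper }
  tasks:
    - debug:
        msg: "{{ cdh_services | json_query('[?type==`hdfs`].dfs_name_dir_list') | first }}"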
Masternode Dir teardown - hosts: master_servers - roles: - - mn_dir_teardown - tags: mn_dir_teardown - -- name: Kafka Dir teardown - hosts: cdh_servers - roles: - - kafka_dir_teardown - tags: kafka_dir_teardown - -- name: KTS Dir teardown - hosts: kts_servers - roles: - - kts_dir_teardown - tags: kts_dir_teardown - -- name: KMS Dir teardown - hosts: kms_servers +- name: Dir teardown + hosts: all roles: - - kms_dir_teardown - tags: kms_dir_teardown + - dir_teardown + tags: dir_teardown - name: CDH DB teardown hosts: scm_server From c98066937e355b98f3762622aa6b9d844aa99558 Mon Sep 17 00:00:00 2001 From: Tristan Stevens Date: Wed, 29 Jan 2020 14:23:04 +0000 Subject: [PATCH 04/15] Remove deprecated YUM statement --- roles/cm_agents/tasks/main.yml | 16 ++++++---------- roles/scm/tasks/main.yml | 12 +++++++----- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/roles/cm_agents/tasks/main.yml b/roles/cm_agents/tasks/main.yml index 3919748..0cbd75d 100644 --- a/roles/cm_agents/tasks/main.yml +++ b/roles/cm_agents/tasks/main.yml @@ -5,10 +5,12 @@ - include: 36322.yml - name: Install Cloudera Manager Agents - yum: name={{ item }} state=installed - with_items: - - "cloudera-manager-daemons-{{ scm_version }}" - - "cloudera-manager-agent-{{ scm_version }}" + yum: + name: + - cloudera-manager-daemons + - cloudera-manager-agent + update_cache: yes + state: installed - name: Deploy CM Agent config ini template: @@ -18,11 +20,5 @@ owner: root mode: '0644' -#- name: Configure Cloudera Manager Agent 'server_host' -# lineinfile: dest=/etc/cloudera-scm-agent/config.ini regexp=^server_host line=server_host={{ hostvars[scm_hostname]['inventory_hostname'] }} - -#- name: Configure Clouder Manager Agent use_tls -# lineinfile: dest=/etc/cloudera-scm-agent/config.ini regexp=^use_tls line=use_tls={{ use_tls }} - - name: Restart Cloudera Manager Agents service: name=cloudera-scm-agent state=restarted enabled=yes diff --git a/roles/scm/tasks/main.yml b/roles/scm/tasks/main.yml index 9babc2e..5276e9c 100644 --- a/roles/scm/tasks/main.yml +++ b/roles/scm/tasks/main.yml @@ -7,11 +7,13 @@ - include_vars: "{{ inventory_dir }}/group_vars/tls_enc.yml" - name: Install the Cloudera Manager Server Packages - yum: name={{ item }} state=installed - with_items: - - "cloudera-manager-daemons-{{ scm_version }}" - - "cloudera-manager-server-{{ scm_version }}" - - "cloudera-manager-agent-{{ scm_version }}" + yum: + name: + - cloudera-manager-daemons + - cloudera-manager-server + - cloudera-manager-agent + - openldap-clients + state: installed #To Do #Change scm_dir to use if statement - previously /usr/share/cmf/ From 033e80660f4cd679124a5ade3d17378560280402 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 29 Jan 2020 08:38:46 -0800 Subject: [PATCH 05/15] Latest changes --- .gitignore | 1 + action_plugins/scm_hosts.pyc | Bin 3411 -> 3411 bytes group_vars/all | 6 +++--- group_vars/db_server.yml | 18 +++++++++--------- group_vars/pki.yml | 2 +- group_vars/scm_server.yml | 4 ++-- group_vars/tls_enc.yml | 2 +- hosts | 35 ++++++++++++++++++----------------- teardown.yml | 10 +++++----- 9 files changed, 40 insertions(+), 38 deletions(-) diff --git a/.gitignore b/.gitignore index 1ff6a72..d530a96 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ .idea/ .DS_Store *.retry +*.pyc diff --git a/action_plugins/scm_hosts.pyc b/action_plugins/scm_hosts.pyc index ff97588ccf52319ebd8e0dd065b34e4fe435d5e1..4876165964953f3c6bdfb983ec79a5172dfffefe 100644 GIT binary patch delta 16 XcmcaCby^{5zGjIid delta 16 XcmcaCby/PARCELS/KTS/6.1.0/ # 
- http:///PARCELS/KMS/6.1.0/ # - http:///PARCELS/ANACONDA/4.4.1/ diff --git a/group_vars/tls_enc.yml b/group_vars/tls_enc.yml index e6b29d4..5ebd3c8 100644 --- a/group_vars/tls_enc.yml +++ b/group_vars/tls_enc.yml @@ -8,5 +8,5 @@ tls: private_key: /opt/cloudera/security/x509/localhost.key cert_dir: /opt/cloudera/security/CAcerts/ cert_chain: /opt/cloudera/security/CAcerts/ca-chain.cert.pem - truststore_path: /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.232.b09-0.el7_7.x86_64/jre/lib/security/jssecacerts + truststore_path: /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.242.b08-0.el7_7.x86_64/jre/lib/security/jssecacerts truststore_password: password diff --git a/hosts b/hosts index 1712849..bf051cd 100644 --- a/hosts +++ b/hosts @@ -1,16 +1,16 @@ # Note for AWS: 'Public DNS' name is too long for ansible_host, use 'Public IP' (https://github.com/ansible/ansible/issues/11536) [scm_server] -tristan-ansible-1.vpc.cloudera.com license_file=/root/tristan_stevens_2019_2020_cloudera_license.txt +tristan2-1.vpc.cloudera.com license_file=/root/tristan_stevens_2019_2020_cloudera_license.txt [db_server] -tristan-ansible-1.vpc.cloudera.com +tristan2-1.vpc.cloudera.com [krb5_server] -tristan-ansible-1.vpc.cloudera.com default_realm=MIT.EXAMPLE.COM +tristan2-1.vpc.cloudera.com default_realm=MIT.EXAMPLE.COM [ca_server] -tristan-ansible-1.vpc.cloudera.com +tristan2-1.vpc.cloudera.com [utility_servers:children] scm_server @@ -22,27 +22,28 @@ gatewayen_servers gatewaylb_servers [gatewayen_servers] -#tristan-ansible-1.vpc.cloudera.com host_template=HostTemplate-GatewayEdge +#tristan2-1.vpc.cloudera.com host_template=HostTemplate-GatewayEdge [gatewaylb_servers] -tristan-ansible-4.vpc.cloudera.com host_template=HostTemplate-GatewayLB +tristan2-4.vpc.cloudera.com host_template=HostTemplate-GatewayLB [master_servers] -tristan-ansible-1.vpc.cloudera.com host_template=HostTemplate-Master1 -tristan-ansible-2.vpc.cloudera.com host_template=HostTemplate-Master2 -tristan-ansible-3.vpc.cloudera.com host_template=HostTemplate-Master3 +tristan2-1.vpc.cloudera.com host_template=HostTemplate-Master1 +tristan2-2.vpc.cloudera.com host_template=HostTemplate-Master2 +tristan2-3.vpc.cloudera.com host_template=HostTemplate-Master3 [worker_servers] -tristan-ansible-5.vpc.cloudera.com -tristan-ansible-6.vpc.cloudera.com +tristan2-5.vpc.cloudera.com +tristan2-6.vpc.cloudera.com +tristan2-7.vpc.cloudera.com [worker_servers:vars] host_template=HostTemplate-Workers #host_template=HostTemplate-Kafka [kms_servers] -#tristan-ansible-2.vpc.cloudera.com -#tristan-ansible-3.vpc.cloudera.com +tristan2-8.vpc.cloudera.com +tristan2-9.vpc.cloudera.com [cdh_servers:children] utility_servers @@ -52,16 +53,16 @@ worker_servers kms_servers [kts_servers] -#tristan-ansible-1.vpc.cloudera.com host_template=KeyTrusteeActive -#tristan-ansible-1.vpc.cloudera.com host_template=KeyTrusteePassive +tristan2-10.vpc.cloudera.com host_template=KeyTrusteeActive +tristan2-11.vpc.cloudera.com host_template=KeyTrusteePassive [haproxy] -tristan-ansible-1.vpc.cloudera.com +tristan2-1.vpc.cloudera.com [all:vars] # 'ad', 'mit', or 'none' to disable security # This value must match that in group_vars/krb5_server.yml krb5_kdc_type='mit' -hdfs_tde_enabled='False' +hdfs_tde_enabled='True' database_type='mysql' full_teardown='False' diff --git a/teardown.yml b/teardown.yml index fc6e881..15485f8 100644 --- a/teardown.yml +++ b/teardown.yml @@ -17,11 +17,11 @@ - { role: db_teardown_oracle_cdh, when: database_type == 'oracle' } tags: db_teardown_cdh -- name: Remove HAProxy - hosts: haproxy - 
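# The truststore_path above pins an exact OpenJDK build string, which is why
# this diff has to bump it for a routine JDK update. A hedged sketch that
# derives the path from the live JDK instead (assumes a JDK 8 jre/ layout and
# java resolvable on the PATH):
- name: Resolve the active JDK home
  shell: dirname $(dirname $(readlink -f $(which java)))
  register: java_home
  changed_when: false

- name: Derive the jssecacerts truststore path
  set_fact:
    truststore_path: "{{ java_home.stdout }}/lib/security/jssecacerts"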
roles: - - haproxy_teardown - tags: haproxy_teardown +#- name: Remove HAProxy +# hosts: haproxy +# roles: +# - haproxy_teardown +# tags: haproxy_teardown - name: CM Agent teardown hosts: cdh_servers:scm_server:kts_servers:kms_servers From c46bc7aebe928a8b3d33626fd3d52a0c4dbc33e5 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 31 Jan 2020 03:38:34 -0800 Subject: [PATCH 06/15] Final running version on Cloudcat --- action_plugins/scm_hosts.pyc | Bin 3411 -> 3411 bytes group_vars/all | 6 +- group_vars/db_server.yml | 18 +++--- group_vars/scm_server.yml | 11 ++-- hosts | 34 ++++++------ roles/cdh/templates/base.j2 | 8 +++ roles/cm_agents/tasks/main.yml | 2 - roles/dir_teardown/tasks/main.yml | 4 +- roles/dn_dir_creation/tasks/main.yml | 37 ------------- roles/haproxy/tasks/main.yml | 7 ++- roles/kms_encryption_zones/tasks/main.yml | 2 +- roles/kms_key_sync/tasks/main.yml | 4 +- roles/kts_key_sync/tasks/main.yml | 10 +++- roles/nn_dir_creation/tasks/main.yml | 52 ------------------ roles/{cm_agents/tasks => pre_reqs}/36322.yml | 0 roles/pre_reqs/tasks/main.yml | 26 +++++++-- roles/scm/tasks/main.yml | 4 +- roles/scm/templates/cms_base.j2 | 20 +++++-- site.yml | 30 ++++------ 19 files changed, 109 insertions(+), 166 deletions(-) delete mode 100644 roles/dn_dir_creation/tasks/main.yml delete mode 100644 roles/nn_dir_creation/tasks/main.yml rename roles/{cm_agents/tasks => pre_reqs}/36322.yml (100%) diff --git a/action_plugins/scm_hosts.pyc b/action_plugins/scm_hosts.pyc index 4876165964953f3c6bdfb983ec79a5172dfffefe..ccad8a229bf5d813e366b8de2116d8000daa66ac 100644 GIT binary patch delta 15 WcmcaCby/PARCELS/KTS/6.1.0/ # - http:///PARCELS/KMS/6.1.0/ # - http:///PARCELS/ANACONDA/4.4.1/ @@ -31,8 +31,8 @@ scm_products: # - product: KAFKA # version: 4.0.0-1.4.0.0.p0.1 - # - product: KEYTRUSTEE - # version: 6.1.0-1.KEYTRUSTEE6.1.0.p0.592714 + - product: KEYTRUSTEE + version: 6.1.0-1.KEYTRUSTEE6.1.0.p0.592714 # - product: ORACLE_INSTANT_CLIENT # version: 11.2-1.oracleinstantclient1.0.0.p0.134 @@ -44,6 +44,9 @@ kts_products: - product: KEYTRUSTEE_SERVER version: 6.1.0-1.keytrustee6.1.0.p0.592761 + - product: CDH + version: 6.3.x-1.cdh6.3.x.p0.1746013 + oom_heap_dump_dir: /var/log/heapdumps eventserver_index_dir: /var/log/cloudera-scm-eventserver hmon_firehose_storage_dir: /var/log/cloudera-host-monitor diff --git a/hosts b/hosts index bf051cd..332658a 100644 --- a/hosts +++ b/hosts @@ -1,16 +1,16 @@ # Note for AWS: 'Public DNS' name is too long for ansible_host, use 'Public IP' (https://github.com/ansible/ansible/issues/11536) [scm_server] -tristan2-1.vpc.cloudera.com license_file=/root/tristan_stevens_2019_2020_cloudera_license.txt +tristan3-1.vpc.cloudera.com license_file=/root/tristan_stevens_2019_2020_cloudera_license.txt [db_server] -tristan2-1.vpc.cloudera.com +tristan3-1.vpc.cloudera.com [krb5_server] -tristan2-1.vpc.cloudera.com default_realm=MIT.EXAMPLE.COM +tristan3-1.vpc.cloudera.com default_realm=MIT.EXAMPLE.COM [ca_server] -tristan2-1.vpc.cloudera.com +tristan3-1.vpc.cloudera.com [utility_servers:children] scm_server @@ -22,28 +22,28 @@ gatewayen_servers gatewaylb_servers [gatewayen_servers] -#tristan2-1.vpc.cloudera.com host_template=HostTemplate-GatewayEdge +#tristan3-1.vpc.cloudera.com host_template=HostTemplate-GatewayEdge [gatewaylb_servers] -tristan2-4.vpc.cloudera.com host_template=HostTemplate-GatewayLB +tristan3-4.vpc.cloudera.com host_template=HostTemplate-GatewayLB [master_servers] -tristan2-1.vpc.cloudera.com host_template=HostTemplate-Master1 -tristan2-2.vpc.cloudera.com 
host_template=HostTemplate-Master2 -tristan2-3.vpc.cloudera.com host_template=HostTemplate-Master3 +tristan3-1.vpc.cloudera.com host_template=HostTemplate-Master1 +tristan3-2.vpc.cloudera.com host_template=HostTemplate-Master2 +tristan3-3.vpc.cloudera.com host_template=HostTemplate-Master3 [worker_servers] -tristan2-5.vpc.cloudera.com -tristan2-6.vpc.cloudera.com -tristan2-7.vpc.cloudera.com +tristan3-5.vpc.cloudera.com +tristan3-6.vpc.cloudera.com +tristan3-7.vpc.cloudera.com [worker_servers:vars] host_template=HostTemplate-Workers #host_template=HostTemplate-Kafka [kms_servers] -tristan2-8.vpc.cloudera.com -tristan2-9.vpc.cloudera.com +tristan3-8.vpc.cloudera.com +tristan3-9.vpc.cloudera.com [cdh_servers:children] utility_servers @@ -53,11 +53,11 @@ worker_servers kms_servers [kts_servers] -tristan2-10.vpc.cloudera.com host_template=KeyTrusteeActive -tristan2-11.vpc.cloudera.com host_template=KeyTrusteePassive +tristan3-10.vpc.cloudera.com host_template=KeyTrusteeActive +tristan3-11.vpc.cloudera.com host_template=KeyTrusteePassive [haproxy] -tristan2-1.vpc.cloudera.com +tristan3-1.vpc.cloudera.com [all:vars] # 'ad', 'mit', or 'none' to disable security diff --git a/roles/cdh/templates/base.j2 b/roles/cdh/templates/base.j2 index 2267d1f..3ad0de2 100644 --- a/roles/cdh/templates/base.j2 +++ b/roles/cdh/templates/base.j2 @@ -1,6 +1,14 @@ { "cdhVersion" : "{{ cdh_version }}", "displayName" : "{{ cluster_display_name }}", + "repositories" : [ + {% for repo in scm_repositories %} + "{{ repo }}" + {% if not loop.last %} + , + {% endif %} + {% endfor %} + ], "cmVersion" : "{{ scm_version }}", "products" : [ {% set prod_j = joiner(",") %} diff --git a/roles/cm_agents/tasks/main.yml b/roles/cm_agents/tasks/main.yml index 0cbd75d..94a34bb 100644 --- a/roles/cm_agents/tasks/main.yml +++ b/roles/cm_agents/tasks/main.yml @@ -2,8 +2,6 @@ - include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" -- include: 36322.yml - - name: Install Cloudera Manager Agents yum: name: diff --git a/roles/dir_teardown/tasks/main.yml b/roles/dir_teardown/tasks/main.yml index 83eaa4e..ec71c89 100644 --- a/roles/dir_teardown/tasks/main.yml +++ b/roles/dir_teardown/tasks/main.yml @@ -68,7 +68,7 @@ - name: KTS Data Dir file: - path: "{{ item }}" + path: "{{ item.value }}" state: absent - with_items: "{{ kts_services[0] }}" + with_dict: "{{ kts_services[0] }}" when: "'kts_servers' in group_names" diff --git a/roles/dn_dir_creation/tasks/main.yml b/roles/dn_dir_creation/tasks/main.yml deleted file mode 100644 index fe7cec3..0000000 --- a/roles/dn_dir_creation/tasks/main.yml +++ /dev/null @@ -1,37 +0,0 @@ -- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" - -- name: Permission data dirs on Worker Nodes - file: - path: "{{ item }}" - owner: root - group: root - mode: '0755' - when: scm_version[0] == "5" - with_items: - - /data - - /data/1 - - /data/2 - - /data/3 - - /data/4 - - /data/5 - - /data/6 - - /data/7 - - /data/8 - -- name: Create DFS dirs - file: - path: "{{ item }}" - state: directory - owner: root - group: root - mode: '0755' - when: scm_version[0] == "5" - with_items: - - /data/1/dfs - - /data/2/dfs - - /data/3/dfs - - /data/4/dfs - - /data/5/dfs - - /data/6/dfs - - /data/7/dfs - - /data/8/dfs diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml index cbc980b..6d8690e 100644 --- a/roles/haproxy/tasks/main.yml +++ b/roles/haproxy/tasks/main.yml @@ -1,6 +1,8 @@ --- - include_vars: "{{ inventory_dir }}/group_vars/cdh_servers.yml" - include_vars: "{{ inventory_dir 
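# The dir_teardown fix above swaps with_items for with_dict because
# kts_services[0] is a mapping, not a list; each loop item then exposes .key
# and .value. Minimal sketch (the paths shown are illustrative):
- hosts: localhost
  gather_facts: false
  vars:
    kts_services:
      - keytrustee_server_keytrustee_home: /var/lib/keytrustee/.keytrustee
        keytrustee_server_DB_ACTIVE_BASE_db_root: /var/lib/keytrustee/db
  tasks:
    - debug:
        msg: "would remove {{ item.value }}"
      with_dict: "{{ kts_services[0] }}"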
}}/group_vars/tls_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/pki.yml" +- include_vars: "{{ inventory_dir }}/group_vars/ca.yml" - name: Install HAProxy yum: name=haproxy state=latest @@ -13,7 +15,10 @@ with_items: "{{ groups['haproxy'] }}" - name: Create combined key and pem - shell: "cat /opt/cloudera/security/x509/localhost.pem /opt/cloudera/security/x509/localhost.key | sed -ne '/-BEGIN/,/-END/p' > /etc/haproxy/combinedKeyAndCert.pem" + shell: "cat {{ tls.tls_cert }} > /etc/haproxy/combinedKeyAndCert.pem" + +- name: Create combined key and pem + shell: "{{ openssl_path }} rsa -in {{ tls.private_key }} -passin pass:{{ keystore_password }} >> /etc/haproxy/combinedKeyAndCert.pem" - name: Set combined key and pem permissions file: diff --git a/roles/kms_encryption_zones/tasks/main.yml b/roles/kms_encryption_zones/tasks/main.yml index 2d1bc8f..0870873 100644 --- a/roles/kms_encryption_zones/tasks/main.yml +++ b/roles/kms_encryption_zones/tasks/main.yml @@ -107,7 +107,7 @@ user: "{{ scm_default_user }}" password: "{{ scm_default_pass }}" return_content: yes - run_once: true + run_once: true - name: Restart Cluster uri: diff --git a/roles/kms_key_sync/tasks/main.yml b/roles/kms_key_sync/tasks/main.yml index 11cbe5e..9db4b64 100644 --- a/roles/kms_key_sync/tasks/main.yml +++ b/roles/kms_key_sync/tasks/main.yml @@ -34,8 +34,8 @@ copy: src: "/tmp/kms/{{ hostvars[groups['kms_servers'][0]]['inventory_hostname'] }}{{ kms_key_dir }}/keytrustee/.keytrustee" dest: "{{ kms_key_dir }}/keytrustee/" - owner: keytrustee-kms - group: keytrustee-kms + owner: kms + group: kms mode: '0600' - name: Delete local directories for kts files diff --git a/roles/kts_key_sync/tasks/main.yml b/roles/kts_key_sync/tasks/main.yml index 96ce084..075e107 100644 --- a/roles/kts_key_sync/tasks/main.yml +++ b/roles/kts_key_sync/tasks/main.yml @@ -291,9 +291,12 @@ run_once: true register: hdfs_resp +- set_fact: + cdh_version: "{{ scm_products | json_query('[?product==`CDH`].version') | first }}" + - name: Check CDH parcel is activated on all hosts uri: - url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/parcels/products/CDH/versions/6.2.0-1.cdh6.2.0.p0.967373/" + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/parcels/products/CDH/versions/{{ cdh_version }}/" method: GET status_code: 200,404 force_basic_auth: yes @@ -306,9 +309,12 @@ delay: 30 run_once: true +- set_fact: + kms_version: "{{ scm_products | json_query('[?product==`KEYTRUSTEE`].version') | first }}" + - name: Check KEYTRUSTEE parcel is activated on all hosts uri: - url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/parcels/products/KEYTRUSTEE/versions/6.1.0-1.KEYTRUSTEE6.1.0.p0.592714/" + url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/parcels/products/KEYTRUSTEE/versions/{{ kms_version }}/" method: GET status_code: 200,404 force_basic_auth: yes diff --git a/roles/nn_dir_creation/tasks/main.yml b/roles/nn_dir_creation/tasks/main.yml deleted file mode 100644 index 606f637..0000000 --- a/roles/nn_dir_creation/tasks/main.yml +++ /dev/null @@ -1,52 +0,0 @@ -- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" - -- name: Permission data dirs on Master Nodes - file: - path: "{{ item }}" - owner: root - group: root - mode: '0755' - when: scm_version[0] == "5" - with_items: - - /data - - /data/1 - - /data/2 - -- name: Create DFS dirs - file: - path: "{{ item }}" - state: directory - owner: root - group: root - mode: '0755' - when: scm_version[0] == "5" - with_items: - - /data/1/dfs - - /data/2/dfs - -- name: 
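# The two shell steps above assemble HAProxy's combined PEM: first the leaf
# certificate, then the private key decrypted with openssl rsa. A hedged
# single-task sketch of the same thing, adding no_log so the keystore
# password never lands in the task log (variable names are this repo's):
- name: Build HAProxy combined cert and key bundle
  shell: >
    cat {{ tls.tls_cert }} > /etc/haproxy/combinedKeyAndCert.pem &&
    {{ openssl_path }} rsa -in {{ tls.private_key }}
    -passin pass:{{ keystore_password }} >> /etc/haproxy/combinedKeyAndCert.pem
  no_log: true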
Create NN dirs - file: - path: /data/1/dfs/nn - state: directory - owner: hdfs - group: hadoop - mode: '0700' - when: scm_version[0] == "5" - -- name: Create SNN dirs - file: - path: /data/1/dfs/snn - state: directory - owner: hdfs - group: hadoop - mode: '0700' - when: scm_version[0] == "5" - -- name: Create JN dirs - file: - path: /data/2/dfs/jn - state: directory - owner: hdfs - group: hadoop - mode: '0700' - when: scm_version[0] == "5" diff --git a/roles/cm_agents/tasks/36322.yml b/roles/pre_reqs/36322.yml similarity index 100% rename from roles/cm_agents/tasks/36322.yml rename to roles/pre_reqs/36322.yml diff --git a/roles/pre_reqs/tasks/main.yml b/roles/pre_reqs/tasks/main.yml index 41e0a76..3d762d3 100644 --- a/roles/pre_reqs/tasks/main.yml +++ b/roles/pre_reqs/tasks/main.yml @@ -1,4 +1,7 @@ - include_vars: "{{ inventory_dir }}/group_vars/tls_enc.yml" +- include_vars: "{{ inventory_dir }}/group_vars/kms_servers.yml" + +- include: 36322.yml - name: change swappiness value sysctl: @@ -135,13 +138,12 @@ - name: Yum Install required KRB5 packages yum: - name: "{{ item }}" + name: + - krb5-workstation + - krb5-libs + - openldap + - openldap-clients state: present - with_items: - - krb5-workstation - - krb5-libs - - openldap - - openldap-clients - name: Deploy KRB5 config template: @@ -197,3 +199,15 @@ done args: executable: /bin/bash + +- name: Create KMS Directories + file: + path: "{{ item }}" + state: directory + owner: kms + group: kms + mode: '0700' + with_items: + - "{{ kms_key_dir }}" + - "{{ kms_conf_dir }}" + when: "'kms_servers' in group_names" diff --git a/roles/scm/tasks/main.yml b/roles/scm/tasks/main.yml index 5276e9c..37ee6c1 100644 --- a/roles/scm/tasks/main.yml +++ b/roles/scm/tasks/main.yml @@ -69,7 +69,7 @@ - set_fact: cm_api_url="http://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port }}/api/{{ result.content }}" # Install Cloudera Manager Python API -# - include: api.yml +- include: api.yml # Retrieve auto-generated host IDs from SCM - name: Get SCM hostIds for inventory hosts @@ -78,8 +78,6 @@ register: scm_hosts_result vars: use_tls: False -# environment: -# PYTHONPATH: "{{ ansible_env.PYTHONPATH }}:/usr/lib/python2.7/site-packages/cm_api-19.0.0-py2.7.egg" - set_fact: scm_host_ids="{{ scm_hosts_result.host_ids }}" diff --git a/roles/scm/templates/cms_base.j2 b/roles/scm/templates/cms_base.j2 index 9b941b5..4abf6f9 100644 --- a/roles/scm/templates/cms_base.j2 +++ b/roles/scm/templates/cms_base.j2 @@ -167,9 +167,11 @@ }, { "name": "oom_heap_dump_enabled", "value": "false" + {% if (headlamp_heapsize is defined) %} }, { "name": "headlamp_heapsize", - "value": "8589934592" + "value": "{{ headlamp_heapsize }}" + {% endif %} } ] } @@ -233,9 +235,11 @@ }, { "name": "oom_heap_dump_enabled", "value": "false" + {% if (navigator_audit_heapsize is defined) %} }, { "name": "navigator_heapsize", - "value": "10737418240" + "value": "{{ navigator_audit_heapsize }}" + {% endif %} } ] } @@ -306,9 +310,11 @@ }, { "name" : "nav_nt_domain", "value" : "{{ ldap_rdom.domain }}", + {% if (navigator_metastore_heapsize is defined) %} }, { "name" : "navigator_heapsize", - "value" : "34359738368", + "value" : "{{ navigator_metastore_heapsize }}", + {% endif %} }, { "name" : "oom_heap_dump_enabled", "value" : "false", @@ -347,12 +353,16 @@ { "name" : "firehose_debug_port", "value" : "-1", + {% if (smon_heapsize is defined) %} }, { "name": "firehose_heapsize", - "value": "2147483648" + "value": "{{ smon_heapsize }}" + {% endif %} + {% if (smon_non_java_memory is defined) %} }, { 
"name": "firehose_non_java_memory_bytes", - "value": "12884901888" + "value": "{{ smon_non_java_memory }}" + {% endif %} }, { "name": "firehose_storage_dir", "value": "{{ smon_firehose_storage_dir }}" diff --git a/site.yml b/site.yml index 35ba57a..9fd8647 100644 --- a/site.yml +++ b/site.yml @@ -2,25 +2,25 @@ # Cloudera playbook - name: Apply pre-reqs fix - hosts: cdh_servers:kts_servers:kms_servers + hosts: all roles: - pre_reqs tags: [pre_reqs, pre_build] - name: Configure Cloudera Manager Repository - hosts: cdh_servers:kts_servers:kms_servers + hosts: all roles: - cm_repo tags: [cm_repo, pre_build] - name: Install rngd - hosts: cdh_servers:kts_servers:kms_servers + hosts: all roles: - rngd tags: [rngd, pre_build] - name: Install Java - hosts: cdh_servers:kts_servers:kms_servers + hosts: all roles: - java tags: [java, pre_build] @@ -70,25 +70,11 @@ tags: certs_signed_install - name: Install Cloudera Manager Agents - hosts: cdh_servers:kts_servers:kms_servers + hosts: all roles: - cm_agents tags: cm_agents -# Must be done after the cm_agents role otherwise the local users do not exist -- name: Create Data Node directories - hosts: worker_servers - roles: - - dn_dir_creation - tags: dn_dir_creation - -# Must be done after the cm_agents role otherwise the local users do not exist -- name: Create Master Node Directories - hosts: master_servers - roles: - - nn_dir_creation - tags: nn_dir_creation - - name: Install Cloudera Manager Python API hosts: scm_server roles: @@ -108,7 +94,7 @@ # tags: [cm_roles, cm_install] - name: Install Cloudera Manager Agents - tls - hosts: cdh_servers:kts_servers:kms_servers + hosts: all roles: - cm_agents_tls tags: [cm_agents_tls, cm_install] @@ -126,6 +112,7 @@ - { role: kts, when: hdfs_tde_enabled == 'True' } tags: - kts_cluster_template + - kts - name: Sync KTS keys hosts: kts_servers @@ -133,6 +120,7 @@ - { role: kts_key_sync, when: hdfs_tde_enabled == 'True' } tags: - kts_key_sync + - kts - name: Sync KMS keys hosts: kms_servers @@ -140,6 +128,7 @@ - { role: kms_key_sync, when: hdfs_tde_enabled == 'True' } tags: - kms_key_sync + - kms - name: Create Encryption Zones hosts: master_servers @@ -147,6 +136,7 @@ - { role: kms_encryption_zones, when: hdfs_tde_enabled == 'True' } tags: - enc_zones + - kms - name: Install and Configure HAProxy hosts: haproxy From e28c90d844bc7170e4047275684c7497f549cdc0 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 7 Feb 2020 03:42:11 -0800 Subject: [PATCH 07/15] 5.16.2 build now works --- action_plugins/scm_hosts.py | 122 ------------------ action_plugins/scm_hosts.pyc | Bin 3411 -> 0 bytes group_vars/all | 64 +++++++++ group_vars/cdh_servers.yml | 3 +- group_vars/scm_server.yml | 19 +-- hosts | 2 +- roles/cdh/tasks/main.yml | 16 --- roles/cdh/templates/base.j2 | 6 +- roles/cdh/templates/hdfs.j2 | 7 +- roles/cdh/templates/hive.j2 | 2 + roles/cdh/templates/host.j2 | 12 ++ roles/cdh/templates/hue.j2 | 4 + roles/cdh/templates/instantiator.j2 | 2 +- roles/cm_agents_teardown/tasks/main.yml | 1 + roles/cm_agents_tls/tasks/main.yml | 9 +- roles/cm_repo/tasks/main.yml | 9 +- roles/kms_encryption_zones/tasks/main.yml | 3 + roles/kms_key_sync/tasks/main.yml | 3 + roles/kts/templates/kts.j2 | 4 +- roles/kts_key_sync/tasks/main.yml | 31 +++-- roles/kts_key_sync/templates/kms.j2 | 6 +- roles/kts_key_sync/templates/kmsRCG.j2 | 2 + roles/kts_key_sync/templates/kmshosts.j2 | 6 +- roles/kts_key_sync/templates/scm_host_list.j2 | 16 +++ roles/scm/tasks/api.yml | 8 -- roles/scm/tasks/main.yml | 72 ++++++----- roles/scm/templates/cms_base.j2 | 24 
++-- roles/scm/templates/scm.j2 | 2 +- roles/scm/templates/scm_host_list.j2 | 16 +++ roles/yum_teardown_cm_agent/tasks/main.yml | 10 +- roles/yum_teardown_cm_server/tasks/main.yml | 8 +- 31 files changed, 238 insertions(+), 251 deletions(-) delete mode 100755 action_plugins/scm_hosts.py delete mode 100644 action_plugins/scm_hosts.pyc create mode 100644 roles/kts_key_sync/templates/scm_host_list.j2 delete mode 100644 roles/scm/tasks/api.yml create mode 100644 roles/scm/templates/scm_host_list.j2 diff --git a/action_plugins/scm_hosts.py b/action_plugins/scm_hosts.py deleted file mode 100755 index b82f699..0000000 --- a/action_plugins/scm_hosts.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python - -# (c) Copyright 2016 Cloudera, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ansible.plugins.action import ActionBase -from cm_api.api_client import ApiException -from cm_api.api_client import ApiResource -import sys -try: - from __main__ import display -except ImportError: - from ansible.utils.display import Display - - display = Display() - - -class ActionModule(ActionBase): - """ Returns map of inventory hosts and their associated SCM hostIds """ - - def run(self, tmp=None, task_vars=None): - if task_vars is None: - task_vars = dict() - - result = super(ActionModule, self).run(tmp, task_vars) - - host_ids = {} - host_names = {} - - # Get SCM host details from inventory - try: - scm_host = task_vars["groups"]["scm_server"][0] - scm_port = task_vars["scm_port"] - scm_user = task_vars["scm_default_user"] - scm_pass = task_vars["scm_default_pass"] - scm_tls = task_vars["scm_web_tls"] - scm_port_tls = task_vars.get("scm_port_tls") - use_tls = task_vars.get("use_tls") - - except KeyError as e: - result['failed'] = True - result['msg'] = e.message - return result - - if use_tls is None or use_tls == False: - scm_port_in_use = scm_port - tls=False - else: - scm_port_in_use = scm_port_tls - tls=True - - api = self.get_api_handle(scm_host, scm_port_in_use, scm_user, scm_pass, tls) - scm_host_list = api.get_all_hosts() - display.vv("Retrieved %d host(s) from SCM" % len(scm_host_list)) - - if len(scm_host_list) == 0: - result['failed'] = True - result['msg'] = "No hosts defined in SCM" - return result - - for inv_host in task_vars["hostvars"]: - host = str(inv_host) - found_host = False - for scm_host in scm_host_list: - try: - if scm_host.hostname == task_vars["hostvars"][host]["inventory_hostname"]: - found_host = True - elif scm_host.ipAddress == task_vars["hostvars"][host]["inventory_hostname"]: - found_host = True - elif "private_ip" in task_vars["hostvars"][host]: - if scm_host.ipAddress == task_vars["hostvars"][host]["private_ip"]: - found_host = True - - if found_host: - host_ids[host] = scm_host.hostId - host_names[host] = scm_host.hostname - display.vv("Inventory host '%s', SCM hostId: '%s', SCM hostname: '%s'" - % (host, scm_host.hostId, scm_host.hostname)) - break - except KeyError as e: - display.vv("Key '%s' not defined for inventory host '%s'" % (e.message, host)) - continue 
- - if not found_host: - display.vv("Unable to determine SCM host details for inventory host '%s'" % host) - continue - - display.vv("host_ids: %s" % host_ids) - display.vv("host_names: %s" % host_names) - result['changed'] = True - result['host_ids'] = host_ids - result['host_names'] = host_names - return result - - @staticmethod - def get_api_handle(host, port='7180', user='admin', passwd='admin', tls=False): - """ - Get a handle to the CM API client - :param host: Hostname of the Cloudera Manager Server (CMS) - :param port: Port of the server - :param user: SCM username - :param passwd: SCM password - :param tls: Whether to use TLS - :return: Resource object referring to the root - """ - api = None - try: - api = ApiResource(host, port, user, passwd, tls) - except ApiException: - pass - return api diff --git a/action_plugins/scm_hosts.pyc b/action_plugins/scm_hosts.pyc deleted file mode 100644 index ccad8a229bf5d813e366b8de2116d8000daa66ac..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3411 [3411 bytes of base85-encoded .pyc data omitted] diff --git a/group_vars/all b/group_vars/all index 25114e1..db4e432 100644 --- a/group_vars/all +++ b/group_vars/all @@ -13,6 +13,70 @@ enc_types: rc4-hmac DES-CBC-MD5 DES-CBC-CRC ad_account_prefix: prefix_ kdc_account_creation_host_override: tristan3-1.vpc.cloudera.com +## ------------------------------------------------------------------------------------------------------------ +## Cluster software installation options +## ------------------------------------------------------------------------------------------------------------ + +# Version of CDH to install +cluster_version_cdh: 5.16.2 +#cluster_version_cdh: 7.x + +# Version of Cloudera Manager to install +cluster_version_cm: 5.16.2 +#cluster_version_cm: "{{ cluster_version_cdh }}" +#cluster_version_cm: 7.x.0 + +# Version of CDS Powered by Apache Spark (note: not installed if CDH6/7 is also selected) +cluster_version_cds: 2.4.0.cloudera2 + +# Helper variables for major and minor versions +cluster_version_cdh_major: "{{ cluster_version_cdh.split('.')[0] }}" +cluster_version_cdh_minor: "{{ cluster_version_cdh.split('.')[1] }}" +cluster_version_cm_major: "{{ 
cluster_version_cm.split('.')[0] }}" +cluster_version_cm_minor: "{{ cluster_version_cm.split('.')[1] }}" + +cloudera_archive_protocol: https:// +cloudera_archive: archive.cloudera.com +cloudera_archive_authn: "" + +configs_by_version: + "5": + scm_repo_url: "{{ cloudera_archive_protocol }}{{ cloudera_archive }}/cm5/redhat/{{ ansible_distribution_major_version }}/x86_64/cm/{{ cluster_version_cm }}/" + scm_repo_gpgkey: "{{ cloudera_archive_protocol }}{{ cloudera_archive }}/cm5/redhat/{{ ansible_distribution_major_version }}/x86_64/cm/RPM-GPG-KEY-cloudera" + scm_parcel_repositories: + - "{{ cloudera_archive_protocol }}{{ cloudera_archive }}/cdh5/parcels/{{ cluster_version_cdh }}/" + - "{{ cloudera_archive_protocol }}{{ cloudera_archive }}/spark2/parcels/{{ cluster_version_cds }}/" + - "http://cloudera-build-3-us-central-1.gce.cloudera.com/s3/build/723506/parcels" + - "http://cloudera-build-3-us-central-1.gce.cloudera.com/s3/build/338985/parcels" + scm_csds: + - "{{ cloudera_archive_protocol }}{{ cloudera_archive }}/spark2/csd/SPARK2_ON_YARN-{{cluster_version_cds}}.jar" + scm_prepare_database_script_path: "/usr/share/cmf/schema/scm_prepare_database.sh" + "6": + scm_repo_url: "{{ cloudera_archive_protocol }}{{ cloudera_archive }}/cm6/{{ cluster_version_cm }}/redhat{{ ansible_distribution_major_version }}/yum" + scm_repo_gpgkey: "{{ cloudera_archive_protocol }}{{ cloudera_archive }}/cm6/{{ cluster_version_cm }}/redhat{{ ansible_distribution_major_version }}/yum/RPM-GPG-KEY-cloudera" + scm_parcel_repositories: + - "{{ cloudera_archive_protocol }}{{ cloudera_archive }}/cdh6/{{ cluster_version_cdh }}/parcels" + - http://tristan3-1.vpc.cloudera.com/parcels/keytrustee-kms-6.1.0-parcels/6.1.0/parcels/ + - http://tristan3-1.vpc.cloudera.com/parcels/keytrustee-server-6.1.0-parcels/6.1.0/parcels/ + scm_prepare_database_script_path: "/opt/cloudera/cm/schema/scm_prepare_database.sh" + "7": + scm_repo_url: "{{ cloudera_archive_protocol }}{{ cloudera_archive_authn }}@{{ cloudera_archive }}/p/cm7/{{ cluster_version_cm }}/redhat{{ ansible_distribution_major_version }}/yum" + scm_repo_gpgkey: "{{ cloudera_archive_protocol }}{{ cloudera_archive_authn }}@{{ cloudera_archive }}/p/cm7/{{ cluster_version_cm }}/redhat{{ ansible_distribution_major_version }}/yum/RPM-GPG-KEY-cloudera" + scm_parcel_repositories: + - "{{ cloudera_archive_protocol }}{{ cloudera_archive_authn }}@{{ cloudera_archive }}/p/cdh7/{{ cluster_version_cdh }}/parcels" + scm_prepare_database_script_path: "/opt/cloudera/cm/schema/scm_prepare_database.sh" + +scm_default_user: admin +scm_default_pass: admin +scm_port: 7180 +scm_license_file: /path/to/cloudera_license.txt +scm_parcel_repositories: "{{ configs_by_version[cluster_version_cdh_major].scm_parcel_repositories }}" +scm_prepare_database_script_path: "{{ configs_by_version[cluster_version_cm_major].scm_prepare_database_script_path }}" +scm_repo_url: "{{ configs_by_version[cluster_version_cm_major].scm_repo_url }}" +scm_repo_gpgkey: "{{ configs_by_version[cluster_version_cm_major].scm_repo_gpgkey }}" +scm_csds: "{{ configs_by_version[cluster_version_cm_major].scm_csds | default([]) }}" + + ## ------------------------------------------------------------------------------------------------------------ ## Java installation options ## ------------------------------------------------------------------------------------------------------------ diff --git a/group_vars/cdh_servers.yml b/group_vars/cdh_servers.yml index 61a0773..55ad662 100644 --- a/group_vars/cdh_servers.yml +++ 
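# configs_by_version above keys every repo URL, GPG key and script path by
# major version, so switching between 5.x/6.x/7.x is a one-line change to
# cluster_version_cm. Trimmed sketch of the lookup chain:
- hosts: localhost
  gather_facts: false
  vars:
    cluster_version_cm: 5.16.2
    cluster_version_cm_major: "{{ cluster_version_cm.split('.')[0] }}"
    configs_by_version:
      "5": { scm_prepare_database_script_path: /usr/share/cmf/schema/scm_prepare_database.sh }
      "6": { scm_prepare_database_script_path: /opt/cloudera/cm/schema/scm_prepare_database.sh }
  tasks:
    - debug:
        msg: "{{ configs_by_version[cluster_version_cm_major].scm_prepare_database_script_path }}"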
b/group_vars/cdh_servers.yml @@ -3,7 +3,6 @@ db_hostname: "{{ hostvars[groups['db_server'][0]]['inventory_hostname'] }}" scm_hostname: "{{ hostvars[groups['scm_server'][0]]['inventory_hostname'] }}" -cdh_version: 6.2.0 cluster_display_name: Cluster1 cdh_tls: true @@ -57,6 +56,8 @@ cdh_services: - type: solr - type: spark + + - type: spark2 - type: yarn yarn_nodemanager_local_dirs: /data/1/yarn/nm diff --git a/group_vars/scm_server.yml b/group_vars/scm_server.yml index 314fb3f..7ceaca3 100644 --- a/group_vars/scm_server.yml +++ b/group_vars/scm_server.yml @@ -1,6 +1,5 @@ --- -scm_version: 6.3.x scm_port: 7180 scm_port_tls: 7183 scm_hostname: "{{ hostvars[groups['scm_server'][0]]['inventory_hostname'] }}" @@ -9,24 +8,16 @@ scm_web_tls: True banner_text: "Ansible Cluster Build" banner_colour: RED -yum_repo_base: http://cloudera-build-3-us-west-2.vpc.cloudera.com/s3/build/1712953/cm6/6.3.x - -scm_repositories: - - http://cloudera-build-3-us-west-2.vpc.cloudera.com/s3/build/1746013/cdh6/6.3.x/parcels/ - - http://tristan3-1.vpc.cloudera.com/parcels/keytrustee-kms-6.1.0-parcels/6.1.0/parcels/ - - http://tristan3-1.vpc.cloudera.com/parcels/keytrustee-server-6.1.0-parcels/6.1.0/parcels/ -# - http:///PARCELS/KTS/6.1.0/ -# - http:///PARCELS/KMS/6.1.0/ -# - http:///PARCELS/ANACONDA/4.4.1/ -# - http:///PARCELS/Oracle/134/ - scm_csd: - http:///CSD/CLOUDERA_DATA_SCIENCE_WORKBENCH-CDH5-1.4.2.jar # - http:///CSD/SPARK2_ON_YARN-2.3.0.cloudera4.jar scm_products: - product: CDH - version: 6.3.x-1.cdh6.3.x.p0.1746013 + version: 5.16.2-1.cdh5.16.2.p0.8 + + - product: SPARK2 + version: 2.4.0.cloudera2-1.cdh5.13.3.p0.1041012 # - product: KAFKA # version: 4.0.0-1.4.0.0.p0.1 @@ -45,7 +36,7 @@ kts_products: version: 6.1.0-1.keytrustee6.1.0.p0.592761 - product: CDH - version: 6.3.x-1.cdh6.3.x.p0.1746013 + version: 5.16.2-1.cdh5.16.2.p0.8 oom_heap_dump_dir: /var/log/heapdumps eventserver_index_dir: /var/log/cloudera-scm-eventserver diff --git a/hosts b/hosts index 332658a..2e9a410 100644 --- a/hosts +++ b/hosts @@ -65,4 +65,4 @@ tristan3-1.vpc.cloudera.com krb5_kdc_type='mit' hdfs_tde_enabled='True' database_type='mysql' -full_teardown='False' +full_teardown='True' diff --git a/roles/cdh/tasks/main.yml b/roles/cdh/tasks/main.yml index 00ff17c..6fa1745 100644 --- a/roles/cdh/tasks/main.yml +++ b/roles/cdh/tasks/main.yml @@ -57,22 +57,6 @@ retries: 5 delay: 30 -# Install Cloudera Manager Python API -# - include: api.yml - -# Retrieve auto-generated host IDs from SCM -#- name: Get SCM hostIds for inventory hosts -# become: true -# action: scm_hosts -# register: scm_hosts_result - -#- set_fact: scm_host_ids="{{ scm_hosts_result.host_ids }}" -#- debug: var=scm_host_ids - -#- set_fact: scm_host_names="{{ scm_hosts_result.host_names }}" -#- debug: var=scm_host_names - - # Check whether cluster already exists # https://cloudera.github.io/cm_api/apidocs/v13/path__clusters.html diff --git a/roles/cdh/templates/base.j2 b/roles/cdh/templates/base.j2 index 3ad0de2..867626d 100644 --- a/roles/cdh/templates/base.j2 +++ b/roles/cdh/templates/base.j2 @@ -1,15 +1,15 @@ { - "cdhVersion" : "{{ cdh_version }}", + "cdhVersion" : "{{ cluster_version_cdh }}", "displayName" : "{{ cluster_display_name }}", "repositories" : [ - {% for repo in scm_repositories %} + {% for repo in scm_parcel_repositories %} "{{ repo }}" {% if not loop.last %} , {% endif %} {% endfor %} ], - "cmVersion" : "{{ scm_version }}", + "cmVersion" : "{{ cluster_version_cm }}", "products" : [ {% set prod_j = joiner(",") %} {% for product in scm_products %} diff --git 
a/roles/cdh/templates/hdfs.j2 b/roles/cdh/templates/hdfs.j2 index 6d77dc7..9221ec6 100644 --- a/roles/cdh/templates/hdfs.j2 +++ b/roles/cdh/templates/hdfs.j2 @@ -254,10 +254,13 @@ }, { "refName": "HDFS-1-BALANCER-BASE", "roleType": "BALANCER", - "configs": [{ + "configs": [ + {% if (cluster_version_cdh_major >= '6') %} + { "name" : "balancer_log_dir", "value" : "{{ log_base }}/hadoop-hdfs" - } ], + } + {% endif %} ], "base": true }, { "refName": "HDFS-1-GATEWAY-BASE", diff --git a/roles/cdh/templates/hive.j2 b/roles/cdh/templates/hive.j2 index 216b26e..4a80c12 100644 --- a/roles/cdh/templates/hive.j2 +++ b/roles/cdh/templates/hive.j2 @@ -107,9 +107,11 @@ }, { "name" : "hive_log_dir", "value" : "{{ log_base }}/hive" + {% if (cluster_version_cdh_major >= '6') %} } , { "name" : "hiveserver2_load_balancer", "value" : "{{ hostvars[groups['haproxy'][0]]['inventory_hostname'] }}:10000" + {% endif %} {% if (cdh_services | json_query('[?type==`hive`].hiveserver2_java_heapsize') | length > 0 ) %} }, { "name" : "hiveserver2_java_heapsize", diff --git a/roles/cdh/templates/host.j2 b/roles/cdh/templates/host.j2 index 9d662af..b3dd900 100644 --- a/roles/cdh/templates/host.j2 +++ b/roles/cdh/templates/host.j2 @@ -12,6 +12,9 @@ "OOZIE-1-OOZIE_SERVER-BASE", "HIVE-1-HIVESERVER2-BASE", "SPARK_ON_YARN-1-GATEWAY-BASE", + {% if cluster_version_cdh_major < '6' %} + "SPARK2_ON_YARN-1-GATEWAY-BASE", + {% endif %} "HBASE-1-HBASERESTSERVER-BASE", "HBASE-1-HBASETHRIFTSERVER-BASE", "HUE-1-HUE_LOAD_BALANCER-BASE", @@ -30,6 +33,9 @@ "HUE-1-KT_RENEWER-BASE", {% endif %} "SPARK_ON_YARN-1-GATEWAY-BASE", + {% if cluster_version_cdh_major < '6' %} + "SPARK2_ON_YARN-1-GATEWAY-BASE", + {% endif %} "HBASE-1-HBASERESTSERVER-BASE", "HBASE-1-HBASETHRIFTSERVER-BASE", "SENTRY-1-GATEWAY-BASE", @@ -69,6 +75,9 @@ "IMPALA-1-CATALOGSERVER-BASE", "IMPALA-1-STATESTORE-BASE", "SPARK_ON_YARN-1-SPARK_YARN_HISTORY_SERVER-BASE", + {% if cluster_version_cdh_major < '6' %} + "SPARK2_ON_YARN-1-SPARK2_YARN_HISTORY_SERVER-BASE", + {% endif %} "KS_INDEXER-1-HBASE_INDEXER-BASE", "HIVE-1-GATEWAY-BASE", "YARN-1-JOBHISTORY-BASE" ] @@ -82,6 +91,9 @@ "HBASE-1-REGIONSERVER-BASE", "SOLR-1-SOLR_SERVER-BASE", "HIVE-1-GATEWAY-BASE", + {% if cluster_version_cdh_major < '6' %} + "SPARK2_ON_YARN-1-GATEWAY-BASE", + {% endif %} "SPARK_ON_YARN-1-GATEWAY-BASE" ] } {% if (krb5_kdc_type == 'neverever') %}, { diff --git a/roles/cdh/templates/hue.j2 b/roles/cdh/templates/hue.j2 index ca5f824..b27990c 100644 --- a/roles/cdh/templates/hue.j2 +++ b/roles/cdh/templates/hue.j2 @@ -95,7 +95,11 @@ "value" : "{{ ldap_udom.url }}" }, { "name" : "auth_backend", + {% if (cluster_version_cdh_major >= '6') %} "value" : "desktop.auth.backend.LdapBackend,desktop.auth.backend.AllowFirstUserDjangoBackend" + {% else %} + "value" : "desktop.auth.backend.LdapBackend" + {% endif %} }, { "name" : "hbase_service", "ref" : "HBASE-1" diff --git a/roles/cdh/templates/instantiator.j2 b/roles/cdh/templates/instantiator.j2 index cda3c0b..7171aa8 100644 --- a/roles/cdh/templates/instantiator.j2 +++ b/roles/cdh/templates/instantiator.j2 @@ -18,7 +18,7 @@ "variables" : [ {% set var_joiner = joiner(",") %} {% for item in cdh_services %} - {% for (k,v) in item.iteritems() %} + {% for (k,v) in item.items() %} {% if not k|lower == 'type' %} {{ var_joiner() }} { diff --git a/roles/cm_agents_teardown/tasks/main.yml b/roles/cm_agents_teardown/tasks/main.yml index 4e602b2..41deba8 100644 --- a/roles/cm_agents_teardown/tasks/main.yml +++ b/roles/cm_agents_teardown/tasks/main.yml @@ -10,6 +10,7 @@ service: name: 
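{# The template guards above compare cluster_version_cdh_major as a *string*;
   that holds while majors are single digits, but lexicographically
   '10' < '6', so a future major would take the wrong branch. Casting first
   avoids the trap (sketch, reusing the balancer block from hdfs.j2): #}
{% if cluster_version_cdh_major | int >= 6 %}
{
  "name" : "balancer_log_dir",
  "value" : "{{ log_base }}/hadoop-hdfs"
}
{% endif %}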
supervisord state: stopped + ignore_errors: true - name: Delete CM agent run directories on all nodes shell: "rm /var/run/cloudera-scm-agent/process/* -rf" diff --git a/roles/cm_agents_tls/tasks/main.yml b/roles/cm_agents_tls/tasks/main.yml index 557d1f8..1bce8df 100644 --- a/roles/cm_agents_tls/tasks/main.yml +++ b/roles/cm_agents_tls/tasks/main.yml @@ -7,10 +7,11 @@ #- include: 36322.yml - name: Install Cloudera Manager Agents - yum: name={{ item }} state=installed - with_items: - - "cloudera-manager-daemons-{{ scm_version }}" - - "cloudera-manager-agent-{{ scm_version }}" + yum: + name: + - cloudera-manager-daemons + - cloudera-manager-agent + state: installed - name: Deploy CM Agent config ini template: diff --git a/roles/cm_repo/tasks/main.yml b/roles/cm_repo/tasks/main.yml index 39ff8e8..0ae9519 100644 --- a/roles/cm_repo/tasks/main.yml +++ b/roles/cm_repo/tasks/main.yml @@ -1,11 +1,12 @@ --- -- include_vars: "{{ inventory_dir }}/group_vars/scm_server.yml" - name: Add Cloudera Manager yum repository yum_repository: name: cloudera-manager description: Cloudera Manager - baseurl: "{{ yum_repo_base }}/redhat{{ ansible_distribution_major_version }}/yum" - gpgcheck: no + baseurl: "{{ scm_repo_url }}" + gpgkey: "{{ scm_repo_gpgkey }}" + gpgcheck: yes enabled: yes - when: (ansible_distribution|lower == "redhat") or (ansible_distribution|lower == "centos") + when: + - ansible_os_family|lower == "redhat" diff --git a/roles/kms_encryption_zones/tasks/main.yml b/roles/kms_encryption_zones/tasks/main.yml index 0870873..7625efe 100644 --- a/roles/kms_encryption_zones/tasks/main.yml +++ b/roles/kms_encryption_zones/tasks/main.yml @@ -115,6 +115,9 @@ method: POST status_code: 200 body_format: json + body: + restartOnlyStaleServices: false + redeployClientConfiguration: false force_basic_auth: yes user: "{{ scm_default_user }}" password: "{{ scm_default_pass }}" diff --git a/roles/kms_key_sync/tasks/main.yml b/roles/kms_key_sync/tasks/main.yml index 9db4b64..c4929f2 100644 --- a/roles/kms_key_sync/tasks/main.yml +++ b/roles/kms_key_sync/tasks/main.yml @@ -71,6 +71,9 @@ user: "{{ scm_default_user }}" password: "{{ scm_default_pass }}" body_format: "json" + body: + restartOnlyStaleServices: true + redeployClientConfiguration: true return_content: yes register: cluster_restart_resp run_once: true diff --git a/roles/kts/templates/kts.j2 b/roles/kts/templates/kts.j2 index 0866b47..adf5e5e 100644 --- a/roles/kts/templates/kts.j2 +++ b/roles/kts/templates/kts.j2 @@ -1,7 +1,7 @@ { - "cdhVersion" : "6.0.0", + "cdhVersion" : "{{ cluster_version_cdh_major }}.0.0", "displayName" : "{{ kts_display_name }}", - "cmVersion" : "{{ scm_version }}", + "cmVersion" : "{{ cluster_version_cm }}", "products" : [ {% set prod_j = joiner(",") %} {% for product in kts_products %} {{ prod_j() }} diff --git a/roles/kts_key_sync/tasks/main.yml b/roles/kts_key_sync/tasks/main.yml index 075e107..5a46a47 100644 --- a/roles/kts_key_sync/tasks/main.yml +++ b/roles/kts_key_sync/tasks/main.yml @@ -194,18 +194,25 @@ - debug: var=kts_auth_secret -- name: Get SCM hostIds for inventory hosts - become: true - action: scm_hosts - register: scm_hosts_result - vars: - use_tls: True - # environment: - # PYTHONPATH: "{{ ansible_env.PYTHONPATH }}:/usr/lib/python2.7/site-packages/cm_api-19.0.0-py2.7.egg" - -- set_fact: scm_host_ids="{{ scm_hosts_result.host_ids }}" - -- set_fact: scm_host_names="{{ scm_hosts_result.host_names }}" +- name: Get the host identifiers and names from Cloudera Manager + uri: + url: "{{ cm_api_url }}/hosts" + method: GET + 
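# The explicit JSON bodies added to the restart calls above make the intent
# unambiguous instead of relying on API defaults; restartOnlyStaleServices
# and redeployClientConfiguration are the documented arguments of the CM
# restart command. Hedged sketch of the call shape:
- name: Restart only stale services via the CM API
  uri:
    url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/commands/restart"
    method: POST
    body_format: json
    body:
      restartOnlyStaleServices: true
      redeployClientConfiguration: true
    force_basic_auth: yes
    user: "{{ scm_default_user }}"
    password: "{{ scm_default_pass }}"
    status_code: 200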
status_code: 200 + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + force_basic_auth: yes + return_content: yes + register: scm_host_list + +- name: Extract the host identifiers and names into facts + set_fact: + scm_hosts: "{{ lookup('template', 'scm_host_list.j2') | from_yaml }}" + +- name: Print the extracted host identifiers and names + debug: + var: scm_hosts + verbosity: 2 - name: Prepare hosts template local_action: diff --git a/roles/kts_key_sync/templates/kms.j2 b/roles/kts_key_sync/templates/kms.j2 index eeb7e6d..5a50c33 100644 --- a/roles/kts_key_sync/templates/kms.j2 +++ b/roles/kts_key_sync/templates/kms.j2 @@ -18,14 +18,12 @@ { "type" : "KMS_KEYTRUSTEE", "hostRef" : { - "hostId" : "{{ scm_host_ids[hostvars[groups['kms_servers'][0]]['inventory_hostname']] }}", - "hostname" : "{{ hostvars[groups['kms_servers'][0]]['inventory_hostname'] }}" + "hostId" : "{{ scm_hosts['ids'][hostvars[groups['kms_servers'][0]]['inventory_hostname']] }}" } }, { "type" : "KMS_KEYTRUSTEE", "hostRef" : { - "hostId" : "{{ scm_host_ids[hostvars[groups['kms_servers'][1]]['inventory_hostname']] }}", - "hostname" : "{{ hostvars[groups['kms_servers'][1]]['inventory_hostname'] }}" + "hostId" : "{{ scm_hosts['ids'][hostvars[groups['kms_servers'][1]]['inventory_hostname']] }}" } } ] diff --git a/roles/kts_key_sync/templates/kmsRCG.j2 b/roles/kts_key_sync/templates/kmsRCG.j2 index c3f4973..72c3834 100644 --- a/roles/kts_key_sync/templates/kmsRCG.j2 +++ b/roles/kts_key_sync/templates/kmsRCG.j2 @@ -16,9 +16,11 @@ }, { "name" : "ssl_server_keystore_password", "value" : "{{ tls.keystore_password }}" + {% if (cluster_version_cdh_major >= '6') %} }, { "name" : "ssl_server_keystore_keypassword", "value" : "{{ tls.keystore_password }}" + {% endif %} }, { "name" : "ssl_enabled", "value" : "true" diff --git a/roles/kts_key_sync/templates/kmshosts.j2 b/roles/kts_key_sync/templates/kmshosts.j2 index d23da53..593702e 100644 --- a/roles/kts_key_sync/templates/kmshosts.j2 +++ b/roles/kts_key_sync/templates/kmshosts.j2 @@ -1,11 +1,9 @@ { "items" : [ { - "hostId" : "{{ scm_host_ids[hostvars[groups['kms_servers'][0]]['inventory_hostname']] }}", - "hostname" : "{{ hostvars[groups['kms_servers'][0]]['inventory_hostname'] }}" + "hostId" : "{{ scm_hosts['ids'][hostvars[groups['kms_servers'][0]]['inventory_hostname']] }}" }, { - "hostId" : "{{ scm_host_ids[hostvars[groups['kms_servers'][1]]['inventory_hostname']] }}", - "hostname" : "{{ hostvars[groups['kms_servers'][1]]['inventory_hostname'] }}" + "hostId" : "{{ scm_hosts['ids'][hostvars[groups['kms_servers'][1]]['inventory_hostname']] }}" } ] } diff --git a/roles/kts_key_sync/templates/scm_host_list.j2 b/roles/kts_key_sync/templates/scm_host_list.j2 new file mode 100644 index 0000000..7eb9a0b --- /dev/null +++ b/roles/kts_key_sync/templates/scm_host_list.j2 @@ -0,0 +1,16 @@ +{%- set scm_hosts = { "ids" : {}, "names" : {} } -%} +{%- for host, vars in hostvars.items() -%} + {%- for scm_host in scm_host_list['json']['items'] -%} + {%- set found_host = False -%} + {%- if scm_host.hostname == vars.inventory_hostname or scm_host.ipAddress == vars.inventory_hostname -%} + {%- set found_host = True -%} + {%- elif alternative_ip|default('private_ip') in vars and scm_host.ipAddress == vars[alternative_ip|default('private_ip')] -%} + {%- set found_host = True -%} + {%- endif -%} + {%- if found_host -%} + {%- set x = scm_hosts.ids.__setitem__(host, scm_host.hostId) -%} + {%- set x = scm_hosts.names.__setitem__(host, scm_host.hostname) -%} + {%- endif -%} + {%- endfor -%} 
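# The /hosts call above (return_content plus register) exposes CM's
# ApiHostList payload as scm_host_list['json']. When inventory names match CM
# hostnames exactly, the host-id map can also be built directly with the
# dict/zip idiom; a hedged sketch, noting that scm_host_list.j2 additionally
# matches on IP address, which this shortcut does not:
- set_fact:
    scm_host_ids: >-
      {{ dict(scm_host_list['json']['items'] | map(attribute='hostname')
         | zip(scm_host_list['json']['items'] | map(attribute='hostId'))) }}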
+{%- endfor -%} +{{ scm_hosts|to_yaml }} diff --git a/roles/scm/tasks/api.yml b/roles/scm/tasks/api.yml deleted file mode 100644 index c7d62db..0000000 --- a/roles/scm/tasks/api.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -# https://cloudera.github.io/cm_api/docs/python-client/ - -- name: Install Python PIP - yum: name=python-pip state=latest update_cache=yes - -- name: Install CM Python API Client - pip: name=cm-api diff --git a/roles/scm/tasks/main.yml b/roles/scm/tasks/main.yml index 37ee6c1..e229043 100644 --- a/roles/scm/tasks/main.yml +++ b/roles/scm/tasks/main.yml @@ -15,33 +15,28 @@ - openldap-clients state: installed -#To Do -#Change scm_dir to use if statement - previously /usr/share/cmf/ -#Remove MySQL ref - - name: Prepare Cloudera Manager Server External Database - command: "{{ scm_dir }}/schema/scm_prepare_database.sh - -f - --host {{ databases.scm.host }} - --port {{ databases.scm.port }} - {{ databases.scm.type }} {{ databases.scm.name }} {{ databases.scm.user }} {{ databases.scm.pass }}" + command: | + {{ scm_prepare_database_script_path }} -f + --host {{ hostvars[db_hostname]['inventory_hostname'] }} + {{ database_type }} {{ databases.scm.name }} {{ databases.scm.user }} {{ databases.scm.pass }} changed_when: False -#- name: Download & Install Custom Service Descriptors - Spark2 -# get_url: -# url: http://archive.cloudera.com/spark2/CSD/SPARK2_ON_YARN-2.3.0.cloudera4.jar -# dest: /opt/cloudera/csd -# mode: 0600 -# owner: cloudera-scm -# group: cloudera-scm +- name: Create CSD directory + file: + path: /opt/cloudera/csd + state: directory + owner: cloudera-scm + group: cloudera-scm + mode: 0755 -#- name: Download & Install Custom Service Descriptors - CDSW -# get_url: -# url: http://archive.cloudera.com/cdsw1/CSD/CLOUDERA_DATA_SCIENCE_WORKBENCH-CDH5-1.4.2.jar -# dest: /opt/cloudera/csd -# mode: 0600 -# owner: cloudera-scm -# group: cloudera-scm +- name: Download CSDs + get_url: + url: "{{ item }}" + dest: /opt/cloudera/csd + mode: 0644 + with_items: "{{ scm_csds }}" + when: scm_csds is defined - name: Start the Cloudera Manager Server service: name={{ item }} state=restarted enabled=yes @@ -68,20 +63,29 @@ # Set base CM API URL - set_fact: cm_api_url="http://{{ hostvars[scm_hostname]['inventory_hostname'] }}:{{ scm_port }}/api/{{ result.content }}" -# Install Cloudera Manager Python API -- include: api.yml +- debug: + var: cm_api_url + verbosity: 1 -# Retrieve auto-generated host IDs from SCM -- name: Get SCM hostIds for inventory hosts - become: true - action: scm_hosts - register: scm_hosts_result - vars: - use_tls: False +- name: Get the host identifiers and names from Cloudera Manager + uri: + url: "{{ cm_api_url }}/hosts" + method: GET + status_code: 200 + user: "{{ scm_default_user }}" + password: "{{ scm_default_pass }}" + force_basic_auth: yes + return_content: yes + register: scm_host_list -- set_fact: scm_host_ids="{{ scm_hosts_result.host_ids }}" +- name: Extract the host identifiers and names into facts + set_fact: + scm_hosts: "{{ lookup('template', 'scm_host_list.j2') | from_yaml }}" -- set_fact: scm_host_names="{{ scm_hosts_result.host_names }}" +- name: Print the extracted host identifiers and names + debug: + var: scm_hosts + verbosity: 2 - include: license.yml - include: scm.yml diff --git a/roles/scm/templates/cms_base.j2 b/roles/scm/templates/cms_base.j2 index 4abf6f9..b91c669 100644 --- a/roles/scm/templates/cms_base.j2 +++ b/roles/scm/templates/cms_base.j2 @@ -17,7 +17,7 @@ "name": "mgmt-SERVICEMONITOR", "type": "SERVICEMONITOR", "hostRef": { - "hostId": 
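{# scm_host_list.j2 above mutates its result dicts with __setitem__ inside
   the loop because a plain {% set %} does not escape loop scope in Jinja2;
   assigning the (None) return value to a throwaway variable keeps it out of
   the rendered output. The idiom in isolation (sketch): #}
{%- set acc = {} -%}
{%- for h in ['host-a', 'host-b'] -%}
  {%- set _ = acc.__setitem__(h, loop.index) -%}
{%- endfor -%}
{{ acc | to_yaml }}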
"{{ scm_host_ids[scm_hostname] }}" + "hostId": "{{ scm_hosts['ids'][scm_hostname] }}" } }, { "name": "mgmt-ACTIVITYMONITOR", @@ -26,13 +26,13 @@ "roleConfigGroupName": "mgmt-ACTIVITYMONITOR-BASE" }, "hostRef": { - "hostId": "{{ scm_host_ids[scm_hostname] }}" + "hostId": "{{ scm_hosts['ids'][scm_hostname] }}" } }, { "name": "mgmt-HOSTMONITOR", "type": "HOSTMONITOR", "hostRef": { - "hostId": "{{ scm_host_ids[scm_hostname] }}" + "hostId": "{{ scm_hosts['ids'][scm_hostname] }}" } }, { "name": "mgmt-REPORTSMANAGER", @@ -41,19 +41,19 @@ "roleConfigGroupName": "mgmt-REPORTSMANAGER-BASE" }, "hostRef": { - "hostId": "{{ scm_host_ids[scm_hostname] }}" + "hostId": "{{ scm_hosts['ids'][scm_hostname] }}" } }, { "name": "mgmt-EVENTSERVER", "type": "EVENTSERVER", "hostRef": { - "hostId": "{{ scm_host_ids[scm_hostname] }}" + "hostId": "{{ scm_hosts['ids'][scm_hostname] }}" } }, { "name": "mgmt-ALERTPUBLISHER", "type": "ALERTPUBLISHER", "hostRef": { - "hostId": "{{ scm_host_ids[scm_hostname] }}" + "hostId": "{{ scm_hosts['ids'][scm_hostname] }}" } }, { "name": "mgmt-NAVIGATOR", @@ -62,7 +62,7 @@ "roleConfigGroupName": "mgmt-NAVIGATOR-BASE" }, "hostRef": { - "hostId": "{{ scm_host_ids[scm_hostname] }}" + "hostId": "{{ scm_hosts['ids'][scm_hostname] }}" } }, { "name": "mgmt-NAVIGATORMETASERVER", @@ -71,7 +71,7 @@ "roleConfigGroupName": "mgmt-NAVIGATORMETASERVER-BASE" }, "hostRef": { - "hostId": "{{ scm_host_ids[scm_hostname] }}" + "hostId": "{{ scm_hosts['ids'][scm_hostname] }}" } } ], @@ -104,9 +104,11 @@ }, { "name" : "firehose_debug_port", "value" : "-1" + {% if (cluster_version_cdh_major >= '6') %} }, { "name" : "firehose_debug_tls_port", "value" : "-1" + {% endif %} }, { "name": "oom_heap_dump_dir", "value": "{{ oom_heap_dump_dir }}" @@ -388,12 +390,16 @@ { "name" : "firehose_debug_port", "value" : "-1", + {% if (cluster_version_cdh_major >= '6') %} }, { "name" : "firehose_debug_tls_port", "value" : "-1", + {% endif %} + {% if (firehose_non_java_memory_bytes is defined) %} }, { "name": "firehose_non_java_memory_bytes", - "value": "4294967296" + "value": "{{ firehose_non_java_memory_bytes }}" + {% endif %} }, { "name": "firehose_storage_dir", "value": "{{ hmon_firehose_storage_dir }}" diff --git a/roles/scm/templates/scm.j2 b/roles/scm/templates/scm.j2 index 2309e5e..d0afc5f 100644 --- a/roles/scm/templates/scm.j2 +++ b/roles/scm/templates/scm.j2 @@ -114,7 +114,7 @@ }, { "name" : "REMOTE_PARCEL_REPO_URLS", "value" : {% set repo_j = joiner(",") %} - "{% for repo in scm_repositories %}{{ repo_j() }}{{ repo }}{% endfor %}" + "{% for repo in scm_parcel_repositories %}{{ repo_j() }}{{ repo }}{% endfor %}" } ] } diff --git a/roles/scm/templates/scm_host_list.j2 b/roles/scm/templates/scm_host_list.j2 new file mode 100644 index 0000000..7eb9a0b --- /dev/null +++ b/roles/scm/templates/scm_host_list.j2 @@ -0,0 +1,16 @@ +{%- set scm_hosts = { "ids" : {}, "names" : {} } -%} +{%- for host, vars in hostvars.items() -%} + {%- for scm_host in scm_host_list['json']['items'] -%} + {%- set found_host = False -%} + {%- if scm_host.hostname == vars.inventory_hostname or scm_host.ipAddress == vars.inventory_hostname -%} + {%- set found_host = True -%} + {%- elif alternative_ip|default('private_ip') in vars and scm_host.ipAddress == vars[alternative_ip|default('private_ip')] -%} + {%- set found_host = True -%} + {%- endif -%} + {%- if found_host -%} + {%- set x = scm_hosts.ids.__setitem__(host, scm_host.hostId) -%} + {%- set x = scm_hosts.names.__setitem__(host, scm_host.hostname) -%} + {%- endif -%} + {%- endfor -%} +{%- 
endfor -%} +{{ scm_hosts|to_yaml }} diff --git a/roles/yum_teardown_cm_agent/tasks/main.yml b/roles/yum_teardown_cm_agent/tasks/main.yml index c3f8c16..981bd5f 100644 --- a/roles/yum_teardown_cm_agent/tasks/main.yml +++ b/roles/yum_teardown_cm_agent/tasks/main.yml @@ -1,8 +1,8 @@ --- - name: Remove the Cloudera Manager Agent Packages - yum: name={{ item }} state=installed - with_items: - - "cloudera-manager-daemons-{{ scm_version }}" - - "cloudera-manager-agent-{{ scm_version }}" - + yum: + name: + - "cloudera-manager-daemons" + - "cloudera-manager-agent" + state: absent diff --git a/roles/yum_teardown_cm_server/tasks/main.yml b/roles/yum_teardown_cm_server/tasks/main.yml index 830b4b0..b2731b0 100644 --- a/roles/yum_teardown_cm_server/tasks/main.yml +++ b/roles/yum_teardown_cm_server/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: Remove the Cloudera Manager Server Packages - yum: name={{ item }} state=absent - with_items: - - "cloudera-manager-server-{{ scm_version }}" - + yum: + name: + - "cloudera-manager-server" + state: absent From ff1939f4a74889be4cf11c0d738d1e66e6be69de Mon Sep 17 00:00:00 2001 From: root Date: Tue, 10 Mar 2020 06:25:39 -0700 Subject: [PATCH 08/15] Bugfix for CDH 5.16.2 --- roles/krb5_server/tasks/main.yml | 4 ++-- roles/kts_key_sync/tasks/main.yml | 4 ++-- roles/pre_reqs/tasks/main.yml | 14 ++++++++++++++ roles/pre_reqs/templates/krb5.conf.j2 | 2 +- 4 files changed, 19 insertions(+), 5 deletions(-) diff --git a/roles/krb5_server/tasks/main.yml b/roles/krb5_server/tasks/main.yml index 58a01b8..6e0fbd8 100644 --- a/roles/krb5_server/tasks/main.yml +++ b/roles/krb5_server/tasks/main.yml @@ -3,12 +3,12 @@ - name: Install KRB5 workstation yum: name=krb5-workstation state=latest delegate_to: "{{ item }}" - with_items: "{{ groups['cdh_servers'] }}" + with_items: "{{ groups['all'] }}" - name: Set krb5.conf template: src=krb5.conf.j2 dest=/etc/krb5.conf backup=yes delegate_to: "{{ item }}" - with_items: "{{ groups['cdh_servers'] }}" + with_items: "{{ groups['all'] }}" - name: Install KRB5 server yum: name={{ item }} state=latest diff --git a/roles/kts_key_sync/tasks/main.yml b/roles/kts_key_sync/tasks/main.yml index 5a46a47..af2ab0f 100644 --- a/roles/kts_key_sync/tasks/main.yml +++ b/roles/kts_key_sync/tasks/main.yml @@ -227,7 +227,7 @@ method: POST body_format: json body: "{{ lookup('file', ''+ tmp_dir + '/kmshosts.json') }}" - status_code: 200 + status_code: 200,400 force_basic_auth: yes user: "{{ scm_default_user }}" password: "{{ scm_default_pass }}" @@ -334,7 +334,7 @@ delay: 5 run_once: true -- name: Restart KMS Service +- name: Start KMS Service uri: url: "{{ cm_api_url }}/clusters/{{ cluster_display_name }}/services/keytrustee/commands/start" method: POST diff --git a/roles/pre_reqs/tasks/main.yml b/roles/pre_reqs/tasks/main.yml index 3d762d3..0b8f074 100644 --- a/roles/pre_reqs/tasks/main.yml +++ b/roles/pre_reqs/tasks/main.yml @@ -1,5 +1,6 @@ - include_vars: "{{ inventory_dir }}/group_vars/tls_enc.yml" - include_vars: "{{ inventory_dir }}/group_vars/kms_servers.yml" +- include_vars: "{{ inventory_dir }}/group_vars/kts_servers.yml" - include: 36322.yml @@ -211,3 +212,16 @@ - "{{ kms_key_dir }}" - "{{ kms_conf_dir }}" when: "'kms_servers' in group_names" + +- name: Create KTS Directories + file: + path: "{{ item }}" + state: directory + owner: keytrustee + group: keytrustee + mode: '0700' + with_items: + - "{{ kts_services[0].keytrustee_server_DB_PASSIVE_BASE_db_root }}" + - "{{ kts_services[0].keytrustee_server_DB_ACTIVE_BASE_db_root }}" + - "{{ 
kts_services[0].keytrustee_server_keytrustee_home }}" + when: "'kts_servers' in group_names" diff --git a/roles/pre_reqs/templates/krb5.conf.j2 b/roles/pre_reqs/templates/krb5.conf.j2 index 56294b4..6492985 100644 --- a/roles/pre_reqs/templates/krb5.conf.j2 +++ b/roles/pre_reqs/templates/krb5.conf.j2 @@ -1,7 +1,7 @@ # Configuration snippets may be placed in this directory as well includedir /etc/krb5.conf.d/ -includedir /var/lib/sss/pubconf/krb5.include.d/ +#includedir /var/lib/sss/pubconf/krb5.include.d/ [logging] default = FILE:/var/log/krb5libs.log From 945246e30185dad97b65fddfd67a1635e7480af8 Mon Sep 17 00:00:00 2001 From: Dave Beech Date: Fri, 30 Aug 2019 15:24:01 +0100 Subject: [PATCH 09/15] Auto-discovery of product names/versions from parcel repos Conflicts: group_vars/scm_server.yml --- group_vars/scm_server.yml | 26 -------------------------- roles/cdh/tasks/main.yml | 13 +++++++++++++ roles/cdh/templates/base.j2 | 4 ++-- 3 files changed, 15 insertions(+), 28 deletions(-) diff --git a/group_vars/scm_server.yml b/group_vars/scm_server.yml index 7ceaca3..297e022 100644 --- a/group_vars/scm_server.yml +++ b/group_vars/scm_server.yml @@ -12,32 +12,6 @@ scm_csd: - http:///CSD/CLOUDERA_DATA_SCIENCE_WORKBENCH-CDH5-1.4.2.jar # - http:///CSD/SPARK2_ON_YARN-2.3.0.cloudera4.jar -scm_products: - - product: CDH - version: 5.16.2-1.cdh5.16.2.p0.8 - - - product: SPARK2 - version: 2.4.0.cloudera2-1.cdh5.13.3.p0.1041012 - - # - product: KAFKA - # version: 4.0.0-1.4.0.0.p0.1 - - - product: KEYTRUSTEE - version: 6.1.0-1.KEYTRUSTEE6.1.0.p0.592714 - -# - product: ORACLE_INSTANT_CLIENT -# version: 11.2-1.oracleinstantclient1.0.0.p0.134 - - # - product: KEYTRUSTEE - # version: 6.1.0.p0.592714 - -kts_products: - - product: KEYTRUSTEE_SERVER - version: 6.1.0-1.keytrustee6.1.0.p0.592761 - - - product: CDH - version: 5.16.2-1.cdh5.16.2.p0.8 - oom_heap_dump_dir: /var/log/heapdumps eventserver_index_dir: /var/log/cloudera-scm-eventserver hmon_firehose_storage_dir: /var/log/cloudera-host-monitor diff --git a/roles/cdh/tasks/main.yml b/roles/cdh/tasks/main.yml index 6fa1745..1c70ccc 100644 --- a/roles/cdh/tasks/main.yml +++ b/roles/cdh/tasks/main.yml @@ -75,6 +75,19 @@ - debug: msg="Cluster '{{ cluster_display_name }}' exists - {{ cluster_exists }}" +- name: Discover product versions from parcel manifests + uri: + url: "{{ item }}/manifest.json" + status_code: 200 + body_format: json + return_content: yes + register: manifests + with_items: + - "{{ scm_repositories }}" + +- set_fact: + scm_products: "{{ manifests.results | map(attribute='json') | list | json_query('[*].parcels[0].parcelName') | map('regex_replace', '-[a-z0-9]+.parcel$','') | list }}" + # https://www.cloudera.com/documentation/enterprise/latest/topics/install_cluster_template.html - name: Prepare cluster template template: diff --git a/roles/cdh/templates/base.j2 b/roles/cdh/templates/base.j2 index 867626d..48112be 100644 --- a/roles/cdh/templates/base.j2 +++ b/roles/cdh/templates/base.j2 @@ -15,8 +15,8 @@ {% for product in scm_products %} {{ prod_j() }} { - "version" : "{{ product['version'] }}", - "product" : "{{ product['product'] }}" + "product" : "{{ product.split('-')[0] }}", + "version" : "{{ '-'.join(product.split('-')[1:]) }}" } {% endfor %} ], From e182a036ce1d4b071d1954c7c604e30c179055b5 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 10 Mar 2020 06:45:36 -0700 Subject: [PATCH 10/15] Moved 36322 --- roles/pre_reqs/{ => tasks}/36322.yml | 2 ++ 1 file changed, 2 insertions(+) rename roles/pre_reqs/{ => tasks}/36322.yml (95%) diff --git 
a/roles/pre_reqs/36322.yml b/roles/pre_reqs/tasks/36322.yml similarity index 95% rename from roles/pre_reqs/36322.yml rename to roles/pre_reqs/tasks/36322.yml index cc22d76..8f76538 100644 --- a/roles/pre_reqs/36322.yml +++ b/roles/pre_reqs/tasks/36322.yml @@ -58,3 +58,5 @@ - file: path=/var/lib/solr state=directory owner=solr group=solr mode=0755 - file: path=/var/lib/spark state=directory owner=spark group=spark mode=0755 - file: path=/var/lib/sentry state=directory owner=sentry group=sentry mode=0755 +- file: path=/var/lib/keytrustee state=directory owner=keytrustee group=keytrustee mode=0755 +- file: path=/var/lib/kms state=directory owner=kms group=kms mode=0755 From 5d1e2ae705d690242b87e050d979c1bbe01d41c7 Mon Sep 17 00:00:00 2001 From: bhagya7893 Date: Tue, 24 Mar 2020 16:20:14 -0700 Subject: [PATCH 11/15] 389 Ldap Server Setup Adding the role for ldap server setup --- .../configure-ldap-server.yaml | 13 +++++ roles/389-ldap-server/defaults/main.yaml | 14 ++++++ roles/389-ldap-server/main.yaml | 6 +++ roles/389-ldap-server/prepare-server.yaml | 48 +++++++++++++++++++ roles/389-ldap-server/templates/ldap.inf.j2 | 23 +++++++++ 5 files changed, 104 insertions(+) create mode 100644 roles/389-ldap-server/configure-ldap-server.yaml create mode 100644 roles/389-ldap-server/defaults/main.yaml create mode 100644 roles/389-ldap-server/main.yaml create mode 100644 roles/389-ldap-server/prepare-server.yaml create mode 100644 roles/389-ldap-server/templates/ldap.inf.j2 diff --git a/roles/389-ldap-server/configure-ldap-server.yaml b/roles/389-ldap-server/configure-ldap-server.yaml new file mode 100644 index 0000000..cbf1446 --- /dev/null +++ b/roles/389-ldap-server/configure-ldap-server.yaml @@ -0,0 +1,13 @@ +--- +- block: + - name: Check that server is already installed + command: systemctl is-enabled dirsrv@{{ serverid }} + register: results_raw + changed_when: false + + rescue: + - name: Copy installation template + template: src=templates/ldap.inf.j2 dest=/root/ldap.inf + + - name: Run installation script setup-ds-admin.pl + shell: /usr/sbin/setup-ds-admin.pl -s -f /root/ldap.inf \ No newline at end of file diff --git a/roles/389-ldap-server/defaults/main.yaml b/roles/389-ldap-server/defaults/main.yaml new file mode 100644 index 0000000..4aba4e4 --- /dev/null +++ b/roles/389-ldap-server/defaults/main.yaml @@ -0,0 +1,14 @@ +--- +# Configuration type +# if it is set - preparation and 389-ds configuration activities will be skipped (usually to add a new replication agreement) + skip_config: false + +# General 389-ds settings + password: password + suffix: dc=mit,dc=example,dc=com + rootdn: cn=root + serverid: ldapsrv + +# Admin server settings + admin_password: password + admin_domain: mit.example.com \ No newline at end of file diff --git a/roles/389-ldap-server/main.yaml b/roles/389-ldap-server/main.yaml new file mode 100644 index 0000000..4f4a81e --- /dev/null +++ b/roles/389-ldap-server/main.yaml @@ -0,0 +1,6 @@ +--- +- include: prepare-server.yaml + when: not skip_config + +- include: configure-ldap-server.yaml + when: not skip_config \ No newline at end of file diff --git a/roles/389-ldap-server/prepare-server.yaml b/roles/389-ldap-server/prepare-server.yaml new file mode 100644 index 0000000..6d20e19 --- /dev/null +++ b/roles/389-ldap-server/prepare-server.yaml @@ -0,0 +1,48 @@ +--- +- name: Configure EPEL repository + yum: name=epel-release state=present + +- name: Install packages + yum: name={{item}} state=present + with_items: + - 389-ds-base + - 389-admin + - openldap-clients + +-
name: Configure sysctl variables + sysctl: name={{item.name}} value={{item.value}} sysctl_set=yes state=present + with_items: + - {name: net.ipv4.tcp_keepalive_time, value: 300} + - {name: net.ipv4.ip_local_port_range, value: "1024 65000"} + - {name: fs.file-max, value: 64000} + +- name: Configure file descriptors for dirsrv systemd service + lineinfile: + dest=/etc/sysconfig/dirsrv.systemd + state=present + insertafter="^\[Service\]" + line=LimitNOFILE=8192 + +#- name: Configure group ldap +# group: name=ldap gid=389 + +- name: Configure user ldap + user: name=ldap comment="Dirsrv user" uid=389 group=ldap shell=/sbin/nologin + +- name: Check that firewalld service is active + command: systemctl is-active firewalld + register: firewalld_results + ignore_errors: true + changed_when: false + +- block: + - debug: msg="firewalld service is running" + - name: Allow ldap port on firewalld + firewalld: service=ldap permanent=true state=enabled + - name: Allow port 9830 on firewalld + firewalld: port=9830/tcp permanent=true state=enabled + when: "'active' in firewalld_results.stdout" + +# - name: Configuring SELinux +# selinux: policy=targeted state=permissive +# \ No newline at end of file diff --git a/roles/389-ldap-server/templates/ldap.inf.j2 b/roles/389-ldap-server/templates/ldap.inf.j2 new file mode 100644 index 0000000..418c8f4 --- /dev/null +++ b/roles/389-ldap-server/templates/ldap.inf.j2 @@ -0,0 +1,23 @@ +[General] +FullMachineName= {{ ansible_nodename }} +SuiteSpotUserID= ldap +SuiteSpotGroup= ldap +AdminDomain= {{ admin_domain }} +ConfigDirectoryAdminID= admin +ConfigDirectoryAdminPwd= {{ admin_password }} +ConfigDirectoryLdapURL= ldap://{{ ansible_nodename }}:389/o=NetscapeRoot +[slapd] +SlapdConfigForMC= Yes +UseExistingMC= 0 +ServerPort= 389 +ServerIdentifier= {{ serverid }} +Suffix= {{ suffix }} +RootDN= {{ rootdn }} +RootDNPwd= {{ password }} +#ds_bename=exampleDB +AddSampleEntries= No +[admin] +Port= 9830 +ServerIpAddress= 0.0.0.0 +ServerAdminID= admin +ServerAdminPwd= {{ admin_password }} \ No newline at end of file From 176befc8f0e72d760ab81afeaa478e456f78ffb1 Mon Sep 17 00:00:00 2001 From: bhagya7893 Date: Thu, 2 Apr 2020 00:04:09 -0700 Subject: [PATCH 12/15] Add files via upload --- roles/389-ldap-server/defaults/main.yaml | 25 ++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/roles/389-ldap-server/defaults/main.yaml b/roles/389-ldap-server/defaults/main.yaml index 4aba4e4..fe26a4a 100644 --- a/roles/389-ldap-server/defaults/main.yaml +++ b/roles/389-ldap-server/defaults/main.yaml @@ -6,9 +6,30 @@ # General 389-ds settings password: password suffix: dc=mit,dc=example,dc=com - rootdn: cn=root + rootdn: cn=Directory Manager serverid: ldapsrv # Admin server settings admin_password: password - admin_domain: mit.example.com \ No newline at end of file + admin_domain: mit.example.com + + server_uri: "ldap://bg-ldap-10.vpc.cloudera.com:389" + dc: "dc=mit,dc=example,dc=com" + ou_people: "ou=People" + ou_group: "ou=Group" + bind_dn: uid=admin,{{ ou_people }},{{ dc }} + bind_pw: password + ldap_lookup_config: + url: "{{ server_uri }}" + base: "{{ dc }}" + binddn: "{{ bind_dn }}" + bindpw: "{{ bind_pw }}" + scope: subtree + filter: (objectClass=*) + + AllUidNumber: + base: "{{ ou_people }},{{ dc }}" + value: + - AllUidNumber: always_list=True + + job: none \ No newline at end of file
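[Editor's note on the bind settings above: they can be exercised directly against the directory before any lookup plugin consumes them. A minimal sanity-check task, sketched on the assumption that the role's own variables (server_uri, bind_dn, bind_pw, dc) are in scope and that ldapsearch is present from the openldap-clients package installed by prepare-server.yaml; it is not part of the patch series:

    # Hypothetical sanity check -- not part of the patches above.
    # Uses only the role's own defaults; ldapsearch is provided by openldap-clients.
    - name: Verify the admin bind DN can authenticate against the directory
      command: >
        ldapsearch -x -H {{ server_uri }}
        -D "{{ bind_dn }}" -w {{ bind_pw }}
        -b "{{ dc }}" -s base "(objectClass=*)" dn
      register: bind_check
      changed_when: false

A failed simple bind makes ldapsearch exit non-zero (LDAP result 49, invalid credentials), so the task fails fast on a wrong bind_pw without any extra assertion.]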
From 5195a790fb1660e7636ca472bff339522ae2a690 Mon Sep 17 00:00:00 2001 From: bhagya7893 Date: Thu, 2 Apr 2020 00:06:22 -0700 Subject: [PATCH 13/15] Adding ldap entry task This will add a specific Unix user through an LDAP add operation --- roles/389-ldap-server/ldap_entry.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 roles/389-ldap-server/ldap_entry.yml diff --git a/roles/389-ldap-server/ldap_entry.yml b/roles/389-ldap-server/ldap_entry.yml new file mode 100644 index 0000000..b691345 --- /dev/null +++ b/roles/389-ldap-server/ldap_entry.yml @@ -0,0 +1,15 @@ +--- +- name: "Create Account {{ ldap_param.uid }}" + ldap_entry: + dn: uid={{ ldap_param.uid }},{{ ou_people }},{{ dc }} + objectClass: + - shadowAccount + - top + - posixAccount + - inetAccount + - account + - ldapPublicKey + attributes: "{{ ldap_param }}" + server_uri: '{{ server_uri }}' + bind_dn: '{{ bind_dn }}' + bind_pw: '{{ bind_pw }}' \ No newline at end of file
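[Editor's note: the task file above is parameterised on an ldap_param dictionary; its uid key lands in the DN and the whole dictionary is passed through as the entry's attributes. A hypothetical caller, where every attribute value is illustrative rather than taken from the repo (posixAccount requires at least cn, uid, uidNumber, gidNumber and homeDirectory):

    # Hypothetical caller of ldap_entry.yml; all values below are illustrative.
    - include: roles/389-ldap-server/ldap_entry.yml
      vars:
        ldap_param:
          uid: jdoe
          cn: jdoe
          uidNumber: "10001"
          gidNumber: "10001"
          homeDirectory: /home/jdoe
          loginShell: /bin/bash]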
"{{ item[3] }}" + server_uri: "{{ server_url }}" + bind_dn: "{{ rootdn }}" + bind_pw: "{{ password }}" + loop: + - "{{ ['uid=testuser1','1004','testuser1','pass1'] }}" + - "{{ ['uid=testuser2','1008','testuser2','pass2'] }}" + - "{{ ['uid=testuser3','1004','testuser3','pass3'] }}" + - "{{ ['uid=testuser4','1008','testuser4','pass4'] }}" + - "{{ ['uid=testuser5','1003','testuser5','pass5'] }}" \ No newline at end of file diff --git a/roles/389-ldap-server/main.yaml b/roles/389-ldap-server/main.yaml index 4f4a81e..e387598 100644 --- a/roles/389-ldap-server/main.yaml +++ b/roles/389-ldap-server/main.yaml @@ -1,6 +1,34 @@ --- -- include: prepare_server.yaml - when: not skip_config +# Configuration type +# if it is set - preparation and 389-ds configuration activities will be skipped (usually to add a new replication agreement) + skip_config: false + +# General 389-ds settings + password: password + suffix: dc=hadoop,dc=com + rootdn: cn=Directory Manger + serverid: ldapsrv + +# Admin server settings + admin_password: password + admin_domain: hadoop.com + + server_uri: "ldap://:389" + dc: "dc=hadoop,dc=com" + ou_people: "ou=People" + ou_group: "ou=Group" + #bind_dn: uid=admin,{{ ou_people }},{{ dc }} + bind_pw: password + ldap_lookup_config: + url: "{{ server_uri }}" + base: "{{ dc }}" + binddn: "{{ bind_dn }}" + bindpw: "{{ bind_pw }}" + scope: subtree + filter: (objectClass=*) + +# AllUidNumber: +# base: "{{ ou_people }},{{ dc }}" +# value: +# - AllUidNumber: always_list=True -- include: configure_ldap_server.yaml - when: not skip_config \ No newline at end of file diff --git a/roles/389-ldap-server/prepare-server.yaml b/roles/389-ldap-server/prepare-server.yaml index 6d20e19..91c6625 100644 --- a/roles/389-ldap-server/prepare-server.yaml +++ b/roles/389-ldap-server/prepare-server.yaml @@ -7,6 +7,7 @@ with_items: - 389-ds-base - 389-admin + - 389-adminutil - openldap-clients - name: Configure sysctl variables @@ -23,11 +24,11 @@ insertafter="^[Service]" line=LimitNOFILE=8192 -#- name: Configure group ldap -# group: name=ldap gid=389 +- name: Configure group ldap + group: name=ldap - name: Configure user ldap - user: name=ldap comment="Dirsrv user" uid=389 group=ldap shell=/sbin/nologin + user: name=ldap comment="Dirsrv user" group=ldap shell=/sbin/nologin - name: Check that firewalld service is active command: systemctl is-active firewalld @@ -42,7 +43,3 @@ - name: Allow port 9830 on firdwalld firewalld: port=9830/tcp permanent=true state=enabled when: "'active' in firewalld_results.stdout" - -# - name: Configuring SELinux -# selinux: policy=targeted state=permissive -# \ No newline at end of file