diff --git a/nifi-dependency-check-maven/suppressions.xml b/nifi-dependency-check-maven/suppressions.xml
index e71529a3dc6d..63871205e58e 100644
--- a/nifi-dependency-check-maven/suppressions.xml
+++ b/nifi-dependency-check-maven/suppressions.xml
@@ -79,66 +79,6 @@
        <packageUrl regex="true">^pkg:maven/org\.apache\.parquet/parquet\-avro@.*$</packageUrl>
        <cpe>cpe:/a:avro_project:avro</cpe>
-    <suppress>
-        <notes>CVE-2016-5397 applies to Apache Thrift Go not Java</notes>
-        <packageUrl regex="true">^pkg:maven/org\.apache\.thrift/libthrift@.*$</packageUrl>
-        <cve>CVE-2016-5397</cve>
-    </suppress>
-    <suppress>
-        <notes>CVE-2019-0210 applies to Apache Thrift Go server not Java</notes>
-        <packageUrl regex="true">^pkg:maven/org\.apache\.thrift/libthrift@.*$</packageUrl>
-        <cve>CVE-2019-0210</cve>
-    </suppress>
-    <suppress>
-        <notes>CVE-2018-11798 applies to Apache Thrift Node.js not Java</notes>
-        <packageUrl regex="true">^pkg:maven/org\.apache\.thrift/libthrift@.*$</packageUrl>
-        <cve>CVE-2018-11798</cve>
-    </suppress>
-    <suppress>
-        <notes>CVE-2019-11939 applies to Thrift Servers in Go not Java</notes>
-        <packageUrl regex="true">^pkg:maven/org\.apache\.thrift/libfb303@.*$</packageUrl>
-        <cve>CVE-2019-11939</cve>
-    </suppress>
-    <suppress>
-        <notes>CVE-2019-3552 applies to Thrift Servers in CPP not Java</notes>
-        <packageUrl regex="true">^pkg:maven/org\.apache\.thrift/libfb303@.*$</packageUrl>
-        <cve>CVE-2019-3552</cve>
-    </suppress>
-    <suppress>
-        <notes>CVE-2019-3553 applies to Thrift Servers in CPP not Java</notes>
-        <packageUrl regex="true">^pkg:maven/org\.apache\.thrift/libfb303@.*$</packageUrl>
-        <cve>CVE-2019-3553</cve>
-    </suppress>
-    <suppress>
-        <notes>CVE-2019-3558 applies to Thrift Servers in Python not Java</notes>
-        <packageUrl regex="true">^pkg:maven/org\.apache\.thrift/libfb303@.*$</packageUrl>
-        <cve>CVE-2019-3558</cve>
-    </suppress>
-    <suppress>
-        <notes>CVE-2019-3564 applies to Thrift Servers in Go not Java</notes>
-        <packageUrl regex="true">^pkg:maven/org\.apache\.thrift/libfb303@.*$</packageUrl>
-        <cve>CVE-2019-3564</cve>
-    </suppress>
-    <suppress>
-        <notes>CVE-2019-3565 applies to Thrift Servers in CPP not Java</notes>
-        <packageUrl regex="true">^pkg:maven/org\.apache\.thrift/libfb303@.*$</packageUrl>
-        <cve>CVE-2019-3565</cve>
-    </suppress>
-    <suppress>
-        <notes>CVE-2021-24028 applies to Facebook Thrift CPP</notes>
-        <packageUrl regex="true">^pkg:maven/org\.apache\.thrift/libfb303@.*$</packageUrl>
-        <cve>CVE-2021-24028</cve>
-    </suppress>
-    <suppress>
-        <notes>CVE-2019-11938 applies to Facebook Thrift Servers</notes>
-        <packageUrl regex="true">^pkg:maven/org\.apache\.thrift/libfb303@.*$</packageUrl>
-        <cve>CVE-2019-11938</cve>
-    </suppress>
-    <suppress>
-        <notes>CVE-2019-3559 applies to Facebook Thrift Servers</notes>
-        <packageUrl regex="true">^pkg:maven/org\.apache\.thrift/libfb303@.*$</packageUrl>
-        <cve>CVE-2019-3559</cve>
-    </suppress>
        <notes>CVE-2023-37475 applies to Hamba Avro in Go not Apache Avro for Java</notes>
        <packageUrl regex="true">^pkg:maven/org\.apache\.avro/.*$</packageUrl>
@@ -149,11 +89,6 @@
        <packageUrl regex="true">^pkg:maven/com\.azure/azure\-identity@.*$</packageUrl>
        <cve>CVE-2023-36415</cve>
-    <suppress>
-        <notes>CVE-2020-13949 applies to Thrift and not to Hive</notes>
-        <packageUrl regex="true">^pkg:maven/org\.apache\.hive.*$</packageUrl>
-        <cve>CVE-2020-13949</cve>
-    </suppress>
        <notes>Parquet MR vulnerabilities do not apply to other Parquet libraries</notes>
        <packageUrl regex="true">^pkg:maven/org\.apache\.parquet/parquet\-(?!mr).*$</packageUrl>
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-common/pom.xml b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-common/pom.xml
index 232068391bbe..ddeda6190138 100644
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-common/pom.xml
+++ b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-common/pom.xml
@@ -69,11 +69,6 @@
            <artifactId>iceberg-core</artifactId>
            <version>${iceberg.version}</version>
-        <dependency>
-            <groupId>org.apache.iceberg</groupId>
-            <artifactId>iceberg-hive-metastore</artifactId>
-            <version>${iceberg.version}</version>
-        </dependency>
            <groupId>org.apache.iceberg</groupId>
            <artifactId>iceberg-data</artifactId>
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-common/src/main/java/org/apache/nifi/processors/iceberg/catalog/IcebergCatalogFactory.java b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-common/src/main/java/org/apache/nifi/processors/iceberg/catalog/IcebergCatalogFactory.java
index eac198173546..e6ecdca9f92a 100644
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-common/src/main/java/org/apache/nifi/processors/iceberg/catalog/IcebergCatalogFactory.java
+++ b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-common/src/main/java/org/apache/nifi/processors/iceberg/catalog/IcebergCatalogFactory.java
@@ -22,7 +22,6 @@
import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.hadoop.HadoopCatalog;
-import org.apache.iceberg.hive.HiveCatalog;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.jdbc.JdbcCatalog;
import org.apache.iceberg.jdbc.JdbcClientPool;
@@ -37,7 +36,6 @@
import static org.apache.nifi.processors.iceberg.IcebergUtils.getConfigurationFromFiles;
import static org.apache.nifi.services.iceberg.IcebergCatalogProperty.CATALOG_NAME;
import static org.apache.nifi.services.iceberg.IcebergCatalogProperty.CLIENT_POOL_SERVICE;
-import static org.apache.nifi.services.iceberg.IcebergCatalogProperty.METASTORE_URI;
import static org.apache.nifi.services.iceberg.IcebergCatalogProperty.WAREHOUSE_LOCATION;
public class IcebergCatalogFactory {
@@ -50,35 +48,11 @@ public IcebergCatalogFactory(IcebergCatalogService catalogService) {
public Catalog create() {
return switch (catalogService.getCatalogType()) {
- case HIVE -> initHiveCatalog(catalogService);
case HADOOP -> initHadoopCatalog(catalogService);
case JDBC -> initJdbcCatalog(catalogService);
};
}
- private Catalog initHiveCatalog(IcebergCatalogService catalogService) {
- HiveCatalog catalog = new HiveCatalog();
-
- if (catalogService.getConfigFilePaths() != null) {
- final Configuration configuration = getConfigurationFromFiles(catalogService.getConfigFilePaths());
- catalog.setConf(configuration);
- }
-
- final Map<IcebergCatalogProperty, Object> catalogProperties = catalogService.getCatalogProperties();
- final Map<String, String> properties = new HashMap<>();
-
- if (catalogProperties.containsKey(METASTORE_URI)) {
- properties.put(CatalogProperties.URI, (String) catalogProperties.get(METASTORE_URI));
- }
-
- if (catalogProperties.containsKey(WAREHOUSE_LOCATION)) {
- properties.put(CatalogProperties.WAREHOUSE_LOCATION, (String) catalogProperties.get(WAREHOUSE_LOCATION));
- }
-
- catalog.initialize("hive-catalog", properties);
- return catalog;
- }
-
private Catalog initHadoopCatalog(IcebergCatalogService catalogService) {
 final Map<IcebergCatalogProperty, Object> catalogProperties = catalogService.getCatalogProperties();
final String warehousePath = (String) catalogProperties.get(WAREHOUSE_LOCATION);
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-processors/pom.xml b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-processors/pom.xml
index e16dd764b815..0a64fc78bc27 100644
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-processors/pom.xml
+++ b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-processors/pom.xml
@@ -53,209 +53,10 @@
-        <dependency>
-            <groupId>org.apache.hive</groupId>
-            <artifactId>hive-exec</artifactId>
-            <version>${hive.version}</version>
-            <classifier>core</classifier>
-            <exclusions>
-                <exclusion>
-                    <groupId>log4j</groupId>
-                    <artifactId>log4j</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.slf4j</groupId>
-                    <artifactId>slf4j-log4j12</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>commons-logging</groupId>
-                    <artifactId>commons-logging</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.logging.log4j</groupId>
-                    <artifactId>log4j-core</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.logging.log4j</groupId>
-                    <artifactId>log4j-web</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.logging.log4j</groupId>
-                    <artifactId>log4j-1.2-api</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.logging.log4j</groupId>
-                    <artifactId>log4j-slf4j-impl</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.hive</groupId>
-                    <artifactId>hive-llap-tez</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.calcite</groupId>
-                    <artifactId>calcite-core</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.calcite</groupId>
-                    <artifactId>calcite-druid</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.calcite.avatica</groupId>
-                    <artifactId>avatica</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>com.google.guava</groupId>
-                    <artifactId>guava</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-auth</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-yarn-common</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-yarn-server-common</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-yarn-registry</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.bouncycastle</groupId>
-                    <artifactId>bcprov-jdk15on</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.codehaus.groovy</groupId>
-                    <artifactId>groovy-all</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.ivy</groupId>
-                    <artifactId>ivy</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.eclipse.jetty</groupId>
-                    <artifactId>jetty-server</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
            <groupId>org.bouncycastle</groupId>
            <artifactId>bcprov-jdk18on</artifactId>
-        <dependency>
-            <groupId>org.apache.hive</groupId>
-            <artifactId>hive-metastore</artifactId>
-            <version>${hive.version}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>log4j</groupId>
-                    <artifactId>log4j</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.slf4j</groupId>
-                    <artifactId>slf4j-log4j12</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>commons-logging</groupId>
-                    <artifactId>commons-logging</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.logging.log4j</groupId>
-                    <artifactId>log4j-core</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.logging.log4j</groupId>
-                    <artifactId>log4j-web</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.logging.log4j</groupId>
-                    <artifactId>log4j-1.2-api</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.logging.log4j</groupId>
-                    <artifactId>log4j-slf4j-impl</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.orc</groupId>
-                    <artifactId>orc-core</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.hbase</groupId>
-                    <artifactId>hbase-client</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.hbase</groupId>
-                    <artifactId>hbase-mapreduce</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.hbase</groupId>
-                    <artifactId>hbase-hadoop2-compat</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>co.cask.tephra</groupId>
-                    <artifactId>tephra-api</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>co.cask.tephra</groupId>
-                    <artifactId>tephra-core</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>co.cask.tephra</groupId>
-                    <artifactId>tephra-hbase-compat-1.0</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.parquet</groupId>
-                    <artifactId>parquet-hadoop-bundle</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>com.tdunning</groupId>
-                    <artifactId>json</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>com.zaxxer</groupId>
-                    <artifactId>HikariCP</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>com.google.guava</groupId>
-                    <artifactId>guava</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.eclipse.jetty</groupId>
-                    <artifactId>jetty-xml</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.eclipse.jetty</groupId>
-                    <artifactId>jetty-server</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.eclipse.jetty</groupId>
-                    <artifactId>jetty-servlet</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.eclipse.jetty</groupId>
-                    <artifactId>jetty-webapp</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.eclipse.jetty</groupId>
-                    <artifactId>jetty-util</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.eclipse.jetty</groupId>
-                    <artifactId>jetty-http</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.eclipse.jetty</groupId>
-                    <artifactId>jetty-rewrite</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
            <groupId>org.apache.derby</groupId>
@@ -297,12 +98,6 @@
-        <dependency>
-            <groupId>org.apache.nifi</groupId>
-            <artifactId>nifi-iceberg-test-utils</artifactId>
-            <version>2.0.0-SNAPSHOT</version>
-            <scope>test</scope>
-        </dependency>
            <groupId>org.apache.nifi</groupId>
            <artifactId>nifi-avro-record-utils</artifactId>
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-processors/src/test/java/org/apache/nifi/processors/iceberg/TestPutIcebergCustomValidation.java b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-processors/src/test/java/org/apache/nifi/processors/iceberg/TestPutIcebergCustomValidation.java
deleted file mode 100644
index f1ab1d398328..000000000000
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-processors/src/test/java/org/apache/nifi/processors/iceberg/TestPutIcebergCustomValidation.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.processors.iceberg;
-
-import org.apache.nifi.kerberos.KerberosUserService;
-import org.apache.nifi.reporting.InitializationException;
-import org.apache.nifi.serialization.record.MockRecordParser;
-import org.apache.nifi.services.iceberg.HiveCatalogService;
-import org.apache.nifi.util.TestRunner;
-import org.apache.nifi.util.TestRunners;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import static org.apache.nifi.services.iceberg.AbstractCatalogService.HADOOP_CONFIGURATION_RESOURCES;
-import static org.apache.nifi.services.iceberg.HiveCatalogService.METASTORE_URI;
-import static org.apache.nifi.services.iceberg.HiveCatalogService.WAREHOUSE_LOCATION;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class TestPutIcebergCustomValidation {
-
- private static final String RECORD_READER_NAME = "record-reader";
- private static final String KERBEROS_USER_SERVICE_NAME = "kerberos-user-service";
- private static final String CATALOG_SERVICE_NAME = "catalog-service";
-
- private static final String CATALOG_NAMESPACE = "catalogNamespace";
- private static final String TABLE_NAME = "tableName";
-
- private TestRunner runner;
-
- @BeforeEach
- public void setUp() {
- PutIceberg processor = new PutIceberg();
- runner = TestRunners.newTestRunner(processor);
- }
-
- private void initRecordReader() throws InitializationException {
- MockRecordParser readerFactory = new MockRecordParser();
-
- runner.addControllerService(RECORD_READER_NAME, readerFactory);
- runner.enableControllerService(readerFactory);
-
- runner.setProperty(PutIceberg.RECORD_READER, RECORD_READER_NAME);
- }
-
- private void initCatalogService(String configFilePaths) throws InitializationException {
- final HiveCatalogService catalogService = new HiveCatalogService();
- runner.addControllerService(CATALOG_SERVICE_NAME, catalogService);
- runner.setProperty(catalogService, METASTORE_URI, "test-metastore");
- runner.setProperty(catalogService, WAREHOUSE_LOCATION, "test-warehouse");
- runner.setProperty(catalogService, HADOOP_CONFIGURATION_RESOURCES, configFilePaths);
-
- runner.enableControllerService(catalogService);
-
- runner.setProperty(PutIceberg.CATALOG, CATALOG_SERVICE_NAME);
- }
-
- private void initKerberosUserService() throws InitializationException {
- KerberosUserService kerberosUserService = mock(KerberosUserService.class);
- when(kerberosUserService.getIdentifier()).thenReturn(KERBEROS_USER_SERVICE_NAME);
-
- runner.addControllerService(KERBEROS_USER_SERVICE_NAME, kerberosUserService);
- runner.enableControllerService(kerberosUserService);
-
- runner.setProperty(PutIceberg.KERBEROS_USER_SERVICE, KERBEROS_USER_SERVICE_NAME);
- }
-
- @Test
- public void testCustomValidateWithKerberosSecurityConfigAndWithoutKerberosUserService() throws InitializationException {
- initRecordReader();
- initCatalogService("src/test/resources/secured-core-site.xml");
-
- runner.setProperty(PutIceberg.CATALOG_NAMESPACE, CATALOG_NAMESPACE);
- runner.setProperty(PutIceberg.TABLE_NAME, TABLE_NAME);
- runner.assertNotValid();
- }
-
- @Test
- public void testCustomValidateWithKerberosSecurityConfigAndKerberosUserService() throws InitializationException {
- initRecordReader();
- initCatalogService("src/test/resources/secured-core-site.xml");
-
- initKerberosUserService();
-
- runner.setProperty(PutIceberg.CATALOG_NAMESPACE, CATALOG_NAMESPACE);
- runner.setProperty(PutIceberg.TABLE_NAME, TABLE_NAME);
- runner.assertValid();
- }
-
- @Test
- public void testCustomValidateWithoutKerberosSecurityConfigAndKerberosUserService() throws InitializationException {
- initRecordReader();
- initCatalogService("src/test/resources/unsecured-core-site.xml");
-
- runner.setProperty(PutIceberg.CATALOG_NAMESPACE, CATALOG_NAMESPACE);
- runner.setProperty(PutIceberg.TABLE_NAME, TABLE_NAME);
- runner.assertValid();
- }
-
- @Test
- public void testCustomValidateWithoutKerberosSecurityConfigAndWithKerberosUserService() throws InitializationException {
- initRecordReader();
- initCatalogService("src/test/resources/unsecured-core-site.xml");
-
- initKerberosUserService();
-
- runner.setProperty(PutIceberg.CATALOG_NAMESPACE, CATALOG_NAMESPACE);
- runner.setProperty(PutIceberg.TABLE_NAME, TABLE_NAME);
- runner.assertNotValid();
- }
-
- @Test
- public void testInvalidSnapshotSummaryDynamicProperty() throws InitializationException {
- initRecordReader();
- initCatalogService("src/test/resources/unsecured-core-site.xml");
-
- runner.setProperty(PutIceberg.CATALOG_NAMESPACE, CATALOG_NAMESPACE);
- runner.setProperty(PutIceberg.TABLE_NAME, TABLE_NAME);
-
- runner.setProperty("invalid.dynamic.property", "test value");
- runner.assertNotValid();
- }
-
- @Test
- public void testValidSnapshotSummaryDynamicProperty() throws InitializationException {
- initRecordReader();
- initCatalogService("src/test/resources/unsecured-core-site.xml");
-
- runner.setProperty(PutIceberg.CATALOG_NAMESPACE, CATALOG_NAMESPACE);
- runner.setProperty(PutIceberg.TABLE_NAME, TABLE_NAME);
-
- runner.setProperty("snapshot-property.valid-property", "test value");
- runner.assertValid();
- }
-}
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-processors/src/test/java/org/apache/nifi/processors/iceberg/TestPutIcebergWithHiveCatalog.java b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-processors/src/test/java/org/apache/nifi/processors/iceberg/TestPutIcebergWithHiveCatalog.java
deleted file mode 100644
index 21ff765c1fd0..000000000000
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-processors/src/test/java/org/apache/nifi/processors/iceberg/TestPutIcebergWithHiveCatalog.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.processors.iceberg;
-
-import org.apache.iceberg.FileFormat;
-import org.apache.iceberg.PartitionSpec;
-import org.apache.iceberg.Table;
-import org.apache.iceberg.TableProperties;
-import org.apache.iceberg.data.Record;
-import org.apache.nifi.hive.metastore.ThriftMetastore;
-import org.apache.nifi.processors.iceberg.catalog.IcebergCatalogFactory;
-import org.apache.nifi.processors.iceberg.util.IcebergTestUtils;
-import org.apache.nifi.provenance.ProvenanceEventRecord;
-import org.apache.nifi.provenance.ProvenanceEventType;
-import org.apache.nifi.reporting.InitializationException;
-import org.apache.nifi.services.iceberg.HiveCatalogService;
-import org.apache.nifi.util.MockFlowFile;
-import org.apache.nifi.util.TestRunners;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.condition.DisabledOnOs;
-import org.junit.jupiter.api.extension.RegisterExtension;
-
-import java.net.URI;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.nifi.processors.iceberg.PutIceberg.ICEBERG_RECORD_COUNT;
-import static org.apache.nifi.processors.iceberg.PutIceberg.ICEBERG_SNAPSHOT_SUMMARY_FLOWFILE_UUID;
-import static org.apache.nifi.processors.iceberg.util.IcebergTestUtils.CATALOG_NAME;
-import static org.apache.nifi.processors.iceberg.util.IcebergTestUtils.validateData;
-import static org.apache.nifi.processors.iceberg.util.IcebergTestUtils.validateNumberOfDataFiles;
-import static org.apache.nifi.processors.iceberg.util.IcebergTestUtils.validatePartitionFolders;
-import static org.apache.nifi.services.iceberg.HiveCatalogService.METASTORE_URI;
-import static org.apache.nifi.services.iceberg.HiveCatalogService.WAREHOUSE_LOCATION;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.condition.OS.WINDOWS;
-
-@DisabledOnOs(WINDOWS)
-public class TestPutIcebergWithHiveCatalog extends AbstractTestPutIceberg {
-
- @RegisterExtension
- public static ThriftMetastore metastore = new ThriftMetastore();
-
- private void initCatalog(PartitionSpec spec, FileFormat fileFormat) throws InitializationException {
- final Map<String, String> tableProperties = new HashMap<>();
- tableProperties.put(TableProperties.DEFAULT_FILE_FORMAT, fileFormat.name());
-
- final HiveCatalogService catalogService = new HiveCatalogService();
- runner.addControllerService("catalog-service", catalogService);
- runner.setProperty(catalogService, METASTORE_URI, metastore.getThriftConnectionUri());
- runner.setProperty(catalogService, WAREHOUSE_LOCATION, warehousePath);
- runner.enableControllerService(catalogService);
-
- final IcebergCatalogFactory catalogFactory = new IcebergCatalogFactory(catalogService);
- catalog = catalogFactory.create();
-
- catalog.createTable(TABLE_IDENTIFIER, USER_SCHEMA, spec, tableProperties);
-
- runner.setProperty(PutIceberg.CATALOG, "catalog-service");
- }
-
- @Test
- public void onTriggerIdentityPartitioned() throws Exception {
- final PartitionSpec spec = PartitionSpec.builderFor(USER_SCHEMA)
- .identity("department")
- .build();
-
- runner = TestRunners.newTestRunner(processor);
- initRecordReader();
- initCatalog(spec, FileFormat.ORC);
- runner.setProperty(PutIceberg.CATALOG_NAMESPACE, CATALOG_NAME);
- runner.setProperty(PutIceberg.TABLE_NAME, TABLE_NAME);
- runner.setValidateExpressionUsage(false);
- runner.enqueue(new byte[0]);
- runner.run();
-
- final Table table = catalog.loadTable(TABLE_IDENTIFIER);
-
- final List<Record> expectedRecords = IcebergTestUtils.RecordsBuilder.newInstance(USER_SCHEMA)
- .add(0, "John", "Finance")
- .add(1, "Jill", "Finance")
- .add(2, "James", "Marketing")
- .add(3, "Joana", "Sales")
- .build();
-
- runner.assertTransferCount(PutIceberg.REL_SUCCESS, 1);
- final MockFlowFile flowFile = runner.getFlowFilesForRelationship(PutIceberg.REL_SUCCESS).getFirst();
-
- final String tableLocation = new URI(table.location()).getPath();
- assertTrue(table.spec().isPartitioned());
- assertEquals("4", flowFile.getAttribute(ICEBERG_RECORD_COUNT));
- validateData(table, expectedRecords, 0);
- validateNumberOfDataFiles(tableLocation, 3);
- validatePartitionFolders(tableLocation, Arrays.asList(
- "department=Finance", "department=Marketing", "department=Sales"));
- assertProvenanceEvents();
- }
-
- @Test
- public void onTriggerMultiLevelIdentityPartitioned() throws Exception {
- final PartitionSpec spec = PartitionSpec.builderFor(USER_SCHEMA)
- .identity("name")
- .identity("department")
- .build();
-
- runner = TestRunners.newTestRunner(processor);
- initRecordReader();
- initCatalog(spec, FileFormat.PARQUET);
- runner.setProperty(PutIceberg.CATALOG_NAMESPACE, CATALOG_NAME);
- runner.setProperty(PutIceberg.TABLE_NAME, TABLE_NAME);
- runner.setValidateExpressionUsage(false);
- runner.enqueue(new byte[0]);
- runner.run();
-
- final Table table = catalog.loadTable(TABLE_IDENTIFIER);
-
- final List<Record> expectedRecords = IcebergTestUtils.RecordsBuilder.newInstance(USER_SCHEMA)
- .add(0, "John", "Finance")
- .add(1, "Jill", "Finance")
- .add(2, "James", "Marketing")
- .add(3, "Joana", "Sales")
- .build();
-
- runner.assertTransferCount(PutIceberg.REL_SUCCESS, 1);
- final MockFlowFile flowFile = runner.getFlowFilesForRelationship(PutIceberg.REL_SUCCESS).getFirst();
-
- final String tableLocation = new URI(table.location()).getPath();
- assertTrue(table.spec().isPartitioned());
- assertEquals("4", flowFile.getAttribute(ICEBERG_RECORD_COUNT));
- validateData(table, expectedRecords, 0);
- validateNumberOfDataFiles(tableLocation, 4);
- validatePartitionFolders(tableLocation, Arrays.asList(
- "name=James/department=Marketing/",
- "name=Jill/department=Finance/",
- "name=Joana/department=Sales/",
- "name=John/department=Finance/"
- ));
- assertProvenanceEvents();
- }
-
- @Test
- public void onTriggerUnPartitioned() throws Exception {
- runner = TestRunners.newTestRunner(processor);
- initRecordReader();
- initCatalog(PartitionSpec.unpartitioned(), FileFormat.AVRO);
- runner.setProperty(PutIceberg.CATALOG_NAMESPACE, "${catalog.name}");
- runner.setProperty(PutIceberg.TABLE_NAME, "${table.name}");
- runner.setProperty(PutIceberg.MAXIMUM_FILE_SIZE, "${max.filesize}");
- runner.setProperty("snapshot-property.additional-summary-property", "test summary property");
- final Map<String, String> attributes = new HashMap<>();
- attributes.put("catalog.name", CATALOG_NAME);
- attributes.put("table.name", TABLE_NAME);
- attributes.put("max.filesize", "536870912"); // 512 MB
- runner.enqueue(new byte[0], attributes);
- runner.run();
-
- final Table table = catalog.loadTable(TABLE_IDENTIFIER);
-
- final List<Record> expectedRecords = IcebergTestUtils.RecordsBuilder.newInstance(USER_SCHEMA)
- .add(0, "John", "Finance")
- .add(1, "Jill", "Finance")
- .add(2, "James", "Marketing")
- .add(3, "Joana", "Sales")
- .build();
-
- runner.assertTransferCount(PutIceberg.REL_SUCCESS, 1);
- final MockFlowFile flowFile = runner.getFlowFilesForRelationship(PutIceberg.REL_SUCCESS).getFirst();
-
- assertTrue(table.spec().isUnpartitioned());
- assertEquals("4", flowFile.getAttribute(ICEBERG_RECORD_COUNT));
- validateData(table, expectedRecords, 0);
- validateNumberOfDataFiles(new URI(table.location()).getPath(), 1);
- assertProvenanceEvents();
- assertSnapshotSummaryProperties(table, Collections.singletonMap("additional-summary-property", "test summary property"));
- }
-
- private void assertProvenanceEvents() {
- final List<ProvenanceEventRecord> provenanceEvents = runner.getProvenanceEvents();
- assertEquals(1, provenanceEvents.size());
- final ProvenanceEventRecord sendEvent = provenanceEvents.getFirst();
- assertEquals(ProvenanceEventType.SEND, sendEvent.getEventType());
- assertTrue(sendEvent.getTransitUri().endsWith(CATALOG_NAME + ".db/" + TABLE_NAME));
- }
-
- private void assertSnapshotSummaryProperties(Table table, Map<String, String> summaryProperties) {
- final Map<String, String> snapshotSummary = table.currentSnapshot().summary();
-
- assertTrue(snapshotSummary.containsKey(ICEBERG_SNAPSHOT_SUMMARY_FLOWFILE_UUID));
-
- for (Map.Entry<String, String> entry : summaryProperties.entrySet()) {
- assertEquals(snapshotSummary.get(entry.getKey()), entry.getValue());
- }
- }
-}
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services-api/src/main/java/org/apache/nifi/services/iceberg/IcebergCatalogProperty.java b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services-api/src/main/java/org/apache/nifi/services/iceberg/IcebergCatalogProperty.java
index 0874f731adc9..d07bd962fc84 100644
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services-api/src/main/java/org/apache/nifi/services/iceberg/IcebergCatalogProperty.java
+++ b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services-api/src/main/java/org/apache/nifi/services/iceberg/IcebergCatalogProperty.java
@@ -20,7 +20,6 @@
public enum IcebergCatalogProperty {
CATALOG_NAME,
- METASTORE_URI("hive.metastore.uris"),
WAREHOUSE_LOCATION("hive.metastore.warehouse.dir"),
CLIENT_POOL_SERVICE;
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services-api/src/main/java/org/apache/nifi/services/iceberg/IcebergCatalogType.java b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services-api/src/main/java/org/apache/nifi/services/iceberg/IcebergCatalogType.java
index 8aad41049e33..e098f9ce7138 100644
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services-api/src/main/java/org/apache/nifi/services/iceberg/IcebergCatalogType.java
+++ b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services-api/src/main/java/org/apache/nifi/services/iceberg/IcebergCatalogType.java
@@ -18,7 +18,6 @@
package org.apache.nifi.services.iceberg;
public enum IcebergCatalogType {
- HIVE,
HADOOP,
JDBC
}
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services/src/main/java/org/apache/nifi/services/iceberg/HiveCatalogService.java b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services/src/main/java/org/apache/nifi/services/iceberg/HiveCatalogService.java
deleted file mode 100644
index c421c8c3b212..000000000000
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services/src/main/java/org/apache/nifi/services/iceberg/HiveCatalogService.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.services.iceberg;
-
-import org.apache.nifi.annotation.documentation.CapabilityDescription;
-import org.apache.nifi.annotation.documentation.Tags;
-import org.apache.nifi.annotation.lifecycle.OnEnabled;
-import org.apache.nifi.components.PropertyDescriptor;
-import org.apache.nifi.components.ValidationContext;
-import org.apache.nifi.components.ValidationResult;
-import org.apache.nifi.controller.ConfigurationContext;
-import org.apache.nifi.expression.ExpressionLanguageScope;
-import org.apache.nifi.processor.util.StandardValidators;
-import org.w3c.dom.Document;
-import org.w3c.dom.NodeList;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-@Tags({"iceberg", "catalog", "service", "metastore", "hive"})
-@CapabilityDescription("Catalog service that connects to a Hive metastore to keep track of Iceberg tables.")
-public class HiveCatalogService extends AbstractCatalogService {
-
- public static final PropertyDescriptor METASTORE_URI = new PropertyDescriptor.Builder()
- .name("hive-metastore-uri")
- .displayName("Hive Metastore URI")
- .description("The URI location(s) for the Hive metastore; note that this is not the location of the Hive Server. The default port for the Hive metastore is 9043.")
- .expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
- .addValidator(StandardValidators.URI_LIST_VALIDATOR)
- .build();
-
- public static final PropertyDescriptor WAREHOUSE_LOCATION = new PropertyDescriptor.Builder()
- .name("warehouse-location")
- .displayName("Default Warehouse Location")
- .description("Location of default database for the warehouse. This field sets or overrides the 'hive.metastore.warehouse.dir' configuration property.")
- .expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
- .addValidator(StandardValidators.NON_BLANK_VALIDATOR)
- .build();
-
- private static final List<PropertyDescriptor> PROPERTIES = List.of(METASTORE_URI, WAREHOUSE_LOCATION, HADOOP_CONFIGURATION_RESOURCES);
-
- @Override
- protected List<PropertyDescriptor> getSupportedPropertyDescriptors() {
- return PROPERTIES;
- }
-
- @Override
- protected Collection<ValidationResult> customValidate(ValidationContext validationContext) {
-
- final List<ValidationResult> problems = new ArrayList<>();
- boolean configMetastoreUriPresent = false;
- boolean configWarehouseLocationPresent = false;
-
- final String propertyMetastoreUri = validationContext.getProperty(METASTORE_URI).evaluateAttributeExpressions().getValue();
- final String propertyWarehouseLocation = validationContext.getProperty(WAREHOUSE_LOCATION).evaluateAttributeExpressions().getValue();
-
- // Load the configurations for validation only if any config resource is provided and if either the metastore URI or the warehouse location property is missing
- if (validationContext.getProperty(HADOOP_CONFIGURATION_RESOURCES).isSet() && (propertyMetastoreUri == null || propertyWarehouseLocation == null)) {
- final String configFiles = validationContext.getProperty(HADOOP_CONFIGURATION_RESOURCES).evaluateAttributeExpressions().getValue();
- final List<Document> documents = parseConfigFilePaths(configFiles);
-
- for (Document document : documents) {
- final NodeList nameNodeList = document.getElementsByTagName("name");
-
- for (int i = 0; i < nameNodeList.getLength(); i++) {
- final String nodeValue = nameNodeList.item(i).getFirstChild().getNodeValue();
-
- if (nodeValue.equals(IcebergCatalogProperty.METASTORE_URI.getHadoopPropertyName())) {
- configMetastoreUriPresent = true;
- }
-
- if (nodeValue.equals(IcebergCatalogProperty.WAREHOUSE_LOCATION.getHadoopPropertyName())) {
- configWarehouseLocationPresent = true;
- }
-
- if (configMetastoreUriPresent && configWarehouseLocationPresent) {
- break;
- }
- }
- }
- }
-
- if (!configMetastoreUriPresent && propertyMetastoreUri == null) {
- problems.add(new ValidationResult.Builder()
- .subject("Hive Metastore URI")
- .valid(false)
- .explanation("cannot find hive metastore uri, please provide it in the 'Hive Metastore URI' property" +
- " or provide a configuration file which contains 'hive.metastore.uris' value.")
- .build());
- }
-
- if (!configWarehouseLocationPresent && propertyWarehouseLocation == null) {
- problems.add(new ValidationResult.Builder()
- .subject("Default Warehouse Location")
- .valid(false)
- .explanation("cannot find default warehouse location, please provide it in the 'Default Warehouse Location' property" +
- " or provide a configuration file which contains 'hive.metastore.warehouse.dir' value.")
- .build());
- }
-
- return problems;
- }
-
- @OnEnabled
- public void onEnabled(final ConfigurationContext context) {
- if (context.getProperty(METASTORE_URI).isSet()) {
- catalogProperties.put(IcebergCatalogProperty.METASTORE_URI, context.getProperty(METASTORE_URI).evaluateAttributeExpressions().getValue());
- }
-
- if (context.getProperty(WAREHOUSE_LOCATION).isSet()) {
- catalogProperties.put(IcebergCatalogProperty.WAREHOUSE_LOCATION, context.getProperty(WAREHOUSE_LOCATION).evaluateAttributeExpressions().getValue());
- }
-
- if (context.getProperty(HADOOP_CONFIGURATION_RESOURCES).isSet()) {
- configFilePaths = createFilePathList(context.getProperty(HADOOP_CONFIGURATION_RESOURCES).evaluateAttributeExpressions().getValue());
- }
- }
-
- @Override
- public IcebergCatalogType getCatalogType() {
- return IcebergCatalogType.HIVE;
- }
-}
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService
index 44a8e957cd34..a0ba41732b50 100755
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService
+++ b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-services/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService
@@ -13,6 +13,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-org.apache.nifi.services.iceberg.HiveCatalogService
org.apache.nifi.services.iceberg.HadoopCatalogService
org.apache.nifi.services.iceberg.JdbcCatalogService
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/pom.xml b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/pom.xml
deleted file mode 100644
index b3458a7a16b6..000000000000
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/pom.xml
+++ /dev/null
@@ -1,173 +0,0 @@
-
-
-
-
- nifi-iceberg-bundle
- org.apache.nifi
- 2.0.0-SNAPSHOT
-
- 4.0.0
-
- nifi-iceberg-test-utils
- jar
-
-
-
- org.apache.hive
- hive-metastore
- ${hive.version}
-
-
- log4j
- log4j
-
-
- org.slf4j
- slf4j-log4j12
-
-
- commons-logging
- commons-logging
-
-
- org.apache.logging.log4j
- log4j-core
-
-
- org.apache.logging.log4j
- log4j-web
-
-
- org.apache.logging.log4j
- log4j-1.2-api
-
-
- org.apache.logging.log4j
- log4j-slf4j-impl
-
-
- org.apache.orc
- orc-core
-
-
- org.apache.hbase
- hbase-client
-
-
- co.cask.tephra
- tephra-api
-
-
- co.cask.tephra
- tephra-core
-
-
- co.cask.tephra
- tephra-hbase-compat-1.0
-
-
- org.apache.parquet
- parquet-hadoop-bundle
-
-
- com.tdunning
- json
-
-
- com.zaxxer
- HikariCP
-
-
- com.google.guava
- guava
-
-
- org.apache.groovy
- groovy-all
-
-
- org.apache.ivy
- ivy
-
-
- org.apache.hadoop
- hadoop-yarn-server-resourcemanager
-
-
- org.eclipse.jetty
- jetty-server
-
-
- org.eclipse.jetty
- jetty-servlet
-
-
- org.eclipse.jetty
- jetty-webapp
-
-
- org.eclipse.jetty
- jetty-util
-
-
- org.eclipse.jetty
- jetty-http
-
-
- org.eclipse.jetty
- jetty-rewrite
-
-
- org.apache.hive
- hive-shims
-
-
- com.jolbox
- bonecp
-
-
- commons-cli
- commons-cli
-
-
- com.google.protobuf
- protobuf-java
-
-
-
-
- org.apache.hadoop
- hadoop-common
-
-
-
- org.apache.derby
- derbytools
- ${derby.version}
-
-
- org.apache.derby
- derby
- ${derby.version}
-
-
- org.junit.jupiter
- junit-jupiter-api
- compile
-
-
-
-
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/src/main/java/org/apache/nifi/hive/metastore/MetastoreCore.java b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/src/main/java/org/apache/nifi/hive/metastore/MetastoreCore.java
deleted file mode 100644
index 3d2908b3d1f7..000000000000
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/src/main/java/org/apache/nifi/hive/metastore/MetastoreCore.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.hive.metastore;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.derby.jdbc.EmbeddedDriver;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStore;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.IHMSHandler;
-import org.apache.hadoop.hive.metastore.RetryingHMSHandler;
-import org.apache.hadoop.hive.metastore.TServerSocketKeepAlive;
-import org.apache.hadoop.hive.metastore.TSetIpAddressProcessor;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.server.TServer;
-import org.apache.thrift.server.TThreadPoolServer;
-import org.apache.thrift.transport.TServerSocket;
-import org.apache.thrift.transport.TTransportException;
-import org.apache.thrift.transport.TTransportFactory;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.Reader;
-import java.lang.reflect.InvocationTargetException;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-import static java.nio.file.Files.createTempDirectory;
-import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.AUTO_CREATE_ALL;
-import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON;
-import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.COMPACTOR_WORKER_THREADS;
-import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.CONNECTION_DRIVER;
-import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE;
-import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.CONNECT_URL_KEY;
-import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HIVE_SUPPORT_CONCURRENCY;
-import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HIVE_TXN_MANAGER;
-import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HMS_HANDLER_FORCE_RELOAD_CONF;
-import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.SCHEMA_VERIFICATION;
-import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.THRIFT_URIS;
-import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.WAREHOUSE;
-
-/**
- * This class wraps Metastore service core functionalities.
- */
-class MetastoreCore {
-
- private final String DATABASE_NAME = "iceberg_test";
-
- private String thriftConnectionUri;
- private Configuration hiveConf;
- private HiveMetaStoreClient metaStoreClient;
- private File tempDir;
- private ExecutorService thriftServer;
- private TServer server;
-
- public void initialize(Map<String, String> configOverrides) throws IOException, TException, InvocationTargetException, NoSuchMethodException,
- IllegalAccessException, NoSuchFieldException, SQLException {
- thriftServer = Executors.newSingleThreadExecutor();
- tempDir = createTempDirectory("metastore").toFile();
- setDerbyLogPath();
- setupDB("jdbc:derby:" + getDerbyPath() + ";create=true");
-
- server = thriftServer(configOverrides);
- thriftServer.submit(() -> server.serve());
-
- metaStoreClient = new HiveMetaStoreClient(hiveConf);
- metaStoreClient.createDatabase(new Database(DATABASE_NAME, "description", getDBPath(), new HashMap<>()));
- }
-
- public void shutdown() {
- metaStoreClient.close();
-
- if (server != null) {
- server.stop();
- }
-
- thriftServer.shutdown();
-
- FileUtils.deleteQuietly(tempDir);
- }
-
- private HiveConf hiveConf(int port, Map<String, String> configOverrides) throws IOException {
- thriftConnectionUri = "thrift://localhost:" + port;
-
- final HiveConf hiveConf = new HiveConf(new Configuration(), this.getClass());
- hiveConf.set(THRIFT_URIS.getVarname(), thriftConnectionUri);
- hiveConf.set(WAREHOUSE.getVarname(), "file:" + tempDir.getAbsolutePath());
- hiveConf.set(WAREHOUSE.getHiveName(), "file:" + tempDir.getAbsolutePath());
- hiveConf.set(CONNECTION_DRIVER.getVarname(), EmbeddedDriver.class.getName());
- hiveConf.set(CONNECT_URL_KEY.getVarname(), "jdbc:derby:" + getDerbyPath() + ";create=true");
- hiveConf.set(AUTO_CREATE_ALL.getVarname(), "false");
- hiveConf.set(SCHEMA_VERIFICATION.getVarname(), "false");
- hiveConf.set(HIVE_TXN_MANAGER.getVarname(), "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
- hiveConf.set(COMPACTOR_INITIATOR_ON.getVarname(), "true");
- hiveConf.set(COMPACTOR_WORKER_THREADS.getVarname(), "1");
- hiveConf.set(HIVE_SUPPORT_CONCURRENCY.getVarname(), "true");
- hiveConf.setBoolean("hcatalog.hive.client.cache.disabled", true);
-
- hiveConf.set(CONNECTION_POOLING_TYPE.getVarname(), "NONE");
- hiveConf.set(HMS_HANDLER_FORCE_RELOAD_CONF.getVarname(), "true");
-
- configOverrides.forEach(hiveConf::set);
-
- writeHiveConfFile(hiveConf);
- return hiveConf;
- }
-
- private void setDerbyLogPath() throws IOException {
- final String derbyLog = Files.createTempFile(tempDir.toPath(), "derby", ".log").toString();
- System.setProperty("derby.stream.error.file", derbyLog);
- }
-
- private String getDerbyPath() {
- return new File(tempDir, "metastore_db").getPath();
- }
-
- private TServer thriftServer(Map<String, String> configOverrides) throws TTransportException, MetaException, InvocationTargetException,
- NoSuchMethodException, IllegalAccessException, NoSuchFieldException, IOException {
- final TServerSocketKeepAlive socket = new TServerSocketKeepAlive(new TServerSocket(0));
- hiveConf = hiveConf(socket.getServerSocket().getLocalPort(), configOverrides);
- final HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", hiveConf);
- final IHMSHandler handler = RetryingHMSHandler.getProxy(hiveConf, baseHandler, true);
- final TTransportFactory transportFactory = new TTransportFactory();
- final TSetIpAddressProcessor<IHMSHandler> processor = new TSetIpAddressProcessor<>(handler);
-
- TThreadPoolServer.Args args = new TThreadPoolServer.Args(socket)
- .processor(processor)
- .transportFactory(transportFactory)
- .protocolFactory(new TBinaryProtocol.Factory())
- .minWorkerThreads(3)
- .maxWorkerThreads(5);
-
- return new TThreadPoolServer(args);
- }
-
- private void setupDB(String dbURL) throws SQLException, IOException {
- final Connection connection = DriverManager.getConnection(dbURL);
- final ScriptRunner scriptRunner = new ScriptRunner(connection);
-
- final InputStream inputStream = getClass().getClassLoader().getResourceAsStream("hive-schema-4.0.0-alpha-2.derby.sql");
- final Reader reader = new BufferedReader(new InputStreamReader(inputStream));
- scriptRunner.runScript(reader);
- }
-
- private String getDBPath() {
- return Paths.get(tempDir.getAbsolutePath(), DATABASE_NAME + ".db").toAbsolutePath().toString();
- }
-
- private void writeHiveConfFile(HiveConf hiveConf) throws IOException {
- File file = new File(tempDir.toPath() + "/hive-site.xml");
- hiveConf.writeXml(Files.newOutputStream(file.toPath()));
- }
-
- public String getThriftConnectionUri() {
- return thriftConnectionUri;
- }
-
- public String getWarehouseLocation() {
- return tempDir.getAbsolutePath();
- }
-
- public HiveMetaStoreClient getMetaStoreClient() {
- return metaStoreClient;
- }
-
- public Configuration getConfiguration() {
- return hiveConf;
- }
-
- public String getConfigurationLocation() {
- return tempDir.toPath() + "/hive-site.xml";
- }
-
-}
-
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/src/main/java/org/apache/nifi/hive/metastore/ScriptRunner.java b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/src/main/java/org/apache/nifi/hive/metastore/ScriptRunner.java
deleted file mode 100644
index d3666fdbe4ff..000000000000
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/src/main/java/org/apache/nifi/hive/metastore/ScriptRunner.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.hive.metastore;
-
-import java.io.IOException;
-import java.io.LineNumberReader;
-import java.io.Reader;
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.sql.Statement;
-
-/** This class is responsible for metastore init script processing and execution. */
-public class ScriptRunner {
-
- private static final String DEFAULT_DELIMITER = ";";
-
- private final Connection connection;
-
- public ScriptRunner(Connection connection) throws SQLException {
- this.connection = connection;
- if (!this.connection.getAutoCommit()) {
- // May throw SQLFeatureNotSupportedException which is a subclass of SQLException
- this.connection.setAutoCommit(true);
- }
- }
-
- public void runScript(Reader reader) throws IOException, SQLException {
- try {
- StringBuilder command = new StringBuilder();
- LineNumberReader lineReader = new LineNumberReader(reader);
- String line;
- while ((line = lineReader.readLine()) != null) {
- String trimmedLine = line.trim();
- if (trimmedLine.isEmpty() || trimmedLine.startsWith("--") || trimmedLine.startsWith("//")) {
- continue; //Skip comment line
- } else if (trimmedLine.endsWith(getDelimiter())) {
- command.append(line, 0, line.lastIndexOf(getDelimiter()));
- command.append(" ");
- Statement statement = connection.createStatement();
-
- statement.execute(command.toString());
- connection.commit();
-
- command = new StringBuilder();
-
- statement.close();
- } else {
- command.append(line);
- command.append(" ");
- }
- }
- } catch (IOException | SQLException e) {
- throw e;
- } catch (Exception e) {
- throw new RuntimeException("Error running metastore init script.", e);
- } finally {
- connection.rollback();
- }
- }
-
- private String getDelimiter() {
- return DEFAULT_DELIMITER;
- }
-}
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/src/main/java/org/apache/nifi/hive/metastore/ThriftMetastore.java b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/src/main/java/org/apache/nifi/hive/metastore/ThriftMetastore.java
deleted file mode 100644
index 2b3dc4da5c41..000000000000
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/src/main/java/org/apache/nifi/hive/metastore/ThriftMetastore.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.hive.metastore;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.junit.jupiter.api.extension.AfterAllCallback;
-import org.junit.jupiter.api.extension.BeforeAllCallback;
-import org.junit.jupiter.api.extension.ExtensionContext;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/** A JUnit Extension that creates a Hive Metastore Thrift service backed by a Hive Metastore using an in-memory Derby database. */
-public class ThriftMetastore implements BeforeAllCallback, AfterAllCallback {
-
- private final MetastoreCore metastoreCore;
-
- private Map<String, String> configOverrides = new HashMap<>();
-
- public ThriftMetastore() {
- metastoreCore = new MetastoreCore();
- }
-
- public ThriftMetastore withConfigOverrides(Map<String, String> configs) {
- configOverrides = configs;
- return this;
- }
-
- @Override
- public void beforeAll(ExtensionContext context) throws Exception {
- metastoreCore.initialize(configOverrides);
- }
-
- @Override
- public void afterAll(ExtensionContext context) {
- metastoreCore.shutdown();
- }
-
- public String getThriftConnectionUri() {
- return metastoreCore.getThriftConnectionUri();
- }
-
- public String getWarehouseLocation() {
- return metastoreCore.getWarehouseLocation();
- }
-
- public HiveMetaStoreClient getMetaStoreClient() {
- return metastoreCore.getMetaStoreClient();
- }
-
- public Configuration getConfiguration() {
- return metastoreCore.getConfiguration();
- }
-
- public String getConfigurationLocation() {
- return metastoreCore.getConfigurationLocation();
- }
-
-}
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/src/main/resources/hive-schema-4.0.0-alpha-2.derby.sql b/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/src/main/resources/hive-schema-4.0.0-alpha-2.derby.sql
deleted file mode 100644
index c1cc235a6f67..000000000000
--- a/nifi-extension-bundles/nifi-iceberg-bundle/nifi-iceberg-test-utils/src/main/resources/hive-schema-4.0.0-alpha-2.derby.sql
+++ /dev/null
@@ -1,791 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements. See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the "License"); you may not use this file except in compliance with
--- the License. You may obtain a copy of the License at
--- http://www.apache.org/licenses/LICENSE-2.0
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
--- Timestamp: 2011-09-22 15:32:02.024
--- Source database is: /home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
--- Connection URL is: jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
--- Specified schema is: APP
--- appendLogs: false
-
--- ----------------------------------------------
--- DDL Statements for functions
--- ----------------------------------------------
-
-CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ;
-
-CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ;
-
--- ----------------------------------------------
--- DDL Statements for tables
--- ----------------------------------------------
-CREATE TABLE "APP"."DBS" (
- "DB_ID" BIGINT NOT NULL,
- "DESC" VARCHAR(4000),
- "DB_LOCATION_URI" VARCHAR(4000) NOT NULL,
- "NAME" VARCHAR(128),
- "OWNER_NAME" VARCHAR(128),
- "OWNER_TYPE" VARCHAR(10),
- "CTLG_NAME" VARCHAR(256) NOT NULL DEFAULT 'hive',
- "CREATE_TIME" INTEGER,
- "DB_MANAGED_LOCATION_URI" VARCHAR(4000),
- "TYPE" VARCHAR(32) DEFAULT 'NATIVE' NOT NULL,
- "DATACONNECTOR_NAME" VARCHAR(128),
- "REMOTE_DBNAME" VARCHAR(128)
-);
-
-CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" VARCHAR(128));
-
-CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
-
-CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" VARCHAR(128));
-
-CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
-
-CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
-
-CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
-
-CREATE TABLE "APP"."DC_PRIVS" ("DC_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "NAME" VARCHAR(128), "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DC_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
-
-CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT);
-
-CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
-
-CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "WRITE_ID" BIGINT DEFAULT 0);
-
-CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER);
-
-CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
-
-CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT);
-
-CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767));
-
-CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
-
-CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
-
-CREATE TABLE "APP"."PARTITION_EVENTS" (
- "PART_NAME_ID" BIGINT NOT NULL,
- "CAT_NAME" VARCHAR(256),
- "DB_NAME" VARCHAR(128),
- "EVENT_TIME" BIGINT NOT NULL,
- "EVENT_TYPE" INTEGER NOT NULL,
- "PARTITION_NAME" VARCHAR(767),
- "TBL_NAME" VARCHAR(256)
-);
-
-CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
-
-CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "WRITE_ID" BIGINT DEFAULT 0);
-
-CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
-
-CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "IS_STOREDASSUBDIRECTORIES" CHAR(1) NOT NULL);
-
-CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL);
-
-CREATE TABLE "APP"."TAB_COL_STATS"(
- "CAT_NAME" VARCHAR(256) NOT NULL,
- "DB_NAME" VARCHAR(128) NOT NULL,
- "TABLE_NAME" VARCHAR(256) NOT NULL,
- "COLUMN_NAME" VARCHAR(767) NOT NULL,
- "COLUMN_TYPE" VARCHAR(128) NOT NULL,
- "LONG_LOW_VALUE" BIGINT,
- "LONG_HIGH_VALUE" BIGINT,
- "DOUBLE_LOW_VALUE" DOUBLE,
- "DOUBLE_HIGH_VALUE" DOUBLE,
- "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),
- "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),
- "NUM_DISTINCTS" BIGINT,
- "NUM_NULLS" BIGINT NOT NULL,
- "AVG_COL_LEN" DOUBLE,
- "MAX_COL_LEN" BIGINT,
- "NUM_TRUES" BIGINT,
- "NUM_FALSES" BIGINT,
- "LAST_ANALYZED" BIGINT,
- "CS_ID" BIGINT NOT NULL,
- "TBL_ID" BIGINT NOT NULL,
- "BIT_VECTOR" BLOB,
- "ENGINE" VARCHAR(128) NOT NULL
-);
-
-CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
-
-CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL);
-
-CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
-
-CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL);
-
-CREATE TABLE "APP"."SKEWED_STRING_LIST_VALUES" ("STRING_LIST_ID" BIGINT NOT NULL, "STRING_LIST_VALUE" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."SKEWED_COL_NAMES" ("SD_ID" BIGINT NOT NULL, "SKEWED_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ("SD_ID" BIGINT NOT NULL, "STRING_LIST_ID_KID" BIGINT NOT NULL, "LOCATION" VARCHAR(4000));
-
-CREATE TABLE "APP"."SKEWED_VALUES" ("SD_ID_OID" BIGINT NOT NULL, "STRING_LIST_ID_EID" BIGINT NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as identity (start with 1), "MASTER_KEY" VARCHAR(767));
-
-CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767));
-
-CREATE TABLE "APP"."PART_COL_STATS"(
- "CAT_NAME" VARCHAR(256) NOT NULL,
- "DB_NAME" VARCHAR(128) NOT NULL,
- "TABLE_NAME" VARCHAR(256) NOT NULL,
- "PARTITION_NAME" VARCHAR(767) NOT NULL,
- "COLUMN_NAME" VARCHAR(767) NOT NULL,
- "COLUMN_TYPE" VARCHAR(128) NOT NULL,
- "LONG_LOW_VALUE" BIGINT,
- "LONG_HIGH_VALUE" BIGINT,
- "DOUBLE_LOW_VALUE" DOUBLE,
- "DOUBLE_HIGH_VALUE" DOUBLE,
- "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),
- "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),
- "NUM_DISTINCTS" BIGINT,
- "BIT_VECTOR" BLOB,
- "NUM_NULLS" BIGINT NOT NULL,
- "AVG_COL_LEN" DOUBLE,
- "MAX_COL_LEN" BIGINT,
- "NUM_TRUES" BIGINT,
- "NUM_FALSES" BIGINT,
- "LAST_ANALYZED" BIGINT,
- "CS_ID" BIGINT NOT NULL,
- "PART_ID" BIGINT NOT NULL,
- "ENGINE" VARCHAR(128) NOT NULL
-);
-
-CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
-
-CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
-
-CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
-
-CREATE TABLE "APP"."NOTIFICATION_LOG" (
- "NL_ID" BIGINT NOT NULL,
- "CAT_NAME" VARCHAR(256),
- "DB_NAME" VARCHAR(128),
- "EVENT_ID" BIGINT NOT NULL,
- "EVENT_TIME" INTEGER NOT NULL,
- "EVENT_TYPE" VARCHAR(32) NOT NULL,
- "MESSAGE" CLOB,
- "TBL_NAME" VARCHAR(256),
- "MESSAGE_FORMAT" VARCHAR(16)
-);
-
-CREATE UNIQUE INDEX "APP"."NOTIFICATION_LOG_EVENT_ID" ON "APP"."NOTIFICATION_LOG" ("EVENT_ID");
-
-CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL);
-
-CREATE TABLE "APP"."KEY_CONSTRAINTS" (
- "CHILD_CD_ID" BIGINT,
- "CHILD_INTEGER_IDX" INTEGER,
- "CHILD_TBL_ID" BIGINT,
- "PARENT_CD_ID" BIGINT,
- "PARENT_INTEGER_IDX" INTEGER,
- "PARENT_TBL_ID" BIGINT NOT NULL,
- "POSITION" BIGINT NOT NULL,
- "CONSTRAINT_NAME" VARCHAR(400) NOT NULL,
- "CONSTRAINT_TYPE" SMALLINT NOT NULL,
- "UPDATE_RULE" SMALLINT,
- "DELETE_RULE" SMALLINT,
- "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL,
- "DEFAULT_VALUE" VARCHAR(400)
-);
-
-CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NULL, "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, "DESCRIPTION" VARCHAR(1000));
-
-CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NS VARCHAR(128), NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL, DEFAULT_POOL_ID BIGINT);
-
-CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024));
-
-CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024), IS_IN_UNMANAGED INTEGER NOT NULL DEFAULT 0);
-
-CREATE TABLE "APP"."WM_POOL_TO_TRIGGER" (POOL_ID BIGINT NOT NULL, TRIGGER_ID BIGINT NOT NULL);
-
-CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, ENTITY_TYPE VARCHAR(128) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID BIGINT, ORDERING INTEGER);
-
-CREATE TABLE "APP"."MV_CREATION_METADATA" (
- "MV_CREATION_METADATA_ID" BIGINT NOT NULL,
- "CAT_NAME" VARCHAR(256) NOT NULL,
- "DB_NAME" VARCHAR(128) NOT NULL,
- "TBL_NAME" VARCHAR(256) NOT NULL,
- "TXN_LIST" CLOB,
- "MATERIALIZATION_TIME" BIGINT NOT NULL
-);
-
-CREATE TABLE "APP"."MV_TABLES_USED" (
- "MV_CREATION_METADATA_ID" BIGINT NOT NULL,
- "TBL_ID" BIGINT NOT NULL,
- "INSERTED_COUNT" BIGINT NOT NULL DEFAULT 0,
- "UPDATED_COUNT" BIGINT NOT NULL DEFAULT 0,
- "DELETED_COUNT" BIGINT NOT NULL DEFAULT 0
-);
-
-CREATE TABLE "APP"."CTLGS" (
- "CTLG_ID" BIGINT NOT NULL,
- "NAME" VARCHAR(256) UNIQUE,
- "DESC" VARCHAR(4000),
- "LOCATION_URI" VARCHAR(4000) NOT NULL,
- "CREATE_TIME" INTEGER);
-
--- Insert a default value. The location is TBD. Hive will fix this when it starts
-INSERT INTO "APP"."CTLGS" ("CTLG_ID", "NAME", "DESC", "LOCATION_URI", "CREATE_TIME")
-VALUES (1, 'hive', 'Default catalog for Hive', 'TBD', NULL);
-
--- ----------------------------------------------
--- DML Statements
--- ----------------------------------------------
-
-INSERT INTO "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "APP"."NOTIFICATION_SEQUENCE");
-
-INSERT INTO "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_VAL" FROM "APP"."SEQUENCE_TABLE" WHERE "SEQUENCE_NAME" = 'org.apache.hadoop.hive.metastore.model.MNotificationLog');
-
--- ----------------------------------------------
--- DDL Statements for indexes
--- ----------------------------------------------
-
-CREATE UNIQUE INDEX "APP"."UNIQUEINDEX" ON "APP"."IDXS" ("INDEX_NAME", "ORIG_TBL_ID");
-
-CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("AUTHORIZER", "TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("AUTHORIZER", "DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."DCPRIVILEGEINDEX" ON "APP"."DC_PRIVS" ("AUTHORIZER", "NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DC_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
-
-CREATE INDEX "APP"."TAB_COL_STATS_IDX" ON "APP"."TAB_COL_STATS" ("CAT_NAME", "DB_NAME", "TABLE_NAME", "COLUMN_NAME");
-
-CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("AUTHORIZER", "PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME");
-
-CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("AUTHORIZER", "TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID");
-
-CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME", "CTLG_NAME");
-
-CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("AUTHORIZER", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME");
-
-CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("AUTHORIZER", "PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID");
-
-CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID");
-
-CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID");
-
-CREATE INDEX "APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID");
-
-CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID");
-
-CREATE INDEX "APP"."CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "APP"."KEY_CONSTRAINTS"("CONSTRAINT_TYPE");
-
-CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NS", "NAME");
-
-CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH");
-
-CREATE UNIQUE INDEX "APP"."UNIQUE_WM_TRIGGER" ON "APP"."WM_TRIGGER" ("RP_ID", "NAME");
-
-CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
-
-CREATE UNIQUE INDEX "APP"."MV_UNIQUE_TABLE" ON "APP"."MV_CREATION_METADATA" ("TBL_NAME", "DB_NAME");
-
-CREATE UNIQUE INDEX "APP"."UNIQUE_CATALOG" ON "APP"."CTLGS" ("NAME");
-
-
--- ----------------------------------------------
--- DDL Statements for keys
--- ----------------------------------------------
-
--- primary/unique
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_PK" PRIMARY KEY ("INDEX_ID");
-
-ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
-
-ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID");
-
-ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID");
-
-ALTER TABLE "APP"."DC_PRIVS" ADD CONSTRAINT "DC_PRIVS_PK" PRIMARY KEY ("DC_GRANT_ID");
-
-ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_PK" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
-
-ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME");
-
-ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID");
-
-ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID");
-
-ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID");
-
-ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
-
-ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID");
-
-ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
-
-ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID");
-
-ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID");
-
-ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME");
-
-ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID");
-
-ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID");
-
-ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID");
-
-ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID");
-
-ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY ("TYPES_ID");
-
-ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
-
-ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
-
-ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID");
-
-ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
-
-ALTER TABLE "APP"."SKEWED_STRING_LIST" ADD CONSTRAINT "SKEWED_STRING_LIST_PK" PRIMARY KEY ("STRING_LIST_ID");
-
-ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_PK" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_PK" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
-
-ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_PK" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_PK" PRIMARY KEY ("CS_ID");
-
-ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID");
-
-ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID");
-
-ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX");
-
-ALTER TABLE "APP"."NOTIFICATION_LOG" ADD CONSTRAINT "NOTIFICATION_LOG_PK" PRIMARY KEY ("NL_ID");
-
-ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "NOTIFICATION_SEQUENCE_PK" PRIMARY KEY ("NNI_ID");
-
-ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "ONE_ROW_CONSTRAINT" CHECK (NNI_ID = 1);
-
-ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("PARENT_TBL_ID", "CONSTRAINT_NAME", "POSITION");
-
-ALTER TABLE "APP"."METASTORE_DB_PROPERTIES" ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY");
-
-ALTER TABLE "APP"."MV_CREATION_METADATA" ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID");
-
-ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLG_PK" PRIMARY KEY ("CTLG_ID");
-
-
--- foreign
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK3" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_FK1" FOREIGN KEY ("INDEX_ID") REFERENCES "APP"."IDXS" ("INDEX_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_FK1" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK1" FOREIGN KEY ("SD_ID_OID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK2" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_FK" FOREIGN KEY ("TBL_ID") REFERENCES TBLS("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_FK" FOREIGN KEY ("PART_ID") REFERENCES PARTITIONS("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID");
-
-ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID");
-
-ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID");
-
-ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID");
-
-ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "APP"."WM_TRIGGER" ("TRIGGER_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPPING_ID");
-
-ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_PK" PRIMARY KEY ("TBL_ID", "MV_CREATION_METADATA_ID");
-
-ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES "APP"."MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
-ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_CTLG_FK" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
--- ----------------------------------------------
--- DDL Statements for checks
--- ----------------------------------------------
-
-ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "SQL110318025504980" CHECK (DEFERRED_REBUILD IN ('Y','N'));
-
-ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N'));
-
--- ----------------------------
--- Transaction and Lock Tables
--- ----------------------------
-CREATE TABLE TXNS (
- TXN_ID bigint PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY,
- TXN_STATE char(1) NOT NULL,
- TXN_STARTED bigint NOT NULL,
- TXN_LAST_HEARTBEAT bigint NOT NULL,
- TXN_USER varchar(128) NOT NULL,
- TXN_HOST varchar(128) NOT NULL,
- TXN_AGENT_INFO varchar(128),
- TXN_META_INFO varchar(128),
- TXN_HEARTBEAT_COUNT integer,
- TXN_TYPE integer
-);
-
-INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, TXN_HOST)
- VALUES(0, 'c', 0, 0, '', '');
-
-CREATE TABLE TXN_COMPONENTS (
- TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID),
- TC_DATABASE varchar(128) NOT NULL,
- TC_TABLE varchar(256),
- TC_PARTITION varchar(767),
- TC_OPERATION_TYPE char(1) NOT NULL,
- TC_WRITEID bigint
-);
-
-CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS (
- CTC_TXNID bigint NOT NULL,
- CTC_DATABASE varchar(128) NOT NULL,
- CTC_TABLE varchar(256),
- CTC_PARTITION varchar(767),
- CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
- CTC_WRITEID bigint,
- CTC_UPDATE_DELETE char(1) NOT NULL
-);
-
-CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
-
-CREATE TABLE TXN_LOCK_TBL (
- TXN_LOCK bigint NOT NULL
-);
-INSERT INTO TXN_LOCK_TBL VALUES(1);
-
-CREATE TABLE HIVE_LOCKS (
- HL_LOCK_EXT_ID bigint NOT NULL,
- HL_LOCK_INT_ID bigint NOT NULL,
- HL_TXNID bigint NOT NULL,
- HL_DB varchar(128) NOT NULL,
- HL_TABLE varchar(256),
- HL_PARTITION varchar(767),
- HL_LOCK_STATE char(1) NOT NULL,
- HL_LOCK_TYPE char(1) NOT NULL,
- HL_LAST_HEARTBEAT bigint NOT NULL,
- HL_ACQUIRED_AT bigint,
- HL_USER varchar(128) NOT NULL,
- HL_HOST varchar(128) NOT NULL,
- HL_HEARTBEAT_COUNT integer,
- HL_AGENT_INFO varchar(128),
- HL_BLOCKEDBY_EXT_ID bigint,
- HL_BLOCKEDBY_INT_ID bigint,
- PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
-);
-
-CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
-
-CREATE TABLE NEXT_LOCK_ID (
- NL_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE COMPACTION_QUEUE (
- CQ_ID bigint PRIMARY KEY,
- CQ_DATABASE varchar(128) NOT NULL,
- CQ_TABLE varchar(256) NOT NULL,
- CQ_PARTITION varchar(767),
- CQ_STATE char(1) NOT NULL,
- CQ_TYPE char(1) NOT NULL,
- CQ_TBLPROPERTIES varchar(2048),
- CQ_WORKER_ID varchar(128),
- CQ_ENQUEUE_TIME bigint,
- CQ_START bigint,
- CQ_RUN_AS varchar(128),
- CQ_HIGHEST_WRITE_ID bigint,
- CQ_META_INFO varchar(2048) for bit data,
- CQ_HADOOP_JOB_ID varchar(32),
- CQ_ERROR_MESSAGE clob,
- CQ_NEXT_TXN_ID bigint,
- CQ_TXN_ID bigint,
- CQ_COMMIT_TIME bigint,
- CQ_INITIATOR_ID varchar(128),
- CQ_INITIATOR_VERSION varchar(128),
- CQ_WORKER_VERSION varchar(128),
- CQ_CLEANER_START bigint,
- CQ_RETRY_RETENTION bigint NOT NULL DEFAULT 0
-);
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
- NCQ_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-CREATE TABLE COMPLETED_COMPACTIONS (
- CC_ID bigint PRIMARY KEY,
- CC_DATABASE varchar(128) NOT NULL,
- CC_TABLE varchar(256) NOT NULL,
- CC_PARTITION varchar(767),
- CC_STATE char(1) NOT NULL,
- CC_TYPE char(1) NOT NULL,
- CC_TBLPROPERTIES varchar(2048),
- CC_WORKER_ID varchar(128),
- CC_ENQUEUE_TIME bigint,
- CC_START bigint,
- CC_END bigint,
- CC_RUN_AS varchar(128),
- CC_HIGHEST_WRITE_ID bigint,
- CC_META_INFO varchar(2048) for bit data,
- CC_HADOOP_JOB_ID varchar(32),
- CC_ERROR_MESSAGE clob,
- CC_NEXT_TXN_ID bigint,
- CC_TXN_ID bigint,
- CC_COMMIT_TIME bigint,
- CC_INITIATOR_ID varchar(128),
- CC_INITIATOR_VERSION varchar(128),
- CC_WORKER_VERSION varchar(128)
-);
-
-CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE, CC_TABLE, CC_PARTITION);
-
--- HIVE-25842
-CREATE TABLE COMPACTION_METRICS_CACHE (
- CMC_DATABASE varchar(128) NOT NULL,
- CMC_TABLE varchar(256) NOT NULL,
- CMC_PARTITION varchar(767),
- CMC_METRIC_TYPE varchar(128) NOT NULL,
- CMC_METRIC_VALUE integer NOT NULL,
- CMC_VERSION integer NOT NULL
-);
-
-CREATE TABLE AUX_TABLE (
- MT_KEY1 varchar(128) NOT NULL,
- MT_KEY2 bigint NOT NULL,
- MT_COMMENT varchar(255),
- PRIMARY KEY(MT_KEY1, MT_KEY2)
-);
-
---The first 4 columns make up a PK, but since WS_PARTITION is nullable we can't declare such a PK
---This is a good candidate for an index-organized table
-CREATE TABLE WRITE_SET (
- WS_DATABASE varchar(128) NOT NULL,
- WS_TABLE varchar(256) NOT NULL,
- WS_PARTITION varchar(767),
- WS_TXNID bigint NOT NULL,
- WS_COMMIT_ID bigint NOT NULL,
- WS_OPERATION_TYPE char(1) NOT NULL
-);
-
-CREATE TABLE TXN_TO_WRITE_ID (
- T2W_TXNID bigint NOT NULL,
- T2W_DATABASE varchar(128) NOT NULL,
- T2W_TABLE varchar(256) NOT NULL,
- T2W_WRITEID bigint NOT NULL
-);
-
-CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
-CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
-
-CREATE TABLE NEXT_WRITE_ID (
- NWI_DATABASE varchar(128) NOT NULL,
- NWI_TABLE varchar(256) NOT NULL,
- NWI_NEXT bigint NOT NULL
-);
-
-CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
-
-CREATE TABLE MIN_HISTORY_LEVEL (
- MHL_TXNID bigint NOT NULL,
- MHL_MIN_OPEN_TXNID bigint NOT NULL,
- PRIMARY KEY(MHL_TXNID)
-);
-
-CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
-
-CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
- MRL_TXN_ID BIGINT NOT NULL,
- MRL_DB_NAME VARCHAR(128) NOT NULL,
- MRL_TBL_NAME VARCHAR(256) NOT NULL,
- MRL_LAST_HEARTBEAT BIGINT NOT NULL,
- PRIMARY KEY(MRL_TXN_ID)
-);
-
-CREATE TABLE "APP"."I_SCHEMA" (
- "SCHEMA_ID" bigint primary key,
- "SCHEMA_TYPE" integer not null,
- "NAME" varchar(256) unique,
- "DB_ID" bigint references "APP"."DBS" ("DB_ID"),
- "COMPATIBILITY" integer not null,
- "VALIDATION_LEVEL" integer not null,
- "CAN_EVOLVE" char(1) not null,
- "SCHEMA_GROUP" varchar(256),
- "DESCRIPTION" varchar(4000)
-);
-
-CREATE TABLE "APP"."SCHEMA_VERSION" (
- "SCHEMA_VERSION_ID" bigint primary key,
- "SCHEMA_ID" bigint references "APP"."I_SCHEMA" ("SCHEMA_ID"),
- "VERSION" integer not null,
- "CREATED_AT" bigint not null,
- "CD_ID" bigint references "APP"."CDS" ("CD_ID"),
- "STATE" integer not null,
- "DESCRIPTION" varchar(4000),
- "SCHEMA_TEXT" clob,
- "FINGERPRINT" varchar(256),
- "SCHEMA_VERSION_NAME" varchar(256),
- "SERDE_ID" bigint references "APP"."SERDES" ("SERDE_ID")
-);
-
-CREATE UNIQUE INDEX "APP"."UNIQUE_SCHEMA_VERSION" ON "APP"."SCHEMA_VERSION" ("SCHEMA_ID", "VERSION");
-
-CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
- WNL_ID bigint NOT NULL,
- WNL_TXNID bigint NOT NULL,
- WNL_WRITEID bigint NOT NULL,
- WNL_DATABASE varchar(128) NOT NULL,
- WNL_TABLE varchar(256) NOT NULL,
- WNL_PARTITION varchar(767) NOT NULL,
- WNL_TABLE_OBJ clob NOT NULL,
- WNL_PARTITION_OBJ clob,
- WNL_FILES clob,
- WNL_EVENT_TIME integer NOT NULL,
- PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION)
-);
-INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0-alpha-2', 'Hive release version 4.0.0-alpha-2');
diff --git a/nifi-extension-bundles/nifi-iceberg-bundle/pom.xml b/nifi-extension-bundles/nifi-iceberg-bundle/pom.xml
index db24c405640b..f8ddc4ded25a 100644
--- a/nifi-extension-bundles/nifi-iceberg-bundle/pom.xml
+++ b/nifi-extension-bundles/nifi-iceberg-bundle/pom.xml
@@ -27,7 +27,6 @@
1.6.1
- 3.1.3
@@ -38,7 +37,6 @@
nifi-iceberg-processors
nifi-iceberg-processors-nar
nifi-iceberg-common
- nifi-iceberg-test-utils
@@ -162,12 +160,6 @@
guava
33.3.1-jre
-
-
- org.codehaus.groovy
- groovy-all
- 2.4.21
-