
Commit 81c02a7

HIVE-28972: HMS performance degradation post HIVE-28909 for alter query (#5835)
1 parent de994e5 commit 81c02a7

File tree

4 files changed, +11 -17 lines changed


standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java

Lines changed: 3 additions & 2 deletions
@@ -407,6 +407,9 @@ public List<Void> run(List<Partition> input) throws Exception {
         MetastoreConf.getBoolVar(handler.getConf(), MetastoreConf.ConfVars.COLSTATS_RETAIN_ON_COLUMN_REMOVAL);

     if (runPartitionMetadataUpdate) {
+      // Don't validate table-level stats for a partitoned table.
+      msdb.alterTable(catName, dbname, name, newt, null);
+
       if (cascade || retainOnColRemoval) {
         parts = msdb.getPartitions(catName, dbname, name, -1);
         for (Partition part : parts) {
@@ -431,8 +434,6 @@ public List<Void> run(List<Partition> input) throws Exception {
           TableName tableName = new TableName(catName, dbname, name);
           msdb.deleteAllPartitionColumnStatistics(tableName, writeIdList);
         }
-      // Don't validate table-level stats for a partitoned table.
-      msdb.alterTable(catName, dbname, name, newt, null);
     } else {
       LOG.warn("Alter table not cascaded to partitions.");
       msdb.alterTable(catName, dbname, name, newt, writeIdList);
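
The net effect of this hunk pair is that the table-level msdb.alterTable(..., null) call now runs at the top of the runPartitionMetadataUpdate branch, before the per-partition column-statistics handling, rather than after it. Below is a minimal, self-contained sketch of that reordered flow; the Store interface and Object placeholders are illustrative stand-ins only, not the real RawStore/HiveAlterHandler API.

// Minimal sketch of the reordered control flow. "Store" and the Object placeholders are
// simplified stand-ins for illustration, not the real metastore RawStore API.
import java.util.List;

public class AlterFlowSketch {

  interface Store {
    // Passing a null writeIdList is what the diff's comment describes as skipping
    // table-level stats validation for the partitioned table.
    void alterTable(String db, String tbl, Object newTable, String writeIdList);
    List<Object> getPartitions(String db, String tbl, int max);
    void deleteAllPartitionColumnStatistics(String db, String tbl, String writeIdList);
  }

  static void alterPartitionedTable(Store store, String db, String tbl, Object newTable,
      boolean cascade, boolean retainOnColRemoval, String writeIdList) {
    // After this commit the table-level alter happens first ...
    store.alterTable(db, tbl, newTable, null);

    // ... and the per-partition column-statistics work (heavily elided here) runs afterwards.
    if (cascade || retainOnColRemoval) {
      List<Object> parts = store.getPartitions(db, tbl, -1);
      for (Object part : parts) {
        // per-partition stats handling would go here
      }
      store.deleteAllPartitionColumnStatistics(db, tbl, writeIdList);
    }
  }
}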

standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java

Lines changed: 5 additions & 10 deletions
@@ -2715,7 +2715,7 @@ private void addPartitionsInternal(String catName, String dbName,
         throw new MetaException("Partition does not belong to target table "
             + dbName + "." + tblName + ": " + part);
       }
-      MPartition mpart = convertToMPart(part, table, true);
+      MPartition mpart = convertToMPart(part, table);
       mParts.add(mpart);
       int now = (int) (System.currentTimeMillis() / 1000);
       List<MPartitionPrivilege> mPartPrivileges = new ArrayList<>();
@@ -2817,7 +2817,7 @@ public boolean addPartitions(String catName, String dbName, String tblName,
         Partition part = iterator.next();

         if (isValidPartition(part, partitionKeys, ifNotExists)) {
-          MPartition mpart = convertToMPart(part, table, true);
+          MPartition mpart = convertToMPart(part, table);
           pm.makePersistent(mpart);
           if (tabGrants != null) {
             for (MTablePrivilege tab : tabGrants) {
@@ -3013,10 +3013,9 @@ private MPartition getMPartition(String catName, String dbName, String tableName
   * to the same one as the table's storage descriptor.
   * @param part the partition to convert
   * @param mt the parent table object
-  * @param useTableCD whether to try to use the parent table's column descriptor.
   * @return the model partition object, and null if the input partition is null.
   */
-  private MPartition convertToMPart(Partition part, MTable mt, boolean useTableCD)
+  private MPartition convertToMPart(Partition part, MTable mt)
      throws InvalidObjectException, MetaException {
    // NOTE: we don't set writeId in this method. Write ID is only set after validating the
    // existing write ID against the caller's valid list.
@@ -3032,8 +3031,7 @@ private MPartition convertToMPart(Partition part, MTable mt, boolean useTableCD)
    // use the parent table's, so we do not create a duplicate column descriptor,
    // thereby saving space
    MStorageDescriptor msd;
-    if (useTableCD &&
-        mt.getSd() != null && mt.getSd().getCD() != null &&
+    if (mt.getSd() != null && mt.getSd().getCD() != null &&
        mt.getSd().getCD().getCols() != null &&
        part.getSd() != null &&
        convertToFieldSchemas(mt.getSd().getCD().getCols()).
@@ -5188,7 +5186,7 @@ private Partition alterPartitionNoTxn(String catName, String dbname,
    catName = normalizeIdentifier(catName);
    name = normalizeIdentifier(name);
    dbname = normalizeIdentifier(dbname);
-    MPartition newp = convertToMPart(newPart, table, false);
+    MPartition newp = convertToMPart(newPart, table);
    MColumnDescriptor oldCD = null;
    MStorageDescriptor oldSD = oldp.getSd();
    if (oldSD != null) {
@@ -5248,9 +5246,6 @@ public Partition alterPartition(String catName, String dbname, String name, List
    Partition result = null;
    try {
      openTransaction();
-      if (newPart.isSetWriteId()) {
-        LOG.warn("Alter partitions with write ID called without transaction information");
-      }
      Ref<MColumnDescriptor> oldCd = new Ref<>();
      result = alterPartitionNoTxn(catName, dbname, name, part_vals, newPart, validWriteIds, oldCd);
      removeUnusedColumnDescriptor(oldCd.t);
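
With the useTableCD flag gone, convertToMPart now always tries to reuse the parent table's column descriptor when the partition's columns match it, including on the alterPartitionNoTxn path that previously passed false. The sketch below illustrates that descriptor-sharing check with simplified stand-in classes (Column, ColumnDescriptor, TableModel, PartitionModel); it is not the actual ObjectStore code.

// Illustration of "share the table's column descriptor when the partition has the same
// columns". All types here are simplified stand-ins, not Hive's JDO model classes.
import java.util.List;
import java.util.Objects;

public class DescriptorSharingSketch {

  record Column(String name, String type) {}
  record ColumnDescriptor(List<Column> cols) {}

  static class TableModel {
    final ColumnDescriptor cd;
    TableModel(ColumnDescriptor cd) { this.cd = cd; }
  }

  static class PartitionModel {
    final List<Column> cols;
    ColumnDescriptor cd; // points at the table's descriptor when the schemas match
    PartitionModel(List<Column> cols) { this.cols = cols; }
  }

  // Post-HIVE-28972 there is no useTableCD opt-out: the sharing check always runs.
  static void attachDescriptor(TableModel table, PartitionModel part) {
    if (table.cd != null && table.cd.cols() != null
        && Objects.equals(table.cd.cols(), part.cols)) {
      part.cd = table.cd;                        // reuse: no duplicate descriptor row
    } else {
      part.cd = new ColumnDescriptor(part.cols); // different schema: own descriptor
    }
  }
}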

standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MColumn.java

Lines changed: 2 additions & 1 deletion
@@ -62,7 +62,8 @@ public boolean equals(Object other) {
   public MColumn() {
   }

-  public MColumn(String name, String type, String comment) {
+  public MColumn(MColumnDescriptor cd, String name, String type, String comment) {
+    this.cd = cd;
     this.name = name;
     this.type = type;
     this.comment = comment;
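
The extra constructor argument means a column now knows its owning descriptor from the moment it is created, instead of relying on the cd field being populated later. A stripped-down stand-in for the changed constructor follows (plain fields, no JDO annotations; ColumnStub and ColumnDescriptorStub are hypothetical names, not the real model classes).

// Hypothetical stand-in for the changed constructor: the owning descriptor is part of the
// column's state from construction. ColumnDescriptorStub is a placeholder for MColumnDescriptor.
public class ColumnStub {
  private final ColumnDescriptorStub cd; // back-reference to the owning descriptor
  private final String name;
  private final String type;
  private final String comment;

  public ColumnStub(ColumnDescriptorStub cd, String name, String type, String comment) {
    this.cd = cd;
    this.name = name;
    this.type = type;
    this.comment = comment;
  }

  public ColumnDescriptorStub getCd() { return cd; }
}

class ColumnDescriptorStub {}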

standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MColumnDescriptor.java

Lines changed: 1 addition & 4 deletions
@@ -16,9 +16,6 @@
  * limitations under the License.
  */

-/**
- *
- */
 package org.apache.hadoop.hive.metastore.model;

 import javax.jdo.annotations.NotPersistent;
@@ -42,7 +39,7 @@ public MColumnDescriptor() {}

   public MColumnDescriptor(List<MFieldSchema> cols) {
     fields = cols.stream().map(schema ->
-        new MColumn(schema.getName(), schema.getType(), schema.getComment()))
+        new MColumn(this, schema.getName(), schema.getType(), schema.getComment()))
         .collect(Collectors.toList());
   }
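
On the descriptor side, the constructor now hands this to each MColumn it builds, so the column-to-descriptor back-reference is wired up in a single pass over the field schemas. Below is a hedged, self-contained sketch of that pattern with a small usage check; the Stub types are placeholders, not the real metastore model.

// Self-contained sketch of the descriptor constructor wiring each child column back to
// "this". FieldSchemaStub, ColumnStub and DescriptorStub are placeholders, not Hive classes.
import java.util.List;
import java.util.stream.Collectors;

public class DescriptorWiringSketch {

  record FieldSchemaStub(String name, String type, String comment) {}

  static class ColumnStub {
    final DescriptorStub cd;
    final String name;
    ColumnStub(DescriptorStub cd, String name, String type, String comment) {
      this.cd = cd;
      this.name = name;
    }
  }

  static class DescriptorStub {
    final List<ColumnStub> fields;
    DescriptorStub(List<FieldSchemaStub> cols) {
      // Mirrors the diff: each column is created with a reference to this descriptor.
      fields = cols.stream()
          .map(s -> new ColumnStub(this, s.name(), s.type(), s.comment()))
          .collect(Collectors.toList());
    }
  }

  public static void main(String[] args) {
    DescriptorStub d = new DescriptorStub(List.of(new FieldSchemaStub("id", "int", null)));
    System.out.println(d.fields.get(0).cd == d); // prints "true": the back-reference is set
  }
}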
