Commit db2dd7b

HIVE-28956: Implement DirectSql for alter table add column cascade command.
1 parent b9449b3 commit db2dd7b

1 file changed: 32 additions, 15 deletions

standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java

Lines changed: 32 additions & 15 deletions
@@ -409,23 +409,40 @@ public List<Void> run(List<Partition> input) throws Exception {
       if (runPartitionMetadataUpdate) {
         if (cascade || retainOnColRemoval) {
           parts = msdb.getPartitions(catName, dbname, name, -1);
-          for (Partition part : parts) {
-            Partition oldPart = new Partition(part);
-            List<FieldSchema> oldCols = part.getSd().getCols();
-            part.getSd().setCols(newt.getSd().getCols());
-            List<ColumnStatistics> colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name,
-                part.getValues(), oldCols, oldt, part, null, null);
-            assert (colStats.isEmpty());
-            Deadline.checkTimeout();
-            if (cascade) {
-              msdb.alterPartition(
-                  catName, dbname, name, part.getValues(), part, writeIdList);
-            } else {
+          String catalogName = catName;
+          String databaseName = dbname;
+          String tableName = name;
+          Table finalOldt = oldt;
+          int partitionBatchSize = MetastoreConf.getIntVar(handler.getConf(),
+              MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX);
+          Batchable.runBatched(partitionBatchSize, parts, new Batchable<Partition, Void>() {
+            @Override
+            public List<Void> run(List<Partition> input) throws Exception {
+              Deadline.checkTimeout();
+              List<Partition> oldParts = new ArrayList<>(input.size());
+              List<List<String>> partVals = input.stream().map(Partition::getValues).collect(Collectors.toList());
               // update changed properties (stats)
-              oldPart.setParameters(part.getParameters());
-              msdb.alterPartition(catName, dbname, name, part.getValues(), oldPart, writeIdList);
+              for (Partition part : input) {
+                Partition oldPart = new Partition(part);
+                List<FieldSchema> oldCols = part.getSd().getCols();
+                part.getSd().setCols(newt.getSd().getCols());
+                List<ColumnStatistics> colStats = updateOrGetPartitionColumnStats(msdb, catalogName, databaseName,
+                    tableName, part.getValues(), oldCols, finalOldt, part, null, null);
+                assert (colStats.isEmpty());
+                oldPart.setParameters(part.getParameters());
+                oldParts.add(oldPart);
+              }
+              Deadline.checkTimeout();
+              if (cascade) {
+                msdb.alterPartitions(catalogName, databaseName, tableName, partVals, input, newt.getWriteId(),
+                    writeIdList);
+              } else {
+                msdb.alterPartitions(catalogName, newDbName, newTblName, partVals, oldParts, newt.getWriteId(),
+                    writeIdList);
+              }
+              return Collections.emptyList();
             }
-          }
+          });
         } else {
           // clear all column stats to prevent incorract behaviour in case same column is reintroduced
           TableName tableName = new TableName(catName, dbname, name);
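
What the diff does, in short: instead of issuing one msdb.alterPartition(...) call per partition inside a loop, the handler now walks the partitions in batches (batch size taken from MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX) and issues a single msdb.alterPartitions(...) call per batch, which is presumably what lets the metastore take the DirectSql path named in the HIVE-28956 title rather than one JDO round trip per partition. Below is a minimal, self-contained sketch of that batching pattern. It deliberately avoids Hive classes; the BatchingSketch class and its runBatched helper are hypothetical stand-ins for org.apache.hadoop.hive.metastore.Batchable.runBatched as used in the diff.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;

public class BatchingSketch {

  // Hypothetical stand-in for Batchable.runBatched: split the input into
  // fixed-size chunks and hand each chunk to the callback, so a bulk
  // operation (such as alterPartitions) runs once per chunk instead of
  // once per element.
  static <I, R> List<R> runBatched(int batchSize, List<I> input, Function<List<I>, List<R>> op) {
    List<R> results = new ArrayList<>();
    for (int start = 0; start < input.size(); start += batchSize) {
      List<I> chunk = input.subList(start, Math.min(start + batchSize, input.size()));
      results.addAll(op.apply(chunk)); // one bulk call per chunk
    }
    return results;
  }

  public static void main(String[] args) {
    List<String> partitions = Arrays.asList("p=1", "p=2", "p=3", "p=4", "p=5");
    // With a batch size of 2 the callback runs three times:
    // [p=1, p=2], [p=3, p=4], [p=5].
    List<Integer> batchSizes = runBatched(2, partitions, chunk -> {
      System.out.println("alterPartitions(" + chunk + ")"); // placeholder for msdb.alterPartitions(...)
      return Collections.singletonList(chunk.size());
    });
    System.out.println("batch sizes: " + batchSizes); // prints: batch sizes: [2, 2, 1]
  }
}

Running the sketch prints one alterPartitions(...) line per chunk rather than one per partition, mirroring how the change reduces metastore update calls from one per partition to one per batch.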
