Commit 2a5cc01

HIVE-28956 : Implement DirectSql for alter table add column cascade command.
1 parent b9449b3 commit 2a5cc01

File tree

1 file changed: +33 -15 lines
  • standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore

standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java

Lines changed: 33 additions & 15 deletions
@@ -409,23 +409,41 @@ public List<Void> run(List<Partition> input) throws Exception {
       if (runPartitionMetadataUpdate) {
         if (cascade || retainOnColRemoval) {
           parts = msdb.getPartitions(catName, dbname, name, -1);
-          for (Partition part : parts) {
-            Partition oldPart = new Partition(part);
-            List<FieldSchema> oldCols = part.getSd().getCols();
-            part.getSd().setCols(newt.getSd().getCols());
-            List<ColumnStatistics> colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name,
-                part.getValues(), oldCols, oldt, part, null, null);
-            assert (colStats.isEmpty());
-            Deadline.checkTimeout();
-            if (cascade) {
-              msdb.alterPartition(
-                  catName, dbname, name, part.getValues(), part, writeIdList);
-            } else {
+          String catalogName = catName;
+          String databaseName = dbname;
+          String tableName = name;
+          Table finalOldt = oldt;
+          int partitionBatchSize = MetastoreConf.getIntVar(handler.getConf(),
+              MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX);
+          Batchable.runBatched(partitionBatchSize, parts, new Batchable<Partition, Void>() {
+            @Override
+            public List<Void> run(List<Partition> input) throws Exception {
+              List<Partition> oldParts = new ArrayList<>(input.size());
+              List<List<String>> partVals = input.stream().map(Partition::getValues).collect(Collectors.toList());
               // update changed properties (stats)
-              oldPart.setParameters(part.getParameters());
-              msdb.alterPartition(catName, dbname, name, part.getValues(), oldPart, writeIdList);
+              for (Partition part : input) {
+                Partition oldPart = new Partition(part);
+                List<FieldSchema> oldCols = part.getSd().getCols();
+                part.getSd().setCols(newt.getSd().getCols());
+                List<ColumnStatistics> colStats = updateOrGetPartitionColumnStats(msdb, catalogName, databaseName,
+                    tableName, part.getValues(), oldCols, finalOldt, part, null, null);
+                assert (colStats.isEmpty());
+                if (!cascade) {
+                  oldPart.setParameters(part.getParameters());
+                  oldParts.add(oldPart);
+                }
+              }
+              Deadline.checkTimeout();
+              if (cascade) {
+                msdb.alterPartitions(catalogName, databaseName, tableName, partVals, input, newt.getWriteId(),
+                    writeIdList);
+              } else {
+                msdb.alterPartitions(catalogName, newDbName, newTblName, partVals, oldParts, newt.getWriteId(),
+                    writeIdList);
+              }
+              return Collections.emptyList();
             }
-          }
+          });
         } else {
           // clear all column stats to prevent incorract behaviour in case same column is reintroduced
           TableName tableName = new TableName(catName, dbname, name);