
Commit 06f49e6
Merge branch 'master' into HBASE-29689
2 parents: 9544acd + d41d5c1

31 files changed: +831 -152 lines


hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/RpcThrottlingException.java

Lines changed: 11 additions & 0 deletions
@@ -205,4 +205,15 @@ protected static long timeFromString(String timeDiff) {
     }
     return -1;
   }
+
+  /**
+   * There is little value in an RpcThrottlingException having a stack trace, since its cause is
+   * well understood without one. When a RegionServer is under heavy load and needs to serve many
+   * RpcThrottlingExceptions, skipping fillInStackTrace() will save CPU time and allocations, both
+   * here and later when the exception must be serialized over the wire.
+   */
+  @Override
+  public synchronized Throwable fillInStackTrace() {
+    return this;
+  }
 }
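The idiom added above generalizes to any exception that is thrown often enough for stack-trace capture to dominate its cost. A minimal sketch, not HBase code (the class name is made up):

// Hypothetical example of a "cheap" exception: construction skips stack-trace capture,
// which is usually the most expensive part of creating and serializing an exception.
public class QuotaExceededFastException extends Exception {

  public QuotaExceededFastException(String msg) {
    // Alternatively, Java 7+ offers the protected Throwable constructor with
    // writableStackTrace=false, which achieves the same effect at construction time.
    super(msg);
  }

  // Returning 'this' without calling super.fillInStackTrace() means the stack is never
  // walked, so the exception costs little more than a plain object allocation.
  @Override
  public synchronized Throwable fillInStackTrace() {
    return this;
  }
}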

hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java

Lines changed: 31 additions & 3 deletions
@@ -187,11 +187,39 @@ public static int getDefaultBufferSize(final FileSystem fs) {
    */
   public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm,
     boolean overwrite) throws IOException {
+    return create(fs, path, perm, overwrite, true);
+  }
+
+  /**
+   * Create the specified file on the filesystem. By default, this will:
+   * <ol>
+   * <li>apply the umask in the configuration (if it is enabled)</li>
+   * <li>use the fs configured buffer size (or 4096 if not set)</li>
+   * <li>use the default replication</li>
+   * <li>use the default block size</li>
+   * <li>not track progress</li>
+   * </ol>
+   * @param fs {@link FileSystem} on which to write the file
+   * @param path {@link Path} to the file to write
+   * @param perm initial permissions
+   * @param overwrite Whether or not the created file should be overwritten.
+   * @param isRecursiveCreate recursively create parent directories
+   * @return output stream to the created file
+   * @throws IOException if the file cannot be created
+   */
+  public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm,
+    boolean overwrite, boolean isRecursiveCreate) throws IOException {
     if (LOG.isTraceEnabled()) {
-      LOG.trace("Creating file={} with permission={}, overwrite={}", path, perm, overwrite);
+      LOG.trace("Creating file={} with permission={}, overwrite={}, recursive={}", path, perm,
+        overwrite, isRecursiveCreate);
+    }
+    if (isRecursiveCreate) {
+      return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
+        getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
+    } else {
+      return fs.createNonRecursive(path, perm, overwrite, getDefaultBufferSize(fs),
+        getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
     }
-    return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
-      getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
   }
 
   /**
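A rough usage sketch of the new overload (the path, permission and payload are made-up values; only CommonFSUtils.create comes from the diff above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.util.CommonFSUtils;

// Illustrative only: demonstrates the difference the new flag makes.
public class NonRecursiveCreateExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/example/data.bin");
    FsPermission perm = new FsPermission((short) 0644);
    // isRecursiveCreate=false routes to FileSystem.createNonRecursive(), so this call
    // fails if /tmp/example does not already exist instead of creating it implicitly.
    try (FSDataOutputStream out = CommonFSUtils.create(fs, file, perm, true, false)) {
      out.writeUTF("hello");
    }
  }
}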

hbase-it/pom.xml

Lines changed: 1 addition & 1 deletion
@@ -296,7 +296,7 @@
         <artifactId>maven-failsafe-plugin</artifactId>
         <configuration>
           <skip>false</skip>
-          <forkMode>always</forkMode>
+          <reuseForks>false</reuseForks>
           <!-- TODO: failsafe does timeout, but verify does not fail the build because of the timeout.
             I believe it is a failsafe bug, we may consider using surefire -->
           <forkedProcessTimeoutInSeconds>1800</forkedProcessTimeoutInSeconds>

hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java

Lines changed: 29 additions & 2 deletions
@@ -23,6 +23,7 @@
 import static org.apache.hadoop.hbase.regionserver.HStoreFile.MAJOR_COMPACTION_KEY;
 
 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
 import java.net.URLDecoder;
@@ -590,7 +591,7 @@ private static void writePartitions(Configuration conf, Path partitionsPath,
   public static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator)
     throws IOException {
     configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
-    configureRemoteCluster(job, table.getConfiguration());
+    configureForRemoteCluster(job, table.getConfiguration());
   }
 
   /**
@@ -776,8 +777,34 @@ public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDes
    * @see #REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY
    * @see #REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY
    * @see #REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY
+   * @deprecated As of release 2.6.4, this will be removed in HBase 4.0.0. Use
+   *             {@link #configureForRemoteCluster(Job, Configuration)} instead.
    */
-  public static void configureRemoteCluster(Job job, Configuration clusterConf) throws IOException {
+  @Deprecated
+  public static void configureRemoteCluster(Job job, Configuration clusterConf) {
+    try {
+      configureForRemoteCluster(job, clusterConf);
+    } catch (IOException e) {
+      LOG.error("Configure remote cluster error.", e);
+      throw new UncheckedIOException("Configure remote cluster error.", e);
+    }
+  }
+
+  /**
+   * Configure the HBase cluster key for a remote cluster, used to load region locations when
+   * locality-sensitive mode is enabled. It's not necessary to call this method explicitly when the
+   * cluster key for the HBase cluster used to load region locations is already configured in the
+   * job configuration. Call this method when another HBase cluster key is configured in the job
+   * configuration. For example, you should call it when you load data from HBase cluster A using
+   * {@link TableInputFormat} and generate hfiles for HBase cluster B. Otherwise, HFileOutputFormat2
+   * fetches locations from cluster A and locality-sensitive mode won't work correctly. If
+   * authentication is enabled, it obtains the token for the specific cluster.
+   * @param job which has configuration to be updated
+   * @param clusterConf which contains the cluster key of the HBase cluster to be locality-sensitive
+   * @throws IOException Exception while initializing cluster credentials
+   */
+  public static void configureForRemoteCluster(Job job, Configuration clusterConf)
+    throws IOException {
     Configuration conf = job.getConfiguration();
 
     if (!conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {
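A migration sketch for callers of the deprecated method (the job name and ZooKeeper quorum are hypothetical; only the two HFileOutputFormat2 methods come from the diff above):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;

public class RemoteClusterConfigExample {
  public static void main(String[] args) throws IOException {
    Job job = Job.getInstance(HBaseConfiguration.create(), "bulkload-for-cluster-B");
    // Hypothetical configuration pointing at the remote ("B") cluster's ZooKeeper quorum.
    Configuration clusterBConf = HBaseConfiguration.create();
    clusterBConf.set("hbase.zookeeper.quorum", "zk-b1,zk-b2,zk-b3");

    // Old call, now deprecated: any IOException is rethrown as UncheckedIOException.
    // HFileOutputFormat2.configureRemoteCluster(job, clusterBConf);

    // New call: declares IOException so callers can handle credential failures directly.
    HFileOutputFormat2.configureForRemoteCluster(job, clusterBConf);
  }
}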

hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java

Lines changed: 101 additions & 0 deletions
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hbase.client.TestTableSnapshotScanner;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat.TableSnapshotRegionSplit;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
@@ -582,4 +583,104 @@ public void testCleanRestoreDir() throws Exception {
     TableSnapshotInputFormat.cleanRestoreDir(job, snapshotName);
     Assert.assertFalse(fs.exists(restorePath));
   }
+
+  /**
+   * Test that explicitly restores a snapshot to a temp directory and reads the restored regions
+   * via ClientSideRegionScanner through a MapReduce job.
+   * <p>
+   * This test verifies the full workflow: 1. Create and load a table with data 2. Create a snapshot
+   * and restore the snapshot to a temporary directory 3. Configure a job to read the restored
+   * regions via ClientSideRegionScanner using TableSnapshotInputFormat and verify that it succeeds
+   * 4. Delete restored temporary directory 5. Configure a new job and verify that it fails
+   */
+  @Test
+  public void testReadFromRestoredSnapshotViaMR() throws Exception {
+    final TableName tableName = TableName.valueOf(name.getMethodName());
+    final String snapshotName = tableName + "_snapshot";
+    try {
+      if (UTIL.getAdmin().tableExists(tableName)) {
+        UTIL.deleteTable(tableName);
+      }
+      UTIL.createTable(tableName, FAMILIES, new byte[][] { bbb, yyy });
+
+      Admin admin = UTIL.getAdmin();
+      int regionNum = admin.getRegions(tableName).size();
+      LOG.info("Created table with {} regions", regionNum);
+
+      Table table = UTIL.getConnection().getTable(tableName);
+      UTIL.loadTable(table, FAMILIES);
+      table.close();
+
+      Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration());
+      FileSystem fs = rootDir.getFileSystem(UTIL.getConfiguration());
+      SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES),
+        null, snapshotName, rootDir, fs, true);
+      Path tempRestoreDir = UTIL.getDataTestDirOnTestFS("restore_" + snapshotName);
+      RestoreSnapshotHelper.copySnapshotForScanner(UTIL.getConfiguration(), fs, rootDir,
+        tempRestoreDir, snapshotName);
+      Assert.assertTrue("Restore directory should exist", fs.exists(tempRestoreDir));
+
+      Job job = Job.getInstance(UTIL.getConfiguration());
+      job.setJarByClass(TestTableSnapshotInputFormat.class);
+      TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
+        TestTableSnapshotInputFormat.class);
+      Scan scan = new Scan().withStartRow(getStartRow()).withStopRow(getEndRow());
+      Configuration conf = job.getConfiguration();
+      conf.set("hbase.TableSnapshotInputFormat.snapshot.name", snapshotName);
+      conf.set("hbase.TableSnapshotInputFormat.restore.dir", tempRestoreDir.toString());
+      conf.setInt("hbase.mapreduce.splits.per.region", 1);
+      job.setReducerClass(TestTableSnapshotReducer.class);
+      job.setNumReduceTasks(1);
+      job.setOutputFormatClass(NullOutputFormat.class);
+      TableMapReduceUtil.initTableMapperJob(snapshotName, // table name (snapshot name in this case)
+        scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job,
+        false, false, TableSnapshotInputFormat.class);
+      TableMapReduceUtil.resetCacheConfig(conf);
+      Assert.assertTrue(job.waitForCompletion(true));
+      Assert.assertTrue(job.isSuccessful());
+
+      // Now verify that job fails when restore directory is deleted
+      Assert.assertTrue(fs.delete(tempRestoreDir, true));
+      Assert.assertFalse("Restore directory should not exist after deletion",
+        fs.exists(tempRestoreDir));
+      Job failureJob = Job.getInstance(UTIL.getConfiguration());
+      failureJob.setJarByClass(TestTableSnapshotInputFormat.class);
+      TableMapReduceUtil.addDependencyJarsForClasses(failureJob.getConfiguration(),
+        TestTableSnapshotInputFormat.class);
+      Configuration failureConf = failureJob.getConfiguration();
+      // Configure job to use the deleted restore directory
+      failureConf.set("hbase.TableSnapshotInputFormat.snapshot.name", snapshotName);
+      failureConf.set("hbase.TableSnapshotInputFormat.restore.dir", tempRestoreDir.toString());
+      failureConf.setInt("hbase.mapreduce.splits.per.region", 1);
+      failureJob.setReducerClass(TestTableSnapshotReducer.class);
+      failureJob.setNumReduceTasks(1);
+      failureJob.setOutputFormatClass(NullOutputFormat.class);
+
+      TableMapReduceUtil.initTableMapperJob(snapshotName, scan, TestTableSnapshotMapper.class,
+        ImmutableBytesWritable.class, NullWritable.class, failureJob, false, false,
+        TableSnapshotInputFormat.class);
+      TableMapReduceUtil.resetCacheConfig(failureConf);
+
+      Assert.assertFalse("Restore directory should not exist before job execution",
+        fs.exists(tempRestoreDir));
+      failureJob.waitForCompletion(true);
+
+      Assert.assertFalse("Job should fail since the restored snapshot directory is deleted",
+        failureJob.isSuccessful());
+
+    } finally {
+      try {
+        if (UTIL.getAdmin().tableExists(tableName)) {
+          UTIL.deleteTable(tableName);
+        }
+      } catch (Exception e) {
+        LOG.warn("Error deleting table", e);
+      }
+      try {
+        UTIL.getAdmin().deleteSnapshot(snapshotName);
+      } catch (Exception e) {
+        LOG.warn("Error deleting snapshot", e);
+      }
+    }
+  }
 }

hbase-protocol-shaded/README.txt

Lines changed: 6 additions & 6 deletions
@@ -1,9 +1,9 @@
-This module has proto files used by core. These protos
-overlap with protos that are used by coprocessor endpoints
-(CPEP) in the module hbase-protocol. So core versions have
-a different name, the generated classes are relocated
--- i.e. shaded -- to a new location; they are moved from
-org.apache.hadoop.hbase.* to org.apache.hadoop.hbase.shaded.
+This module has proto files used by core.
+For historical reasons and to signify that the generated classes are using
+the relocated hbase-thirdparty protobuf-java library the generated classes are in
+the org.apache.hadoop.hbase.shaded.protobuf.generated.* package, instead of the old
+org.apache.hadoop.hbase.protobuf.generated.* package, which is not used at all by
+HBase 3 and later versions.
 
 proto files layout:
 protobuf/client - client to server messages, client rpc service and protos, used in hbase-client exclusively;

hbase-protocol-shaded/pom.xml

Lines changed: 0 additions & 50 deletions
@@ -108,61 +108,11 @@
       of com.google.protobuf so instead its o.a.h.h.com.google.protobuf.
       Plugin is old and in google code archive. Here is usage done by
       anohther: https://github.com/beiliubei/maven-replacer-plugin/wiki/Usage-Guide
-      The mess with the regex in the below is to prevent replacement every time
-      we run mvn install. There is probably a better way of avoiding the
-      double interpolation but this is it for now.
       -->
       <plugin>
         <groupId>com.google.code.maven-replacer-plugin</groupId>
         <artifactId>replacer</artifactId>
       </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-shade-plugin</artifactId>
-        <version>3.4.1</version>
-        <executions>
-          <execution>
-            <goals>
-              <goal>shade</goal>
-            </goals>
-            <phase>package</phase>
-            <configuration>
-              <minimizeJar>true</minimizeJar>
-              <shadeSourcesContent>true</shadeSourcesContent>
-              <!-- Causes an NPE until shade 3.0.1. See MSHADE-247
-              <createSourcesJar>true</createSourcesJar>
-              -->
-              <relocations>
-                <relocation>
-                  <pattern>com.google.protobuf</pattern>
-                  <shadedPattern>org.apache.hadoop.hbase.shaded.com.google.protobuf</shadedPattern>
-                </relocation>
-              </relocations>
-              <artifactSet>
-                <excludes>
-                  <!-- exclude J2EE modules that come in for JDK11+ or modules that come in for
-                    JDK8+ but need not be included -->
-                  <exclude>javax.annotation:javax.annotation-api</exclude>
-                  <!--Exclude protobuf itself. We get a patched version from hbase-thirdparty.
-                  -->
-                  <exclude>org.apache.hbase.thirdparty:*</exclude>
-                  <exclude>com.google.protobuf:protobuf-java</exclude>
-                  <exclude>com.google.code.findbugs:*</exclude>
-                  <exclude>com.google.j2objc:j2objc-annotations</exclude>
-                  <exclude>org.codehaus.mojo:animal-sniffer-annotations</exclude>
-                  <exclude>junit:junit</exclude>
-                  <exclude>log4j:log4j</exclude>
-                  <exclude>commons-logging:commons-logging</exclude>
-                  <exclude>org.slf4j:slf4j-api</exclude>
-                  <exclude>org.apache.yetus:audience-annotations</exclude>
-                  <exclude>com.github.stephenc.fingbugs:*</exclude>
-                  <exclude>com.github.spotbugs:*</exclude>
-                </excludes>
-              </artifactSet>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-checkstyle-plugin</artifactId>

hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableReplicationQueueStorage.java

Lines changed: 1 addition & 1 deletion
@@ -376,7 +376,7 @@ public void removeAllQueues(String peerId) throws ReplicationException {
         table.delete(new Delete(result.getRow()));
       }
     } catch (IOException e) {
-      throw new ReplicationException("failed to listAllQueueIds, peerId=" + peerId, e);
+      throw new ReplicationException("failed to removeAllQueues, peerId=" + peerId, e);
     }
   }
 

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java

Lines changed: 1 addition & 1 deletion
@@ -285,7 +285,7 @@ public static HFileBlock getBlockForCaching(CacheConfig cacheConf, HFileBlock bl
       .withOnDiskSizeWithoutHeader(block.getOnDiskSizeWithoutHeader())
       .withUncompressedSizeWithoutHeader(block.getUncompressedSizeWithoutHeader())
       .withPrevBlockOffset(block.getPrevBlockOffset()).withByteBuff(buff)
-      .withFillHeader(FILL_HEADER).withOffset(block.getOffset()).withNextBlockOnDiskSize(-1)
+      .withFillHeader(FILL_HEADER).withOffset(block.getOffset())
       .withOnDiskDataSizeWithHeader(block.getOnDiskDataSizeWithHeader() + numBytes)
      .withNextBlockOnDiskSize(block.getNextBlockOnDiskSize())
      .withHFileContext(cloneContext(block.getHFileContext()))

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java

Lines changed: 1 addition & 1 deletion
@@ -117,7 +117,7 @@ public class BucketEntry implements HBaseReferenceCounted {
     this.onDiskSizeWithHeader = onDiskSizeWithHeader;
     this.accessCounter = accessCounter;
     this.cachedTime = cachedTime;
-    this.priority = inMemory ? BlockPriority.MEMORY : BlockPriority.MULTI;
+    this.priority = inMemory ? BlockPriority.MEMORY : BlockPriority.SINGLE;
     this.refCnt = RefCnt.create(createRecycler.apply(this));
     this.markedAsEvicted = new AtomicBoolean(false);
     this.allocator = allocator;
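For context on the one-line fix above: in HBase's block caches a block normally enters at SINGLE priority and is promoted to MULTI only once it is accessed again while cached, with MEMORY reserved for in-memory column families. A standalone sketch of that promotion idea (not HBase code, names are illustrative):

// Minimal illustration of single/multi/memory priority assignment and promotion.
enum BlockPriority { SINGLE, MULTI, MEMORY }

final class CachedBlock {
  private BlockPriority priority;

  CachedBlock(boolean inMemory) {
    // First insertion: MEMORY for in-memory families, otherwise SINGLE (not MULTI).
    this.priority = inMemory ? BlockPriority.MEMORY : BlockPriority.SINGLE;
  }

  void onCacheHit() {
    // Promotion happens on re-access, never at construction time.
    if (priority == BlockPriority.SINGLE) {
      priority = BlockPriority.MULTI;
    }
  }

  BlockPriority priority() {
    return priority;
  }
}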
