diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/ContainerToKeyMapping.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/ContainerToKeyMapping.java
index 1c0db64751a8..28b2efe55922 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/ContainerToKeyMapping.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/ContainerToKeyMapping.java
@@ -57,13 +57,12 @@
 
 /**
  * Tool to map full key paths that use the specified containers.
- * Note: Currently only processes FSO layout buckets.
+ * Supports both FSO (File System Optimized) and OBS (Object Store) bucket layouts.
  */
 @CommandLine.Command(
     name = "container-key-mapping",
     aliases = "ckm",
-    description = "Maps full key paths that use the specified containers. " +
-        "Note: A container can have both FSO and OBS keys. Currently this tool processes only FSO keys")
+    description = "Maps full key paths that use the specified containers.")
 public class ContainerToKeyMapping extends AbstractSubcommand implements Callable<Void> {
   private static final String DIRTREE_DB_NAME = "omdirtree.db";
   private static final String DIRTREE_TABLE_NAME = "dirTreeTable";
@@ -80,22 +79,26 @@ public class ContainerToKeyMapping extends AbstractSubcommand implements Callabl
       description = "Comma separated Container IDs")
   private String containers;
 
+  @CommandLine.Option(names = {"--onlyFileNames"},
+      defaultValue = "false",
+      description = "Only display file names without full path")
+  private boolean onlyFileNames;
+
   private DBStore omDbStore;
   private Table<String, OmVolumeArgs> volumeTable;
   private Table<String, OmBucketInfo> bucketTable;
   private Table<String, OmDirectoryInfo> directoryTable;
   private Table<String, OmKeyInfo> fileTable;
+  private Table<String, OmKeyInfo> keyTable;
   private DBStore dirTreeDbStore;
   private Table dirTreeTable;
   // Cache volume IDs to avoid repeated lookups
   private final Map<String, Long> volumeCache = new HashMap<>();
   private ConfigurationSource conf;
-  // TODO: Add support to OBS keys (HDDS-14118)
 
   @Override
   public Void call() throws Exception {
-    err().println("Note: A container can have both FSO and OBS keys. Currently this tool processes only FSO keys");
-
+    String dbPath = parent.getDbPath();
 
     // Parse container IDs
     Set<Long> containerIDs = Arrays.stream(containers.split(","))
@@ -122,6 +125,7 @@ public Void call() throws Exception {
     bucketTable = OMDBDefinition.BUCKET_TABLE_DEF.getTable(omDbStore, CacheType.NO_CACHE);
     directoryTable = OMDBDefinition.DIRECTORY_TABLE_DEF.getTable(omDbStore, CacheType.NO_CACHE);
     fileTable = OMDBDefinition.FILE_TABLE_DEF.getTable(omDbStore, CacheType.NO_CACHE);
+    keyTable = OMDBDefinition.KEY_TABLE_DEF.getTable(omDbStore, CacheType.NO_CACHE);
 
     openDirTreeDB(dbPath);
     retrieve(writer, containerIDs);
@@ -164,7 +168,7 @@ private void closeDirTreeDB(String dbPath) throws IOException {
   }
 
   private void retrieve(PrintWriter writer, Set<Long> containerIds) {
-    // Build dir tree
+    // Build dir tree for FSO keys
     Map bucketVolMap = new HashMap<>();
     try {
       prepareDirIdTree(bucketVolMap);
@@ -175,15 +179,25 @@ private void retrieve(PrintWriter writer, Set<Long> containerIds) {
 
     // Map to collect keys per container
     Map<Long, List<String>> containerToKeysMap = new HashMap<>();
-    // Track unreferenced keys count per container
+    // Track unreferenced keys count per container (FSO only)
     Map<Long, Long> unreferencedCountMap = new HashMap<>();
     for (Long containerId : containerIds) {
       containerToKeysMap.put(containerId, new ArrayList<>());
       unreferencedCountMap.put(containerId, 0L);
     }
 
-    // Iterate file table and filter for container
-    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> fileIterator =
+    // Process FSO keys (fileTable)
+    processFSOKeys(containerIds, containerToKeysMap, unreferencedCountMap, bucketVolMap);
+
+    // Process OBS keys (keyTable)
+    processOBSKeys(containerIds, containerToKeysMap);
+
+    jsonOutput(writer, containerToKeysMap, unreferencedCountMap);
+  }
+
+  private void processFSOKeys(Set<Long> containerIds, Map<Long, List<String>> containerToKeysMap,
+      Map<Long, Long> unreferencedCountMap, Map bucketVolMap) {
+    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> fileIterator =
         fileTable.iterator()) {
 
       while (fileIterator.hasNext()) {
@@ -191,31 +205,65 @@ private void retrieve(PrintWriter writer, Set<Long> containerIds) {
         OmKeyInfo keyInfo = entry.getValue();
 
         // Find which containers this key uses
-        Set<Long> keyContainers = new HashSet<>();
-        keyInfo.getKeyLocationVersions().forEach(
-            e -> e.getLocationList().forEach(
-                blk -> {
-                  long cid = blk.getBlockID().getContainerID();
-                  if (containerIds.contains(cid)) {
-                    keyContainers.add(cid);
-                  }
-                }));
+        Set<Long> keyContainers = getKeyContainers(keyInfo, containerIds);
 
         if (!keyContainers.isEmpty()) {
-          // Reconstruct full path
-          String fullPath = reconstructFullPath(keyInfo, bucketVolMap, unreferencedCountMap, keyContainers);
-          if (fullPath != null) {
+          if (!onlyFileNames) {
+            // Reconstruct full path
+            String fullPath = reconstructFullPath(keyInfo, bucketVolMap, unreferencedCountMap, keyContainers);
+            if (fullPath != null) {
+              for (Long containerId : keyContainers) {
+                containerToKeysMap.get(containerId).add(fullPath);
+              }
+            }
+          } else {
             for (Long containerId : keyContainers) {
-              containerToKeysMap.get(containerId).add(fullPath);
+              containerToKeysMap.get(containerId).add(keyInfo.getKeyName());
             }
           }
         }
       }
     } catch (Exception e) {
-      err().println("Exception occurred reading file Table, " + e);
-      return;
+      err().println("Exception occurred reading fileTable (FSO keys), " + e);
     }
-    jsonOutput(writer, containerToKeysMap, unreferencedCountMap);
+  }
+
+  private void processOBSKeys(Set<Long> containerIds, Map<Long, List<String>> containerToKeysMap) {
+    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> keyIterator =
+        keyTable.iterator()) {
+
+      while (keyIterator.hasNext()) {
+        Table.KeyValue<String, OmKeyInfo> entry = keyIterator.next();
+        OmKeyInfo keyInfo = entry.getValue();
+
+        // Find which containers this key uses
+        Set<Long> keyContainers = getKeyContainers(keyInfo, containerIds);
+
+        if (!keyContainers.isEmpty()) {
+          // For OBS keys, use the database key directly (already in /volume/bucket/key format)
+          // Or extract just the key name if onlyFileNames is true
+          String keyPath = onlyFileNames ? keyInfo.getKeyName() : entry.getKey();
+          for (Long containerId : keyContainers) {
+            containerToKeysMap.get(containerId).add(keyPath);
+          }
+        }
+      }
+    } catch (Exception e) {
+      err().println("Exception occurred reading keyTable (OBS keys), " + e);
+    }
+  }
+
+  private Set<Long> getKeyContainers(OmKeyInfo keyInfo, Set<Long> targetContainerIds) {
+    Set<Long> keyContainers = new HashSet<>();
+    keyInfo.getKeyLocationVersions().forEach(
+        e -> e.getLocationList().forEach(
+            blk -> {
+              long cid = blk.getBlockID().getContainerID();
+              if (targetContainerIds.contains(cid)) {
+                keyContainers.add(cid);
+              }
+            }));
+    return keyContainers;
   }
 
   private void prepareDirIdTree(Map bucketVolMap) throws Exception {
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/om/TestContainerToKeyMapping.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/om/TestContainerToKeyMapping.java
index 49bd23c7fcd7..884c28b6c253 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/om/TestContainerToKeyMapping.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/om/TestContainerToKeyMapping.java
@@ -61,13 +61,17 @@ public class TestContainerToKeyMapping {
   private StringWriter outWriter;
 
   private static final String VOLUME_NAME = "vol1";
-  private static final String BUCKET_NAME = "bucket1";
+  private static final String FSO_BUCKET_NAME = "fso-bucket";
+  private static final String OBS_BUCKET_NAME = "obs-bucket";
   private static final long VOLUME_ID = 100L;
-  private static final long BUCKET_ID = 200L;
+  private static final long FSO_BUCKET_ID = 200L;
+  private static final long OBS_BUCKET_ID = 250L;
   private static final long DIR_ID = 300L;
   private static final long FILE_ID = 400L;
+  private static final long KEY_ID = 450L;
   private static final long CONTAINER_ID_1 = 1L;
   private static final long CONTAINER_ID_2 = 2L;
+  private static final long CONTAINER_ID_3 = 3L;
   private static final long UNREFERENCED_FILE_ID = 500L;
   private static final long MISSING_DIR_ID = 999L; // Non-existent parent
 
@@ -100,14 +104,36 @@ public void tearDown() throws Exception {
 
   @Test
   public void testContainerToKeyMapping() {
-    int exitCode = execute("--containers", String.valueOf(CONTAINER_ID_1));
+    int exitCode = execute("--containers", CONTAINER_ID_1 + "," + CONTAINER_ID_2);
     assertEquals(0, exitCode);
-
+
+    String output = outWriter.toString();
+
+    // Check FSO key
+    assertThat(output).contains("\"" + CONTAINER_ID_1 + "\"");
+    assertThat(output).contains("vol1/fso-bucket/dir1/file1");
+
+    // Check OBS key
+    assertThat(output).contains("\"" + CONTAINER_ID_2 + "\"");
+    assertThat(output).contains("/vol1/obs-bucket/key1");
+  }
+
+  @Test
+  public void testContainerToKeyMappingWithOnlyFileNames() {
+    int exitCode = execute("--containers", CONTAINER_ID_1 + "," + CONTAINER_ID_2, "--onlyFileNames");
+    assertEquals(0, exitCode);
+
     String output = outWriter.toString();
 
+    // Check FSO key - should show only filename
     assertThat(output).contains("\"" + CONTAINER_ID_1 + "\"");
-    assertThat(output).contains("vol1/bucket1/dir1/file1");
-    assertThat(output).contains("\"numOfKeys\" : 1");
+    assertThat(output).contains("file1");
+    assertThat(output).doesNotContain("vol1/fso-bucket/dir1/file1");
+
+    // Check OBS key - should also show only key name
+    assertThat(output).contains("\"" + CONTAINER_ID_2 + "\"");
+    assertThat(output).contains("key1");
+    assertThat(output).doesNotContain("/vol1/obs-bucket/key1");
   }
 
   @Test
@@ -124,12 +150,12 @@ public void testNonExistentContainer() {
 
   @Test
   public void testUnreferencedKeys() {
-    int exitCode = execute("--containers", String.valueOf(CONTAINER_ID_2));
+    int exitCode = execute("--containers", String.valueOf(CONTAINER_ID_3));
     assertEquals(0, exitCode);
 
     String output = outWriter.toString();
 
-    assertThat(output).contains("\"" + CONTAINER_ID_2 + "\"");
+    assertThat(output).contains("\"" + CONTAINER_ID_3 + "\"");
     assertThat(output).contains("\"numOfKeys\" : 0");
     assertThat(output).contains("\"unreferencedKeys\" : 1");
   }
@@ -148,40 +174,57 @@ private void createTestData() throws Exception {
     // Create FSO bucket
     OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(VOLUME_NAME)
-        .setBucketName(BUCKET_NAME)
+        .setBucketName(FSO_BUCKET_NAME)
         .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED)
-        .setObjectID(BUCKET_ID)
+        .setObjectID(FSO_BUCKET_ID)
         .build();
     omMetadataManager.getBucketTable().put(
-        omMetadataManager.getBucketKey(VOLUME_NAME, BUCKET_NAME), bucketInfo);
+        omMetadataManager.getBucketKey(VOLUME_NAME, FSO_BUCKET_NAME), bucketInfo);
+
+    // Create OBS bucket
+    OmBucketInfo obsBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(VOLUME_NAME)
+        .setBucketName(OBS_BUCKET_NAME)
+        .setBucketLayout(BucketLayout.OBJECT_STORE)
+        .setObjectID(OBS_BUCKET_ID)
+        .build();
+    omMetadataManager.getBucketTable().put(
+        omMetadataManager.getBucketKey(VOLUME_NAME, OBS_BUCKET_NAME), obsBucketInfo);
 
     // Create directory
     OmDirectoryInfo dirInfo = OmDirectoryInfo.newBuilder()
         .setName("dir1")
         .setObjectID(DIR_ID)
-        .setParentObjectID(BUCKET_ID)
+        .setParentObjectID(FSO_BUCKET_ID)
        .setUpdateID(1)
         .build();
-    String dirKey = omMetadataManager.getOzonePathKey(VOLUME_ID, BUCKET_ID, BUCKET_ID, "dir1");
+    String dirKey = omMetadataManager.getOzonePathKey(VOLUME_ID, FSO_BUCKET_ID, FSO_BUCKET_ID, "dir1");
     omMetadataManager.getDirectoryTable().put(dirKey, dirInfo);
 
-    // Create file with a block in container 1
+    // Create FSO file with a block in container 1
    OmKeyInfo keyInfo = createKeyInfo(
         "file1", FILE_ID, DIR_ID, CONTAINER_ID_1);
     String fileKey = omMetadataManager.getOzonePathKey(
-        VOLUME_ID, BUCKET_ID, DIR_ID, "file1");
+        VOLUME_ID, FSO_BUCKET_ID, DIR_ID, "file1");
     omMetadataManager.getFileTable().put(fileKey, keyInfo);
-
+
+    // Create OBS key with a block in container 2
+    OmKeyInfo obsKeyInfo = createOBSKeyInfo(
+        "key1", KEY_ID, CONTAINER_ID_2);
+    String obsKey = omMetadataManager.getOzoneKey(
+        VOLUME_NAME, OBS_BUCKET_NAME, "key1");
+    omMetadataManager.getKeyTable(BucketLayout.OBJECT_STORE).put(obsKey, obsKeyInfo);
+
     // Create unreferenced file (parent directory doesn't exist)
     OmKeyInfo unreferencedKey = createKeyInfo(
-        "unreferencedFile", UNREFERENCED_FILE_ID, MISSING_DIR_ID, CONTAINER_ID_2);
+        "unreferencedFile", UNREFERENCED_FILE_ID, MISSING_DIR_ID, CONTAINER_ID_3);
     String unreferencedFileKey = omMetadataManager.getOzonePathKey(
-        VOLUME_ID, BUCKET_ID, MISSING_DIR_ID, "unreferencedFile");
+        VOLUME_ID, FSO_BUCKET_ID, MISSING_DIR_ID, "unreferencedFile");
     omMetadataManager.getFileTable().put(unreferencedFileKey, unreferencedKey);
   }
 
   /**
-   * Helper method to create OmKeyInfo with a block in specified container.
+   * Helper method to create OmKeyInfo with a block in specified container (FSO).
    */
   private OmKeyInfo createKeyInfo(String keyName, long objectId, long parentId, long containerId) {
     OmKeyLocationInfo locationInfo = new OmKeyLocationInfo.Builder()
@@ -195,7 +238,7 @@ private OmKeyInfo createKeyInfo(String keyName, long objectId, long parentId, lo
 
     return new OmKeyInfo.Builder()
         .setVolumeName(VOLUME_NAME)
-        .setBucketName(BUCKET_NAME)
+        .setBucketName(FSO_BUCKET_NAME)
         .setKeyName(keyName)
         .setReplicationConfig(StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
         .setDataSize(1024)
@@ -206,6 +249,31 @@ private OmKeyInfo createKeyInfo(String keyName, long objectId, long parentId, lo
         .build();
   }
 
+  /**
+   * Helper method to create OmKeyInfo for OBS keys with a block in specified container.
+   */
+  private OmKeyInfo createOBSKeyInfo(String keyName, long objectId, long containerId) {
+    OmKeyLocationInfo locationInfo = new OmKeyLocationInfo.Builder()
+        .setBlockID(new BlockID(containerId, 1L))
+        .setLength(1024)
+        .setOffset(0)
+        .build();
+
+    OmKeyLocationInfoGroup locationGroup = new OmKeyLocationInfoGroup(0,
+        Collections.singletonList(locationInfo));
+
+    return new OmKeyInfo.Builder()
+        .setVolumeName(VOLUME_NAME)
+        .setBucketName(OBS_BUCKET_NAME)
+        .setKeyName(keyName)
+        .setReplicationConfig(StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
+        .setDataSize(1024)
+        .setObjectID(objectId)
+        .setUpdateID(1)
+        .addOmKeyLocationInfoGroup(locationGroup)
+        .build();
+  }
+
   private int execute(String... args) {
     List<String> argList = new ArrayList<>(Arrays.asList("om", "container-key-mapping", "--db", dbPath));
     argList.addAll(Arrays.asList(args));