@@ -551,7 +551,7 @@ class VulkanCaptureManager : public ApiCaptureManager
551
551
}
552
552
553
553
void PostProcess_vkQueueBindSparse (
554
- VkResult result, VkQueue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence)
554
+ VkResult result, VkQueue queue , uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence)
555
555
{
556
556
if (IsCaptureModeTrack () && (result == VK_SUCCESS))
557
557
{
@@ -563,6 +563,110 @@ class VulkanCaptureManager : public ApiCaptureManager
563
563
pBindInfo[i].signalSemaphoreCount ,
564
564
pBindInfo[i].pSignalSemaphores );
565
565
}
566
+
567
+ // In default mode, the capture manager uses a shared mutex to capture every API function. As a result,
568
+ // multiple threads may access the sparse resource maps concurrently. Therefore, we use a dedicated mutex
569
+ // for write access to these maps.
570
+ const std::lock_guard<std::mutex> lock (sparse_resource_mutex);
571
+ for (uint32_t bind_info_index = 0 ; bind_info_index < bindInfoCount; bind_info_index++)
572
+ {
573
+ auto & bind_info = pBindInfo[bind_info_index];
574
+
575
+ // TODO: add device group support. In the following handling, we assume that the system only has one
576
+ // physical device or that resourceDeviceIndex and memoryDeviceIndex of VkDeviceGroupBindSparseInfo in
577
+ // the pnext chain are zero.
578
+
579
+ if (bind_info.pBufferBinds != nullptr )
580
+ {
581
+ // The title binds sparse buffers to memory ranges, so we need to track the buffer binding
582
+ // information. The following updates will reflect the latest binding states for all buffers in this
583
+ // vkQueueBindSparse command, covering both fully-resident and partially-resident buffers.
584
+ for (uint32_t buffer_bind_index = 0 ; buffer_bind_index < bind_info.bufferBindCount ;
585
+ buffer_bind_index++)
586
+ {
587
+ auto & buffer_bind = bind_info.pBufferBinds [buffer_bind_index];
588
+ auto sparse_buffer = buffer_bind.buffer ;
589
+ auto wrapper = vulkan_wrappers::GetWrapper<vulkan_wrappers::BufferWrapper>(sparse_buffer);
590
+
591
+ if (wrapper != nullptr )
592
+ {
593
+ wrapper->sparse_bind_queue = queue;
594
+ for (uint32_t bind_memory_range_index = 0 ; bind_memory_range_index < buffer_bind.bindCount ;
595
+ bind_memory_range_index++)
596
+ {
597
+ auto & bind_memory_range = buffer_bind.pBinds [bind_memory_range_index];
598
+ graphics::UpdateSparseMemoryBindMap (wrapper->sparse_memory_bind_map , bind_memory_range);
599
+ }
600
+ }
601
+ }
602
+ }
603
+
604
+ if (bind_info.pImageOpaqueBinds != nullptr )
605
+ {
606
+ // The title binds sparse images to opaque memory ranges, so we need to track the image binding
607
+ // information. The following handling will update the latest binding states for all images in this
608
+ // vkQueueBindSparse command, which utilizes opaque memory binding. There are two cases covered by
609
+ // the tracking. In the first case, the sparse image exclusively uses opaque memory binding. For
610
+ // this case, the target title treats the binding memory ranges as a linear unified region. This
611
+ // should represent a fully-resident binding because this linear region is entirely opaque, meaning
612
+ // there is no application-visible mapping between texel locations and memory offsets. In another
613
+ // case, the image utilizes subresource sparse memory binding, just binding only its mip tail region
614
+ // to an opaque memory range. For this situation, we use the sparse_opaque_memory_bind_map and
615
+ // sparse_subresource_memory_bind_map of the image wrapper to track the subresource bindings and
616
+ // opaque bindings separately.
617
+ for (uint32_t image_opaque_bind_index = 0 ; image_opaque_bind_index < bind_info.imageOpaqueBindCount ;
618
+ image_opaque_bind_index++)
619
+ {
620
+ auto & image_opaque_bind = bind_info.pImageOpaqueBinds [image_opaque_bind_index];
621
+ auto sparse_image = image_opaque_bind.image ;
622
+ auto wrapper = vulkan_wrappers::GetWrapper<vulkan_wrappers::ImageWrapper>(sparse_image);
623
+
624
+ if (wrapper != nullptr )
625
+ {
626
+ wrapper->sparse_bind_queue = queue;
627
+
628
+ for (uint32_t bind_memory_range_index = 0 ;
629
+ bind_memory_range_index < image_opaque_bind.bindCount ;
630
+ bind_memory_range_index++)
631
+ {
632
+ auto & bind_memory_range = image_opaque_bind.pBinds [bind_memory_range_index];
633
+ graphics::UpdateSparseMemoryBindMap (wrapper->sparse_opaque_memory_bind_map ,
634
+ bind_memory_range);
635
+ }
636
+ }
637
+ }
638
+ }
639
+
640
+ if (bind_info.pImageBinds != nullptr )
641
+ {
642
+ // The title binds subresources of a sparse image to memory ranges, which requires us to keep track
643
+ // of the sparse image subresource binding information. It's important to note that while the image
644
+ mainly uses subresource sparse memory binding, its mip tail region must be bound to an opaque
645
+ // memory range. Therefore, we use the sparse_opaque_memory_bind_map and
646
+ // sparse_subresource_memory_bind_map of the image wrapper to separately track both the
647
+ // subresource bindings and the opaque bindings.
648
+ for (uint32_t image_bind_index = 0 ; image_bind_index < bind_info.imageBindCount ; image_bind_index++)
649
+ {
650
+ auto & image_bind = bind_info.pImageBinds [image_bind_index];
651
+ auto sparse_image = image_bind.image ;
652
+ auto wrapper = vulkan_wrappers::GetWrapper<vulkan_wrappers::ImageWrapper>(sparse_image);
653
+
654
+ if (wrapper != nullptr )
655
+ {
656
+ wrapper->sparse_bind_queue = queue;
657
+
658
+ for (uint32_t bind_memory_range_index = 0 ; bind_memory_range_index < image_bind.bindCount ;
659
+ bind_memory_range_index++)
660
+ {
661
+ auto & bind_memory_range = image_bind.pBinds [bind_memory_range_index];
662
+ // TODO: Implement handling for tracking binding information of sparse image
663
+ // subresources.
664
+ GFXRECON_LOG_ERROR_ONCE (" Binding of sparse image blocks is not supported!" );
665
+ }
666
+ }
667
+ }
668
+ }
669
+ }
566
670
}
567
671
}
568
672
@@ -842,6 +946,50 @@ class VulkanCaptureManager : public ApiCaptureManager
842
946
}
843
947
}
844
948
949
+ void PostProcess_vkCreateBuffer (VkResult result,
950
+ VkDevice device,
951
+ const VkBufferCreateInfo* pCreateInfo,
952
+ const VkAllocationCallbacks* pAllocator,
953
+ VkBuffer* pBuffer)
954
+ {
955
+ if (IsCaptureModeTrack () && (result == VK_SUCCESS) && (pCreateInfo != nullptr ))
956
+ {
957
+ assert (state_tracker_ != nullptr );
958
+
959
+ auto buffer_wrapper = vulkan_wrappers::GetWrapper<vulkan_wrappers::BufferWrapper>(*pBuffer);
960
+
961
+ if (buffer_wrapper->is_sparse_buffer )
962
+ {
963
+ // We will need to set the bind_device for handling sparse buffers. There will be no subsequent
964
+ // vkBindBufferMemory, vkBindBufferMemory2 or vkBindBufferMemory2KHR calls for sparse buffer, so we
965
+ // assign bind_device to the device that created the buffer.
966
+ buffer_wrapper->bind_device = vulkan_wrappers::GetWrapper<vulkan_wrappers::DeviceWrapper>(device);
967
+ }
968
+ }
969
+ }
970
+
971
+ void PostProcess_vkCreateImage (VkResult result,
972
+ VkDevice device,
973
+ const VkImageCreateInfo* pCreateInfo,
974
+ const VkAllocationCallbacks* pAllocator,
975
+ VkImage* pImage)
976
+ {
977
+ if (IsCaptureModeTrack () && (result == VK_SUCCESS) && (pCreateInfo != nullptr ))
978
+ {
979
+ assert (state_tracker_ != nullptr );
980
+
981
+ auto image_wrapper = vulkan_wrappers::GetWrapper<vulkan_wrappers::ImageWrapper>(*pImage);
982
+
983
+ if (image_wrapper->is_sparse_image )
984
+ {
985
+ // We will need to set the bind_device for handling sparse images. There will be no subsequent
986
+ // vkBindImageMemory, vkBindImageMemory2, or vkBindImageMemory2KHR calls for sparse image, so we assign
987
+ // bind_device to the device that created the image.
988
+ image_wrapper->bind_device = vulkan_wrappers::GetWrapper<vulkan_wrappers::DeviceWrapper>(device);
989
+ }
990
+ }
991
+ }
992
+
845
993
void PostProcess_vkCmdBeginRenderPass (VkCommandBuffer commandBuffer,
846
994
const VkRenderPassBeginInfo* pRenderPassBegin,
847
995
VkSubpassContents)
@@ -1662,6 +1810,7 @@ class VulkanCaptureManager : public ApiCaptureManager
1662
1810
std::unique_ptr<VulkanStateTracker> state_tracker_;
1663
1811
HardwareBufferMap hardware_buffers_;
1664
1812
std::mutex deferred_operation_mutex;
1813
+ std::mutex sparse_resource_mutex;
1665
1814
};
1666
1815
1667
1816
GFXRECON_END_NAMESPACE (encode)
0 commit comments