diff --git a/libraries/gpu-vk/src/gpu/vk/VKBackend.cpp b/libraries/gpu-vk/src/gpu/vk/VKBackend.cpp index 0f10f14b7f..af6dd40810 100644 --- a/libraries/gpu-vk/src/gpu/vk/VKBackend.cpp +++ b/libraries/gpu-vk/src/gpu/vk/VKBackend.cpp @@ -223,15 +223,15 @@ void VKBackend::executeFrame(const FramePointer& frame) { return; }*/ const auto& batch = *batchPtr; - if (batch.getName() == "CompositeHUD") { + /*if (batch.getName() == "CompositeHUD") { continue; // VKTODO: crashes frame player currently - } - if (batch.getName() == "Resample::run") { + }*/ + /*if (batch.getName() == "Resample::run") { continue; // VKTODO: no framebuffer commands support yet"ApplicationOverlay::render" - } - if (batch.getName() == "ApplicationOverlay::render") { + }*/ + /*if (batch.getName() == "ApplicationOverlay::render") { continue; // VKTODO: no overlay support yet - } + }*/ cmdBeginLabel(commandBuffer, "batch:" + batch.getName(), glm::vec4{ 1, 1, 0, 1 }); const auto& commands = batch.getCommands(); const auto& offsets = batch.getCommandOffsets(); @@ -1186,6 +1186,52 @@ void VKBackend::renderPassTransfer(const Batch& batch) { break; } + case Batch::COMMAND_setFramebuffer: { + size_t paramOffset = *offset; + auto framebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint); + // Framebuffer attachments may need layout transitions + auto &renderBuffers = framebuffer->getRenderBuffers(); + auto depthStencilBuffer = framebuffer->getDepthStencilBuffer(); + for (auto &buffer : renderBuffers) { + if (buffer._texture) { + auto gpuTexture = syncGPUObject(*buffer._texture); + if (gpuTexture) { + auto attachmentTexture = dynamic_cast<VKAttachmentTexture*>(gpuTexture); + if (attachmentTexture) { + _attachmentTexturesToTransitionToWrite.push_back(attachmentTexture); + } + } + } + } + if (depthStencilBuffer) { + auto gpuDepthStencilBuffer = syncGPUObject(*depthStencilBuffer); + if (gpuDepthStencilBuffer) { + auto attachmentTexture = dynamic_cast<VKAttachmentTexture*>(gpuDepthStencilBuffer); + if (attachmentTexture) { + 
_attachmentTexturesToTransitionToWrite.push_back(attachmentTexture); + } + } + } + + } + break; + + case Batch::COMMAND_setResourceTexture: { + size_t paramOffset = *offset; + uint32_t slot = batch._params[paramOffset + 1]._uint; + TexturePointer resourceTexture = batch._textures.get(batch._params[paramOffset + 0]._uint); + if (resourceTexture) { + auto gpuTexture = syncGPUObject(*resourceTexture); + if (gpuTexture) { + auto attachmentTexture = dynamic_cast<VKAttachmentTexture*>(gpuTexture); + if (attachmentTexture) { + _attachmentTexturesToTransitionToRead.push_back(attachmentTexture); + } + } + } + } + break; + default: break; } @@ -1199,6 +1245,9 @@ void VKBackend::renderPassTransfer(const Batch& batch) { transferGlUniforms(); transferTransformState(batch); } + transitionImageLayouts(); + _attachmentTexturesToTransitionToRead.clear(); + _attachmentTexturesToTransitionToWrite.clear(); _inRenderTransferPass = false; } @@ -1568,22 +1617,26 @@ VKQuery* VKBackend::syncGPUObject(const Query& query) { return object; } -void VKBackend::blitToFramebuffer(gpu::Texture& input, gpu::Texture& output) { +void VKBackend::blitToFramebuffer(VKAttachmentTexture &input, const Vec4i& srcViewport, VKAttachmentTexture &output, const Vec4i& dstViewport) { // is vks::tools::insertImageMemoryBarrier needed? VkImageBlit imageBlit{}; // Do we ever want to blit multiple layers/mips?
imageBlit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; imageBlit.srcSubresource.layerCount = 1; imageBlit.srcSubresource.mipLevel = 0; - imageBlit.srcOffsets[1].x = input.getWidth(); - imageBlit.srcOffsets[1].y = input.getHeight(); + imageBlit.srcOffsets[0].x = srcViewport.x; + imageBlit.srcOffsets[0].y = srcViewport.y; + imageBlit.srcOffsets[1].x = srcViewport.z; + imageBlit.srcOffsets[1].y = srcViewport.w; imageBlit.srcOffsets[1].z = 1; imageBlit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; imageBlit.dstSubresource.layerCount = 1; imageBlit.dstSubresource.mipLevel = 0; - imageBlit.dstOffsets[1].x = output.getWidth(); - imageBlit.dstOffsets[1].y = output.getHeight(); + imageBlit.dstOffsets[0].x = dstViewport.x; + imageBlit.dstOffsets[0].y = dstViewport.y; + imageBlit.dstOffsets[1].x = dstViewport.z; + imageBlit.dstOffsets[1].y = dstViewport.w; imageBlit.dstOffsets[1].z = 1; VkImageSubresourceRange mipSubRange = {}; @@ -1592,29 +1645,39 @@ void VKBackend::blitToFramebuffer(gpu::Texture& input, gpu::Texture& output) { mipSubRange.levelCount = 1; mipSubRange.layerCount = 1; - auto inputObject = syncGPUObject(input); - auto outputObject = syncGPUObject(output); - vks::tools::insertImageMemoryBarrier( _currentCommandBuffer, - outputObject->_vkImage, + input._vkImage, 0, + VK_ACCESS_TRANSFER_READ_BIT, - VK_ACCESS_TRANSFER_WRITE_BIT, - VK_IMAGE_LAYOUT_UNDEFINED, - VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VKTODO: + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, mipSubRange); vkCmdBlitImage( _currentCommandBuffer, - inputObject->_vkImage, + input._vkImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - outputObject->_vkImage, + output._vkImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &imageBlit, VK_FILTER_LINEAR); + + vks::tools::insertImageMemoryBarrier( + _currentCommandBuffer, + output._vkImage, + VK_ACCESS_TRANSFER_WRITE_BIT, + 0, + 
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VKTODO + VK_PIPELINE_STAGE_TRANSFER_BIT, + VK_PIPELINE_STAGE_TRANSFER_BIT, + mipSubRange); + + output._vkImageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // VKTODO } void VKBackend::updateInput() { @@ -1858,6 +1921,62 @@ void VKBackend::waitForGPU() { VK_CHECK_RESULT(vkDeviceWaitIdle(_context.device->logicalDevice)); } +void VKBackend::transitionImageLayouts() { + for (auto attachmentTexture : _attachmentTexturesToTransitionToRead) { + if (attachmentTexture->_vkImageLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { + VkImageSubresourceRange mipSubRange = {}; + mipSubRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + mipSubRange.baseMipLevel = 0; + mipSubRange.levelCount = 1; + mipSubRange.layerCount = 1; + vks::tools::insertImageMemoryBarrier( + _currentCommandBuffer, + attachmentTexture->_vkImage, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + VK_ACCESS_SHADER_READ_BIT, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, // VKTODO + mipSubRange); + attachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + } + } + for (auto attachmentTexture : _attachmentTexturesToTransitionToWrite) { + if (attachmentTexture->_vkImageLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { + if (attachmentTexture->_gpuObject.isDepthStencilRenderTarget()) { + VkImageSubresourceRange mipSubRange = {}; + mipSubRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; + mipSubRange.baseMipLevel = 0; + mipSubRange.levelCount = 1; + mipSubRange.layerCount = 1; + vks::tools::insertImageMemoryBarrier(_currentCommandBuffer, attachmentTexture->_vkImage, + VK_ACCESS_SHADER_READ_BIT, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VKTODO: should be + 
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, // VKTODO + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, mipSubRange); // VKTODO: what stage mask for depth stencil? + attachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + } else { + VkImageSubresourceRange mipSubRange = {}; + mipSubRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + mipSubRange.baseMipLevel = 0; + mipSubRange.levelCount = 1; + mipSubRange.layerCount = 1; + vks::tools::insertImageMemoryBarrier(_currentCommandBuffer, attachmentTexture->_vkImage, + VK_ACCESS_SHADER_READ_BIT, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VKTODO: should be + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, // VKTODO + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, mipSubRange); + attachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + } + } + } +} + void VKBackend::perFrameCleanup() { auto &recycler = _context.recycler; std::lock_guard lockGuard(recycler.recyclerMutex); @@ -2360,8 +2479,9 @@ void VKBackend::do_setFramebufferSwapChain(const Batch& batch, size_t paramOffse } void VKBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) { - // VKTODO: Check if this is needed on Vulkan or just clearing values in render pass info is enough - ; + // VKTODO: This could possibly be optimized by clearing on next render pass. 
+ // VKTODO: use vkCmdClearColorImage(), vkCmdClearDepthStencilImage() or vkCmdClearAttachments() instead + if (_stereo._enable && !_cache.pipelineState.pipeline->getState()->isScissorEnable()) { qWarning("Clear without scissor in stereo mode"); } @@ -2377,6 +2497,8 @@ void VKBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) { int useScissor = batch._params[paramOffset + 0]._int; auto framebuffer = _cache.pipelineState.framebuffer; + auto gpuFramebuffer = syncGPUObject(*framebuffer); + auto &renderBuffers = framebuffer->getRenderBuffers(); Cache::Pipeline::RenderpassKey key = _cache.pipelineState.getRenderPassKey(framebuffer); std::vector attachments; @@ -2384,7 +2506,20 @@ void VKBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) { attachments.reserve(key.size()); std::vector colorAttachmentReferences; VkAttachmentReference depthReference{}; - for (const auto& format : key) { + for (size_t i = 0; i < key.size(); i++) { + const auto& format = key[i]; + /*if (surface) { + Q_ASSERT(TextureUsageType::RENDERBUFFER == surface->getUsageType()); + vkTexture = backend->syncGPUObject(*surface.get());*/ + auto texture = renderBuffers[i]._texture; + VKAttachmentTexture *attachmentTexture = nullptr; + if (texture) { + auto gpuTexture = syncGPUObject(*texture); + if (gpuTexture) { + attachmentTexture = dynamic_cast(gpuTexture); + Q_ASSERT(attachmentTexture); + } + } VkAttachmentDescription attachment{}; attachment.format = format; attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE; @@ -2408,6 +2543,14 @@ void VKBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) { attachment.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; } attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + // Texture state needs to be updated + auto depthStencil = framebuffer->getDepthStencilBuffer(); + Q_ASSERT(depthStencil); + auto gpuDepthStencil = syncGPUObject(*depthStencil); + Q_ASSERT(gpuDepthStencil); + 
auto depthStencilAttachmentTexture = dynamic_cast<VKAttachmentTexture*>(gpuDepthStencil); + Q_ASSERT(depthStencilAttachmentTexture); + depthStencilAttachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; depthReference.attachment = (uint32_t)(attachments.size()); depthReference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; } else { @@ -2420,6 +2563,11 @@ attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; } attachment.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + // Texture state needs to be updated + if (attachmentTexture) { + attachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + //Q_ASSERT(attachmentTexture->_gpuObject.isColorRenderTarget()); // isColorRenderTarget is broken + } VkAttachmentReference reference; reference.attachment = (uint32_t)(attachments.size()); reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; @@ -2462,7 +2610,7 @@ VkExtent2D{framebuffer->getWidth(), framebuffer->getHeight()}}; VkRenderPassBeginInfo beginInfo = vks::initializers::renderPassBeginInfo(); beginInfo.renderPass = renderPass; - beginInfo.framebuffer = syncGPUObject(*framebuffer)->vkFramebuffer; + beginInfo.framebuffer = gpuFramebuffer->vkFramebuffer; beginInfo.renderArea = rect; beginInfo.clearValueCount = (uint32_t)clearValues.size(); beginInfo.pClearValues = clearValues.data(); @@ -2547,8 +2695,7 @@ } void VKBackend::do_blit(const Batch& batch, size_t paramOffset) { - // VKTODO - /*auto srcframebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint); + auto srcframebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint); Vec4i srcvp; for (auto i = 0; i < 4; ++i) { srcvp[i] = batch._params[paramOffset + 1 + i]._int; } @@ -2561,13 +2708,39 @@ void
VKBackend::do_blit(const Batch& batch, size_t paramOffset) { } // Assign dest framebuffer if not bound already - auto destFbo = getFramebufferID(dstframebuffer); - auto srcFbo = getFramebufferID(srcframebuffer); - glBlitNamedFramebuffer(srcFbo, destFbo, - srcvp.x, srcvp.y, srcvp.z, srcvp.w, - dstvp.x, dstvp.y, dstvp.z, dstvp.w, - GL_COLOR_BUFFER_BIT, GL_LINEAR); - (void) CHECK_GL_ERROR();*/ + auto dstFbo = syncGPUObject(*dstframebuffer); + auto srcFbo = syncGPUObject(*srcframebuffer); + + // Sometimes framebuffer with multiple attachments is blitted into one with a single attachment. + auto &srcRenderBuffers = srcFbo->_gpuObject.getRenderBuffers(); + auto srcDepthStencilBuffer = srcFbo->_gpuObject.getDepthStencilBuffer(); + auto &dstRenderBuffers = dstFbo->_gpuObject.getRenderBuffers(); + auto dstDepthStencilBuffer = dstFbo->_gpuObject.getDepthStencilBuffer(); + + for (size_t i = 0; i < srcRenderBuffers.size(); i++) { + if (srcRenderBuffers[i]._texture && dstRenderBuffers[i]._texture) { + auto source = syncGPUObject(*srcRenderBuffers[i]._texture); + auto destination = syncGPUObject(*dstRenderBuffers[i]._texture); + if (source && destination) { + auto srcAttachmentTexture = dynamic_cast<VKAttachmentTexture*>(source); + auto dstAttachmentTexture = dynamic_cast<VKAttachmentTexture*>(destination); + if (srcAttachmentTexture && dstAttachmentTexture) { + blitToFramebuffer(*srcAttachmentTexture, srcvp, *dstAttachmentTexture, dstvp); + } + } + } + } + if (srcDepthStencilBuffer && dstDepthStencilBuffer) { + auto source = syncGPUObject(*srcDepthStencilBuffer); + auto destination = syncGPUObject(*dstDepthStencilBuffer); + if (source && destination) { + auto srcAttachmentTexture = dynamic_cast<VKAttachmentTexture*>(source); + auto dstAttachmentTexture = dynamic_cast<VKAttachmentTexture*>(destination); + if (srcAttachmentTexture && dstAttachmentTexture) { + blitToFramebuffer(*srcAttachmentTexture, srcvp, *dstAttachmentTexture, dstvp); + } + } + } } void VKBackend::do_setInputFormat(const Batch& batch, size_t paramOffset) { @@ -2972,14 +3145,11 @@ void
VKBackend::do_setResourceTexture(const Batch& batch, size_t paramOffset) { uint32_t slot = batch._params[paramOffset + 1]._uint; TexturePointer resourceTexture = batch._textures.get(batch._params[paramOffset + 0]._uint); - if (slot == 2) { - printf("break"); - } - if (!resourceTexture) { releaseResourceTexture(slot); return; } + // check cache before thinking if (_resource._textures[slot].texture == resourceTexture.get()) { return; diff --git a/libraries/gpu-vk/src/gpu/vk/VKBackend.h b/libraries/gpu-vk/src/gpu/vk/VKBackend.h index 02981112b2..7e600b3f3e 100644 --- a/libraries/gpu-vk/src/gpu/vk/VKBackend.h +++ b/libraries/gpu-vk/src/gpu/vk/VKBackend.h @@ -48,6 +48,8 @@ namespace gpu { namespace vk { +class VKAttachmentTexture; + static const int MAX_NUM_UNIFORM_BUFFERS = 14; // There's also camera buffer at slot 15 static const int32_t MIN_REQUIRED_TEXTURE_IMAGE_UNITS = 16; @@ -310,7 +312,7 @@ class VKBackend : public Backend, public std::enable_shared_from_this VKTexture* syncGPUObject(const Texture& texture); VKQuery* syncGPUObject(const Query& query); - void blitToFramebuffer(gpu::Texture &input, gpu::Texture &output); + void blitToFramebuffer(VKAttachmentTexture &input, const Vec4i& srcViewport, VKAttachmentTexture &output, const Vec4i& dstViewport); public: VKBackend(); @@ -446,6 +448,7 @@ class VKBackend : public Backend, public std::enable_shared_from_this // VKTODO: quick hack VKFramebuffer *_outputTexture{ nullptr }; protected: + void transitionImageLayouts(); // These are filled by syncGPUObject() calls, and are needed to track backend objects so that they can be destroyed before // destroying backend. @@ -483,6 +486,10 @@ class VKBackend : public Backend, public std::enable_shared_from_this std::shared_ptr _currentFrame; // Frame for which command buffer is already generated and it's currently being rendered. 
std::shared_ptr _currentlyRenderedFrame; + + std::vector _attachmentTexturesToTransitionToRead; + std::vector _attachmentTexturesToTransitionToWrite; + // Safety check to ensure that shutdown was completed before destruction. std::atomic isBackendShutdownComplete{ false }; diff --git a/libraries/gpu-vk/src/gpu/vk/VKFramebuffer.cpp b/libraries/gpu-vk/src/gpu/vk/VKFramebuffer.cpp index 59b0f3ddf8..ed3804a1a7 100644 --- a/libraries/gpu-vk/src/gpu/vk/VKFramebuffer.cpp +++ b/libraries/gpu-vk/src/gpu/vk/VKFramebuffer.cpp @@ -14,6 +14,7 @@ void gpu::vk::VKFramebuffer::update() { auto backend = _backend.lock(); VkDevice device = backend->getContext().device->logicalDevice; + // VKTODO: this is wrong, most of framebuffer code will need to be rewritten if (vkFramebuffer != VK_NULL_HANDLE) { vkDestroyFramebuffer(device, vkFramebuffer, nullptr); } @@ -63,7 +64,7 @@ void gpu::vk::VKFramebuffer::update() { attachmentCI.height = vkTexture->_gpuObject.getHeight(); attachmentCI.layerCount = 1; attachmentCI.format = gpu::vk::evalTexelFormatInternal(vkTexture->_gpuObject.getTexelFormat()); - attachmentCI.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; + attachmentCI.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; attachmentCI.imageSampleCount = VK_SAMPLE_COUNT_1_BIT; addAttachment(attachmentCI, vkTexture->_vkImage); //glNamedFramebufferTexture(_id, colorAttachments[unit], gltexture->_texture, 0); @@ -258,7 +259,7 @@ VkResult gpu::vk::VKFramebuffer::createFramebuffer() return VK_SUCCESS; } -bool gpu::vk::VKFramebuffer::checkStatus(gpu::vk::VKFramebuffer::FramebufferStatus target) const { +//bool gpu::vk::VKFramebuffer::checkStatus(gpu::vk::VKFramebuffer::FramebufferStatus target) const { // VKTODO /*switch (_status) { case GL_FRAMEBUFFER_COMPLETE: @@ -287,7 +288,7 @@ bool gpu::vk::VKFramebuffer::checkStatus(gpu::vk::VKFramebuffer::FramebufferStat } return false; 
*/ -} +//} gpu::vk::VKFramebuffer::~VKFramebuffer() { auto backend = _backend.lock(); @@ -334,29 +335,9 @@ uint32_t gpu::vk::VKFramebuffer::addAttachment(VKAttachmentCreateInfo createinfo assert(aspectMask > 0); - /*VkImageCreateInfo image = vks::initializers::imageCreateInfo(); - image.imageType = VK_IMAGE_TYPE_2D; - image.format = createinfo.format; - image.extent.width = createinfo.width; - image.extent.height = createinfo.height; - image.extent.depth = 1; - image.mipLevels = 1; - image.arrayLayers = createinfo.layerCount; - image.samples = createinfo.imageSampleCount; - image.tiling = VK_IMAGE_TILING_OPTIMAL; - image.usage = createinfo.usage;*/ - VkMemoryAllocateInfo memAlloc = vks::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; - // Create image for this attachment - /*VK_CHECK_RESULT(vkCreateImage(vulkanDevice->logicalDevice, &image, nullptr, &attachment.image)); - vkGetImageMemoryRequirements(vulkanDevice->logicalDevice, attachment.image, &memReqs); - memAlloc.allocationSize = memReqs.size; - memAlloc.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); - // VKTODO: this may need to be changed to VMA - VK_CHECK_RESULT(vkAllocateMemory(vulkanDevice->logicalDevice, &memAlloc, nullptr, &attachment.memory)); - VK_CHECK_RESULT(vkBindImageMemory(vulkanDevice->logicalDevice, attachment.image, attachment.memory, 0));*/ attachment.image = image; attachment.subresourceRange = {}; @@ -385,11 +366,13 @@ uint32_t gpu::vk::VKFramebuffer::addAttachment(VKAttachmentCreateInfo createinfo // If not, final layout depends on attachment type if (attachment.hasDepth() || attachment.hasStencil()) { - attachment.description.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; + //attachment.description.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; + attachment.description.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; // VKTODO: this is tricky, because it depends on 
what the image will be used for } else { - attachment.description.finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + //attachment.description.finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + attachment.description.finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; // VKTODO: this is tricky, because it depends on what the image will be used for } attachments.push_back(attachment); diff --git a/libraries/gpu-vk/src/gpu/vk/VKFramebuffer.h b/libraries/gpu-vk/src/gpu/vk/VKFramebuffer.h index 0e5f993d15..6efce85573 100644 --- a/libraries/gpu-vk/src/gpu/vk/VKFramebuffer.h +++ b/libraries/gpu-vk/src/gpu/vk/VKFramebuffer.h @@ -102,12 +102,15 @@ class VKFramebuffer : public vk::VKObject { }; + // VKTODO: this can be removed in the future, it's redundant std::vector attachments; protected: - enum FramebufferStatus { VK_FRAMEBUFFER_COMPLETE } _status; + //VkImageLayout _currentLayout {VK_IMAGE_LAYOUT_UNDEFINED}; // Used by render passes. If it's VK_IMAGE_LAYOUT_UNDEFINED, then image will be cleared in the render pass + //enum FramebufferStatus { VK_FRAMEBUFFER_COMPLETE } _status; + virtual void update(); - bool checkStatus(FramebufferStatus target) const; + //bool checkStatus(FramebufferStatus target) const; VkResult createFramebuffer(); struct VKAttachmentCreateInfo { diff --git a/libraries/gpu-vk/src/gpu/vk/VKPipelineCache.cpp b/libraries/gpu-vk/src/gpu/vk/VKPipelineCache.cpp index 844f9f9fce..2c97effcfc 100644 --- a/libraries/gpu-vk/src/gpu/vk/VKPipelineCache.cpp +++ b/libraries/gpu-vk/src/gpu/vk/VKPipelineCache.cpp @@ -100,7 +100,7 @@ Cache::Pipeline::PipelineLayout Cache::Pipeline::getPipelineAndDescriptorLayout( // Create the descriptor set layouts std::vector layouts; - if (!uniLayout.empty()) { + if (!uniLayout.empty() || !texLayout.empty() || !stoLayout.empty()) { VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCI = vks::initializers::descriptorSetLayoutCreateInfo(uniLayout.data(), uniLayout.size()); VkDescriptorSetLayout descriptorSetLayout; 
@@ -110,7 +110,8 @@ Cache::Pipeline::PipelineLayout Cache::Pipeline::getPipelineAndDescriptorLayout( layout.uniformLayout = descriptorSetLayout; } #if SEP_DESC - if (!texLayout.empty()) { + // Descriptor set needs to be created even if it's empty if later descriptor sets are not empty. + if (!texLayout.empty() || !stoLayout.empty()) { VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCI = vks::initializers::descriptorSetLayoutCreateInfo(texLayout.data(), texLayout.size()); VkDescriptorSetLayout descriptorSetLayout; diff --git a/libraries/gpu-vk/src/gpu/vk/VKTexture.cpp b/libraries/gpu-vk/src/gpu/vk/VKTexture.cpp index 58c48e783c..7a5d036cbb 100644 --- a/libraries/gpu-vk/src/gpu/vk/VKTexture.cpp +++ b/libraries/gpu-vk/src/gpu/vk/VKTexture.cpp @@ -204,15 +204,13 @@ void VKAttachmentTexture::createTexture(VKBackend &backend) { || _gpuObject.getTexelFormat().getSemantic() == gpu::R11G11B10 || _gpuObject.getTexelFormat().getSemantic() == gpu::SRGB || _gpuObject.getTexelFormat().getSemantic() == gpu::SRGBA) { - imageCI.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; + imageCI.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; } else if (_gpuObject.isDepthStencilRenderTarget()) { imageCI.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; } else { Q_ASSERT(false); } - auto device = _backend.lock()->getContext().device->logicalDevice; - // Create image for this attachment /*VK_CHECK_RESULT(vkCreateImage(device, &imageCI, nullptr, &_texture)); VkMemoryAllocateInfo memAlloc = vks::initializers::memoryAllocateInfo(); @@ -292,7 +290,7 @@ VkDescriptorImageInfo VKAttachmentTexture::getDescriptorImageInfo() { VkDescriptorImageInfo result {}; result.sampler = _vkSampler; - result.imageLayout = _vkImageLayout; + result.imageLayout = _vkImageLayout; // VKTODO: this needs to be updated on blits and other image writes result.imageView = _vkImageView; return 
result; }; diff --git a/libraries/gpu-vk/src/gpu/vk/VKTexture.h b/libraries/gpu-vk/src/gpu/vk/VKTexture.h index 53923f66a2..5eaa24e170 100644 --- a/libraries/gpu-vk/src/gpu/vk/VKTexture.h +++ b/libraries/gpu-vk/src/gpu/vk/VKTexture.h @@ -280,7 +280,7 @@ class VKAttachmentTexture : public VKFixedAllocationTexture { VkDescriptorImageInfo getDescriptorImageInfo() override; // VKTODO VkImageView _vkImageView { VK_NULL_HANDLE }; - VkImageLayout _vkImageLayout {}; // VKTODO + VkImageLayout _vkImageLayout {}; // VKTODO: this needs to be updated on blits and other image writes VkSampler _vkSampler { VK_NULL_HANDLE }; //VkImage _vkImage { VK_NULL_HANDLE }; //VkDeviceMemory _vkDeviceMemory{ VK_NULL_HANDLE }; @@ -306,7 +306,7 @@ class VKStrictResourceTexture: public VKFixedAllocationTexture { VkDescriptorImageInfo getDescriptorImageInfo() override; //VkImage _vkImage { VK_NULL_HANDLE }; VkImageView _vkImageView { VK_NULL_HANDLE }; - VkImageLayout _vkImageLayout {}; // VKTODO + VkImageLayout _vkImageLayout {}; VkSampler _vkSampler { VK_NULL_HANDLE }; // This need to be moved to VKFixedAllocationTexture and allocated in allocateStorage() //VkDeviceMemory _vkDeviceMemory{ VK_NULL_HANDLE }; @@ -343,7 +343,7 @@ class VKExternalTexture: public VKTexture { void postTransfer(VKBackend &backend) override; VkDescriptorImageInfo getDescriptorImageInfo() override; VkImageView _vkImageView { VK_NULL_HANDLE }; - VkImageLayout _vkImageLayout {}; // VKTODO + VkImageLayout _vkImageLayout {}; VkSampler _vkSampler { VK_NULL_HANDLE }; // Shared texture properties diff --git a/libraries/gpu/src/gpu/Texture.cpp b/libraries/gpu/src/gpu/Texture.cpp index 48159c5da5..68a5cd4c43 100644 --- a/libraries/gpu/src/gpu/Texture.cpp +++ b/libraries/gpu/src/gpu/Texture.cpp @@ -315,7 +315,7 @@ Texture::Size Texture::resize(Type type, const Element& texelFormat, uint16 widt } bool Texture::isColorRenderTarget() const { - return (_texelFormat.getSemantic() == gpu::RGBA); + return (_texelFormat.getSemantic() == 
gpu::RGBA); // TODO: this is wrong, especially since we use B10G11R11 render targets too } bool Texture::isDepthStencilRenderTarget() const { diff --git a/libraries/vk/src/vk/VulkanTools.cpp b/libraries/vk/src/vk/VulkanTools.cpp index 9fb42a30aa..5f10c0c195 100644 --- a/libraries/vk/src/vk/VulkanTools.cpp +++ b/libraries/vk/src/vk/VulkanTools.cpp @@ -290,24 +290,6 @@ namespace vks 1, &imageMemoryBarrier); } - // Fixed sub resource on first mip level and layer - void setImageLayout( - VkCommandBuffer cmdbuffer, - VkImage image, - VkImageAspectFlags aspectMask, - VkImageLayout oldImageLayout, - VkImageLayout newImageLayout, - VkPipelineStageFlags srcStageMask, - VkPipelineStageFlags dstStageMask) - { - VkImageSubresourceRange subresourceRange = {}; - subresourceRange.aspectMask = aspectMask; - subresourceRange.baseMipLevel = 0; - subresourceRange.levelCount = 1; - subresourceRange.layerCount = 1; - setImageLayout(cmdbuffer, image, oldImageLayout, newImageLayout, subresourceRange, srcStageMask, dstStageMask); - } - void insertImageMemoryBarrier( VkCommandBuffer cmdbuffer, VkImage image,