diff --git a/android/libraries/oculus/src/main/assets/shaders/present.vert b/android/libraries/oculus/src/main/assets/shaders/present.vert index dfd6b1412f4..510e517e10a 100644 --- a/android/libraries/oculus/src/main/assets/shaders/present.vert +++ b/android/libraries/oculus/src/main/assets/shaders/present.vert @@ -4,12 +4,21 @@ layout(location = 0) out vec4 vTexCoordLR; void main(void) { const float depth = 0.0; +#ifdef VULKAN + const vec4 UNIT_QUAD[4] = vec4[4]( + vec4(-1.0, 1.0, depth, 1.0), + vec4(1.0, 1.0, depth, 1.0), + vec4(-1.0, -1.0, depth, 1.0), + vec4(1.0, -1.0, depth, 1.0) + ); +#else const vec4 UNIT_QUAD[4] = vec4[4]( vec4(-1.0, -1.0, depth, 1.0), vec4(1.0, -1.0, depth, 1.0), vec4(-1.0, 1.0, depth, 1.0), vec4(1.0, 1.0, depth, 1.0) ); +#endif vec4 pos = UNIT_QUAD[gl_VertexID]; gl_Position = pos; vTexCoordLR.xy = pos.xy; diff --git a/libraries/gpu-vk/src/gpu/vk/VKBackend.cpp b/libraries/gpu-vk/src/gpu/vk/VKBackend.cpp index ed3a065b183..6eb4f51f069 100644 --- a/libraries/gpu-vk/src/gpu/vk/VKBackend.cpp +++ b/libraries/gpu-vk/src/gpu/vk/VKBackend.cpp @@ -226,9 +226,9 @@ void VKBackend::executeFrame(const FramePointer& frame) { if (batch.getName() == "SurfaceGeometryPass::run") { printf("SurfaceGeometryPass"); } - if (batch.getName() == "BlurGaussian::run") { + /*if (batch.getName() == "BlurGaussian::run") { continue; - } + }*/ cmdBeginLabel(commandBuffer, "batch:" + batch.getName(), glm::vec4{ 1, 1, 0, 1 }); const auto& commands = batch.getCommands(); const auto& offsets = batch.getCommandOffsets(); @@ -363,7 +363,9 @@ void VKBackend::executeFrame(const FramePointer& frame) { if (batch.getName() == "Resample::run") { _outputTexture = syncGPUObject(*_cache.pipelineState.framebuffer); } - + if (batch.getName() == "CompositeHUD") { + _outputTexture = syncGPUObject(*_cache.pipelineState.framebuffer); + } if (renderpassActive) { cmdEndLabel(commandBuffer); renderpassActive = false; @@ -1071,10 +1073,21 @@ void VKBackend::resetQueryStage() { } void 
VKBackend::updateRenderPass() { + // If framebuffer has changed, it means that render pass changed. _currentRenderPass is set to nullptr in such case. + if (_hasFramebufferChanged) { + Q_ASSERT(_currentVkRenderPass == nullptr); + // Current renderpass has ended and vkCmdEndRenderPass was already called so we can set proper layouts here + // and avoid generating render pass twice. + if (_currentFramebuffer) { + updateAttachmentLayoutsAfterRenderPass(); + } + transitionAttachmentImageLayouts(*_cache.pipelineState.framebuffer); + } + // Retrieve from cache or create render pass. auto renderPass = _cache.pipelineState.getRenderPass(_context); auto framebuffer = syncGPUObject(*_cache.pipelineState.framebuffer); - // Current render pass is already up to date + // Current render pass is already up to date. // VKTODO: check if framebuffer has changed and if so update render pass too if (_currentVkRenderPass == renderPass && _currentVkFramebuffer == framebuffer->vkFramebuffer) { return; @@ -1084,12 +1097,19 @@ void VKBackend::updateRenderPass() { if (_currentVkRenderPass) { vkCmdEndRenderPass(_currentCommandBuffer); updateAttachmentLayoutsAfterRenderPass(); + transitionAttachmentImageLayouts(*_cache.pipelineState.framebuffer); } + // Input image layouts shouldn't affect render pass and always need to be done in between transitionInputImageLayouts(); + // Render pass needs to be retrieved twice, since `updateAttachmentLayoutsAfterRenderPass` and `transitionAttachmentImageLayouts` + // can be called only once we know that renderpass ended and may change attachment image layouts and thus change render pass again. 
+ renderPass = _cache.pipelineState.getRenderPass(_context); + _currentVkRenderPass = renderPass; _currentFramebuffer = _cache.pipelineState.framebuffer; _currentVkFramebuffer = framebuffer->vkFramebuffer; + _hasFramebufferChanged = false; auto renderPassBeginInfo = vks::initializers::renderPassBeginInfo(); renderPassBeginInfo.renderPass = renderPass; @@ -1370,6 +1390,9 @@ void VKBackend::renderPassDraw(const Batch& batch) { offset++; } resetRenderPass(); + if (_currentVkRenderPass) { + updateAttachmentLayoutsAfterRenderPass(); + } _currentVkRenderPass = VK_NULL_HANDLE; _currentFramebuffer = nullptr; // VKTODO: which other stages should be reset here? @@ -1855,6 +1878,10 @@ void VKBackend::FrameData::addGlUniform(size_t size, const void* data, size_t co VKBackend::FrameData::FrameData(VKBackend *backend) : _backend(backend) { createDescriptorPool(); + _cameraCorrectionBuffer.edit() = CameraCorrection(); + _cameraCorrectionBuffer._buffer->flush(); + _cameraCorrectionBufferIdentity.edit() = CameraCorrection(); + _cameraCorrectionBufferIdentity._buffer->flush(); } VKBackend::FrameData::~FrameData() { @@ -1964,20 +1991,51 @@ void VKBackend::transitionInputImageLayouts() { mipSubRange); attachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; } else if (attachmentTexture->_vkImageLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) { - VkImageSubresourceRange mipSubRange = {}; - mipSubRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; - mipSubRange.baseMipLevel = 0; - mipSubRange.levelCount = 1; - mipSubRange.layerCount = 1; - vks::tools::insertImageMemoryBarrier(_currentCommandBuffer, attachmentTexture->_vkImage, - VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, - VK_ACCESS_SHADER_READ_BIT, - VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, - VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, - VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, // VKTODO - VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, // VKTODO - mipSubRange); - 
attachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + // Sometimes texture is used both as an input and as a depth stencil framebuffer attachment, we need to check for this + bool isAttachment = false; + auto &framebuffer = _cache.pipelineState.framebuffer; + if (framebuffer) { + auto depthStencilBuffer = _cache.pipelineState.framebuffer->getDepthStencilBuffer(); + if (depthStencilBuffer) { + auto depthStencilGpuObject = Backend::getGPUObject(*depthStencilBuffer); + if (depthStencilGpuObject) { + if (depthStencilGpuObject->_vkImage == attachmentTexture->_vkImage) { + isAttachment = true; + } + } + } + } + if (isAttachment) { + VkImageSubresourceRange mipSubRange = {}; + mipSubRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; + mipSubRange.baseMipLevel = 0; + mipSubRange.levelCount = 1; + mipSubRange.layerCount = 1; + vks::tools::insertImageMemoryBarrier(_currentCommandBuffer, attachmentTexture->_vkImage, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, + VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_GENERAL, // VKTODO: is there a better layout for this use case? 
+ VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, // VKTODO + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, // VKTODO + mipSubRange); + attachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_GENERAL; + } else { + VkImageSubresourceRange mipSubRange = {}; + mipSubRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; + mipSubRange.baseMipLevel = 0; + mipSubRange.levelCount = 1; + mipSubRange.layerCount = 1; + vks::tools::insertImageMemoryBarrier(_currentCommandBuffer, attachmentTexture->_vkImage, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, + VK_ACCESS_SHADER_READ_BIT, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, // VKTODO + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, // VKTODO + mipSubRange); + attachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + } }; } @@ -1994,12 +2052,13 @@ void VKBackend::transitionAttachmentImageLayouts(gpu::Framebuffer &framebuffer) continue; } auto attachmentTexture = dynamic_cast(gpuObject); - if (attachmentTexture) { + if (!attachmentTexture) { continue; } if (attachmentTexture->_vkImageLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { if (attachmentTexture->_gpuObject.isDepthStencilRenderTarget()) { + // VKTODO: Check if the same depth render target is used as one of the inputs, if so then don't update it here VkImageSubresourceRange mipSubRange = {}; mipSubRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; mipSubRange.baseMipLevel = 0; @@ -2039,11 +2098,11 @@ void VKBackend::transitionAttachmentImageLayouts(gpu::Framebuffer &framebuffer) return; } auto attachmentTexture = dynamic_cast(gpuObject); - if (attachmentTexture) { + if (!attachmentTexture) { return; } if (attachmentTexture->_vkImageLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { - Q_ASSERT(attachmentTexture->_gpuObject.isDepthStencilRenderTarget()); + Q_ASSERT(attachmentTexture->_gpuObject.isDepthStencilRenderTarget()); 
VkImageSubresourceRange mipSubRange = {}; mipSubRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; mipSubRange.baseMipLevel = 0; @@ -2259,6 +2318,14 @@ void VKBackend::updateTransform(const gpu::Batch& batch) { vkCmdBindVertexBuffers(_currentCommandBuffer, gpu::Stream::DRAW_CALL_INFO, 1, &_currentFrame->_drawCallInfoBuffer->buffer, &vkOffset); //glBindVertexBuffer(gpu::Stream::DRAW_CALL_INFO, _transform._drawCallInfoBuffer, (GLintptr)_transform._drawCallInfoOffsets[batch._currentNamedCall], 2 * sizeof(GLushort)); } + _resource._buffers; + + // VKTODO: camera correction + auto* cameraCorrectionObject = syncGPUObject(*_currentFrame->_cameraCorrectionBuffer._buffer); + Q_ASSERT(cameraCorrectionObject); + _uniform._buffers[gpu::slot::buffer::CameraCorrection].buffer = _currentFrame->_cameraCorrectionBuffer._buffer.get(); + _uniform._buffers[gpu::slot::buffer::CameraCorrection].offset = _currentFrame->_cameraCorrectionBuffer._offset; + _uniform._buffers[gpu::slot::buffer::CameraCorrection].size = _currentFrame->_cameraCorrectionBuffer._size; } void VKBackend::updatePipeline() { @@ -2539,9 +2606,9 @@ void VKBackend::do_multiDrawIndexedIndirect(const Batch& batch, size_t paramOffs void VKBackend::do_setFramebuffer(const Batch& batch, size_t paramOffset) { auto framebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint); _cache.pipelineState.setFramebuffer(framebuffer); - if (framebuffer) { - transitionAttachmentImageLayouts(*framebuffer); - } + + resetRenderPass(); + _hasFramebufferChanged = true; // VKTODO? 
/*auto framebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint); if (_output._framebuffer != framebuffer) { @@ -2561,6 +2628,8 @@ void VKBackend::do_setFramebufferSwapChain(const Batch& batch, size_t paramOffse auto index = batch._params[paramOffset + 1]._uint; const auto& framebuffer = swapChain->get(index); _cache.pipelineState.setFramebuffer(framebuffer); + resetRenderPass(); + _hasFramebufferChanged = true; } } @@ -2623,12 +2692,6 @@ void VKBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) { } else { attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; } - if ((masks & Framebuffer::BUFFER_DEPTH) && (masks & Framebuffer::BUFFER_STENCIL)) { - attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; - } else { - attachment.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; - } - attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; // Texture state needs to be updated auto depthStencil = framebuffer->getDepthStencilBuffer(); Q_ASSERT(depthStencil); @@ -2636,9 +2699,25 @@ void VKBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) { Q_ASSERT(gpuDepthStencil); auto depthStencilAttachmentTexture = dynamic_cast(gpuDepthStencil); Q_ASSERT(depthStencilAttachmentTexture); - depthStencilAttachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + + if ((masks & Framebuffer::BUFFER_DEPTH) && (masks & Framebuffer::BUFFER_STENCIL)) { + attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + depthStencilAttachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + depthReference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + } else { + if (depthStencilAttachmentTexture->_vkImageLayout == VK_IMAGE_LAYOUT_GENERAL) { + attachment.initialLayout = VK_IMAGE_LAYOUT_GENERAL; + attachment.finalLayout = VK_IMAGE_LAYOUT_GENERAL; + depthReference.layout = 
VK_IMAGE_LAYOUT_GENERAL; + } else { + attachment.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + depthStencilAttachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + depthReference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + } + } depthReference.attachment = (uint32_t)(attachments.size()); - depthReference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; } else { clearValues.push_back(VkClearValue{.color = VkClearColorValue{.float32 = { color.x, color.y, color.z, color.w }}}); if (masks & Framebuffer::BUFFER_COLORS) { diff --git a/libraries/gpu-vk/src/gpu/vk/VKBackend.h b/libraries/gpu-vk/src/gpu/vk/VKBackend.h index cba669cc163..805dd35eb19 100644 --- a/libraries/gpu-vk/src/gpu/vk/VKBackend.h +++ b/libraries/gpu-vk/src/gpu/vk/VKBackend.h @@ -263,6 +263,7 @@ class VKBackend : public Backend, public std::enable_shared_from_this VkRenderPass _currentVkRenderPass{ VK_NULL_HANDLE }; gpu::FramebufferReference _currentFramebuffer{ nullptr }; // Framebuffer used in currently happening render pass VkFramebuffer _currentVkFramebuffer{ VK_NULL_HANDLE }; // Framebuffer used in currently happening render pass + bool _hasFramebufferChanged {false}; // Set to true when batch calls setFramebuffer command. Used to end render pass and update input image layouts. // Checks if renderpass change is needed and changes it if required void updateRenderPass(); void updateAttachmentLayoutsAfterRenderPass(); @@ -287,6 +288,9 @@ class VKBackend : public Backend, public std::enable_shared_from_this std::unordered_map _glUniformOffsetMap; size_t _glUniformBufferPosition {0}; // Position where data from next glUniform... 
call is placed + BufferView _cameraCorrectionBuffer { gpu::BufferView(std::make_shared(gpu::Buffer::UniformBuffer, sizeof(CameraCorrection), nullptr )) }; + BufferView _cameraCorrectionBufferIdentity { gpu::BufferView(std::make_shared(gpu::Buffer::UniformBuffer, sizeof(CameraCorrection), nullptr )) }; + void addGlUniform(size_t size, const void *data, size_t commandIndex); FrameData(VKBackend *backend); @@ -452,8 +456,8 @@ class VKBackend : public Backend, public std::enable_shared_from_this // VKTODO: quick hack VKFramebuffer *_outputTexture{ nullptr }; protected: - void transitionInputImageLayouts(); - void transitionAttachmentImageLayouts(gpu::Framebuffer &framebuffer); + void transitionInputImageLayouts(); // This can be called only from `updateRenderPass` + void transitionAttachmentImageLayouts(gpu::Framebuffer &framebuffer); // This can be called only from `updateRenderPass` // These are filled by syncGPUObject() calls, and are needed to track backend objects so that they can be destroyed before // destroying backend. 
diff --git a/libraries/gpu-vk/src/gpu/vk/VKPipelineCache.cpp b/libraries/gpu-vk/src/gpu/vk/VKPipelineCache.cpp index abc65d61c07..de3c3163203 100644 --- a/libraries/gpu-vk/src/gpu/vk/VKPipelineCache.cpp +++ b/libraries/gpu-vk/src/gpu/vk/VKPipelineCache.cpp @@ -229,16 +229,25 @@ VkRenderPass Cache::Pipeline::getRenderPass(const vks::Context& context) { if (isDepthStencilFormat(formatAndLayout.first)) { if (!attachmentTexture || attachmentTexture->getVkImageLayout() == VK_IMAGE_LAYOUT_UNDEFINED) { attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; } else { - attachment.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + if (attachmentTexture->getVkImageLayout() == VK_IMAGE_LAYOUT_GENERAL) { + attachment.initialLayout = VK_IMAGE_LAYOUT_GENERAL; + attachment.finalLayout = VK_IMAGE_LAYOUT_GENERAL; + depthReference.layout = VK_IMAGE_LAYOUT_GENERAL; + } else { + Q_ASSERT(attachmentTexture->getVkImageLayout() == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL); + attachment.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + depthReference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + } } - attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; depthReference.attachment = (uint32_t)(attachments.size()); - depthReference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; } else { if (!attachmentTexture || attachmentTexture->getVkImageLayout() == VK_IMAGE_LAYOUT_UNDEFINED) { attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; } else { + Q_ASSERT(attachmentTexture->getVkImageLayout() == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); attachment.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; } attachment.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; diff --git a/libraries/gpu/src/gpu/Buffer.h b/libraries/gpu/src/gpu/Buffer.h index 
1868a221839..8747084e054 100644 --- a/libraries/gpu/src/gpu/Buffer.h +++ b/libraries/gpu/src/gpu/Buffer.h @@ -182,6 +182,7 @@ class Buffer : public Resource { // FIXME find a more generic way to do this. friend class ::gpu::vk::VKBuffer; + friend class ::gpu::vk::VKBackend; friend class gl::GLBackend; friend class gl::GLBuffer; friend class gl41::GL41Buffer; diff --git a/libraries/gpu/src/gpu/DrawTexcoordRectTransformUnitQuad.slv b/libraries/gpu/src/gpu/DrawTexcoordRectTransformUnitQuad.slv index 30c478f3eb7..ba0eed1a22a 100755 --- a/libraries/gpu/src/gpu/DrawTexcoordRectTransformUnitQuad.slv +++ b/libraries/gpu/src/gpu/DrawTexcoordRectTransformUnitQuad.slv @@ -32,12 +32,21 @@ UNIFORM_BUFFER(0, texcoordRectBuffer) { OUTPUT(0, vec2, varTexCoord0); void main(void) { +#ifdef VULKAN + const vec4 UNIT_QUAD[4] = vec4[4]( + vec4(-1.0, 1.0, 0.0, 1.0), + vec4(1.0, 1.0, 0.0, 1.0), + vec4(-1.0, -1.0, 0.0, 1.0), + vec4(1.0, -1.0, 0.0, 1.0) + ); +#else const vec4 UNIT_QUAD[4] = vec4[4]( vec4(-1.0, -1.0, 0.0, 1.0), vec4(1.0, -1.0, 0.0, 1.0), vec4(-1.0, 1.0, 0.0, 1.0), vec4(1.0, 1.0, 0.0, 1.0) ); +#endif vec4 pos = UNIT_QUAD[gl_VertexID]; // standard transform diff --git a/libraries/gpu/src/gpu/DrawTransformUnitQuad.slv b/libraries/gpu/src/gpu/DrawTransformUnitQuad.slv index 70414f57bd1..2f23299610c 100755 --- a/libraries/gpu/src/gpu/DrawTransformUnitQuad.slv +++ b/libraries/gpu/src/gpu/DrawTransformUnitQuad.slv @@ -21,12 +21,21 @@ OUTPUT(0, vec2, varTexCoord0); void main(void) { +#ifdef VULKAN + const vec4 UNIT_QUAD[4] = vec4[4]( + vec4(-1.0, 1.0, 0.0, 1.0), + vec4(1.0, 1.0, 0.0, 1.0), + vec4(-1.0, -1.0, 0.0, 1.0), + vec4(1.0, -1.0, 0.0, 1.0) + ); +#else const vec4 UNIT_QUAD[4] = vec4[4]( vec4(-1.0, -1.0, 0.0, 1.0), vec4(1.0, -1.0, 0.0, 1.0), vec4(-1.0, 1.0, 0.0, 1.0), vec4(1.0, 1.0, 0.0, 1.0) ); +#endif vec4 pos = UNIT_QUAD[gl_VertexID]; // standard transform diff --git a/libraries/gpu/src/gpu/DrawUnitQuadTexcoord.slv b/libraries/gpu/src/gpu/DrawUnitQuadTexcoord.slv index 
34d6394c063..311347baf3a 100644 --- a/libraries/gpu/src/gpu/DrawUnitQuadTexcoord.slv +++ b/libraries/gpu/src/gpu/DrawUnitQuadTexcoord.slv @@ -17,12 +17,21 @@ OUTPUT(0, vec2, varTexCoord0); void main(void) { const float depth = 1.0; +#ifdef VULKAN + const vec4 UNIT_QUAD[4] = vec4[4]( + vec4(-1.0, 1.0, depth, 1.0), + vec4(1.0, 1.0, depth, 1.0), + vec4(-1.0, -1.0, depth, 1.0), + vec4(1.0, -1.0, depth, 1.0) + ); +#else const vec4 UNIT_QUAD[4] = vec4[4]( vec4(-1.0, -1.0, depth, 1.0), vec4(1.0, -1.0, depth, 1.0), vec4(-1.0, 1.0, depth, 1.0), vec4(1.0, 1.0, depth, 1.0) ); +#endif vec4 pos = UNIT_QUAD[gl_VertexID]; varTexCoord0 = (pos.xy + 1.0) * 0.5; diff --git a/libraries/gpu/src/gpu/DrawViewportQuadTransformTexcoord.slv b/libraries/gpu/src/gpu/DrawViewportQuadTransformTexcoord.slv index 855d95f5edc..544ce6e75a6 100755 --- a/libraries/gpu/src/gpu/DrawViewportQuadTransformTexcoord.slv +++ b/libraries/gpu/src/gpu/DrawViewportQuadTransformTexcoord.slv @@ -21,12 +21,21 @@ OUTPUT(0, vec2, varTexCoord0); void main(void) { +#ifdef VULKAN + const vec4 UNIT_QUAD[4] = vec4[4]( + vec4(-1.0, 1.0, 0.0, 1.0), + vec4(1.0, 1.0, 0.0, 1.0), + vec4(-1.0, -1.0, 0.0, 1.0), + vec4(1.0, -1.0, 0.0, 1.0) + ); +#else const vec4 UNIT_QUAD[4] = vec4[4]( vec4(-1.0, -1.0, 0.0, 1.0), vec4(1.0, -1.0, 0.0, 1.0), vec4(-1.0, 1.0, 0.0, 1.0), vec4(1.0, 1.0, 0.0, 1.0) ); +#endif vec4 pos = UNIT_QUAD[gl_VertexID]; // standard transform but applied to the Texcoord diff --git a/libraries/graphics/src/graphics/skybox.slv b/libraries/graphics/src/graphics/skybox.slv index 1df9a455c2d..bd5ea163189 100755 --- a/libraries/graphics/src/graphics/skybox.slv +++ b/libraries/graphics/src/graphics/skybox.slv @@ -17,12 +17,21 @@ OUTPUT(0, vec3, _normal); void main(void) { const float depth = 0.0; - const vec4 UNIT_QUAD[4] = vec4[4]( - vec4(-1.0, -1.0, depth, 1.0), - vec4(1.0, -1.0, depth, 1.0), - vec4(-1.0, 1.0, depth, 1.0), - vec4(1.0, 1.0, depth, 1.0) - ); + #ifdef VULKAN + const vec4 UNIT_QUAD[4] = vec4[4]( + vec4(-1.0, 
1.0, depth, 1.0), + vec4(1.0, 1.0, depth, 1.0), + vec4(-1.0, -1.0, depth, 1.0), + vec4(1.0, -1.0, depth, 1.0) + ); + #else + const vec4 UNIT_QUAD[4] = vec4[4]( + vec4(-1.0, -1.0, depth, 1.0), + vec4(1.0, -1.0, depth, 1.0), + vec4(-1.0, 1.0, depth, 1.0), + vec4(1.0, 1.0, depth, 1.0) + ); + #endif vec4 inPosition = UNIT_QUAD[gl_VertexID]; // standard transform diff --git a/libraries/render-utils/src/DeferredFrameTransform.cpp b/libraries/render-utils/src/DeferredFrameTransform.cpp index 27a8b30801f..02bef93e8d4 100644 --- a/libraries/render-utils/src/DeferredFrameTransform.cpp +++ b/libraries/render-utils/src/DeferredFrameTransform.cpp @@ -37,6 +37,8 @@ void DeferredFrameTransform::update(RenderArgs* args, glm::vec2 jitter) { cameraTransform.getInverseMatrix(frameTransformBuffer.view); args->getViewFrustum().evalProjectionMatrix(frameTransformBuffer.projectionMono); + // VKTODO: This breaks things for OpenGL. + frameTransformBuffer.projectionMono = glm::scale(frameTransformBuffer.projectionMono, glm::vec3(1.0f, -1.0f, 1.0f)); // There may be some sort of mismatch here if the viewport size isn't the same as the frame buffer size as // jitter is normalized by frame buffer size in TransformCamera. But we should be safe. 
diff --git a/libraries/render-utils/src/deferred_light.slv b/libraries/render-utils/src/deferred_light.slv index 51c0fa499ae..ca342797f5f 100644 --- a/libraries/render-utils/src/deferred_light.slv +++ b/libraries/render-utils/src/deferred_light.slv @@ -18,12 +18,21 @@ OUTPUT(RENDER_UTILS_ATTR_TEXCOORD01, vec4, _texCoord01); void main(void) { const float depth = 1.0; + #ifdef VULKAN + const mat4 UNIT_QUAD = mat4( + vec4(-1.0, 1.0, depth, 1.0), + vec4(1.0, 1.0, depth, 1.0), + vec4(-1.0, -1.0, depth, 1.0), + vec4(1.0, -1.0, depth, 1.0) + ); + #else const mat4 UNIT_QUAD = mat4( vec4(-1.0, -1.0, depth, 1.0), vec4(1.0, -1.0, depth, 1.0), vec4(-1.0, 1.0, depth, 1.0), vec4(1.0, 1.0, depth, 1.0) ); + #endif vec4 pos = UNIT_QUAD[gl_VertexID]; _texCoord01 = vec4((pos.xy + 1.0) * 0.5, 0.0, 0.0); diff --git a/libraries/render-utils/src/deferred_light_limited.slv b/libraries/render-utils/src/deferred_light_limited.slv index a09c71178fa..62e027298d8 100644 --- a/libraries/render-utils/src/deferred_light_limited.slv +++ b/libraries/render-utils/src/deferred_light_limited.slv @@ -45,12 +45,21 @@ void main(void) { _texCoord01 = vec4(projected.xy, 0.0, 1.0) * gl_Position.w; } else { const float depth = -1.0; //Draw at near plane +#ifdef VULKAN + const vec4 UNIT_QUAD[4] = vec4[4]( + vec4(-1.0, 1.0, depth, 1.0), + vec4(1.0, 1.0, depth, 1.0), + vec4(-1.0, -1.0, depth, 1.0), + vec4(1.0, -1.0, depth, 1.0) + ); +#else const vec4 UNIT_QUAD[4] = vec4[4]( vec4(-1.0, -1.0, depth, 1.0), vec4(1.0, -1.0, depth, 1.0), vec4(-1.0, 1.0, depth, 1.0), vec4(1.0, 1.0, depth, 1.0) ); +#endif vec4 pos = UNIT_QUAD[gl_VertexID]; diff --git a/libraries/render-utils/src/ssao_bilateralBlur.slv b/libraries/render-utils/src/ssao_bilateralBlur.slv index d45fdf83601..67cd9f360ad 100644 --- a/libraries/render-utils/src/ssao_bilateralBlur.slv +++ b/libraries/render-utils/src/ssao_bilateralBlur.slv @@ -21,12 +21,21 @@ layout(location=0) out vec4 varTexCoord0; void main(void) { +#ifdef VULKAN + const vec4 UNIT_QUAD[4] 
= vec4[4]( + vec4(-1.0, 1.0, 0.0, 1.0), + vec4(1.0, 1.0, 0.0, 1.0), + vec4(-1.0, -1.0, 0.0, 1.0), + vec4(1.0, -1.0, 0.0, 1.0) + ); +#else const vec4 UNIT_QUAD[4] = vec4[4]( vec4(-1.0, -1.0, 0.0, 1.0), vec4(1.0, -1.0, 0.0, 1.0), vec4(-1.0, 1.0, 0.0, 1.0), vec4(1.0, 1.0, 0.0, 1.0) ); +#endif vec4 pos = UNIT_QUAD[gl_VertexID]; // standard transform but applied to the Texcoord diff --git a/libraries/render/src/render/drawItemStatus.slv b/libraries/render/src/render/drawItemStatus.slv index b9d2bfedb09..9c995040843 100644 --- a/libraries/render/src/render/drawItemStatus.slv +++ b/libraries/render/src/render/drawItemStatus.slv @@ -74,7 +74,7 @@ void main(void) { vec4(-1.0, 1.0, 0.0, 1.0), vec4(1.0, -1.0, 0.0, 1.0), vec4(1.0, 1.0, 0.0, 1.0) - ); + ); // VKTODO: does this need to be inverted? const vec2 ICON_PIXEL_SIZE = vec2(36, 36); const vec2 MARGIN_PIXEL_SIZE = vec2(2, 2); diff --git a/libraries/vk/src/vk/VulkanDebug.cpp b/libraries/vk/src/vk/VulkanDebug.cpp index 62bac862b0c..f9f89f03f30 100644 --- a/libraries/vk/src/vk/VulkanDebug.cpp +++ b/libraries/vk/src/vk/VulkanDebug.cpp @@ -73,7 +73,7 @@ namespace vks } else { std::cout << debugMessage.str() << "\n\n"; } - fflush(stdout); + fflush(stdout); #endif diff --git a/libraries/vk/src/vk/VulkanSwapChain.cpp b/libraries/vk/src/vk/VulkanSwapChain.cpp index 4a0cbfb90cc..f3e11f2c641 100644 --- a/libraries/vk/src/vk/VulkanSwapChain.cpp +++ b/libraries/vk/src/vk/VulkanSwapChain.cpp @@ -186,9 +186,10 @@ void VulkanSwapChain::initSurface(screen_context_t screen_context, screen_window // Initialize the format to the first one returned by the implementation in case we can't find one of the preffered formats VkSurfaceFormatKHR selectedFormat = surfaceFormats[0]; std::vector preferredImageFormats = { - VK_FORMAT_B8G8R8A8_UNORM, + VK_FORMAT_B8G8R8A8_SRGB + /*VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM, - VK_FORMAT_A8B8G8R8_UNORM_PACK32 + VK_FORMAT_A8B8G8R8_UNORM_PACK32 */ }; for (auto& availableFormat : surfaceFormats) {