35 | 35 |
namespace Diligent
|
36 | 36 |
{
|
37 | 37 |
|
38 | |
// Constructs the Vulkan swap chain implementation.
// Creates the OS-specific surface, the VkSwapchainKHR, the back-buffer
// textures/views, and acquires the first presentable image so the swap
// chain is immediately usable by the immediate context.
SwapChainVkImpl::SwapChainVkImpl(IReferenceCounters*  pRefCounters,
                                 const SwapChainDesc& SCDesc,
                                 RenderDeviceVkImpl*  pRenderDeviceVk,
                                 DeviceContextVkImpl* pDeviceContextVk,
                                 const NativeWindow&  Window) :
    // clang-format off
    TSwapChainBase               {pRefCounters, pRenderDeviceVk, pDeviceContextVk, SCDesc},
    m_Window                     {Window},
    m_VulkanInstance             {pRenderDeviceVk->GetVulkanInstance()},
    // Remember the buffer count requested by the application; the actual
    // count may be adjusted by CreateVulkanSwapChain() to fit surface limits.
    m_DesiredBufferCount         {SCDesc.BufferCount},
    m_pBackBufferRTV             (STD_ALLOCATOR_RAW_MEM(RefCntAutoPtr<ITextureView>, GetRawAllocator(), "Allocator for vector<RefCntAutoPtr<ITextureView>>")),
    m_SwapChainImagesInitialized (STD_ALLOCATOR_RAW_MEM(bool, GetRawAllocator(), "Allocator for vector<bool>")),
    m_ImageAcquiredFenceSubmitted(STD_ALLOCATOR_RAW_MEM(bool, GetRawAllocator(), "Allocator for vector<bool>"))
    // clang-format on
{
    CreateSurface();
    CreateVulkanSwapChain();
    InitBuffersAndViews();
    // Acquire the first image up front; the context will wait on the
    // image-acquired semaphore before its first command.
    auto res = AcquireNextImage(pDeviceContextVk);
    DEV_CHECK_ERR(res == VK_SUCCESS, "Failed to acquire next image for the newly created swap chain");
    (void)res;
}
|
60 | |
|
61 | |
void SwapChainVkImpl::CreateSurface()
|
62 | |
{
|
63 | |
if (m_VkSurface != VK_NULL_HANDLE)
|
64 | |
{
|
65 | |
vkDestroySurfaceKHR(m_VulkanInstance->GetVkInstance(), m_VkSurface, NULL);
|
66 | |
m_VkSurface = VK_NULL_HANDLE;
|
67 | |
}
|
68 | |
|
69 | |
// Create OS-specific surface
|
70 | |
VkResult err = VK_ERROR_INITIALIZATION_FAILED;
|
71 | |
#if defined(VK_USE_PLATFORM_WIN32_KHR)
|
72 | |
if (m_Window.hWnd != NULL)
|
73 | |
{
|
74 | |
VkWin32SurfaceCreateInfoKHR surfaceCreateInfo = {};
|
75 | |
|
76 | |
surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
|
77 | |
surfaceCreateInfo.hinstance = GetModuleHandle(NULL);
|
78 | |
surfaceCreateInfo.hwnd = (HWND)m_Window.hWnd;
|
79 | |
|
80 | |
err = vkCreateWin32SurfaceKHR(m_VulkanInstance->GetVkInstance(), &surfaceCreateInfo, nullptr, &m_VkSurface);
|
81 | |
}
|
82 | |
#elif defined(VK_USE_PLATFORM_ANDROID_KHR)
|
83 | |
if (m_Window.pAWindow != nullptr)
|
84 | |
{
|
85 | |
VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo = {};
|
86 | |
|
87 | |
surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
|
88 | |
surfaceCreateInfo.window = (ANativeWindow*)m_Window.pAWindow;
|
89 | |
|
90 | |
err = vkCreateAndroidSurfaceKHR(m_VulkanInstance->GetVkInstance(), &surfaceCreateInfo, NULL, &m_VkSurface);
|
91 | |
}
|
92 | |
#elif defined(VK_USE_PLATFORM_IOS_MVK)
|
93 | |
if (m_Window.pCALayer != nullptr)
|
94 | |
{
|
95 | |
VkIOSSurfaceCreateInfoMVK surfaceCreateInfo = {};
|
96 | |
|
97 | |
surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK;
|
98 | |
surfaceCreateInfo.pView = m_Window.pCALayer;
|
99 | |
|
100 | |
err = vkCreateIOSSurfaceMVK(m_VulkanInstance->GetVkInstance(), &surfaceCreateInfo, nullptr, &m_VkSurface);
|
101 | |
}
|
102 | |
#elif defined(VK_USE_PLATFORM_MACOS_MVK)
|
103 | |
if (m_Window.pNSView != nullptr)
|
104 | |
{
|
105 | |
VkMacOSSurfaceCreateInfoMVK surfaceCreateInfo = {};
|
106 | |
|
107 | |
surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK;
|
108 | |
surfaceCreateInfo.pView = m_Window.pNSView;
|
109 | |
|
110 | |
err = vkCreateMacOSSurfaceMVK(m_VulkanInstance->GetVkInstance(), &surfaceCreateInfo, NULL, &m_VkSurface);
|
111 | |
}
|
112 | |
#elif defined(VK_USE_PLATFORM_WAYLAND_KHR)
|
113 | |
if (m_Window.pDisplay != nullptr)
|
114 | |
{
|
115 | |
VkWaylandSurfaceCreateInfoKHR surfaceCreateInfo = {};
|
116 | |
|
117 | |
surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR;
|
118 | |
surfaceCreateInfo.display = reinterpret_cast<struct wl_display*>(m_Window.pDisplay);
|
119 | |
surfaceCreateInfo.Surface = reinterpret_cast<struct wl_surface*>(nullptr);
|
120 | |
|
121 | |
err = vkCreateWaylandSurfaceKHR(m_VulkanInstance->GetVkInstance(), &surfaceCreateInfo, nullptr, &m_VkSurface);
|
122 | |
}
|
123 | |
#elif defined(VK_USE_PLATFORM_XCB_KHR) || defined(VK_USE_PLATFORM_XLIB_KHR)
|
124 | |
|
125 | |
# if defined(VK_USE_PLATFORM_XCB_KHR)
|
126 | |
if (m_Window.pXCBConnection != nullptr && m_Window.WindowId != 0)
|
127 | |
{
|
128 | |
VkXcbSurfaceCreateInfoKHR surfaceCreateInfo = {};
|
129 | |
|
130 | |
surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
|
131 | |
surfaceCreateInfo.connection = reinterpret_cast<xcb_connection_t*>(m_Window.pXCBConnection);
|
132 | |
surfaceCreateInfo.window = m_Window.WindowId;
|
133 | |
|
134 | |
err = vkCreateXcbSurfaceKHR(m_VulkanInstance->GetVkInstance(), &surfaceCreateInfo, nullptr, &m_VkSurface);
|
135 | |
}
|
136 | |
# endif
|
137 | |
|
138 | |
# if defined(VK_USE_PLATFORM_XLIB_KHR)
|
139 | |
if ((m_Window.pDisplay != nullptr && m_Window.WindowId != 0) && m_VkSurface == VK_NULL_HANDLE)
|
140 | |
{
|
141 | |
VkXlibSurfaceCreateInfoKHR surfaceCreateInfo = {};
|
142 | |
|
143 | |
surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
|
144 | |
surfaceCreateInfo.dpy = reinterpret_cast<Display*>(m_Window.pDisplay);
|
145 | |
surfaceCreateInfo.window = m_Window.WindowId;
|
146 | |
|
147 | |
err = vkCreateXlibSurfaceKHR(m_VulkanInstance->GetVkInstance(), &surfaceCreateInfo, nullptr, &m_VkSurface);
|
148 | |
}
|
149 | |
# endif
|
150 | |
|
151 | |
#endif
|
152 | |
|
153 | |
CHECK_VK_ERROR_AND_THROW(err, "Failed to create OS-specific surface");
|
154 | |
|
155 | |
auto* pRenderDeviceVk = m_pRenderDevice.RawPtr<RenderDeviceVkImpl>();
|
156 | |
const auto& PhysicalDevice = pRenderDeviceVk->GetPhysicalDevice();
|
157 | |
auto& CmdQueueVK = pRenderDeviceVk->GetCommandQueue(0);
|
158 | |
auto QueueFamilyIndex = CmdQueueVK.GetQueueFamilyIndex();
|
159 | |
if (!PhysicalDevice.CheckPresentSupport(QueueFamilyIndex, m_VkSurface))
|
160 | |
{
|
161 | |
LOG_ERROR_AND_THROW("Selected physical device does not support present capability.\n"
|
162 | |
"There could be few ways to mitigate this problem. One is to try to find another queue that supports present, but does not support graphics and compute capabilities."
|
163 | |
"Another way is to find another physical device that exposes queue family that supports present and graphics capability. Neither apporach is currently implemented in Diligent Engine.");
|
164 | |
}
|
165 | |
}
|
166 | |
|
167 | |
void SwapChainVkImpl::CreateVulkanSwapChain()
|
168 | |
{
|
169 | |
auto* pRenderDeviceVk = m_pRenderDevice.RawPtr<RenderDeviceVkImpl>();
|
170 | |
const auto& PhysicalDevice = pRenderDeviceVk->GetPhysicalDevice();
|
171 | |
auto vkDeviceHandle = PhysicalDevice.GetVkDeviceHandle();
|
172 | |
// Get the list of VkFormats that are supported:
|
173 | |
uint32_t formatCount = 0;
|
174 | |
|
175 | |
auto err = vkGetPhysicalDeviceSurfaceFormatsKHR(vkDeviceHandle, m_VkSurface, &formatCount, NULL);
|
176 | |
CHECK_VK_ERROR_AND_THROW(err, "Failed to query number of supported formats");
|
177 | |
VERIFY_EXPR(formatCount > 0);
|
178 | |
std::vector<VkSurfaceFormatKHR> SupportedFormats(formatCount);
|
179 | |
err = vkGetPhysicalDeviceSurfaceFormatsKHR(vkDeviceHandle, m_VkSurface, &formatCount, SupportedFormats.data());
|
180 | |
CHECK_VK_ERROR_AND_THROW(err, "Failed to query supported format properties");
|
181 | |
VERIFY_EXPR(formatCount == SupportedFormats.size());
|
182 | |
m_VkColorFormat = TexFormatToVkFormat(m_SwapChainDesc.ColorBufferFormat);
|
183 | |
|
184 | |
VkColorSpaceKHR ColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
|
185 | |
if (formatCount == 1 && SupportedFormats[0].format == VK_FORMAT_UNDEFINED)
|
186 | |
{
|
187 | |
// If the format list includes just one entry of VK_FORMAT_UNDEFINED,
|
188 | |
// the surface has no preferred format. Otherwise, at least one
|
189 | |
// supported format will be returned.
|
190 | |
|
191 | |
// Do nothing
|
192 | |
}
|
193 | |
else
|
194 | |
{
|
195 | |
bool FmtFound = false;
|
196 | |
for (const auto& SrfFmt : SupportedFormats)
|
197 | |
{
|
198 | |
if (SrfFmt.format == m_VkColorFormat)
|
199 | |
{
|
200 | |
FmtFound = true;
|
201 | |
ColorSpace = SrfFmt.colorSpace;
|
202 | |
break;
|
203 | |
}
|
204 | |
}
|
205 | |
if (!FmtFound)
|
206 | |
{
|
207 | |
VkFormat VkReplacementColorFormat = VK_FORMAT_UNDEFINED;
|
208 | |
switch (m_VkColorFormat)
|
209 | |
{
|
210 | |
// clang-format off
|
211 | |
case VK_FORMAT_R8G8B8A8_UNORM: VkReplacementColorFormat = VK_FORMAT_B8G8R8A8_UNORM; break;
|
212 | |
case VK_FORMAT_B8G8R8A8_UNORM: VkReplacementColorFormat = VK_FORMAT_R8G8B8A8_UNORM; break;
|
213 | |
case VK_FORMAT_B8G8R8A8_SRGB: VkReplacementColorFormat = VK_FORMAT_R8G8B8A8_SRGB; break;
|
214 | |
case VK_FORMAT_R8G8B8A8_SRGB: VkReplacementColorFormat = VK_FORMAT_B8G8R8A8_SRGB; break;
|
215 | |
default: VkReplacementColorFormat = VK_FORMAT_UNDEFINED;
|
216 | |
// clang-format on
|
217 | |
}
|
218 | |
|
219 | |
bool ReplacementFmtFound = false;
|
220 | |
for (const auto& SrfFmt : SupportedFormats)
|
221 | |
{
|
222 | |
if (SrfFmt.format == VkReplacementColorFormat)
|
223 | |
{
|
224 | |
ReplacementFmtFound = true;
|
225 | |
ColorSpace = SrfFmt.colorSpace;
|
226 | |
break;
|
227 | |
}
|
228 | |
}
|
229 | |
|
230 | |
if (ReplacementFmtFound)
|
231 | |
{
|
232 | |
m_VkColorFormat = VkReplacementColorFormat;
|
233 | |
auto NewColorBufferFormat = VkFormatToTexFormat(VkReplacementColorFormat);
|
234 | |
LOG_INFO_MESSAGE("Requested color buffer format ", GetTextureFormatAttribs(m_SwapChainDesc.ColorBufferFormat).Name, " is not supported by the surface and will be replaced with ", GetTextureFormatAttribs(NewColorBufferFormat).Name);
|
235 | |
m_SwapChainDesc.ColorBufferFormat = NewColorBufferFormat;
|
236 | |
}
|
237 | |
else
|
238 | |
{
|
239 | |
LOG_WARNING_MESSAGE("Requested color buffer format ", GetTextureFormatAttribs(m_SwapChainDesc.ColorBufferFormat).Name, "is not supported by the surface");
|
240 | |
}
|
241 | |
}
|
242 | |
}
|
243 | |
|
244 | |
VkSurfaceCapabilitiesKHR surfCapabilities = {};
|
245 | |
|
246 | |
err = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(vkDeviceHandle, m_VkSurface, &surfCapabilities);
|
247 | |
CHECK_VK_ERROR_AND_THROW(err, "Failed to query physical device surface capabilities");
|
248 | |
|
249 | |
uint32_t presentModeCount = 0;
|
250 | |
|
251 | |
err = vkGetPhysicalDeviceSurfacePresentModesKHR(vkDeviceHandle, m_VkSurface, &presentModeCount, NULL);
|
252 | |
CHECK_VK_ERROR_AND_THROW(err, "Failed to query surface present mode count");
|
253 | |
VERIFY_EXPR(presentModeCount > 0);
|
254 | |
std::vector<VkPresentModeKHR> presentModes(presentModeCount);
|
255 | |
err = vkGetPhysicalDeviceSurfacePresentModesKHR(vkDeviceHandle, m_VkSurface, &presentModeCount, presentModes.data());
|
256 | |
CHECK_VK_ERROR_AND_THROW(err, "Failed to query surface present modes");
|
257 | |
VERIFY_EXPR(presentModeCount == presentModes.size());
|
258 | |
|
259 | |
|
260 | |
VkSurfaceTransformFlagBitsKHR vkPreTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
|
261 | |
if (m_DesiredPreTransform != SURFACE_TRANSFORM_OPTIMAL)
|
262 | |
{
|
263 | |
vkPreTransform = SurfaceTransformToVkSurfaceTransformFlag(m_DesiredPreTransform);
|
264 | |
if ((surfCapabilities.supportedTransforms & vkPreTransform) != 0)
|
265 | |
{
|
266 | |
m_SwapChainDesc.PreTransform = m_DesiredPreTransform;
|
267 | |
}
|
268 | |
else
|
269 | |
{
|
270 | |
LOG_WARNING_MESSAGE(GetSurfaceTransformString(m_DesiredPreTransform),
|
271 | |
" is not supported by the presentation engine. Optimal surface transform will be used instead."
|
272 | |
" Query the swap chain description to get the actual surface transform.");
|
273 | |
m_DesiredPreTransform = SURFACE_TRANSFORM_OPTIMAL;
|
274 | |
}
|
275 | |
}
|
276 | |
|
277 | |
if (m_DesiredPreTransform == SURFACE_TRANSFORM_OPTIMAL)
|
278 | |
{
|
279 | |
// Use current surface transform to avoid extra cost of presenting the image.
|
280 | |
// If preTransform does not match the currentTransform value returned by vkGetPhysicalDeviceSurfaceCapabilitiesKHR,
|
281 | |
// the presentation engine will transform the image content as part of the presentation operation.
|
282 | |
// https://android-developers.googleblog.com/2020/02/handling-device-orientation-efficiently.html
|
283 | |
// https://community.arm.com/developer/tools-software/graphics/b/blog/posts/appropriate-use-of-surface-rotation
|
284 | |
vkPreTransform = surfCapabilities.currentTransform;
|
285 | |
m_SwapChainDesc.PreTransform = VkSurfaceTransformFlagToSurfaceTransform(vkPreTransform);
|
286 | |
LOG_INFO_MESSAGE("Using ", GetSurfaceTransformString(m_SwapChainDesc.PreTransform), " swap chain pretransform");
|
287 | |
}
|
288 | |
|
289 | |
VkExtent2D swapchainExtent = {};
|
290 | |
// width and height are either both 0xFFFFFFFF, or both not 0xFFFFFFFF.
|
291 | |
if (surfCapabilities.currentExtent.width == 0xFFFFFFFF && m_SwapChainDesc.Width != 0 && m_SwapChainDesc.Height != 0)
|
292 | |
{
|
293 | |
// If the surface size is undefined, the size is set to
|
294 | |
// the size of the images requested.
|
295 | |
swapchainExtent.width = std::min(std::max(m_SwapChainDesc.Width, surfCapabilities.minImageExtent.width), surfCapabilities.maxImageExtent.width);
|
296 | |
swapchainExtent.height = std::min(std::max(m_SwapChainDesc.Height, surfCapabilities.minImageExtent.height), surfCapabilities.maxImageExtent.height);
|
297 | |
}
|
298 | |
else
|
299 | |
{
|
300 | |
// If the surface size is defined, the swap chain size must match
|
301 | |
swapchainExtent = surfCapabilities.currentExtent;
|
302 | |
}
|
303 | |
|
304 | |
#if PLATFORM_ANDROID
|
305 | |
// On Android, vkGetPhysicalDeviceSurfaceCapabilitiesKHR is not reliable and starts reporting incorrect
|
306 | |
// dimensions after few rotations. To alleviate the problem, we store the surface extent corresponding to
|
307 | |
// identity rotation.
|
308 | |
// https://android-developers.googleblog.com/2020/02/handling-device-orientation-efficiently.html
|
309 | |
if (m_SurfaceIdentityExtent.width == 0 || m_SurfaceIdentityExtent.height == 0)
|
310 | |
{
|
311 | |
m_SurfaceIdentityExtent = surfCapabilities.currentExtent;
|
312 | |
constexpr auto Rotate90TransformFlags =
|
313 | |
VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR |
|
314 | |
VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR |
|
315 | |
VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR |
|
316 | |
VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR;
|
317 | |
if ((surfCapabilities.currentTransform & Rotate90TransformFlags) != 0)
|
318 | |
std::swap(m_SurfaceIdentityExtent.width, m_SurfaceIdentityExtent.height);
|
319 | |
}
|
320 | |
|
321 | |
if (m_DesiredPreTransform == SURFACE_TRANSFORM_OPTIMAL)
|
322 | |
{
|
323 | |
swapchainExtent = m_SurfaceIdentityExtent;
|
324 | |
}
|
325 | |
m_CurrentSurfaceTransform = surfCapabilities.currentTransform;
|
326 | |
#endif
|
327 | |
|
328 | |
swapchainExtent.width = std::max(swapchainExtent.width, 1u);
|
329 | |
swapchainExtent.height = std::max(swapchainExtent.height, 1u);
|
330 | |
m_SwapChainDesc.Width = swapchainExtent.width;
|
331 | |
m_SwapChainDesc.Height = swapchainExtent.height;
|
332 | |
|
333 | |
// The FIFO present mode is guaranteed by the spec to always be supported.
|
334 | |
VkPresentModeKHR PresentMode = VK_PRESENT_MODE_FIFO_KHR;
|
335 | |
{
|
336 | |
std::vector<VkPresentModeKHR> PreferredPresentModes;
|
337 | |
if (m_VSyncEnabled)
|
338 | |
{
|
339 | |
// FIFO relaxed waits for the next VSync, but if the frame is late,
|
340 | |
// it still shows it even if VSync has already passed, which may
|
341 | |
// result in tearing.
|
342 | |
PreferredPresentModes.push_back(VK_PRESENT_MODE_FIFO_RELAXED_KHR);
|
343 | |
PreferredPresentModes.push_back(VK_PRESENT_MODE_FIFO_KHR);
|
344 | |
}
|
345 | |
else
|
346 | |
{
|
347 | |
// Mailbox is the lowest latency non-tearing presentation mode.
|
348 | |
PreferredPresentModes.push_back(VK_PRESENT_MODE_MAILBOX_KHR);
|
349 | |
PreferredPresentModes.push_back(VK_PRESENT_MODE_IMMEDIATE_KHR);
|
350 | |
PreferredPresentModes.push_back(VK_PRESENT_MODE_FIFO_KHR);
|
351 | |
}
|
352 | |
|
353 | |
for (auto PreferredMode : PreferredPresentModes)
|
354 | |
{
|
355 | |
if (std::find(presentModes.begin(), presentModes.end(), PreferredMode) != presentModes.end())
|
356 | |
{
|
357 | |
PresentMode = PreferredMode;
|
358 | |
break;
|
359 | |
}
|
360 | |
}
|
361 | |
|
362 | |
const char* PresentModeName = nullptr;
|
363 | |
#define PRESENT_MODE_CASE(Mode) \
|
364 | |
case Mode: PresentModeName = #Mode; break;
|
365 | |
switch (PresentMode)
|
366 | |
{
|
367 | |
PRESENT_MODE_CASE(VK_PRESENT_MODE_IMMEDIATE_KHR)
|
368 | |
PRESENT_MODE_CASE(VK_PRESENT_MODE_MAILBOX_KHR)
|
369 | |
PRESENT_MODE_CASE(VK_PRESENT_MODE_FIFO_KHR)
|
370 | |
PRESENT_MODE_CASE(VK_PRESENT_MODE_FIFO_RELAXED_KHR)
|
371 | |
PRESENT_MODE_CASE(VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR)
|
372 | |
PRESENT_MODE_CASE(VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR)
|
373 | |
default: PresentModeName = "<UNKNOWN>";
|
374 | |
}
|
375 | |
#undef PRESENT_MODE_CASE
|
376 | |
LOG_INFO_MESSAGE("Using ", PresentModeName, " swap chain present mode");
|
377 | |
}
|
378 | |
|
379 | |
// Determine the number of VkImage's to use in the swap chain.
|
380 | |
// We need to acquire only 1 presentable image at at time.
|
381 | |
// Asking for minImageCount images ensures that we can acquire
|
382 | |
// 1 presentable image as long as we present it before attempting
|
383 | |
// to acquire another.
|
384 | |
if (m_DesiredBufferCount < surfCapabilities.minImageCount)
|
385 | |
{
|
386 | |
LOG_INFO_MESSAGE("Desired back buffer count (", m_DesiredBufferCount, ") is smaller than the minimal image count supported for this surface (", surfCapabilities.minImageCount, "). Resetting to ", surfCapabilities.minImageCount);
|
387 | |
m_DesiredBufferCount = surfCapabilities.minImageCount;
|
388 | |
}
|
389 | |
if (surfCapabilities.maxImageCount != 0 && m_DesiredBufferCount > surfCapabilities.maxImageCount)
|
390 | |
{
|
391 | |
LOG_INFO_MESSAGE("Desired back buffer count (", m_DesiredBufferCount, ") is greater than the maximal image count supported for this surface (", surfCapabilities.maxImageCount, "). Resetting to ", surfCapabilities.maxImageCount);
|
392 | |
m_DesiredBufferCount = surfCapabilities.maxImageCount;
|
393 | |
}
|
394 | |
// We must use m_DesiredBufferCount instead of m_SwapChainDesc.BufferCount, because Vulkan on Android
|
395 | |
// may decide to always add extra buffers, causing infinite growth of the swap chain when it is recreated:
|
396 | |
// m_SwapChainDesc.BufferCount
|
397 | |
// CreateVulkanSwapChain() 2 -> 4
|
398 | |
// CreateVulkanSwapChain() 4 -> 6
|
399 | |
// CreateVulkanSwapChain() 6 -> 8
|
400 | |
uint32_t desiredNumberOfSwapChainImages = m_DesiredBufferCount;
|
401 | |
|
402 | |
// Find a supported composite alpha mode - one of these is guaranteed to be set
|
403 | |
VkCompositeAlphaFlagBitsKHR compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
|
404 | |
VkCompositeAlphaFlagBitsKHR compositeAlphaFlags[4] = //
|
405 | |
{
|
406 | |
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
|
407 | |
VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR,
|
408 | |
VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR,
|
409 | |
VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR,
|
410 | |
};
|
411 | |
for (uint32_t i = 0; i < _countof(compositeAlphaFlags); i++)
|
412 | |
{
|
413 | |
if (surfCapabilities.supportedCompositeAlpha & compositeAlphaFlags[i])
|
414 | |
{
|
415 | |
compositeAlpha = compositeAlphaFlags[i];
|
416 | |
break;
|
417 | |
}
|
418 | |
}
|
419 | |
|
420 | |
auto oldSwapchain = m_VkSwapChain;
|
421 | |
m_VkSwapChain = VK_NULL_HANDLE;
|
422 | |
|
423 | |
VkSwapchainCreateInfoKHR swapchain_ci = {};
|
424 | |
|
425 | |
swapchain_ci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
|
426 | |
swapchain_ci.pNext = NULL;
|
427 | |
swapchain_ci.surface = m_VkSurface;
|
428 | |
swapchain_ci.minImageCount = desiredNumberOfSwapChainImages;
|
429 | |
swapchain_ci.imageFormat = m_VkColorFormat;
|
430 | |
swapchain_ci.imageExtent.width = swapchainExtent.width;
|
431 | |
swapchain_ci.imageExtent.height = swapchainExtent.height;
|
432 | |
swapchain_ci.preTransform = vkPreTransform;
|
433 | |
swapchain_ci.compositeAlpha = compositeAlpha;
|
434 | |
swapchain_ci.imageArrayLayers = 1;
|
435 | |
swapchain_ci.presentMode = PresentMode;
|
436 | |
swapchain_ci.oldSwapchain = oldSwapchain;
|
437 | |
swapchain_ci.clipped = VK_TRUE;
|
438 | |
swapchain_ci.imageColorSpace = ColorSpace;
|
439 | |
|
440 | |
DEV_CHECK_ERR(m_SwapChainDesc.Usage != 0, "No swap chain usage flags defined");
|
441 | |
static_assert(SWAP_CHAIN_USAGE_LAST == SWAP_CHAIN_USAGE_COPY_SOURCE, "Please update this function to handle the new swapchain usage");
|
442 | |
if (m_SwapChainDesc.Usage & SWAP_CHAIN_USAGE_RENDER_TARGET)
|
443 | |
swapchain_ci.imageUsage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
|
444 | |
if (m_SwapChainDesc.Usage & SWAP_CHAIN_USAGE_SHADER_INPUT)
|
445 | |
swapchain_ci.imageUsage |= VK_IMAGE_USAGE_SAMPLED_BIT;
|
446 | |
if (m_SwapChainDesc.Usage & SWAP_CHAIN_USAGE_COPY_SOURCE)
|
447 | |
swapchain_ci.imageUsage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
|
448 | |
|
449 | |
// vkCmdClearColorImage() command requires the image to use VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL layout
|
450 | |
// that requires VK_IMAGE_USAGE_TRANSFER_DST_BIT to be set
|
451 | |
swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
|
452 | |
swapchain_ci.queueFamilyIndexCount = 0;
|
453 | |
swapchain_ci.pQueueFamilyIndices = NULL;
|
454 | |
//uint32_t queueFamilyIndices[] = { (uint32_t)info.graphics_queue_family_index, (uint32_t)info.present_queue_family_index };
|
455 | |
//if (info.graphics_queue_family_index != info.present_queue_family_index) {
|
456 | |
// // If the graphics and present queues are from different queue families,
|
457 | |
// // we either have to explicitly transfer ownership of images between
|
458 | |
// // the queues, or we have to create the swapchain with imageSharingMode
|
459 | |
// // as VK_SHARING_MODE_CONCURRENT
|
460 | |
// swapchain_ci.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
|
461 | |
// swapchain_ci.queueFamilyIndexCount = 2;
|
462 | |
// swapchain_ci.pQueueFamilyIndices = queueFamilyIndices;
|
463 | |
//}
|
464 | |
|
465 | |
const auto& LogicalDevice = pRenderDeviceVk->GetLogicalDevice();
|
466 | |
auto vkDevice = pRenderDeviceVk->GetVkDevice();
|
467 | |
|
468 | |
err = vkCreateSwapchainKHR(vkDevice, &swapchain_ci, NULL, &m_VkSwapChain);
|
469 | |
CHECK_VK_ERROR_AND_THROW(err, "Failed to create Vulkan swapchain");
|
470 | |
|
471 | |
if (oldSwapchain != VK_NULL_HANDLE)
|
472 | |
{
|
473 | |
vkDestroySwapchainKHR(vkDevice, oldSwapchain, NULL);
|
474 | |
oldSwapchain = VK_NULL_HANDLE;
|
475 | |
}
|
476 | |
|
477 | |
uint32_t swapchainImageCount = 0;
|
478 | |
|
479 | |
err = vkGetSwapchainImagesKHR(vkDevice, m_VkSwapChain, &swapchainImageCount, NULL);
|
480 | |
CHECK_VK_ERROR_AND_THROW(err, "Failed to request swap chain image count");
|
481 | |
VERIFY_EXPR(swapchainImageCount > 0);
|
482 | |
if (swapchainImageCount != m_SwapChainDesc.BufferCount)
|
483 | |
{
|
484 | |
LOG_INFO_MESSAGE("Created swap chain with ", swapchainImageCount,
|
485 | |
" images vs ", m_SwapChainDesc.BufferCount, " requested.");
|
486 | |
m_SwapChainDesc.BufferCount = swapchainImageCount;
|
487 | |
}
|
488 | |
|
489 | |
m_ImageAcquiredSemaphores.resize(swapchainImageCount);
|
490 | |
m_DrawCompleteSemaphores.resize(swapchainImageCount);
|
491 | |
m_ImageAcquiredFences.resize(swapchainImageCount);
|
492 | |
for (uint32_t i = 0; i < swapchainImageCount; ++i)
|
493 | |
{
|
494 | |
VkSemaphoreCreateInfo SemaphoreCI = {};
|
495 | |
|
496 | |
SemaphoreCI.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
|
497 | |
SemaphoreCI.pNext = nullptr;
|
498 | |
SemaphoreCI.flags = 0; // reserved for future use
|
499 | |
|
500 | |
{
|
501 | |
std::stringstream ss;
|
502 | |
ss << "Swap chain image acquired semaphore " << i;
|
503 | |
auto Name = ss.str();
|
504 | |
auto Semaphore = LogicalDevice.CreateSemaphore(SemaphoreCI, Name.c_str());
|
505 | |
ManagedSemaphore::Create(pRenderDeviceVk, std::move(Semaphore), Name.c_str(), &m_ImageAcquiredSemaphores[i]);
|
506 | |
}
|
507 | |
|
508 | |
{
|
509 | |
std::stringstream ss;
|
510 | |
ss << "Swap chain draw complete semaphore " << i;
|
511 | |
auto Name = ss.str();
|
512 | |
auto Semaphore = LogicalDevice.CreateSemaphore(SemaphoreCI, Name.c_str());
|
513 | |
ManagedSemaphore::Create(pRenderDeviceVk, std::move(Semaphore), Name.c_str(), &m_DrawCompleteSemaphores[i]);
|
514 | |
}
|
515 | |
|
516 | |
VkFenceCreateInfo FenceCI = {};
|
517 | |
|
518 | |
FenceCI.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
|
519 | |
FenceCI.pNext = nullptr;
|
520 | |
FenceCI.flags = 0;
|
521 | |
m_ImageAcquiredFences[i] = LogicalDevice.CreateFence(FenceCI);
|
522 | |
}
|
|
55 |
InitBuffersAndViews(SwapChainImageCount, pSwapChainImages);
|
523 | 56 |
}
|
524 | 57 |
|
525 | 58 |
// Destructor: releases all swap-chain resources (including the
// VkSwapchainKHR itself) and then destroys the OS-specific surface.
SwapChainVkImpl::~SwapChainVkImpl()
{
    if (m_VkSwapChain != VK_NULL_HANDLE)
    {
        // The immediate context may already be gone; Lock() yields null then,
        // and ReleaseSwapChainResources() receives a null context pointer.
        auto spImmediateCtx = m_wpDeviceContext.Lock();
        ReleaseSwapChainResources(spImmediateCtx.RawPtr<DeviceContextVkImpl>(), /*DestroyVkSwapChain=*/true);
        VERIFY_EXPR(m_VkSwapChain == VK_NULL_HANDLE);
    }

    if (m_VkSurface != VK_NULL_HANDLE)
    {
        vkDestroySurfaceKHR(m_VulkanInstance->GetVkInstance(), m_VkSurface, NULL);
    }
}
|
540 | |
|
541 | |
void SwapChainVkImpl::InitBuffersAndViews()
|
|
60 |
auto pDeviceContext = m_wpDeviceContext.Lock();
|
|
61 |
auto* pImmediateCtxVk = pDeviceContext.RawPtr<DeviceContextVkImpl>();
|
|
62 |
ReleaseSwapChainResources(pImmediateCtxVk);
|
|
63 |
}
|
|
64 |
|
|
65 |
void SwapChainVkImpl::InitBuffersAndViews(uint32_t SwapChainImageCount, const VkImage *pSwapChainImages)
|
542 | 66 |
{
|
543 | 67 |
auto* pDeviceVkImpl = m_pRenderDevice.RawPtr<RenderDeviceVkImpl>();
|
544 | 68 |
auto LogicalVkDevice = pDeviceVkImpl->GetVkDevice();
|
545 | 69 |
|
546 | |
#ifdef DILIGENT_DEBUG
|
547 | |
{
|
548 | |
uint32_t swapchainImageCount = 0;
|
549 | |
auto err = vkGetSwapchainImagesKHR(LogicalVkDevice, m_VkSwapChain, &swapchainImageCount, NULL);
|
550 | |
VERIFY_EXPR(err == VK_SUCCESS);
|
551 | |
VERIFY(swapchainImageCount == m_SwapChainDesc.BufferCount, "Unexpected swap chain buffer count");
|
552 | |
}
|
553 | |
#endif
|
554 | |
|
555 | 70 |
m_pBackBufferRTV.resize(m_SwapChainDesc.BufferCount);
|
556 | 71 |
m_SwapChainImagesInitialized.resize(m_pBackBufferRTV.size(), false);
|
557 | |
m_ImageAcquiredFenceSubmitted.resize(m_pBackBufferRTV.size(), false);
|
558 | |
|
559 | |
uint32_t swapchainImageCount = m_SwapChainDesc.BufferCount;
|
560 | |
std::vector<VkImage> swapchainImages(swapchainImageCount);
|
561 | |
auto err = vkGetSwapchainImagesKHR(LogicalVkDevice, m_VkSwapChain, &swapchainImageCount, swapchainImages.data());
|
562 | |
CHECK_VK_ERROR_AND_THROW(err, "Failed to get swap chain images");
|
563 | |
VERIFY_EXPR(swapchainImageCount == swapchainImages.size());
|
564 | |
|
565 | |
for (uint32_t i = 0; i < swapchainImageCount; i++)
|
|
72 |
|
|
73 |
for (uint32_t i = 0; i < SwapChainImageCount; i++)
|
566 | 74 |
{
|
567 | 75 |
TextureDesc BackBufferDesc;
|
568 | 76 |
std::stringstream name_ss;
|
|
577 | 85 |
BackBufferDesc.MipLevels = 1;
|
578 | 86 |
|
579 | 87 |
RefCntAutoPtr<TextureVkImpl> pBackBufferTex;
|
580 | |
m_pRenderDevice.RawPtr<RenderDeviceVkImpl>()->CreateTexture(BackBufferDesc, swapchainImages[i], RESOURCE_STATE_UNDEFINED, &pBackBufferTex);
|
|
88 |
m_pRenderDevice.RawPtr<RenderDeviceVkImpl>()->CreateTexture(BackBufferDesc, pSwapChainImages[i], RESOURCE_STATE_UNDEFINED, &pBackBufferTex);
|
581 | 89 |
|
582 | 90 |
TextureViewDesc RTVDesc;
|
583 | 91 |
RTVDesc.ViewType = TEXTURE_VIEW_RENDER_TARGET;
|
|
608 | 116 |
}
|
609 | 117 |
}
|
610 | 118 |
|
611 | |
VkResult SwapChainVkImpl::AcquireNextImage(DeviceContextVkImpl* pDeviceCtxVk)
|
612 | |
{
|
613 | |
auto* pDeviceVk = m_pRenderDevice.RawPtr<RenderDeviceVkImpl>();
|
614 | |
const auto& LogicalDevice = pDeviceVk->GetLogicalDevice();
|
615 | |
|
616 | |
// Applications should not rely on vkAcquireNextImageKHR blocking in order to
|
617 | |
// meter their rendering speed. The implementation may return from this function
|
618 | |
// immediately regardless of how many presentation requests are queued, and regardless
|
619 | |
// of when queued presentation requests will complete relative to the call. Instead,
|
620 | |
// applications can use fence to meter their frame generation work to match the
|
621 | |
// presentation rate.
|
|
119 |
VkResult SwapChainVkImpl::AcquireNextImage()
|
|
120 |
{
|
|
121 |
auto pDeviceContext = m_wpDeviceContext.Lock();
|
|
122 |
if (!pDeviceContext)
|
|
123 |
{
|
|
124 |
LOG_ERROR_MESSAGE("Immediate context has been released");
|
|
125 |
return VK_ERROR_UNKNOWN;
|
|
126 |
}
|
|
127 |
|
|
128 |
auto* pDeviceCtxVk = pDeviceContext.RawPtr<DeviceContextVkImpl>();
|
|
129 |
auto* pDeviceVk = m_pRenderDevice.RawPtr<RenderDeviceVkImpl>();
|
|
130 |
|
622 | 131 |
|
623 | 132 |
// Explicitly make sure that there are no more pending frames in the command queue
|
624 | 133 |
// than the number of the swap chain images.
|
|
633 | 142 |
// When acquiring swap chain image for frame N, we need to make sure that
|
634 | 143 |
// frame N-Nsc has completed. To achieve that, we wait for the image acquire
|
635 | 144 |
// fence for frame N-Nsc-1. Thus we will have no more than Nsc frames in the queue.
|
636 | |
auto OldestSubmittedImageFenceInd = (m_SemaphoreIndex + 1u) % static_cast<Uint32>(m_ImageAcquiredFenceSubmitted.size());
|
637 | |
if (m_ImageAcquiredFenceSubmitted[OldestSubmittedImageFenceInd])
|
638 | |
{
|
639 | |
VkFence OldestSubmittedFence = m_ImageAcquiredFences[OldestSubmittedImageFenceInd];
|
640 | |
if (LogicalDevice.GetFenceStatus(OldestSubmittedFence) == VK_NOT_READY)
|
641 | |
{
|
642 | |
auto res = LogicalDevice.WaitForFences(1, &OldestSubmittedFence, VK_TRUE, UINT64_MAX);
|
643 | |
VERIFY_EXPR(res == VK_SUCCESS);
|
644 | |
(void)res;
|
645 | |
}
|
646 | |
LogicalDevice.ResetFence(OldestSubmittedFence);
|
647 | |
m_ImageAcquiredFenceSubmitted[OldestSubmittedImageFenceInd] = false;
|
648 | |
}
|
649 | |
|
650 | |
VkFence ImageAcquiredFence = m_ImageAcquiredFences[m_SemaphoreIndex];
|
651 | |
VkSemaphore ImageAcquiredSemaphore = m_ImageAcquiredSemaphores[m_SemaphoreIndex]->Get();
|
652 | |
|
653 | |
auto res = vkAcquireNextImageKHR(LogicalDevice.GetVkDevice(), m_VkSwapChain, UINT64_MAX, ImageAcquiredSemaphore, ImageAcquiredFence, &m_BackBufferIndex);
|
654 | |
|
655 | |
m_ImageAcquiredFenceSubmitted[m_SemaphoreIndex] = (res == VK_SUCCESS);
|
|
145 |
|
|
146 |
pDeviceVk->LockCommandQueue(0);
|
|
147 |
VkResult res = m_AcquireImageCallback(m_Cookie, &m_BackBufferIndex);
|
|
148 |
pDeviceVk->UnlockCommandQueue(0);
|
|
149 |
|
656 | 150 |
if (res == VK_SUCCESS)
|
657 | 151 |
{
|
658 | 152 |
// Next command in the device context must wait for the next image to be acquired.
|
659 | 153 |
// Unlike fences or events, the act of waiting for a semaphore also unsignals that semaphore (6.4.2).
|
660 | 154 |
// Swapchain image may be used as render target or as destination for copy command.
|
661 | |
pDeviceCtxVk->AddWaitSemaphore(m_ImageAcquiredSemaphores[m_SemaphoreIndex], VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT);
|
662 | 155 |
if (!m_SwapChainImagesInitialized[m_BackBufferIndex])
|
663 | 156 |
{
|
664 | 157 |
// Vulkan validation layers do not like uninitialized memory.
|
|
681 | 174 |
if (SyncInterval != 0 && SyncInterval != 1)
|
682 | 175 |
LOG_WARNING_MESSAGE_ONCE("Vulkan only supports 0 and 1 present intervals");
|
683 | 176 |
|
|
177 |
PresentImage();
|
|
178 |
}
|
|
179 |
|
|
180 |
void SwapChainVkImpl::PresentImage()
|
|
181 |
{
|
684 | 182 |
auto pDeviceContext = m_wpDeviceContext.Lock();
|
685 | 183 |
if (!pDeviceContext)
|
686 | 184 |
{
|
|
697 | 195 |
if (!m_IsMinimized)
|
698 | 196 |
{
|
699 | 197 |
// TransitionImageLayout() never triggers flush
|
700 | |
pImmediateCtxVk->TransitionImageLayout(pBackBuffer, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
|
|
198 |
pImmediateCtxVk->TransitionImageLayout(pBackBuffer, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
|
701 | 199 |
// The context can be empty if no render commands were issued by the app
|
702 | 200 |
//VERIFY(pImmediateCtxVk->GetNumCommandsInCtx() != 0, "The context must not be flushed");
|
703 | |
pImmediateCtxVk->AddSignalSemaphore(m_DrawCompleteSemaphores[m_SemaphoreIndex]);
|
704 | 201 |
}
|
705 | 202 |
|
706 | 203 |
pImmediateCtxVk->Flush();
|
707 | 204 |
|
708 | 205 |
if (!m_IsMinimized)
|
709 | 206 |
{
|
710 | |
VkPresentInfoKHR PresentInfo = {};
|
711 | |
|
712 | |
PresentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
|
713 | |
PresentInfo.pNext = nullptr;
|
714 | |
PresentInfo.waitSemaphoreCount = 1;
|
715 | |
// Unlike fences or events, the act of waiting for a semaphore also unsignals that semaphore (6.4.2)
|
716 | |
VkSemaphore WaitSemaphore[] = {m_DrawCompleteSemaphores[m_SemaphoreIndex]->Get()};
|
717 | |
PresentInfo.pWaitSemaphores = WaitSemaphore;
|
718 | |
PresentInfo.swapchainCount = 1;
|
719 | |
PresentInfo.pSwapchains = &m_VkSwapChain;
|
720 | |
PresentInfo.pImageIndices = &m_BackBufferIndex;
|
721 | |
VkResult Result = VK_SUCCESS;
|
722 | |
PresentInfo.pResults = &Result;
|
723 | |
pDeviceVk->LockCmdQueueAndRun(
|
724 | |
0,
|
725 | |
[&PresentInfo](ICommandQueueVk* pCmdQueueVk) //
|
726 | |
{
|
727 | |
pCmdQueueVk->Present(PresentInfo);
|
728 | |
} //
|
729 | |
);
|
730 | |
|
731 | |
if (Result == VK_SUBOPTIMAL_KHR || Result == VK_ERROR_OUT_OF_DATE_KHR)
|
732 | |
{
|
733 | |
RecreateVulkanSwapchain(pImmediateCtxVk);
|
734 | |
m_SemaphoreIndex = m_SwapChainDesc.BufferCount - 1; // To start with 0 index when acquire next image
|
735 | |
}
|
736 | |
else
|
737 | |
{
|
738 | |
DEV_CHECK_ERR(Result == VK_SUCCESS, "Present failed");
|
739 | |
}
|
|
207 |
pDeviceVk->LockCommandQueue(0);
|
|
208 |
m_ReleaseImageCallback(m_Cookie);
|
|
209 |
pDeviceVk->UnlockCommandQueue(0);
|
740 | 210 |
}
|
741 | 211 |
|
742 | 212 |
if (m_SwapChainDesc.IsPrimary)
|
|
744 | 214 |
pImmediateCtxVk->FinishFrame();
|
745 | 215 |
pDeviceVk->ReleaseStaleResources();
|
746 | 216 |
}
|
747 | |
|
748 | |
if (!m_IsMinimized)
|
749 | |
{
|
750 | |
++m_SemaphoreIndex;
|
751 | |
if (m_SemaphoreIndex >= m_SwapChainDesc.BufferCount)
|
752 | |
m_SemaphoreIndex = 0;
|
753 | |
|
754 | |
bool EnableVSync = SyncInterval != 0;
|
755 | |
|
756 | |
auto res = (m_VSyncEnabled == EnableVSync) ? AcquireNextImage(pImmediateCtxVk) : VK_ERROR_OUT_OF_DATE_KHR;
|
757 | |
if (res == VK_SUBOPTIMAL_KHR || res == VK_ERROR_OUT_OF_DATE_KHR)
|
758 | |
{
|
759 | |
m_VSyncEnabled = EnableVSync;
|
760 | |
RecreateVulkanSwapchain(pImmediateCtxVk);
|
761 | |
m_SemaphoreIndex = m_SwapChainDesc.BufferCount - 1; // To start with 0 index when acquire next image
|
762 | |
|
763 | |
res = AcquireNextImage(pImmediateCtxVk);
|
764 | |
}
|
765 | |
DEV_CHECK_ERR(res == VK_SUCCESS, "Failed to acquire next swap chain image");
|
766 | |
}
|
767 | |
}
|
768 | |
|
769 | |
void SwapChainVkImpl::WaitForImageAcquiredFences()
|
770 | |
{
|
771 | |
const auto& LogicalDevice = m_pRenderDevice.RawPtr<RenderDeviceVkImpl>()->GetLogicalDevice();
|
772 | |
for (size_t i = 0; i < m_ImageAcquiredFences.size(); ++i)
|
773 | |
{
|
774 | |
if (m_ImageAcquiredFenceSubmitted[i])
|
775 | |
{
|
776 | |
VkFence vkFence = m_ImageAcquiredFences[i];
|
777 | |
if (LogicalDevice.GetFenceStatus(vkFence) == VK_NOT_READY)
|
778 | |
LogicalDevice.WaitForFences(1, &vkFence, VK_TRUE, UINT64_MAX);
|
779 | |
}
|
780 | |
}
|
781 | |
}
|
782 | |
|
783 | |
void SwapChainVkImpl::ReleaseSwapChainResources(DeviceContextVkImpl* pImmediateCtxVk, bool DestroyVkSwapChain)
|
784 | |
{
|
785 | |
if (m_VkSwapChain == VK_NULL_HANDLE)
|
786 | |
return;
|
787 | |
|
|
217 |
}
|
|
218 |
|
|
219 |
void SwapChainVkImpl::ReleaseSwapChainResources(DeviceContextVkImpl* pImmediateCtxVk)
|
|
220 |
{
|
788 | 221 |
if (pImmediateCtxVk != nullptr)
|
789 | 222 |
{
|
790 | 223 |
// Flush to submit all pending commands and semaphores to the queue.
|
|
809 | 242 |
// m_pBackBufferRTV[].
|
810 | 243 |
pDeviceVk->IdleGPU();
|
811 | 244 |
|
812 | |
// We need to explicitly wait for all submitted Image Acquired Fences to signal.
|
813 | |
// Just idling the GPU is not enough and results in validation warnings.
|
814 | |
// As a matter of fact, it is only required to check the fence status.
|
815 | |
WaitForImageAcquiredFences();
|
816 | |
|
817 | 245 |
// All references to the swap chain must be released before it can be destroyed
|
818 | 246 |
m_pBackBufferRTV.clear();
|
819 | 247 |
m_SwapChainImagesInitialized.clear();
|
820 | |
m_ImageAcquiredFenceSubmitted.clear();
|
821 | 248 |
m_pDepthBufferDSV.Release();
|
822 | |
|
823 | |
// We must wait unitl GPU is idled before destroying the fences as they
|
824 | |
// are destroyed immediately. The semaphores are managed and will be kept alive
|
825 | |
// by the device context they are submitted to.
|
826 | |
m_ImageAcquiredSemaphores.clear();
|
827 | |
m_DrawCompleteSemaphores.clear();
|
828 | |
m_ImageAcquiredFences.clear();
|
829 | |
m_SemaphoreIndex = 0;
|
830 | |
|
831 | |
if (DestroyVkSwapChain)
|
832 | |
{
|
833 | |
vkDestroySwapchainKHR(pDeviceVk->GetVkDevice(), m_VkSwapChain, NULL);
|
834 | |
m_VkSwapChain = VK_NULL_HANDLE;
|
835 | |
}
|
836 | |
}
|
837 | |
|
838 | |
void SwapChainVkImpl::RecreateVulkanSwapchain(DeviceContextVkImpl* pImmediateCtxVk)
|
839 | |
{
|
840 | |
// Do not destroy Vulkan swap chain as we will use it as oldSwapchain parameter.
|
841 | |
ReleaseSwapChainResources(pImmediateCtxVk, /*DestroyVkSwapChain*/ false);
|
842 | |
|
843 | |
// Check if the surface is lost
|
844 | |
{
|
845 | |
RenderDeviceVkImpl* pDeviceVk = m_pRenderDevice.RawPtr<RenderDeviceVkImpl>();
|
846 | |
const auto vkDeviceHandle = pDeviceVk->GetPhysicalDevice().GetVkDeviceHandle();
|
847 | |
|
848 | |
VkSurfaceCapabilitiesKHR surfCapabilities;
|
849 | |
// Call vkGetPhysicalDeviceSurfaceCapabilitiesKHR only to check the return code
|
850 | |
auto err = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(vkDeviceHandle, m_VkSurface, &surfCapabilities);
|
851 | |
if (err == VK_ERROR_SURFACE_LOST_KHR)
|
852 | |
{
|
853 | |
// Destroy the swap chain associated with the surface
|
854 | |
if (m_VkSwapChain != VK_NULL_HANDLE)
|
855 | |
{
|
856 | |
vkDestroySwapchainKHR(pDeviceVk->GetVkDevice(), m_VkSwapChain, NULL);
|
857 | |
m_VkSwapChain = VK_NULL_HANDLE;
|
858 | |
}
|
859 | |
|
860 | |
// Recreate the surface
|
861 | |
CreateSurface();
|
862 | |
}
|
863 | |
}
|
864 | |
|
865 | |
CreateVulkanSwapChain();
|
866 | |
InitBuffersAndViews();
|
867 | 249 |
}
|
868 | 250 |
|
869 | 251 |
void SwapChainVkImpl::Resize(Uint32 NewWidth, Uint32 NewHeight, SURFACE_TRANSFORM NewPreTransform)
|
870 | 252 |
{
|
871 | |
bool RecreateSwapChain = false;
|
872 | |
|
873 | |
#if PLATFORM_ANDROID
|
874 | |
if (m_VkSurface != VK_NULL_HANDLE)
|
875 | |
{
|
876 | |
// Check orientation
|
877 | |
const auto* pRenderDeviceVk = m_pRenderDevice.RawPtr<const RenderDeviceVkImpl>();
|
878 | |
const auto& PhysicalDevice = pRenderDeviceVk->GetPhysicalDevice();
|
879 | |
const auto vkDeviceHandle = PhysicalDevice.GetVkDeviceHandle();
|
880 | |
|
881 | |
VkSurfaceCapabilitiesKHR surfCapabilities = {};
|
882 | |
|
883 | |
auto err = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(vkDeviceHandle, m_VkSurface, &surfCapabilities);
|
884 | |
if (err == VK_SUCCESS)
|
885 | |
{
|
886 | |
if (m_CurrentSurfaceTransform != surfCapabilities.currentTransform)
|
887 | |
{
|
888 | |
// Surface orientation has changed - we need to recreate the swap chain
|
889 | |
RecreateSwapChain = true;
|
890 | |
}
|
891 | |
|
892 | |
constexpr auto Rotate90TransformFlags =
|
893 | |
VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR |
|
894 | |
VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR |
|
895 | |
VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR |
|
896 | |
VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR;
|
897 | |
|
898 | |
if (NewWidth == 0 || NewHeight == 0)
|
899 | |
{
|
900 | |
NewWidth = m_SurfaceIdentityExtent.width;
|
901 | |
NewHeight = m_SurfaceIdentityExtent.height;
|
902 | |
|
903 | |
if ((surfCapabilities.currentTransform & Rotate90TransformFlags) != 0)
|
904 | |
{
|
905 | |
// Swap to get logical dimensions as input NewWidth and NewHeight are
|
906 | |
// expected to be logical sizes.
|
907 | |
std::swap(NewWidth, NewHeight);
|
908 | |
}
|
909 | |
}
|
910 | |
|
911 | |
if (NewPreTransform == SURFACE_TRANSFORM_OPTIMAL)
|
912 | |
{
|
913 | |
if ((surfCapabilities.currentTransform & Rotate90TransformFlags) != 0)
|
914 | |
{
|
915 | |
// Swap to get physical dimensions
|
916 | |
std::swap(NewWidth, NewHeight);
|
917 | |
}
|
918 | |
}
|
919 | |
else
|
920 | |
{
|
921 | |
// Swap if necessary to get desired sizes after pre-transform
|
922 | |
if (NewPreTransform == SURFACE_TRANSFORM_ROTATE_90 ||
|
923 | |
NewPreTransform == SURFACE_TRANSFORM_ROTATE_270 ||
|
924 | |
NewPreTransform == SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90 ||
|
925 | |
NewPreTransform == SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270)
|
926 | |
{
|
927 | |
std::swap(NewWidth, NewHeight);
|
928 | |
}
|
929 | |
}
|
930 | |
}
|
931 | |
else
|
932 | |
{
|
933 | |
LOG_ERROR_MESSAGE(err, "Failed to query physical device surface capabilities");
|
934 | |
}
|
935 | |
}
|
936 | |
#endif
|
937 | |
|
938 | |
if (TSwapChainBase::Resize(NewWidth, NewHeight, NewPreTransform))
|
939 | |
RecreateSwapChain = true;
|
940 | |
|
941 | |
if (RecreateSwapChain)
|
942 | |
{
|
943 | |
auto pDeviceContext = m_wpDeviceContext.Lock();
|
944 | |
VERIFY(pDeviceContext, "Immediate context has been released");
|
945 | |
if (pDeviceContext)
|
946 | |
{
|
947 | |
try
|
948 | |
{
|
949 | |
auto* pImmediateCtxVk = pDeviceContext.RawPtr<DeviceContextVkImpl>();
|
950 | |
// RecreateVulkanSwapchain() unbinds default FB
|
951 | |
RecreateVulkanSwapchain(pImmediateCtxVk);
|
952 | |
|
953 | |
auto res = AcquireNextImage(pImmediateCtxVk);
|
954 | |
DEV_CHECK_ERR(res == VK_SUCCESS, "Failed to acquire next image for the just resized swap chain");
|
955 | |
(void)res;
|
956 | |
}
|
957 | |
catch (const std::runtime_error&)
|
958 | |
{
|
959 | |
LOG_ERROR("Failed to resize the swap chain");
|
960 | |
}
|
961 | |
}
|
962 | |
}
|
963 | |
|
964 | 253 |
m_IsMinimized = (NewWidth == 0 && NewHeight == 0);
|
965 | 254 |
}
|
966 | 255 |
|