git.s-ol.nu ~forks/DiligentCore / 43c3821
D3D12 resource binding refactoring, rename LinearAllocator to FixedLinearAllocator. azhirnov 10 months ago
20 changed file(s) with 495 addition(s) and 546 deletion(s).
1818 interface/FixedBlockMemoryAllocator.hpp
1919 interface/HashUtils.hpp
2020 interface/LockHelper.hpp
21 interface/LinearAllocator.hpp
21 interface/FixedLinearAllocator.hpp
2222 interface/DynamicLinearAllocator.hpp
2323 interface/MemoryFileStream.hpp
2424 interface/ObjectBase.hpp
0 /*
1 * Copyright 2019-2020 Diligent Graphics LLC
2 * Copyright 2015-2019 Egor Yusov
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *
16 * In no event and under no legal theory, whether in tort (including negligence),
17 * contract, or otherwise, unless required by applicable law (such as deliberate
18 * and grossly negligent acts) or agreed to in writing, shall any Contributor be
19 * liable for any damages, including any direct, indirect, special, incidental,
20 * or consequential damages of any character arising as a result of this License or
21 * out of the use or inability to use the software (including but not limited to damages
22 * for loss of goodwill, work stoppage, computer failure or malfunction, or any and
23 * all other commercial damages or losses), even if such Contributor has been advised
24 * of the possibility of such damages.
25 */
26
27 #pragma once
28
29 /// \file
30 /// Defines Diligent::FixedLinearAllocator class
31
32 #include <vector>
33
34 #include "../../Primitives/interface/BasicTypes.h"
35 #include "../../Primitives/interface/MemoryAllocator.h"
36 #include "../../Platforms/Basic/interface/DebugUtilities.hpp"
37 #include "CompilerDefinitions.h"
38 #include "Align.hpp"
39
40 namespace Diligent
41 {
42
43 /// Implementation of a linear allocator on a fixed-size memory page
44 class FixedLinearAllocator
45 {
46 public:
47 // clang-format off
48 FixedLinearAllocator (const FixedLinearAllocator&) = delete;
49 FixedLinearAllocator& operator=(const FixedLinearAllocator&) = delete;
50 FixedLinearAllocator& operator=(FixedLinearAllocator&&) = delete;
51 // clang-format on
52
53 explicit FixedLinearAllocator(IMemoryAllocator& Allocator) noexcept :
54 m_pAllocator{&Allocator}
55 {}
56
57 FixedLinearAllocator(FixedLinearAllocator&& Other) noexcept :
58 // clang-format off
59 m_pDataStart {Other.m_pDataStart },
60 m_pCurrPtr {Other.m_pCurrPtr },
61 m_ReservedSize {Other.m_ReservedSize },
62 m_CurrAlignment{Other.m_CurrAlignment},
63 m_pAllocator {Other.m_pAllocator }
64 // clang-format on
65 {
66 Other.Reset();
67 }
68
69 ~FixedLinearAllocator()
70 {
71 Free();
72 }
73
74 void Free()
75 {
76 if (m_pDataStart != nullptr && m_pAllocator != nullptr)
77 {
78 m_pAllocator->Free(m_pDataStart);
79 }
80 Reset();
81 }
82
83 NODISCARD void* Release()
84 {
85 void* Ptr = m_pDataStart;
86 Reset();
87 return Ptr;
88 }
89
90 NODISCARD void* ReleaseOwnership() noexcept
91 {
92 m_pAllocator = nullptr;
93 return GetDataPtr();
94 }
95
96 NODISCARD void* GetDataPtr() const noexcept
97 {
98 return m_pDataStart;
99 }
100
101 void AddSpace(size_t size, size_t alignment) noexcept
102 {
103 VERIFY(m_pDataStart == nullptr, "Memory has already been allocated");
104 VERIFY(IsPowerOfTwo(alignment), "Alignment is not a power of two!");
105
106 if (size == 0)
107 return;
108
109 if (m_CurrAlignment == 0)
110 {
111 VERIFY(m_ReservedSize == 0, "This is expected to be the very first time space is added");
112 m_CurrAlignment = sizeof(void*);
113 }
114
115 if (alignment > m_CurrAlignment)
116 {
117 // Reserve extra space that may be needed for alignment
118 m_ReservedSize += alignment - m_CurrAlignment;
119 }
120 m_CurrAlignment = alignment;
121
122 size = Align(size, alignment);
123 m_ReservedSize += size;
124
125 #if DILIGENT_DEBUG
126 m_DbgAllocations.emplace_back(size, alignment, m_ReservedSize);
127 #endif
128 }
129
130 template <typename T>
131 void AddSpace(size_t count = 1) noexcept
132 {
133 AddSpace(sizeof(T) * count, alignof(T));
134 }
135
136 void AddSpaceForString(const Char* str) noexcept
137 {
138 VERIFY_EXPR(str != nullptr);
139 AddSpace(strlen(str) + 1, 1);
140 }
141
142 void AddSpaceForString(const String& str) noexcept
143 {
144 AddSpaceForString(str.c_str());
145 }
146
147 void Reserve(size_t size)
148 {
149 VERIFY(m_pDataStart == nullptr, "Memory has already been allocated");
150 VERIFY(m_ReservedSize == 0, "Space has been added to the allocator and will be overridden");
151 m_ReservedSize = size;
152 Reserve();
153 }
154
155 void Reserve()
156 {
157 VERIFY(m_pDataStart == nullptr, "Memory has already been allocated");
158 VERIFY(m_pAllocator != nullptr, "Allocator must not be null");
159 // Make sure the data size is at least sizeof(void*)-aligned
160 m_ReservedSize = Align(m_ReservedSize, sizeof(void*));
161 if (m_ReservedSize > 0)
162 {
163 m_pDataStart = reinterpret_cast<uint8_t*>(m_pAllocator->Allocate(m_ReservedSize, "Raw memory for linear allocator", __FILE__, __LINE__));
164 VERIFY(m_pDataStart == Align(m_pDataStart, sizeof(void*)), "Memory pointer must be at least sizeof(void*)-aligned");
165
166 m_pCurrPtr = m_pDataStart;
167 }
168 m_CurrAlignment = sizeof(void*);
169 }
170
171 NODISCARD void* Allocate(size_t size, size_t alignment)
172 {
173 VERIFY(size == 0 || m_pDataStart != nullptr, "Memory has not been allocated");
174 VERIFY(IsPowerOfTwo(alignment), "Alignment is not a power of two!");
175
176 if (size == 0)
177 return nullptr;
178
179 size = Align(size, alignment);
180
181 #if DILIGENT_DEBUG
182 VERIFY(m_DbgCurrAllocation < m_DbgAllocations.size(), "Allocation number exceeds the number of allocations that were originally reserved.");
183 const auto& CurrAllocation = m_DbgAllocations[m_DbgCurrAllocation++];
184 VERIFY(CurrAllocation.size == size, "Allocation size (", size, ") does not match the initially requested size (", CurrAllocation.size, ")");
185 VERIFY(CurrAllocation.alignment == alignment, "Allocation alignment (", alignment, ") does not match the initially requested alignment (", CurrAllocation.alignment, ")");
186 #endif
187
188 VERIFY(Align(m_pCurrPtr, m_CurrAlignment) == m_pCurrPtr, "Current pointer is not aligned as expected");
189 m_pCurrPtr = Align(m_pCurrPtr, alignment);
190 m_CurrAlignment = alignment;
191
192 VERIFY(m_pCurrPtr + size <= m_pDataStart + CurrAllocation.reserved_size,
193 "Allocation size exceeds the initially reserved space. This is likely a bug.");
194
195 auto* ptr = m_pCurrPtr;
196 m_pCurrPtr += size;
197
198 VERIFY(m_pCurrPtr <= m_pDataStart + m_ReservedSize, "Allocation size exceeds the reserved space");
199
200 return ptr;
201 }
202
203 template <typename T>
204 NODISCARD T* Allocate(size_t count = 1)
205 {
206 return reinterpret_cast<T*>(Allocate(sizeof(T) * count, alignof(T)));
207 }
208
209 template <typename T, typename... Args>
210 NODISCARD T* Construct(Args&&... args)
211 {
212 T* Ptr = Allocate<T>();
213 new (Ptr) T{std::forward<Args>(args)...};
214 return Ptr;
215 }
216
217 template <typename T, typename... Args>
218 NODISCARD T* ConstructArray(size_t count, const Args&... args)
219 {
220 T* Ptr = Allocate<T>(count);
221 for (size_t i = 0; i < count; ++i)
222 {
223 new (Ptr + i) T{args...};
224 }
225 return Ptr;
226 }
227
228 template <typename T>
229 NODISCARD T* Copy(const T& Src)
230 {
231 return Construct<T>(Src);
232 }
233
234 template <typename T>
235 NODISCARD T* CopyArray(const T* Src, size_t count)
236 {
237 T* Dst = Allocate<T>(count);
238 for (size_t i = 0; i < count; ++i)
239 {
240 new (Dst + i) T{Src[i]};
241 }
242 return Dst;
243 }
244
245 NODISCARD Char* CopyString(const char* Str)
246 {
247 if (Str == nullptr)
248 return nullptr;
249
250 auto* Ptr = reinterpret_cast<Char*>(Allocate(strlen(Str) + 1, 1));
251 Char* Dst = Ptr;
252
253 const auto* pDataEnd = reinterpret_cast<Char*>(m_pDataStart) + m_ReservedSize;
254 while (*Str != 0 && Dst < pDataEnd)
255 {
256 *(Dst++) = *(Str++);
257 }
258 if (Dst < pDataEnd)
259 *(Dst++) = 0;
260 else
261 UNEXPECTED("Not enough space reserved for the string");
262
263 VERIFY_EXPR(reinterpret_cast<Char*>(m_pCurrPtr) == Dst);
264 return Ptr;
265 }
266
267 NODISCARD Char* CopyString(const std::string& Str)
268 {
269 return CopyString(Str.c_str());
270 }
271
272 NODISCARD size_t GetCurrentSize() const
273 {
274 VERIFY(m_pDataStart != nullptr, "Memory has not been allocated");
275 return static_cast<size_t>(m_pCurrPtr - m_pDataStart);
276 }
277
278 NODISCARD size_t GetReservedSize() const
279 {
280 return m_ReservedSize;
281 }
282
283 private:
284 void Reset()
285 {
286 m_pDataStart = nullptr;
287 m_pCurrPtr = nullptr;
288 m_ReservedSize = 0;
289 m_CurrAlignment = 0;
290 m_pAllocator = nullptr;
291
292 #if DILIGENT_DEBUG
293 m_DbgCurrAllocation = 0;
294 m_DbgAllocations.clear();
295 #endif
296 }
297
298 uint8_t* m_pDataStart = nullptr;
299 uint8_t* m_pCurrPtr = nullptr;
300 size_t m_ReservedSize = 0;
301 size_t m_CurrAlignment = 0;
302 IMemoryAllocator* m_pAllocator = nullptr;
303
304 #if DILIGENT_DEBUG
305 size_t m_DbgCurrAllocation = 0;
306 struct DbgAllocationInfo
307 {
308 const size_t size;
309 const size_t alignment;
310 const size_t reserved_size;
311
312 DbgAllocationInfo(size_t _size, size_t _alignment, size_t _reserved_size) :
313 size{_size},
314 alignment{_alignment},
315 reserved_size{_reserved_size}
316 {
317 }
318 };
319 std::vector<DbgAllocationInfo> m_DbgAllocations;
320 #endif
321 };
322
323 } // namespace Diligent
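
For reference, a minimal usage sketch of the renamed class as defined above (the Item struct, function name, and string literal are made up for illustration; the raw allocator is obtained the same way as in the unit tests further down): record every allocation with AddSpace, commit a single backing allocation with Reserve, then carve out the objects in the same order.

#include "FixedLinearAllocator.hpp"
#include "DefaultRawMemoryAllocator.hpp"

using namespace Diligent;

struct Item { int a; float b; }; // hypothetical payload type

void FixedLinearAllocatorSketch()
{
    FixedLinearAllocator MemPool{DefaultRawMemoryAllocator::GetAllocator()};

    // Phase 1: declare everything that will be allocated, in order.
    MemPool.AddSpace<Item>(4);
    MemPool.AddSpaceForString("example name");

    // One backing allocation for all of the space declared above.
    MemPool.Reserve();

    // Phase 2: allocate in the same order and with the same sizes (verified in debug builds).
    Item* pItems = MemPool.ConstructArray<Item>(4);
    Char* pName  = MemPool.CopyString("example name");
    (void)pItems;
    (void)pName;

    // Detach the page from the allocator; the caller now owns the memory and
    // must release it through the same raw allocator.
    void* pRawMem = MemPool.Release();
    DefaultRawMemoryAllocator::GetAllocator().Free(pRawMem);
}
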
+0
-324
Common/interface/LinearAllocator.hpp
0 /*
1 * Copyright 2019-2020 Diligent Graphics LLC
2 * Copyright 2015-2019 Egor Yusov
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *
16 * In no event and under no legal theory, whether in tort (including negligence),
17 * contract, or otherwise, unless required by applicable law (such as deliberate
18 * and grossly negligent acts) or agreed to in writing, shall any Contributor be
19 * liable for any damages, including any direct, indirect, special, incidental,
20 * or consequential damages of any character arising as a result of this License or
21 * out of the use or inability to use the software (including but not limited to damages
22 * for loss of goodwill, work stoppage, computer failure or malfunction, or any and
23 * all other commercial damages or losses), even if such Contributor has been advised
24 * of the possibility of such damages.
25 */
26
27 #pragma once
28
29 /// \file
30 /// Defines Diligent::LinearAllocator class
31
32 #include <vector>
33
34 #include "../../Primitives/interface/BasicTypes.h"
35 #include "../../Primitives/interface/MemoryAllocator.h"
36 #include "../../Platforms/Basic/interface/DebugUtilities.hpp"
37 #include "CompilerDefinitions.h"
38 #include "Align.hpp"
39
40 namespace Diligent
41 {
42
43 /// Implementation of a linear allocator on a fixed-size memory page
44 class LinearAllocator
45 {
46 public:
47 // clang-format off
48 LinearAllocator (const LinearAllocator&) = delete;
49 LinearAllocator& operator=(const LinearAllocator&) = delete;
50 LinearAllocator& operator=(LinearAllocator&&) = delete;
51 // clang-format on
52
53 explicit LinearAllocator(IMemoryAllocator& Allocator) noexcept :
54 m_pAllocator{&Allocator}
55 {}
56
57 LinearAllocator(LinearAllocator&& Other) noexcept :
58 // clang-format off
59 m_pDataStart {Other.m_pDataStart },
60 m_pCurrPtr {Other.m_pCurrPtr },
61 m_ReservedSize {Other.m_ReservedSize },
62 m_CurrAlignment{Other.m_CurrAlignment},
63 m_pAllocator {Other.m_pAllocator }
64 // clang-format on
65 {
66 Other.Reset();
67 }
68
69 ~LinearAllocator()
70 {
71 Free();
72 }
73
74 void Free()
75 {
76 if (m_pDataStart != nullptr && m_pAllocator != nullptr)
77 {
78 m_pAllocator->Free(m_pDataStart);
79 }
80 Reset();
81 }
82
83 NODISCARD void* Release()
84 {
85 void* Ptr = m_pDataStart;
86 Reset();
87 return Ptr;
88 }
89
90 NODISCARD void* ReleaseOwnership() noexcept
91 {
92 m_pAllocator = nullptr;
93 return GetDataPtr();
94 }
95
96 NODISCARD void* GetDataPtr() const noexcept
97 {
98 return m_pDataStart;
99 }
100
101 void AddSpace(size_t size, size_t alignment) noexcept
102 {
103 VERIFY(m_pDataStart == nullptr, "Memory has already been allocated");
104 VERIFY(IsPowerOfTwo(alignment), "Alignment is not a power of two!");
105
106 if (size == 0)
107 return;
108
109 if (m_CurrAlignment == 0)
110 {
111 VERIFY(m_ReservedSize == 0, "This is expected to be a very first time the space is added");
112 m_CurrAlignment = sizeof(void*);
113 }
114
115 if (alignment > m_CurrAlignment)
116 {
117 // Reserve extra space that may be needed for alignment
118 m_ReservedSize += alignment - m_CurrAlignment;
119 }
120 m_CurrAlignment = alignment;
121
122 size = Align(size, alignment);
123 m_ReservedSize += size;
124
125 #if DILIGENT_DEBUG
126 m_DbgAllocations.emplace_back(size, alignment, m_ReservedSize);
127 #endif
128 }
129
130 template <typename T>
131 void AddSpace(size_t count = 1) noexcept
132 {
133 AddSpace(sizeof(T) * count, alignof(T));
134 }
135
136 void AddSpaceForString(const Char* str) noexcept
137 {
138 VERIFY_EXPR(str != nullptr);
139 AddSpace(strlen(str) + 1, 1);
140 }
141
142 void AddSpaceForString(const String& str) noexcept
143 {
144 AddSpaceForString(str.c_str());
145 }
146
147 void Reserve(size_t size)
148 {
149 VERIFY(m_pDataStart == nullptr, "Memory has already been allocated");
150 VERIFY(m_ReservedSize == 0, "Space has been added to the allocator and will be overriden");
151 m_ReservedSize = size;
152 Reserve();
153 }
154
155 void Reserve()
156 {
157 VERIFY(m_pDataStart == nullptr, "Memory has already been allocated");
158 VERIFY(m_pAllocator != nullptr, "Allocator must not be null");
159 // Make sure the data size is at least sizeof(void*)-aligned
160 m_ReservedSize = Align(m_ReservedSize, sizeof(void*));
161 if (m_ReservedSize > 0)
162 {
163 m_pDataStart = reinterpret_cast<uint8_t*>(m_pAllocator->Allocate(m_ReservedSize, "Raw memory for linear allocator", __FILE__, __LINE__));
164 VERIFY(m_pDataStart == Align(m_pDataStart, sizeof(void*)), "Memory pointer must be at least sizeof(void*)-aligned");
165
166 m_pCurrPtr = m_pDataStart;
167 }
168 m_CurrAlignment = sizeof(void*);
169 }
170
171 NODISCARD void* Allocate(size_t size, size_t alignment)
172 {
173 VERIFY(size == 0 || m_pDataStart != nullptr, "Memory has not been allocated");
174 VERIFY(IsPowerOfTwo(alignment), "Alignment is not a power of two!");
175
176 if (size == 0)
177 return nullptr;
178
179 size = Align(size, alignment);
180
181 #if DILIGENT_DEBUG
182 VERIFY(m_DbgCurrAllocation < m_DbgAllocations.size(), "Allocation number exceed the number of allocations that were originally reserved.");
183 const auto& CurrAllocation = m_DbgAllocations[m_DbgCurrAllocation++];
184 VERIFY(CurrAllocation.size == size, "Allocation size (", size, ") does not match the initially requested size (", CurrAllocation.size, ")");
185 VERIFY(CurrAllocation.alignment == alignment, "Allocation alignment (", alignment, ") does not match the initially requested alignment (", CurrAllocation.alignment, ")");
186 #endif
187
188 VERIFY(Align(m_pCurrPtr, m_CurrAlignment) == m_pCurrPtr, "Current pointer is not aligned as expected");
189 m_pCurrPtr = Align(m_pCurrPtr, alignment);
190 m_CurrAlignment = alignment;
191
192 VERIFY(m_pCurrPtr + size <= m_pDataStart + CurrAllocation.reserved_size,
193 "Allocation size exceeds the initially reserved space. This is likely a bug.");
194
195 auto* ptr = m_pCurrPtr;
196 m_pCurrPtr += size;
197
198 VERIFY(m_pCurrPtr <= m_pDataStart + m_ReservedSize, "Allocation size exceeds the reserved space");
199
200 return ptr;
201 }
202
203 template <typename T>
204 NODISCARD T* Allocate(size_t count = 1)
205 {
206 return reinterpret_cast<T*>(Allocate(sizeof(T) * count, alignof(T)));
207 }
208
209 template <typename T, typename... Args>
210 NODISCARD T* Construct(Args&&... args)
211 {
212 T* Ptr = Allocate<T>();
213 new (Ptr) T{std::forward<Args>(args)...};
214 return Ptr;
215 }
216
217 template <typename T, typename... Args>
218 NODISCARD T* ConstructArray(size_t count, const Args&... args)
219 {
220 T* Ptr = Allocate<T>(count);
221 for (size_t i = 0; i < count; ++i)
222 {
223 new (Ptr + i) T{args...};
224 }
225 return Ptr;
226 }
227
228 template <typename T>
229 NODISCARD T* Copy(const T& Src)
230 {
231 return Construct<T>(Src);
232 }
233
234 template <typename T>
235 NODISCARD T* CopyArray(const T* Src, size_t count)
236 {
237 T* Dst = Allocate<T>(count);
238 for (size_t i = 0; i < count; ++i)
239 {
240 new (Dst + i) T{Src[i]};
241 }
242 return Dst;
243 }
244
245 NODISCARD Char* CopyString(const char* Str)
246 {
247 if (Str == nullptr)
248 return nullptr;
249
250 auto* Ptr = reinterpret_cast<Char*>(Allocate(strlen(Str) + 1, 1));
251 Char* Dst = Ptr;
252
253 const auto* pDataEnd = reinterpret_cast<Char*>(m_pDataStart) + m_ReservedSize;
254 while (*Str != 0 && Dst < pDataEnd)
255 {
256 *(Dst++) = *(Str++);
257 }
258 if (Dst < pDataEnd)
259 *(Dst++) = 0;
260 else
261 UNEXPECTED("Not enough space reserved for the string");
262
263 VERIFY_EXPR(reinterpret_cast<Char*>(m_pCurrPtr) == Dst);
264 return Ptr;
265 }
266
267 NODISCARD Char* CopyString(const std::string& Str)
268 {
269 return CopyString(Str.c_str());
270 }
271
272 NODISCARD size_t GetCurrentSize() const
273 {
274 VERIFY(m_pDataStart != nullptr, "Memory has not been allocated");
275 return static_cast<size_t>(m_pCurrPtr - m_pDataStart);
276 }
277
278 NODISCARD size_t GetReservedSize() const
279 {
280 return m_ReservedSize;
281 }
282
283 private:
284 void Reset()
285 {
286 m_pDataStart = nullptr;
287 m_pCurrPtr = nullptr;
288 m_ReservedSize = 0;
289 m_CurrAlignment = 0;
290 m_pAllocator = nullptr;
291
292 #if DILIGENT_DEBUG
293 m_DbgCurrAllocation = 0;
294 m_DbgAllocations.clear();
295 #endif
296 }
297
298 uint8_t* m_pDataStart = nullptr;
299 uint8_t* m_pCurrPtr = nullptr;
300 size_t m_ReservedSize = 0;
301 size_t m_CurrAlignment = 0;
302 IMemoryAllocator* m_pAllocator = nullptr;
303
304 #if DILIGENT_DEBUG
305 size_t m_DbgCurrAllocation = 0;
306 struct DbgAllocationInfo
307 {
308 const size_t size;
309 const size_t alignment;
310 const size_t reserved_size;
311
312 DbgAllocationInfo(size_t _size, size_t _alignment, size_t _reserved_size) :
313 size{_size},
314 alignment{_alignment},
315 reserved_size{_reserved_size}
316 {
317 }
318 };
319 std::vector<DbgAllocationInfo> m_DbgAllocations;
320 #endif
321 };
322
323 } // namespace Diligent
3535 #include "BottomLevelAS.h"
3636 #include "DeviceObjectBase.hpp"
3737 #include "RenderDeviceBase.hpp"
38 #include "LinearAllocator.hpp"
38 #include "FixedLinearAllocator.hpp"
3939 #include "HashUtils.hpp"
4040
4141 namespace Diligent
5858 /// Copies bottom-level AS geometry description using MemPool to allocate required dynamic space.
5959 void CopyBLASGeometryDesc(const BottomLevelASDesc& SrcDesc,
6060 BottomLevelASDesc& DstDesc,
61 LinearAllocator& MemPool,
61 FixedLinearAllocator& MemPool,
6262 const BLASNameToIndex* pSrcNameToIndex,
6363 BLASNameToIndex& DstNameToIndex) noexcept(false);
6464
218218 private:
219219 void CopyGeometryDescriptionUnsafe(const BottomLevelASDesc& SrcDesc, const BLASNameToIndex* pSrcNameToIndex) noexcept(false)
220220 {
221 LinearAllocator MemPool{GetRawAllocator()};
221 FixedLinearAllocator MemPool{GetRawAllocator()};
222222 CopyBLASGeometryDesc(SrcDesc, this->m_Desc, MemPool, pSrcNameToIndex, this->m_NameToIndex);
223223 this->m_pRawPtr = MemPool.Release();
224224 }
3939 #include "STDAllocator.hpp"
4040 #include "EngineMemory.h"
4141 #include "GraphicsAccessories.hpp"
42 #include "LinearAllocator.hpp"
42 #include "FixedLinearAllocator.hpp"
4343 #include "HashUtils.hpp"
4444
4545 namespace Diligent
4848 void ValidateGraphicsPipelineCreateInfo(const GraphicsPipelineStateCreateInfo& CreateInfo) noexcept(false);
4949 void ValidateComputePipelineCreateInfo(const ComputePipelineStateCreateInfo& CreateInfo) noexcept(false);
5050 void ValidateRayTracingPipelineCreateInfo(IRenderDevice* pDevice, const RayTracingPipelineStateCreateInfo& CreateInfo) noexcept(false);
51
52 void CopyRayTracingShaderGroups(std::unordered_map<HashMapStringKey, Uint32, HashMapStringKey::Hasher>& NameToGroupIndex,
53 const RayTracingPipelineStateCreateInfo& CreateInfo,
54 FixedLinearAllocator& MemPool) noexcept(false);
5155
5256 void CorrectGraphicsPipelineDesc(GraphicsPipelineDesc& GraphicsPipeline) noexcept;
5357
294298
295299
296300 void ReserveSpaceForPipelineDesc(const GraphicsPipelineStateCreateInfo& CreateInfo,
297 LinearAllocator& MemPool) noexcept
301 FixedLinearAllocator& MemPool) noexcept
298302 {
299303 MemPool.AddSpace<GraphicsPipelineDesc>();
300304 ReserveResourceLayout(CreateInfo.PSODesc.ResourceLayout, MemPool);
314318 }
315319
316320 void ReserveSpaceForPipelineDesc(const ComputePipelineStateCreateInfo& CreateInfo,
317 LinearAllocator& MemPool) const noexcept
321 FixedLinearAllocator& MemPool) const noexcept
318322 {
319323 ReserveResourceLayout(CreateInfo.PSODesc.ResourceLayout, MemPool);
320324 }
321325
322326 void ReserveSpaceForPipelineDesc(const RayTracingPipelineStateCreateInfo& CreateInfo,
323 LinearAllocator& MemPool) const noexcept
327 FixedLinearAllocator& MemPool) const noexcept
324328 {
325329 for (Uint32 i = 0; i < CreateInfo.GeneralShaderCount; ++i)
326330 {
477481
478482
479483 void InitializePipelineDesc(const GraphicsPipelineStateCreateInfo& CreateInfo,
480 LinearAllocator& MemPool)
484 FixedLinearAllocator& MemPool)
481485 {
482486 this->m_pGraphicsPipelineDesc = MemPool.Copy(CreateInfo.GraphicsPipeline);
483487
607611 }
608612
609613 void InitializePipelineDesc(const ComputePipelineStateCreateInfo& CreateInfo,
610 LinearAllocator& MemPool)
614 FixedLinearAllocator& MemPool)
611615 {
612616 CopyResourceLayout(CreateInfo.PSODesc.ResourceLayout, this->m_Desc.ResourceLayout, MemPool);
613617 }
614618
615619 void InitializePipelineDesc(const RayTracingPipelineStateCreateInfo& CreateInfo,
616 TNameToGroupIndexMap&& NameToGroupIndex,
617 LinearAllocator& MemPool) noexcept
618 {
620 FixedLinearAllocator& MemPool) noexcept
621 {
622 TNameToGroupIndexMap NameToGroupIndex;
623 CopyRayTracingShaderGroups(NameToGroupIndex, CreateInfo, MemPool);
624
619625 CopyResourceLayout(CreateInfo.PSODesc.ResourceLayout, this->m_Desc.ResourceLayout, MemPool);
620626
621627 size_t RTDataSize = sizeof(RayTracingPipelineData);
635641 }
636642
637643 private:
638 static void ReserveResourceLayout(const PipelineResourceLayoutDesc& SrcLayout, LinearAllocator& MemPool) noexcept
644 static void ReserveResourceLayout(const PipelineResourceLayoutDesc& SrcLayout, FixedLinearAllocator& MemPool) noexcept
639645 {
640646 if (SrcLayout.Variables != nullptr)
641647 {
661667 static_assert(std::is_trivially_destructible<decltype(*SrcLayout.ImmutableSamplers)>::value, "Add destructor for this object to Destruct()");
662668 }
663669
664 static void CopyResourceLayout(const PipelineResourceLayoutDesc& SrcLayout, PipelineResourceLayoutDesc& DstLayout, LinearAllocator& MemPool)
670 static void CopyResourceLayout(const PipelineResourceLayoutDesc& SrcLayout, PipelineResourceLayoutDesc& DstLayout, FixedLinearAllocator& MemPool)
665671 {
666672 if (SrcLayout.Variables != nullptr)
667673 {
104104
105105 void CopyBLASGeometryDesc(const BottomLevelASDesc& SrcDesc,
106106 BottomLevelASDesc& DstDesc,
107 LinearAllocator& MemPool,
107 FixedLinearAllocator& MemPool,
108108 const BLASNameToIndex* pSrcNameToIndex,
109109 BLASNameToIndex& DstNameToIndex) noexcept(false)
110110 {
308308 }
309309 }
310310
311 void CopyRayTracingShaderGroups(std::unordered_map<HashMapStringKey, Uint32, HashMapStringKey::Hasher>& NameToGroupIndex,
312 const RayTracingPipelineStateCreateInfo& CreateInfo,
313 FixedLinearAllocator& MemPool) noexcept(false)
314 {
315 const auto& PSODesc = CreateInfo.PSODesc;
316 Uint32 GroupIndex = 0;
317
318 for (Uint32 i = 0; i < CreateInfo.GeneralShaderCount; ++i)
319 {
320 bool IsUniqueName = NameToGroupIndex.emplace(HashMapStringKey{MemPool.CopyString(CreateInfo.pGeneralShaders[i].Name)}, GroupIndex++).second;
321 if (!IsUniqueName)
322 LOG_PSO_ERROR_AND_THROW("pGeneralShaders[", i, "].Name must be unique");
323 }
324 for (Uint32 i = 0; i < CreateInfo.TriangleHitShaderCount; ++i)
325 {
326 bool IsUniqueName = NameToGroupIndex.emplace(HashMapStringKey{MemPool.CopyString(CreateInfo.pTriangleHitShaders[i].Name)}, GroupIndex++).second;
327 if (!IsUniqueName)
328 LOG_PSO_ERROR_AND_THROW("pTriangleHitShaders[", i, "].Name must be unique");
329 }
330 for (Uint32 i = 0; i < CreateInfo.ProceduralHitShaderCount; ++i)
331 {
332 bool IsUniqueName = NameToGroupIndex.emplace(HashMapStringKey{MemPool.CopyString(CreateInfo.pProceduralHitShaders[i].Name)}, GroupIndex++).second;
333 if (!IsUniqueName)
334 LOG_PSO_ERROR_AND_THROW("pProceduralHitShaders[", i, "].Name must be unique");
335 }
336
337 VERIFY_EXPR(Uint32{CreateInfo.GeneralShaderCount} + Uint32{CreateInfo.TriangleHitShaderCount} + Uint32{CreateInfo.ProceduralHitShaderCount} == GroupIndex);
338 }
339
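The map populated above is what the backends later consult when a shader group is referred to by name; a hedged sketch of such a lookup (the helper below is hypothetical and not part of this change):

// Hypothetical consumer: translate a group name recorded by
// CopyRayTracingShaderGroups into the index assigned during PSO creation.
// Returns ~0u when the name is unknown.
Uint32 FindShaderGroupIndex(const std::unordered_map<HashMapStringKey, Uint32, HashMapStringKey::Hasher>& NameToGroupIndex,
                            const char* Name)
{
    auto Iter = NameToGroupIndex.find(HashMapStringKey{Name});
    return Iter != NameToGroupIndex.end() ? Iter->second : ~0u;
}
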
311340 #undef VALIDATE_SHADER_TYPE
312341 #undef LOG_PSO_ERROR_AND_THROW
313342
4545 const auto NumShaderStages = GetNumShaderStages();
4646 VERIFY_EXPR(NumShaderStages > 0 && NumShaderStages == ShaderStages.size());
4747
48 LinearAllocator MemPool{GetRawAllocator()};
48 FixedLinearAllocator MemPool{GetRawAllocator()};
4949
5050 MemPool.AddSpace<ShaderResourceCacheD3D11>(NumShaderStages);
5151 MemPool.AddSpace<ShaderResourceLayoutD3D11>(NumShaderStages);
3030 #include "DeviceContextD3D11Impl.hpp"
3131 #include "RenderDeviceD3D11Impl.hpp"
3232 #include "ShaderD3D11Impl.hpp"
33 #include "LinearAllocator.hpp"
33 #include "FixedLinearAllocator.hpp"
3434
3535 namespace Diligent
3636 {
5454 m_ResourceLayoutIndex.fill(-1);
5555 m_NumActiveShaders = static_cast<Uint8>(pPSO->GetNumShaderStages());
5656
57 LinearAllocator MemPool{GetRawAllocator()};
57 FixedLinearAllocator MemPool{GetRawAllocator()};
5858 MemPool.AddSpace<ShaderResourceCacheD3D11>(m_NumActiveShaders);
5959 MemPool.AddSpace<ShaderResourceLayoutD3D11>(m_NumActiveShaders);
6060
9999 std::array<D3D12_PRIMITIVE_TOPOLOGY_TYPE, PRIMITIVE_TOPOLOGY_NUM_TOPOLOGIES> m_Map;
100100 };
101101
102
103 template <typename TNameToGroupIndexMap>
102 using TBindingMapPerStage = std::array<IDXCompiler::TResourceBindingMap, MAX_SHADERS_IN_PIPELINE>;
103
104104 void BuildRTPipelineDescription(const RayTracingPipelineStateCreateInfo& CreateInfo,
105 TNameToGroupIndexMap& NameToGroupIndex,
106105 std::vector<D3D12_STATE_SUBOBJECT>& Subobjects,
106 std::vector<CComPtr<IDxcBlob>>& ShaderBlobs,
107107 DynamicLinearAllocator& TempPool,
108 LinearAllocator& MemPool)
108 IDXCompiler* compiler,
109 const TBindingMapPerStage& BindingMapPerStage) noexcept(false)
109110 {
110111 #define LOG_PSO_ERROR_AND_THROW(...) LOG_ERROR_AND_THROW("Description of ray tracing PSO '", CreateInfo.PSODesc.Name, "' is invalid: ", ##__VA_ARGS__)
111112
112113 Uint32 ShaderIndex = 0;
113 Uint32 GroupIndex = 0;
114114
115115 std::unordered_map<IShader*, LPCWSTR> UniqueShaders;
116116
133133 auto Result = UniqueShaders.emplace(pShader, nullptr);
134134 if (Result.second)
135135 {
136 auto& LibDesc = *TempPool.Construct<D3D12_DXIL_LIBRARY_DESC>();
137 auto& ExportDesc = *TempPool.Construct<D3D12_EXPORT_DESC>();
138 auto* pShaderD3D12 = ValidatedCast<ShaderD3D12Impl>(pShader);
139
140 LibDesc.DXILLibrary.BytecodeLength = pShaderD3D12->GetShaderByteCode()->GetBufferSize();
141 LibDesc.DXILLibrary.pShaderBytecode = pShaderD3D12->GetShaderByteCode()->GetBufferPointer();
136 auto& LibDesc = *TempPool.Construct<D3D12_DXIL_LIBRARY_DESC>();
137 auto& ExportDesc = *TempPool.Construct<D3D12_EXPORT_DESC>();
138 auto* pShaderD3D12 = ValidatedCast<ShaderD3D12Impl>(pShader);
139 Uint32 ShaderIdx = GetShaderTypePipelineIndex(pShaderD3D12->GetDesc().ShaderType, PIPELINE_TYPE_RAY_TRACING);
140 auto& BindingMap = BindingMapPerStage[ShaderIdx];
141
142 CComPtr<IDxcBlob> pBlob;
143 if (!compiler->RemapResourceBinding(BindingMap, reinterpret_cast<IDxcBlob*>(pShaderD3D12->GetShaderByteCode()), &pBlob))
144 LOG_ERROR_AND_THROW("Failed to remap resource bindings");
145
146 LibDesc.DXILLibrary.BytecodeLength = pBlob->GetBufferSize();
147 LibDesc.DXILLibrary.pShaderBytecode = pBlob->GetBufferPointer();
142148 LibDesc.NumExports = 1;
143149 LibDesc.pExports = &ExportDesc;
144150
151157 ExportDesc.Name = ShaderIndexToStr(++ShaderIndex);
152158
153159 Subobjects.push_back({D3D12_STATE_SUBOBJECT_TYPE_DXIL_LIBRARY, &LibDesc});
160 ShaderBlobs.push_back(pBlob);
154161
155162 Result.first->second = ExportDesc.Name;
156163 return ExportDesc.Name;
161168 return nullptr;
162169 };
163170
171 ShaderBlobs.reserve(CreateInfo.GeneralShaderCount + CreateInfo.TriangleHitShaderCount + CreateInfo.ProceduralHitShaderCount);
172
164173 for (Uint32 i = 0; i < CreateInfo.GeneralShaderCount; ++i)
165174 {
166175 const auto& GeneralShader = CreateInfo.pGeneralShaders[i];
167176 AddDxilLib(GeneralShader.pShader, GeneralShader.Name);
168
169 bool IsUniqueName = NameToGroupIndex.emplace(HashMapStringKey{MemPool.CopyString(GeneralShader.Name)}, GroupIndex++).second;
170 if (!IsUniqueName)
171 LOG_PSO_ERROR_AND_THROW("pGeneralShaders[", i, "].Name must be unique");
172177 }
173178
174179 for (Uint32 i = 0; i < CreateInfo.TriangleHitShaderCount; ++i)
183188 HitGroupDesc.IntersectionShaderImport = nullptr;
184189
185190 Subobjects.push_back({D3D12_STATE_SUBOBJECT_TYPE_HIT_GROUP, &HitGroupDesc});
186
187 bool IsUniqueName = NameToGroupIndex.emplace(HashMapStringKey{MemPool.CopyString(TriHitShader.Name)}, GroupIndex++).second;
188 if (!IsUniqueName)
189 LOG_PSO_ERROR_AND_THROW("pTriangleHitShaders[", i, "].Name must be unique");
190191 }
191192
192193 for (Uint32 i = 0; i < CreateInfo.ProceduralHitShaderCount; ++i)
201202 HitGroupDesc.IntersectionShaderImport = AddDxilLib(ProcHitShader.pIntersectionShader, nullptr);
202203
203204 Subobjects.push_back({D3D12_STATE_SUBOBJECT_TYPE_HIT_GROUP, &HitGroupDesc});
204
205 bool IsUniqueName = NameToGroupIndex.emplace(HashMapStringKey{MemPool.CopyString(ProcHitShader.Name)}, GroupIndex++).second;
206 if (!IsUniqueName)
207 LOG_PSO_ERROR_AND_THROW("pProceduralHitShaders[", i, "].Name must be unique");
208 }
209
210 VERIFY_EXPR(Uint32{CreateInfo.GeneralShaderCount} + Uint32{CreateInfo.TriangleHitShaderCount} + Uint32{CreateInfo.ProceduralHitShaderCount} == GroupIndex);
205 }
211206
212207 constexpr Uint32 RecursionDepthLimit = D3D12_RAYTRACING_MAX_DECLARABLE_TRACE_RECURSION_DEPTH - 1;
213208 if (CreateInfo.RayTracingPipeline.MaxRecursionDepth > RecursionDepthLimit)
283278 }
284279 }
285280
286 void RemapResourceBinding(IDXCompiler* compiler,
287 const RootSignature& RootSig,
288 const std::array<Int8, MAX_SHADERS_IN_PIPELINE>& ResourceLayoutIndex,
289 const ShaderResourceLayoutD3D12* pResourceLayouts,
290 const ShaderResourceLayoutD3D12* pStaticLayouts,
291 Uint32 NumStages,
292 std::vector<D3D12_STATE_SUBOBJECT>& Subobjects,
293 std::vector<CComPtr<IDxcBlob>>& ShaderBlobs) noexcept(false)
294 {
295 IDXCompiler::TBindingMapPerStage BindingMapPerStage;
296 String EntryPoint;
297
281 void ExtractResourceBindingMap(const RootSignature& RootSig,
282 const std::array<Int8, MAX_SHADERS_IN_PIPELINE>& ResourceLayoutIndex,
283 const ShaderResourceLayoutD3D12* pResourceLayouts,
284 const ShaderResourceLayoutD3D12* pStaticLayouts,
285 TBindingMapPerStage& BindingMapPerStage) noexcept(false)
286 {
298287 const auto ExtractResources = [&](const ShaderResourceLayoutD3D12* pLayouts) //
299288 {
300289 for (Uint32 ShaderIdx = 0; ShaderIdx < ResourceLayoutIndex.size(); ++ShaderIdx)
314303 for (Uint32 i = 0; i < ResCount; ++i)
315304 {
316305 const auto& Attribs = ResLayout.GetSrvCbvUav(VarType, i).Attribs;
317 auto Iter = BindingMap.emplace(HashMapStringKey{Attribs.Name}, Attribs.BindPoint).first;
306 VERIFY_EXPR(Attribs.Name != nullptr && strlen(Attribs.Name) > 0);
307
308 auto Iter = BindingMap.emplace(HashMapStringKey{Attribs.Name}, Attribs.BindPoint).first;
318309 VERIFY_EXPR(Iter->second == Attribs.BindPoint);
319310 }
320311 for (Uint32 i = 0; i < SampCount; ++i)
321312 {
322313 const auto& Attribs = ResLayout.GetSampler(VarType, i).Attribs;
323 auto Iter = BindingMap.emplace(HashMapStringKey{Attribs.Name}, Attribs.BindPoint).first;
314 VERIFY_EXPR(Attribs.Name != nullptr && strlen(Attribs.Name) > 0);
315
316 auto Iter = BindingMap.emplace(HashMapStringKey{Attribs.Name}, Attribs.BindPoint).first;
324317 VERIFY_EXPR(Iter->second == Attribs.BindPoint);
325318 }
326319 }
337330 if (LayoutIdx < 0)
338331 continue;
339332
333 VERIFY_EXPR(ImtblSmplr.Name.length() > 0);
334 if (ImtblSmplr.Name.empty())
335 continue;
336
340337 auto& BindingMap = BindingMapPerStage[ShaderIdx];
341338 BindingMap.emplace(HashMapStringKey{ImtblSmplr.Name.c_str()}, ImtblSmplr.ShaderRegister);
342 }
343
344 for (auto& SubObj : Subobjects)
345 {
346 if (SubObj.Type == D3D12_STATE_SUBOBJECT_TYPE_DXIL_LIBRARY)
347 {
348 auto& DxilLib = *reinterpret_cast<D3D12_DXIL_LIBRARY_DESC*>(const_cast<void*>(SubObj.pDesc));
349 VERIFY_EXPR(DxilLib.NumExports == 1);
350
351 const auto& Export = *DxilLib.pExports;
352 EntryPoint.resize(wcslen(Export.ExportToRename));
353 for (size_t i = 0; i < EntryPoint.size(); ++i)
354 EntryPoint[i] = static_cast<char>(Export.ExportToRename[i]);
355
356 CComPtr<IDxcBlob> pBlob;
357 compiler->RemapResourceBinding(BindingMapPerStage, EntryPoint.c_str(), DxilLib.DXILLibrary.pShaderBytecode, DxilLib.DXILLibrary.BytecodeLength, &pBlob);
358
359 if (!pBlob)
360 LOG_ERROR_AND_THROW("Failed to remap resource bindings");
361
362 DxilLib.DXILLibrary.pShaderBytecode = pBlob->GetBufferPointer();
363 DxilLib.DXILLibrary.BytecodeLength = pBlob->GetBufferSize();
364
365 ShaderBlobs.push_back(pBlob);
366 }
367339 }
368340 }
369341
397369
398370 ExtractShaders<ShaderD3D12Impl>(CreateInfo, ShaderStages);
399371
400 LinearAllocator MemPool{GetRawAllocator()};
372 FixedLinearAllocator MemPool{GetRawAllocator()};
401373
402374 const auto NumShaderStages = GetNumShaderStages();
403375 VERIFY_EXPR(NumShaderStages > 0 && NumShaderStages == ShaderStages.size());
444416 {
445417 TShaderStages ShaderStages;
446418 InitInternalObjects(CreateInfo, ShaderStages, nullptr,
447 [this](const GraphicsPipelineStateCreateInfo& CreateInfo, LinearAllocator& MemPool) //
419 [this](const GraphicsPipelineStateCreateInfo& CreateInfo, FixedLinearAllocator& MemPool) //
448420 {
449421 InitializePipelineDesc(CreateInfo, MemPool);
450422 } //
655627 {
656628 TShaderStages ShaderStages;
657629 InitInternalObjects(CreateInfo, ShaderStages, nullptr,
658 [this](const ComputePipelineStateCreateInfo& CreateInfo, LinearAllocator& MemPool) //
630 [this](const ComputePipelineStateCreateInfo& CreateInfo, FixedLinearAllocator& MemPool) //
659631 {
660632 InitializePipelineDesc(CreateInfo, MemPool);
661633 } //
712684 {
713685 try
714686 {
715 LocalRootSignature LocalRootSig{CreateInfo.pShaderRecordName, CreateInfo.RayTracingPipeline.ShaderRecordSize};
716 TShaderStages ShaderStages;
717 std::vector<D3D12_STATE_SUBOBJECT> Subobjects;
718 DynamicLinearAllocator TempPool{GetRawAllocator(), 4 << 10};
687 LocalRootSignature LocalRootSig{CreateInfo.pShaderRecordName, CreateInfo.RayTracingPipeline.ShaderRecordSize};
688 TShaderStages ShaderStages;
689 DynamicLinearAllocator TempPool{GetRawAllocator(), 4 << 10};
719690
720691 InitInternalObjects(CreateInfo, ShaderStages, &LocalRootSig,
721 [&](const RayTracingPipelineStateCreateInfo& CreateInfo, LinearAllocator& MemPool) //
692 [&](const RayTracingPipelineStateCreateInfo& CreateInfo, FixedLinearAllocator& MemPool) //
722693 {
723 TNameToGroupIndexMap NameToGroupIndex;
724 BuildRTPipelineDescription(CreateInfo, NameToGroupIndex, Subobjects, TempPool, MemPool);
725 InitializePipelineDesc(CreateInfo, std::move(NameToGroupIndex), MemPool);
694 InitializePipelineDesc(CreateInfo, MemPool);
726695 } //
727696 );
728697
729698 auto pd3d12Device = pDeviceD3D12->GetD3D12Device5();
699
700 TBindingMapPerStage BindingMapPerStage;
701 ExtractResourceBindingMap(m_RootSig, m_ResourceLayoutIndex, &m_pShaderResourceLayouts[0], &m_pShaderResourceLayouts[GetNumShaderStages()], BindingMapPerStage);
702
703 std::vector<D3D12_STATE_SUBOBJECT> Subobjects;
704 std::vector<CComPtr<IDxcBlob>> ShaderBlobs;
705 BuildRTPipelineDescription(CreateInfo, Subobjects, ShaderBlobs, TempPool, pDeviceD3D12->GetDxCompiler(), BindingMapPerStage);
730706
731707 D3D12_GLOBAL_ROOT_SIGNATURE GlobalRoot = {m_RootSig.GetD3D12RootSignature()};
732708 Subobjects.push_back({D3D12_STATE_SUBOBJECT_TYPE_GLOBAL_ROOT_SIGNATURE, &GlobalRoot});
734710 D3D12_LOCAL_ROOT_SIGNATURE LocalRoot = {LocalRootSig.Create(pd3d12Device)};
735711 if (LocalRoot.pLocalRootSignature)
736712 Subobjects.push_back({D3D12_STATE_SUBOBJECT_TYPE_LOCAL_ROOT_SIGNATURE, &LocalRoot});
737
738 std::vector<CComPtr<IDxcBlob>> ShaderBlobs;
739 RemapResourceBinding(pDeviceD3D12->GetDxCompiler(), m_RootSig, m_ResourceLayoutIndex,
740 &m_pShaderResourceLayouts[0], &m_pShaderResourceLayouts[GetNumShaderStages()], GetNumShaderStages(),
741 Subobjects, ShaderBlobs);
742713
743714 D3D12_STATE_OBJECT_DESC RTPipelineDesc = {};
744715 RTPipelineDesc.Type = D3D12_STATE_OBJECT_TYPE_RAYTRACING_PIPELINE;
810781 }
811782
812783 IMPLEMENT_QUERY_INTERFACE(PipelineStateD3D12Impl, IID_PipelineStateD3D12, TPipelineStateBase)
813
814784
815785 void PipelineStateD3D12Impl::InitResourceLayouts(const PipelineStateCreateInfo& CreateInfo,
816786 TShaderStages& ShaderStages,
2929 #include "PipelineStateD3D12Impl.hpp"
3030 #include "ShaderD3D12Impl.hpp"
3131 #include "RenderDeviceD3D12Impl.hpp"
32 #include "LinearAllocator.hpp"
32 #include "FixedLinearAllocator.hpp"
3333
3434 namespace Diligent
3535 {
5252 {
5353 m_ResourceLayoutIndex.fill(-1);
5454
55 LinearAllocator MemPool{GetRawAllocator()};
55 FixedLinearAllocator MemPool{GetRawAllocator()};
5656 MemPool.AddSpace<ShaderVariableManagerD3D12>(m_NumShaders);
5757 MemPool.Reserve();
5858 m_pShaderVarMgrs = MemPool.ConstructArray<ShaderVariableManagerD3D12>(m_NumShaders, std::ref(*this), std::ref(m_ShaderResourceCache));
3838 #include "PipelineStateD3D12Impl.hpp"
3939 #include "ShaderResourceVariableBase.hpp"
4040 #include "ShaderVariableD3DBase.hpp"
41 #include "LinearAllocator.hpp"
41 #include "FixedLinearAllocator.hpp"
4242 #include "TopLevelASD3D12.h"
4343
4444 namespace Diligent
109109 VERIFY_EXPR(GetSamplerCount(VarType) == SamplerCount[VarType]);
110110 }
111111
112 LinearAllocator MemPool{Allocator};
112 FixedLinearAllocator MemPool{Allocator};
113113 MemPool.AddSpace<D3D12Resource>(GetTotalResourceCount());
114114 MemPool.AddSpace<char>(StringPoolSize);
115115
111111 void Initialize(const PSOCreateInfoType& CreateInfo, const std::vector<GLPipelineShaderStageInfo>& ShaderStages);
112112
113113 void InitResourceLayouts(const std::vector<GLPipelineShaderStageInfo>& ShaderStages,
114 LinearAllocator& MemPool);
114 FixedLinearAllocator& MemPool);
115115
116116 void Destruct();
117117
3939 template <typename PSOCreateInfoType>
4040 void PipelineStateGLImpl::Initialize(const PSOCreateInfoType& CreateInfo, const std::vector<GLPipelineShaderStageInfo>& ShaderStages)
4141 {
42 LinearAllocator MemPool{GetRawAllocator()};
42 FixedLinearAllocator MemPool{GetRawAllocator()};
4343 VERIFY_EXPR(m_NumShaderStages > 0 && m_NumShaderStages == ShaderStages.size());
4444 if (!GetDevice()->GetDeviceCaps().Features.SeparablePrograms)
4545 m_NumShaderStages = 1;
189189
190190
191191 void PipelineStateGLImpl::InitResourceLayouts(const std::vector<GLPipelineShaderStageInfo>& ShaderStages,
192 LinearAllocator& MemPool)
192 FixedLinearAllocator& MemPool)
193193 {
194194 auto* const pDeviceGL = GetDevice();
195195 const auto& deviceCaps = pDeviceGL->GetDeviceCaps();
381381
382382 template <typename TNameToGroupIndexMap>
383383 void BuildRTPipelineDescription(const RayTracingPipelineStateCreateInfo& CreateInfo,
384 TNameToGroupIndexMap& NameToGroupIndex,
384 const TNameToGroupIndexMap& NameToGroupIndex,
385385 std::vector<VkRayTracingShaderGroupCreateInfoKHR>& ShaderGroups,
386 const ShaderResourceLayoutVk::TShaderStages& ShaderStages,
387 LinearAllocator& MemPool)
386 const ShaderResourceLayoutVk::TShaderStages& ShaderStages)
388387 {
389388 #define LOG_PSO_ERROR_AND_THROW(...) LOG_ERROR_AND_THROW("Description of ray tracing PSO '", CreateInfo.PSODesc.Name, "' is invalid: ", ##__VA_ARGS__)
390389 ShaderGroups.reserve(CreateInfo.GeneralShaderCount + CreateInfo.TriangleHitShaderCount + CreateInfo.ProceduralHitShaderCount);
391
392 Uint32 GroupIndex = 0;
393390
394391 std::array<Uint32, MAX_SHADERS_IN_PIPELINE> ShaderIndices = {};
395392 std::unordered_map<const IShader*, Uint32> UniqueShaders;
428425 Group.anyHitShader = VK_SHADER_UNUSED_KHR;
429426 Group.intersectionShader = VK_SHADER_UNUSED_KHR;
430427
431 bool IsUniqueName = NameToGroupIndex.emplace(HashMapStringKey{MemPool.CopyString(GeneralShader.Name)}, GroupIndex++).second;
432 if (!IsUniqueName)
433 LOG_PSO_ERROR_AND_THROW("pGeneralShaders[", i, "].Name must be unique");
428 #ifdef DILIGENT_DEVELOPMENT
429 auto Iter = NameToGroupIndex.find(GeneralShader.Name);
430 CHECK_THROW(Iter != NameToGroupIndex.end());
431 CHECK_THROW(Iter->second == ShaderGroups.size());
432 #endif
434433
435434 ShaderGroups.push_back(Group);
436435 }
448447 Group.anyHitShader = ShaderToIndex(TriHitShader.pAnyHitShader);
449448 Group.intersectionShader = VK_SHADER_UNUSED_KHR;
450449
451 bool IsUniqueName = NameToGroupIndex.emplace(HashMapStringKey{MemPool.CopyString(TriHitShader.Name)}, GroupIndex++).second;
452 if (!IsUniqueName)
453 LOG_PSO_ERROR_AND_THROW("pTriangleHitShaders[", i, "].Name must be unique");
450 #ifdef DILIGENT_DEVELOPMENT
451 auto Iter = NameToGroupIndex.find(TriHitShader.Name);
452 CHECK_THROW(Iter != NameToGroupIndex.end());
453 CHECK_THROW(Iter->second == ShaderGroups.size());
454 #endif
454455
455456 ShaderGroups.push_back(Group);
456457 }
468469 Group.closestHitShader = ShaderToIndex(ProcHitShader.pClosestHitShader);
469470 Group.anyHitShader = ShaderToIndex(ProcHitShader.pAnyHitShader);
470471
471 bool IsUniqueName = NameToGroupIndex.emplace(HashMapStringKey{MemPool.CopyString(ProcHitShader.Name)}, GroupIndex++).second;
472 if (!IsUniqueName)
473 LOG_PSO_ERROR_AND_THROW("pProceduralHitShaders[", i, "].Name must be unique");
472 #ifdef DILIGENT_DEVELOPMENT
473 auto Iter = NameToGroupIndex.find(ProcHitShader.Name);
474 CHECK_THROW(Iter != NameToGroupIndex.end());
475 CHECK_THROW(Iter->second == ShaderGroups.size());
476 #endif
474477
475478 ShaderGroups.push_back(Group);
476479 }
477
478 VERIFY_EXPR(Uint32{CreateInfo.GeneralShaderCount} + Uint32{CreateInfo.TriangleHitShaderCount} + Uint32{CreateInfo.ProceduralHitShaderCount} == GroupIndex);
479480
480481 #ifdef DILIGENT_DEVELOPMENT
481482 Uint32 ShaderIndex2 = 0;
654655 TShaderStages ShaderStages;
655656 ExtractShaders<ShaderVkImpl>(CreateInfo, ShaderStages);
656657
657 LinearAllocator MemPool{GetRawAllocator()};
658 FixedLinearAllocator MemPool{GetRawAllocator()};
658659
659660 const auto NumShaderStages = GetNumShaderStages();
660661 VERIFY_EXPR(NumShaderStages > 0 && NumShaderStages == ShaderStages.size());
706707 std::vector<VulkanUtilities::ShaderModuleWrapper> ShaderModules;
707708
708709 InitInternalObjects(CreateInfo, vkShaderStages, ShaderModules,
709 [this](const GraphicsPipelineStateCreateInfo& CreateInfo, LinearAllocator& MemPool, TShaderStages /*ShaderStages*/) //
710 [this](const GraphicsPipelineStateCreateInfo& CreateInfo, FixedLinearAllocator& MemPool, TShaderStages /*ShaderStages*/) //
710711 {
711712 InitializePipelineDesc(CreateInfo, MemPool);
712713 } //
734735 std::vector<VulkanUtilities::ShaderModuleWrapper> ShaderModules;
735736
736737 InitInternalObjects(CreateInfo, vkShaderStages, ShaderModules,
737 [this](const ComputePipelineStateCreateInfo& CreateInfo, LinearAllocator& MemPool, TShaderStages /*ShaderStages*/) //
738 [this](const ComputePipelineStateCreateInfo& CreateInfo, FixedLinearAllocator& MemPool, TShaderStages /*ShaderStages*/) //
738739 {
739740 InitializePipelineDesc(CreateInfo, MemPool);
740741 } //
765766 std::vector<VkRayTracingShaderGroupCreateInfoKHR> ShaderGroups;
766767
767768 InitInternalObjects(CreateInfo, vkShaderStages, ShaderModules,
768 [&](const RayTracingPipelineStateCreateInfo& CreateInfo, LinearAllocator& MemPool, TShaderStages& ShaderStages) //
769 [&](const RayTracingPipelineStateCreateInfo& CreateInfo, FixedLinearAllocator& MemPool, TShaderStages& ShaderStages) //
769770 {
770 TNameToGroupIndexMap NameToGroupIndex;
771 BuildRTPipelineDescription(CreateInfo, NameToGroupIndex, ShaderGroups, ShaderStages, MemPool);
772 InitializePipelineDesc(CreateInfo, std::move(NameToGroupIndex), MemPool);
771 InitializePipelineDesc(CreateInfo, MemPool);
772 BuildRTPipelineDescription(CreateInfo, m_pRayTracingPipelineData->NameToGroupIndex, ShaderGroups, ShaderStages);
773773 } //
774774 );
775775
2929 #include "PipelineStateVkImpl.hpp"
3030 #include "ShaderVkImpl.hpp"
3131 #include "RenderDeviceVkImpl.hpp"
32 #include "LinearAllocator.hpp"
32 #include "FixedLinearAllocator.hpp"
3333
3434 namespace Diligent
3535 {
5353
5454 m_NumShaders = static_cast<decltype(m_NumShaders)>(pPSO->GetNumShaderStages());
5555
56 LinearAllocator MemPool{GetRawAllocator()};
56 FixedLinearAllocator MemPool{GetRawAllocator()};
5757 MemPool.AddSpace<ShaderVariableManagerVk>(m_NumShaders);
5858 MemPool.Reserve();
5959 m_pShaderVarMgrs = MemPool.ConstructArray<ShaderVariableManagerVk>(m_NumShaders, std::ref(*this), std::ref(m_ShaderResourceCache));
192192 }
193193 }
194194
195 LinearAllocator MemPool{Allocator};
195 FixedLinearAllocator MemPool{Allocator};
196196
197197 MemPool.AddSpace<VkResource>(TotalResources);
198198 MemPool.AddSpace<ImmutableSamplerPtrType>(m_NumImmutableSamplers);
8484 IDataBlob** ppCompilerOutput) noexcept(false) = 0;
8585
8686 using TResourceBindingMap = std::unordered_map<HashMapStringKey, Uint32, HashMapStringKey::Hasher>;
87 using TBindingMapPerStage = std::array<TResourceBindingMap, MAX_SHADERS_IN_PIPELINE>;
8887
89 virtual bool RemapResourceBinding(const TBindingMapPerStage& BindingMapPerStage,
90 const char* EntryPoint,
91 const void* pBytecode,
92 size_t BytecodeSize,
93 IDxcBlob** ppByteCodeBlob) = 0;
88 virtual bool RemapResourceBinding(const TResourceBindingMap& ResourceMap,
89 IDxcBlob* pSrcBytecode,
90 IDxcBlob** ppDstByteCode) = 0;
9491
9592 // Attempts to extract shader reflection from the bytecode using DXC.
9693 virtual void GetD3D12ShaderReflection(IDxcBlob* pShaderBytecode,
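The simplified IDXCompiler::RemapResourceBinding signature above takes a single per-shader name-to-register map and the source bytecode as an IDxcBlob; a rough sketch of a call site (the resource name, register, and variable names are illustrative only):

// Build a name -> bind point map for one shader stage and let DXC patch the DXIL.
IDXCompiler::TResourceBindingMap BindingMap;
BindingMap.emplace(HashMapStringKey{"g_ColorBuffer"}, Uint32{3}); // hypothetical resource/register

CComPtr<IDxcBlob> pRemappedBytecode;
if (!pDxCompiler->RemapResourceBinding(BindingMap, pSrcBytecode, &pRemappedBytecode))
    LOG_ERROR_AND_THROW("Failed to remap resource bindings");
// pRemappedBytecode now holds the re-signed DXIL with the updated bind points.
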
4141 #include "DataBlobImpl.hpp"
4242 #include "RefCntAutoPtr.hpp"
4343 #include "ShaderToolsCommon.hpp"
44 #include "PlatformMisc.hpp"
45 #include "GraphicsAccessories.hpp"
4644
4745 #if D3D12_SUPPORTED
4846 # include <d3d12shader.h>
9391 virtual void GetD3D12ShaderReflection(IDxcBlob* pShaderBytecode,
9492 ID3D12ShaderReflection** ppShaderReflection) override final;
9593
96 virtual bool RemapResourceBinding(const TBindingMapPerStage& BindingMapPerStage,
97 const char* EntryPoint,
98 const void* pBytecode,
99 size_t BytecodeSize,
100 IDxcBlob** ppByteCodeBlob) override final;
94 virtual bool RemapResourceBinding(const TResourceBindingMap& ResourceMap,
95 IDxcBlob* pSrcBytecode,
96 IDxcBlob** ppDstByteCode) override final;
10197
10298 private:
10399 DxcCreateInstanceProc Load()
140136 return m_pCreateInstance;
141137 }
142138
143 bool ValidateAndSign(DxcCreateInstanceProc CreateInstance, IDxcLibrary* library, CComPtr<IDxcBlob>& compiled, IDxcBlob** ppBlobOut) const;
144 bool PatchDXIL(const TResourceBindingMap& ResourceMap, String& DXIL) const;
145 SHADER_TYPE GetEntryShaderType(const String& EntryPoint, const String& DXIL) const;
139 bool ValidateAndSign(DxcCreateInstanceProc CreateInstance, IDxcLibrary* library, CComPtr<IDxcBlob>& compiled, IDxcBlob** ppBlobOut) const;
140 bool PatchDXIL(const TResourceBindingMap& ResourceMap, String& DXIL) const;
146141
147142 private:
148143 DxcCreateInstanceProc m_pCreateInstance = nullptr;
764759 }
765760 }
766761
767 bool DXCompilerImpl::RemapResourceBinding(const TBindingMapPerStage& BindingMapPerStage,
768 const char* EntryPoint,
769 const void* pBytecode,
770 size_t BytecodeSize,
771 IDxcBlob** ppByteCodeBlob)
762 bool DXCompilerImpl::RemapResourceBinding(const TResourceBindingMap& ResourceMap,
763 IDxcBlob* pSrcBytecode,
764 IDxcBlob** ppDstByteCode)
772765 {
773766 #if D3D12_SUPPORTED
774767 auto CreateInstance = GetCreateInstaceProc();
804797 return false;
805798 }
806799
807 CComPtr<IDxcBlobEncoding> srcBytecode;
808 hr = library->CreateBlobWithEncodingFromPinned(pBytecode, static_cast<Uint32>(BytecodeSize), 0, &srcBytecode);
809 if (FAILED(hr))
810 {
811 LOG_ERROR("Failed to create bytecode blob");
812 return false;
813 }
814
815800 CComPtr<IDxcBlobEncoding> disasm;
816 hr = compiler->Disassemble(srcBytecode, &disasm);
801 hr = compiler->Disassemble(pSrcBytecode, &disasm);
817802 if (FAILED(hr))
818803 {
819804 LOG_ERROR("Failed to disassemble bytecode");
822807
823808 String dxilAsm;
824809 dxilAsm.assign(static_cast<const char*>(disasm->GetBufferPointer()), disasm->GetBufferSize());
825
826 SHADER_TYPE shaderType = GetEntryShaderType(EntryPoint, dxilAsm);
827 const Uint32 shaderIndex = GetShaderTypePipelineIndex(shaderType, PIPELINE_TYPE_RAY_TRACING);
828 const auto& ResourceMap = BindingMapPerStage[shaderIndex];
829810
830811 if (!PatchDXIL(ResourceMap, dxilAsm))
831812 {
873854 if (FAILED(hr))
874855 return false;
875856
876 return ValidateAndSign(CreateInstance, library, compiled, ppByteCodeBlob);
857 return ValidateAndSign(CreateInstance, library, compiled, ppDstByteCode);
877858 #else
878859
879860 return false;
945926 return true;
946927 }
947928
948 namespace
949 {
950 template <Uint32 S>
951 inline bool ReverseCmp(const char* lhsRev, const char (&rhs)[S])
952 {
953 const Uint32 count = S - 1;
954 const char* lhs = lhsRev - count;
955 return std::memcmp(lhs, rhs, count) == 0;
956 }
957 } // namespace
958
959 SHADER_TYPE DXCompilerImpl::GetEntryShaderType(const String& EntryPoint, const String& DXIL) const
960 {
961 const String Pattern = "void " + EntryPoint + "(";
962 const char ShaderTypeStart[] = "[shader(\\22";
963 const char ShaderTypeEnd[] = "\\22)]";
964 const char RayGenShader[] = "raygeneration";
965 const char MissShader[] = "miss";
966 const char AnyHitShader[] = "anyhit";
967 const char ClosestHitShader[] = "closesthit";
968 const char IntersectionShader[] = "intersection";
969 const char CallableShader[] = "callable";
970
971 size_t pos = DXIL.find(Pattern);
972 if (pos == String::npos)
973 return SHADER_TYPE_UNKNOWN;
974
975 size_t endPos = DXIL.rfind(ShaderTypeEnd, pos);
976 if (endPos == String::npos)
977 return SHADER_TYPE_UNKNOWN;
978
979 const char* str = &DXIL[endPos];
980 // clang-format off
981 if (ReverseCmp(str, RayGenShader )) return SHADER_TYPE_RAY_GEN;
982 if (ReverseCmp(str, MissShader )) return SHADER_TYPE_RAY_MISS;
983 if (ReverseCmp(str, AnyHitShader )) return SHADER_TYPE_RAY_ANY_HIT;
984 if (ReverseCmp(str, ClosestHitShader )) return SHADER_TYPE_RAY_CLOSEST_HIT;
985 if (ReverseCmp(str, IntersectionShader)) return SHADER_TYPE_RAY_INTERSECTION;
986 if (ReverseCmp(str, CallableShader )) return SHADER_TYPE_CALLABLE;
987 // clang-format on
988 return SHADER_TYPE_UNKNOWN;
989 }
990
991929 } // namespace Diligent
2828
2929 #include "DefaultRawMemoryAllocator.hpp"
3030 #include "FixedBlockMemoryAllocator.hpp"
31 #include "LinearAllocator.hpp"
31 #include "FixedLinearAllocator.hpp"
32 #include "DynamicLinearAllocator.hpp"
3233
3334 #include "gtest/gtest.h"
3435
122123 }
123124 }
124125
125 TEST(Common_LinearAllocator, EmptyAllocator)
126 {
127 LinearAllocator Allocator{DefaultRawMemoryAllocator::GetAllocator()};
126 TEST(Common_FixedLinearAllocator, EmptyAllocator)
127 {
128 FixedLinearAllocator Allocator{DefaultRawMemoryAllocator::GetAllocator()};
128129 Allocator.AddSpace(0, 16);
129130 Allocator.Reserve();
130131 EXPECT_EQ(Allocator.GetReservedSize(), size_t{0});
132133 EXPECT_EQ(pNull, nullptr);
133134 }
134135
135 TEST(Common_LinearAllocator, LargeAlignment)
136 {
137 LinearAllocator Allocator{DefaultRawMemoryAllocator::GetAllocator()};
136 TEST(Common_FixedLinearAllocator, LargeAlignment)
137 {
138 FixedLinearAllocator Allocator{DefaultRawMemoryAllocator::GetAllocator()};
138139 Allocator.AddSpace(32, 8192);
139140 Allocator.Reserve();
140141 auto* Ptr = Allocator.Allocate(32, 8192);
141142 EXPECT_EQ(Ptr, Align(Ptr, 8192));
142143 }
143144
144 TEST(Common_LinearAllocator, ObjectConstruction)
145 {
146 LinearAllocator Allocator{DefaultRawMemoryAllocator::GetAllocator()};
145 TEST(Common_FixedLinearAllocator, ObjectConstruction)
146 {
147 FixedLinearAllocator Allocator{DefaultRawMemoryAllocator::GetAllocator()};
147148
148149 struct alignas(1024) TObj1k
149150 {
219220 }
220221 }
221222
223 TEST(Common_DynamicLinearAllocator, ObjectConstruction)
224 {
225 DynamicLinearAllocator Allocator{DefaultRawMemoryAllocator::GetAllocator()};
226
227 EXPECT_TRUE(reinterpret_cast<size_t>(Allocator.Allocate(10, 16)) % 16 == 0);
228 EXPECT_TRUE(reinterpret_cast<size_t>(Allocator.Allocate(200, 64)) % 64 == 0);
229 }
230
222231 } // namespace