Hash: ce53aff0
Author:
Date: 2024-11-05T16:57:57
Vulkan: Add per descriptorSet LRU cache eviction

Before this CL, descriptor set cache eviction happened at the pool level: either the entire pool was deleted or it was not, and the decision was not LRU based. This CL adds per-descriptor-set cache eviction and reuses evicted descriptor sets before allocating a new pool. Because eviction is LRU based, it is more precise.

mCurrentFrameCount is passed into various APIs so that eviction decisions can be made based on the frame number. In this CL, any descriptor set not used in the last 10 frames is evicted and recycled before a new pool is allocated.

Since eviction is now based on individual descriptor sets rather than pools, ProgramExecutableVk no longer needs to track the DescriptorSetPool object; mDescriptorPools has been removed from the ProgramExecutableVk class.

As measured by crrev.com/c/5425496/133, the LRU linked-list maintenance adds no measurable time difference but reduces the total descriptorSet pool count by one third (from 75 down to 48).

Running test name: "TracePerf", backend: "_vulkan", story: "batman_telltale"

Before this CL:
  cacheMissCount: 200, averageTime: 23998 ns
  cacheHitCount: 1075445, averageTime: 626 ns
  descriptorSetEvicted: 0, descriptorSetPoolCount: 75
  Average frame time: 3.9262 ms

After this CL:
  cacheMissCount: 200, averageTime: 23207 ns
  cacheHitCount: 1025415, averageTime: 602 ns
  descriptorSetEvicted: 102708, descriptorSetPoolCount: 48
  Average frame time: 3.9074 ms

BYPASS_LARGE_CHANGE_WARNING

Bug: angleproject:372268711
Change-Id: I84daaf46f4557cbbfdb94c10c5386001105f5046
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/5985112
Reviewed-by: Shahbaz Youssefi <syoussefi@chromium.org>
Reviewed-by: Yuxin Hu <yuxinhu@google.com>
Commit-Queue: Charlie Lao <cclao@google.com>
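The frame-based LRU eviction described above can be illustrated with a small, self-contained sketch. The names below (CachedDescriptorSet, DescriptorSetLRU, kEvictAfterFrames) are hypothetical and do not correspond to the actual ANGLE vk_helpers interfaces; the sketch only shows the "recycle any descriptor set not used in the last 10 frames before allocating a new pool" policy.

// Minimal sketch, assuming a per-frame counter and a doubly-linked LRU list.
// Hypothetical types; not the real ANGLE API.
#include <cstdint>
#include <list>

struct CachedDescriptorSet
{
    uint64_t descriptorSetHandle;  // stands in for VkDescriptorSet
    uint32_t lastUsedFrame;        // frame number of the most recent bind
};

class DescriptorSetLRU
{
  public:
    static constexpr uint32_t kEvictAfterFrames = 10;  // matches the 10-frame window in this CL

    // Move the set to the MRU end and stamp it with the current frame.
    void onDescriptorSetUsed(std::list<CachedDescriptorSet>::iterator it, uint32_t currentFrame)
    {
        it->lastUsedFrame = currentFrame;
        mLRUList.splice(mLRUList.end(), mLRUList, it);
    }

    // Reuse an evicted (stale) descriptor set if one exists; only fall back to
    // allocating from a new pool when nothing is old enough to recycle.
    bool recycleStaleSet(uint32_t currentFrame, CachedDescriptorSet *setOut)
    {
        if (mLRUList.empty())
        {
            return false;
        }
        CachedDescriptorSet &oldest = mLRUList.front();
        if (currentFrame - oldest.lastUsedFrame < kEvictAfterFrames)
        {
            return false;  // everything was used recently; caller allocates a new pool
        }
        *setOut = oldest;
        mLRUList.pop_front();
        return true;
    }

  private:
    // Least recently used at the front, most recently used at the back.
    std::list<CachedDescriptorSet> mLRUList;
};

Because staleness is judged per descriptor set rather than per pool, a pool only grows when every cached set has been touched within the eviction window, which is what drives the pool-count reduction reported above.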
//
// Copyright 2020 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// ProgramExecutableVk.h: Collects the information and interfaces common to both ProgramVks and
// ProgramPipelineVks in order to execute/draw with either.
#ifndef LIBANGLE_RENDERER_VULKAN_PROGRAMEXECUTABLEVK_H_
#define LIBANGLE_RENDERER_VULKAN_PROGRAMEXECUTABLEVK_H_
#include "common/bitset_utils.h"
#include "common/mathutil.h"
#include "common/utilities.h"
#include "libANGLE/Context.h"
#include "libANGLE/InfoLog.h"
#include "libANGLE/ProgramExecutable.h"
#include "libANGLE/renderer/ProgramExecutableImpl.h"
#include "libANGLE/renderer/vulkan/ContextVk.h"
#include "libANGLE/renderer/vulkan/ShaderInterfaceVariableInfoMap.h"
#include "libANGLE/renderer/vulkan/spv_utils.h"
#include "libANGLE/renderer/vulkan/vk_cache_utils.h"
#include "libANGLE/renderer/vulkan/vk_helpers.h"
namespace rx
{
class ShaderInfo final : angle::NonCopyable
{
public:
ShaderInfo();
~ShaderInfo();
angle::Result initShaders(vk::Context *context,
const gl::ShaderBitSet &linkedShaderStages,
const gl::ShaderMap<const angle::spirv::Blob *> &spirvBlobs,
const ShaderInterfaceVariableInfoMap &variableInfoMap,
bool isGLES1);
void initShaderFromProgram(gl::ShaderType shaderType, const ShaderInfo &programShaderInfo);
void clear();
ANGLE_INLINE bool valid() const { return mIsInitialized; }
const gl::ShaderMap<angle::spirv::Blob> &getSpirvBlobs() const { return mSpirvBlobs; }
// Save and load implementation for GLES Program Binary support.
void load(gl::BinaryInputStream *stream);
void save(gl::BinaryOutputStream *stream);
private:
gl::ShaderMap<angle::spirv::Blob> mSpirvBlobs;
bool mIsInitialized = false;
};
union ProgramTransformOptions final
{
struct
{
uint8_t surfaceRotation : 1;
uint8_t removeTransformFeedbackEmulation : 1;
uint8_t multiSampleFramebufferFetch : 1;
uint8_t enableSampleShading : 1;
uint8_t reserved : 4; // must initialize to zero
};
uint8_t permutationIndex;
static constexpr uint32_t kPermutationCount = 0x1 << 4;
};
static_assert(sizeof(ProgramTransformOptions) == 1, "Size check failed");
static_assert(static_cast<int>(SurfaceRotation::EnumCount) <= 8, "Size check failed");
class ProgramInfo final : angle::NonCopyable
{
public:
ProgramInfo();
~ProgramInfo();
angle::Result initProgram(vk::Context *context,
gl::ShaderType shaderType,
bool isLastPreFragmentStage,
bool isTransformFeedbackProgram,
const ShaderInfo &shaderInfo,
ProgramTransformOptions optionBits,
const ShaderInterfaceVariableInfoMap &variableInfoMap);
void release(ContextVk *contextVk);
ANGLE_INLINE bool valid(gl::ShaderType shaderType) const
{
return mProgramHelper.valid(shaderType);
}
vk::ShaderProgramHelper &getShaderProgram() { return mProgramHelper; }
private:
vk::ShaderProgramHelper mProgramHelper;
gl::ShaderMap<vk::RefCounted<vk::ShaderModule>> mShaders;
};
using ImmutableSamplerIndexMap = angle::HashMap<vk::YcbcrConversionDesc, uint32_t>;
class ProgramExecutableVk : public ProgramExecutableImpl
{
public:
ProgramExecutableVk(const gl::ProgramExecutable *executable);
~ProgramExecutableVk() override;
void destroy(const gl::Context *context) override;
void save(ContextVk *contextVk, bool isSeparable, gl::BinaryOutputStream *stream);
angle::Result load(ContextVk *contextVk,
bool isSeparable,
gl::BinaryInputStream *stream,
egl::CacheGetResult *resultOut);
void setUniform1fv(GLint location, GLsizei count, const GLfloat *v) override;
void setUniform2fv(GLint location, GLsizei count, const GLfloat *v) override;
void setUniform3fv(GLint location, GLsizei count, const GLfloat *v) override;
void setUniform4fv(GLint location, GLsizei count, const GLfloat *v) override;
void setUniform1iv(GLint location, GLsizei count, const GLint *v) override;
void setUniform2iv(GLint location, GLsizei count, const GLint *v) override;
void setUniform3iv(GLint location, GLsizei count, const GLint *v) override;
void setUniform4iv(GLint location, GLsizei count, const GLint *v) override;
void setUniform1uiv(GLint location, GLsizei count, const GLuint *v) override;
void setUniform2uiv(GLint location, GLsizei count, const GLuint *v) override;
void setUniform3uiv(GLint location, GLsizei count, const GLuint *v) override;
void setUniform4uiv(GLint location, GLsizei count, const GLuint *v) override;
void setUniformMatrix2fv(GLint location,
GLsizei count,
GLboolean transpose,
const GLfloat *value) override;
void setUniformMatrix3fv(GLint location,
GLsizei count,
GLboolean transpose,
const GLfloat *value) override;
void setUniformMatrix4fv(GLint location,
GLsizei count,
GLboolean transpose,
const GLfloat *value) override;
void setUniformMatrix2x3fv(GLint location,
GLsizei count,
GLboolean transpose,
const GLfloat *value) override;
void setUniformMatrix3x2fv(GLint location,
GLsizei count,
GLboolean transpose,
const GLfloat *value) override;
void setUniformMatrix2x4fv(GLint location,
GLsizei count,
GLboolean transpose,
const GLfloat *value) override;
void setUniformMatrix4x2fv(GLint location,
GLsizei count,
GLboolean transpose,
const GLfloat *value) override;
void setUniformMatrix3x4fv(GLint location,
GLsizei count,
GLboolean transpose,
const GLfloat *value) override;
void setUniformMatrix4x3fv(GLint location,
GLsizei count,
GLboolean transpose,
const GLfloat *value) override;
void getUniformfv(const gl::Context *context, GLint location, GLfloat *params) const override;
void getUniformiv(const gl::Context *context, GLint location, GLint *params) const override;
void getUniformuiv(const gl::Context *context, GLint location, GLuint *params) const override;
void clearVariableInfoMap();
vk::BufferSerial getCurrentDefaultUniformBufferSerial() const
{
return mCurrentDefaultUniformBufferSerial;
}
// Get the graphics pipeline if already created.
angle::Result getGraphicsPipeline(ContextVk *contextVk,
vk::GraphicsPipelineSubset pipelineSubset,
const vk::GraphicsPipelineDesc &desc,
const vk::GraphicsPipelineDesc **descPtrOut,
vk::PipelineHelper **pipelineOut);
angle::Result createGraphicsPipeline(ContextVk *contextVk,
vk::GraphicsPipelineSubset pipelineSubset,
vk::PipelineCacheAccess *pipelineCache,
PipelineSource source,
const vk::GraphicsPipelineDesc &desc,
const vk::GraphicsPipelineDesc **descPtrOut,
vk::PipelineHelper **pipelineOut);
angle::Result linkGraphicsPipelineLibraries(ContextVk *contextVk,
vk::PipelineCacheAccess *pipelineCache,
const vk::GraphicsPipelineDesc &desc,
vk::PipelineHelper *vertexInputPipeline,
vk::PipelineHelper *shadersPipeline,
vk::PipelineHelper *fragmentOutputPipeline,
const vk::GraphicsPipelineDesc **descPtrOut,
vk::PipelineHelper **pipelineOut);
angle::Result getOrCreateComputePipeline(vk::Context *context,
vk::PipelineCacheAccess *pipelineCache,
PipelineSource source,
vk::PipelineRobustness pipelineRobustness,
vk::PipelineProtectedAccess pipelineProtectedAccess,
vk::PipelineHelper **pipelineOut);
const vk::PipelineLayout &getPipelineLayout() const { return mPipelineLayout.get(); }
void resetLayout(ContextVk *contextVk);
angle::Result createPipelineLayout(vk::Context *context,
PipelineLayoutCache *pipelineLayoutCache,
DescriptorSetLayoutCache *descriptorSetLayoutCache,
gl::ActiveTextureArray<TextureVk *> *activeTextures);
angle::Result initializeDescriptorPools(
vk::Context *context,
DescriptorSetLayoutCache *descriptorSetLayoutCache,
vk::DescriptorSetArray<vk::MetaDescriptorPool> *metaDescriptorPools);
angle::Result updateTexturesDescriptorSet(vk::Context *context,
uint32_t currentFrame,
const gl::ActiveTextureArray<TextureVk *> &textures,
const gl::SamplerBindingVector &samplers,
PipelineType pipelineType,
UpdateDescriptorSetsBuilder *updateBuilder);
angle::Result updateShaderResourcesDescriptorSet(
vk::Context *context,
uint32_t currentFrame,
UpdateDescriptorSetsBuilder *updateBuilder,
const vk::WriteDescriptorDescs &writeDescriptorDescs,
const vk::DescriptorSetDescBuilder &shaderResourcesDesc,
vk::SharedDescriptorSetCacheKey *newSharedCacheKeyOut);
angle::Result updateUniformsAndXfbDescriptorSet(
vk::Context *context,
uint32_t currentFrame,
UpdateDescriptorSetsBuilder *updateBuilder,
const vk::WriteDescriptorDescs &writeDescriptorDescs,
vk::BufferHelper *defaultUniformBuffer,
vk::DescriptorSetDescBuilder *uniformsAndXfbDesc,
vk::SharedDescriptorSetCacheKey *sharedCacheKeyOut);
template <typename CommandBufferT>
angle::Result bindDescriptorSets(vk::Context *context,
uint32_t currentFrame,
vk::CommandBufferHelperCommon *commandBufferHelper,
CommandBufferT *commandBuffer,
PipelineType pipelineType);
bool usesDynamicUniformBufferDescriptors() const
{
return mUniformBufferDescriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
}
VkDescriptorType getUniformBufferDescriptorType() const { return mUniformBufferDescriptorType; }
bool usesDynamicShaderStorageBufferDescriptors() const { return false; }
VkDescriptorType getStorageBufferDescriptorType() const
{
return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
}
VkDescriptorType getAtomicCounterBufferDescriptorType() const
{
return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
}
bool usesDynamicAtomicCounterBufferDescriptors() const { return false; }
bool areImmutableSamplersCompatible(
const ImmutableSamplerIndexMap &immutableSamplerIndexMap) const
{
return (mImmutableSamplerIndexMap == immutableSamplerIndexMap);
}
size_t getDefaultUniformAlignedSize(vk::Context *context, gl::ShaderType shaderType) const
{
vk::Renderer *renderer = context->getRenderer();
size_t alignment = static_cast<size_t>(
renderer->getPhysicalDeviceProperties().limits.minUniformBufferOffsetAlignment);
return roundUp(mDefaultUniformBlocks[shaderType]->uniformData.size(), alignment);
}
std::shared_ptr<BufferAndLayout> &getSharedDefaultUniformBlock(gl::ShaderType shaderType)
{
return mDefaultUniformBlocks[shaderType];
}
bool updateAndCheckDirtyUniforms()
{
if (ANGLE_LIKELY(!mExecutable->IsPPO()))
{
return mDefaultUniformBlocksDirty.any();
}
const auto &ppoExecutables = mExecutable->getPPOProgramExecutables();
for (gl::ShaderType shaderType : mExecutable->getLinkedShaderStages())
{
ProgramExecutableVk *executableVk = vk::GetImpl(ppoExecutables[shaderType].get());
if (executableVk->mDefaultUniformBlocksDirty.test(shaderType))
{
mDefaultUniformBlocksDirty.set(shaderType);
// Note: this relies on onProgramBind marking everything as dirty
executableVk->mDefaultUniformBlocksDirty.reset(shaderType);
}
}
return mDefaultUniformBlocksDirty.any();
}
void setAllDefaultUniformsDirty();
angle::Result updateUniforms(vk::Context *context,
uint32_t currentFrame,
UpdateDescriptorSetsBuilder *updateBuilder,
vk::BufferHelper *emptyBuffer,
vk::DynamicBuffer *defaultUniformStorage,
bool isTransformFeedbackActiveUnpaused,
TransformFeedbackVk *transformFeedbackVk);
void onProgramBind();
const ShaderInterfaceVariableInfoMap &getVariableInfoMap() const { return mVariableInfoMap; }
angle::Result warmUpPipelineCache(vk::Renderer *renderer,
vk::PipelineRobustness pipelineRobustness,
vk::PipelineProtectedAccess pipelineProtectedAccess)
{
return getPipelineCacheWarmUpTasks(renderer, pipelineRobustness, pipelineProtectedAccess,
nullptr);
}
angle::Result getPipelineCacheWarmUpTasks(
vk::Renderer *renderer,
vk::PipelineRobustness pipelineRobustness,
vk::PipelineProtectedAccess pipelineProtectedAccess,
std::vector<std::shared_ptr<LinkSubTask>> *postLinkSubTasksOut);
void waitForPostLinkTasks(const gl::Context *context) override
{
ContextVk *contextVk = vk::GetImpl(context);
waitForPostLinkTasksImpl(contextVk);
}
void waitForComputePostLinkTasks(ContextVk *contextVk)
{
ASSERT(mExecutable->hasLinkedShaderStage(gl::ShaderType::Compute));
waitForPostLinkTasksImpl(contextVk);
}
void waitForGraphicsPostLinkTasks(ContextVk *contextVk,
const vk::GraphicsPipelineDesc &currentGraphicsPipelineDesc);
angle::Result mergePipelineCacheToRenderer(vk::Context *context) const;
const vk::WriteDescriptorDescs &getShaderResourceWriteDescriptorDescs() const
{
return mShaderResourceWriteDescriptorDescs;
}
const vk::WriteDescriptorDescs &getDefaultUniformWriteDescriptorDescs(
TransformFeedbackVk *transformFeedbackVk) const
{
return transformFeedbackVk == nullptr ? mDefaultUniformWriteDescriptorDescs
: mDefaultUniformAndXfbWriteDescriptorDescs;
}
const vk::WriteDescriptorDescs &getTextureWriteDescriptorDescs() const
{
return mTextureWriteDescriptorDescs;
}
// The following functions are for internal use of programs, including from a threaded link job:
angle::Result resizeUniformBlockMemory(vk::Context *context,
const gl::ShaderMap<size_t> &requiredBufferSize);
void resolvePrecisionMismatch(const gl::ProgramMergedVaryings &mergedVaryings);
angle::Result initShaders(vk::Context *context,
const gl::ShaderBitSet &linkedShaderStages,
const gl::ShaderMap<const angle::spirv::Blob *> &spirvBlobs,
bool isGLES1)
{
return mOriginalShaderInfo.initShaders(context, linkedShaderStages, spirvBlobs,
mVariableInfoMap, isGLES1);
}
void assignAllSpvLocations(vk::Context *context,
const gl::ProgramState &programState,
const gl::ProgramLinkedResources &resources)
{
SpvSourceOptions options = SpvCreateSourceOptions(
context->getFeatures(), context->getRenderer()->getMaxColorInputAttachmentCount());
SpvAssignAllLocations(options, programState, resources, &mVariableInfoMap);
}
private:
class WarmUpTaskCommon;
class WarmUpComputeTask;
class WarmUpGraphicsTask;
friend class ProgramVk;
friend class ProgramPipelineVk;
void reset(ContextVk *contextVk);
void addInterfaceBlockDescriptorSetDesc(const std::vector<gl::InterfaceBlock> &blocks,
gl::ShaderBitSet shaderTypes,
VkDescriptorType descType,
vk::DescriptorSetLayoutDesc *descOut);
void addAtomicCounterBufferDescriptorSetDesc(
const std::vector<gl::AtomicCounterBuffer> &atomicCounterBuffers,
vk::DescriptorSetLayoutDesc *descOut);
void addImageDescriptorSetDesc(vk::DescriptorSetLayoutDesc *descOut);
void addInputAttachmentDescriptorSetDesc(vk::Context *context,
vk::DescriptorSetLayoutDesc *descOut);
angle::Result addTextureDescriptorSetDesc(
vk::Context *context,
const gl::ActiveTextureArray<TextureVk *> *activeTextures,
vk::DescriptorSetLayoutDesc *descOut);
size_t calcUniformUpdateRequiredSpace(vk::Context *context,
gl::ShaderMap<VkDeviceSize> *uniformOffsets) const;
ANGLE_INLINE angle::Result initProgram(vk::Context *context,
gl::ShaderType shaderType,
bool isLastPreFragmentStage,
bool isTransformFeedbackProgram,
ProgramTransformOptions optionBits,
ProgramInfo *programInfo,
const ShaderInterfaceVariableInfoMap &variableInfoMap)
{
ASSERT(mOriginalShaderInfo.valid());
// Create the program pipeline. This is done lazily and once per combination of
// specialization constants.
if (!programInfo->valid(shaderType))
{
ANGLE_TRY(programInfo->initProgram(context, shaderType, isLastPreFragmentStage,
isTransformFeedbackProgram, mOriginalShaderInfo,
optionBits, variableInfoMap));
}
ASSERT(programInfo->valid(shaderType));
return angle::Result::Continue;
}
ANGLE_INLINE angle::Result initGraphicsShaderProgram(
vk::Context *context,
gl::ShaderType shaderType,
bool isLastPreFragmentStage,
bool isTransformFeedbackProgram,
ProgramTransformOptions optionBits,
ProgramInfo *programInfo,
const ShaderInterfaceVariableInfoMap &variableInfoMap)
{
mValidGraphicsPermutations.set(optionBits.permutationIndex);
return initProgram(context, shaderType, isLastPreFragmentStage, isTransformFeedbackProgram,
optionBits, programInfo, variableInfoMap);
}
ANGLE_INLINE angle::Result initComputeProgram(
vk::Context *context,
ProgramInfo *programInfo,
const ShaderInterfaceVariableInfoMap &variableInfoMap,
const vk::ComputePipelineOptions &pipelineOptions)
{
mValidComputePermutations.set(pipelineOptions.permutationIndex);
ProgramTransformOptions optionBits = {};
return initProgram(context, gl::ShaderType::Compute, false, false, optionBits, programInfo,
variableInfoMap);
}
ProgramTransformOptions getTransformOptions(ContextVk *contextVk,
const vk::GraphicsPipelineDesc &desc);
angle::Result initGraphicsShaderPrograms(vk::Context *context,
ProgramTransformOptions transformOptions);
angle::Result initProgramThenCreateGraphicsPipeline(vk::Context *context,
ProgramTransformOptions transformOptions,
vk::GraphicsPipelineSubset pipelineSubset,
vk::PipelineCacheAccess *pipelineCache,
PipelineSource source,
const vk::GraphicsPipelineDesc &desc,
const vk::RenderPass &compatibleRenderPass,
const vk::GraphicsPipelineDesc **descPtrOut,
vk::PipelineHelper **pipelineOut);
angle::Result createGraphicsPipelineImpl(vk::Context *context,
ProgramTransformOptions transformOptions,
vk::GraphicsPipelineSubset pipelineSubset,
vk::PipelineCacheAccess *pipelineCache,
PipelineSource source,
const vk::GraphicsPipelineDesc &desc,
const vk::RenderPass &compatibleRenderPass,
const vk::GraphicsPipelineDesc **descPtrOut,
vk::PipelineHelper **pipelineOut);
angle::Result prepareForWarmUpPipelineCache(
vk::Context *context,
vk::PipelineRobustness pipelineRobustness,
vk::PipelineProtectedAccess pipelineProtectedAccess,
vk::GraphicsPipelineSubset subset,
bool *isComputeOut,
angle::FixedVector<bool, 2> *surfaceRotationVariationsOut,
vk::GraphicsPipelineDesc **graphicsPipelineDescOut,
vk::RenderPass *renderPassOut);
angle::Result warmUpComputePipelineCache(vk::Context *context,
vk::PipelineRobustness pipelineRobustness,
vk::PipelineProtectedAccess pipelineProtectedAccess);
angle::Result warmUpGraphicsPipelineCache(vk::Context *context,
vk::PipelineRobustness pipelineRobustness,
vk::PipelineProtectedAccess pipelineProtectedAccess,
vk::GraphicsPipelineSubset subset,
const bool isSurfaceRotated,
const vk::GraphicsPipelineDesc &graphicsPipelineDesc,
const vk::RenderPass &renderPass,
vk::PipelineHelper *placeholderPipelineHelper);
void waitForPostLinkTasksImpl(ContextVk *contextVk);
angle::Result getOrAllocateDescriptorSet(vk::Context *context,
uint32_t currentFrame,
UpdateDescriptorSetsBuilder *updateBuilder,
const vk::DescriptorSetDescBuilder &descriptorSetDesc,
const vk::WriteDescriptorDescs &writeDescriptorDescs,
DescriptorSetIndex setIndex,
vk::SharedDescriptorSetCacheKey *newSharedCacheKeyOut);
// When loading from cache / binary, initialize the pipeline cache with given data. Otherwise
// the cache is lazily created as needed.
angle::Result initializePipelineCache(vk::Context *context,
bool compressed,
const std::vector<uint8_t> &pipelineData);
angle::Result ensurePipelineCacheInitialized(vk::Context *context);
void initializeWriteDescriptorDesc(vk::Context *context);
// Descriptor sets and pools for shader resources for this program.
vk::DescriptorSetArray<vk::DescriptorSetPointer> mDescriptorSets;
vk::DescriptorSetArray<vk::DynamicDescriptorPoolPointer> mDynamicDescriptorPools;
vk::BufferSerial mCurrentDefaultUniformBufferSerial;
// We keep a reference to the pipeline and descriptor set layouts. This ensures they don't get
// deleted while this program is in use.
uint32_t mImmutableSamplersMaxDescriptorCount;
ImmutableSamplerIndexMap mImmutableSamplerIndexMap;
vk::AtomicBindingPointer<vk::PipelineLayout> mPipelineLayout;
vk::DescriptorSetLayoutPointerArray mDescriptorSetLayouts;
// A set of dynamic offsets used with vkCmdBindDescriptorSets for the default uniform buffers.
VkDescriptorType mUniformBufferDescriptorType;
gl::ShaderVector<uint32_t> mDynamicUniformDescriptorOffsets;
std::vector<uint32_t> mDynamicShaderResourceDescriptorOffsets;
ShaderInterfaceVariableInfoMap mVariableInfoMap;
static_assert((ProgramTransformOptions::kPermutationCount == 16),
"ProgramTransformOptions::kPermutationCount must be 16.");
angle::BitSet16<ProgramTransformOptions::kPermutationCount> mValidGraphicsPermutations;
static_assert((vk::ComputePipelineOptions::kPermutationCount == 4),
"ComputePipelineOptions::kPermutationCount must be 4.");
angle::BitSet8<vk::ComputePipelineOptions::kPermutationCount> mValidComputePermutations;
// We store all permutations of surface rotation and transformed SPIR-V programs here. We may
// need some LRU algorithm to free least used programs to reduce the number of programs.
ProgramInfo mGraphicsProgramInfos[ProgramTransformOptions::kPermutationCount];
ProgramInfo mComputeProgramInfo;
// Pipeline caches. The pipelines are tightly coupled with the shaders they are created for, so
// they live in the program executable. With VK_EXT_graphics_pipeline_library, the pipeline is
// divided in subsets; the "shaders" subset is created based on the shaders, so its cache lives
// in the program executable. The "vertex input" and "fragment output" pipelines are
// independent, and live in the context.
CompleteGraphicsPipelineCache
mCompleteGraphicsPipelines[ProgramTransformOptions::kPermutationCount];
ShadersGraphicsPipelineCache
mShadersGraphicsPipelines[ProgramTransformOptions::kPermutationCount];
vk::ComputePipelineCache mComputePipelines;
DefaultUniformBlockMap mDefaultUniformBlocks;
gl::ShaderBitSet mDefaultUniformBlocksDirty;
ShaderInfo mOriginalShaderInfo;
// The pipeline cache specific to this program executable. Currently:
//
// - This is used during warm up (at link time)
// - The contents are merged to Renderer's pipeline cache immediately after warm up
// - The contents are returned as part of program binary
// - Draw-time pipeline creation uses Renderer's cache
//
// Without VK_EXT_graphics_pipeline_library, this cache is not used for draw-time pipeline
// creations to allow reuse of other blobs that are independent of the actual shaders; vertex
// input fetch, fragment output and blend.
//
// With VK_EXT_graphics_pipeline_library, this cache is used for the "shaders" subset of the
// pipeline.
vk::PipelineCache mPipelineCache;
vk::GraphicsPipelineDesc mWarmUpGraphicsPipelineDesc;
// The "layout" information for descriptorSets
vk::WriteDescriptorDescs mShaderResourceWriteDescriptorDescs;
vk::WriteDescriptorDescs mTextureWriteDescriptorDescs;
vk::WriteDescriptorDescs mDefaultUniformWriteDescriptorDescs;
vk::WriteDescriptorDescs mDefaultUniformAndXfbWriteDescriptorDescs;
vk::DescriptorSetLayoutDesc mShaderResourceSetDesc;
vk::DescriptorSetLayoutDesc mTextureSetDesc;
vk::DescriptorSetLayoutDesc mDefaultUniformAndXfbSetDesc;
};
} // namespace rx
#endif // LIBANGLE_RENDERER_VULKAN_PROGRAMEXECUTABLEVK_H_