vk_staging_buffer_pool: Inline tick tests

Load the current tick into a local variable, moving it out of an atomic
and allowing us to compare the value without going through a pointer
each time. This should make the loop easier to optimize.
This commit is contained in:
ReinUsesLisp 2021-02-13 05:13:29 -03:00
parent 682d82faf3
commit 6171566296
2 changed files with 7 additions and 1 deletion

View File

@ -24,6 +24,11 @@ public:
return current_tick; return current_tick;
} }
/// Returns the last known GPU tick.
/// NOTE(review): the commit message says this moves the value "out of an atomic",
/// so gpu_tick is presumably a std::atomic<u64> member and this performs a single
/// load, letting callers compare the snapshot repeatedly without re-loading —
/// confirm against the member declaration (not visible in this hunk).
[[nodiscard]] u64 KnownGpuTick() const noexcept {
return gpu_tick;
}
/// Returns the timeline semaphore handle. /// Returns the timeline semaphore handle.
[[nodiscard]] VkSemaphore Handle() const noexcept { [[nodiscard]] VkSemaphore Handle() const noexcept {
return *semaphore; return *semaphore;

View File

@ -175,8 +175,9 @@ StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
} }
bool StagingBufferPool::AreRegionsActive(size_t region_begin, size_t region_end) const { bool StagingBufferPool::AreRegionsActive(size_t region_begin, size_t region_end) const {
const u64 gpu_tick = scheduler.GetMasterSemaphore().KnownGpuTick();
return std::any_of(sync_ticks.begin() + region_begin, sync_ticks.begin() + region_end, return std::any_of(sync_ticks.begin() + region_begin, sync_ticks.begin() + region_end,
[this](u64 sync_tick) { return !scheduler.IsFree(sync_tick); }); [gpu_tick](u64 sync_tick) { return gpu_tick < sync_tick; });
}; };
StagingBufferRef StagingBufferPool::GetStagingBuffer(size_t size, MemoryUsage usage) { StagingBufferRef StagingBufferPool::GetStagingBuffer(size_t size, MemoryUsage usage) {