From 7fcd0fee6d580a381fdc4a4ec5b77687e4857b5b Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Sun, 5 Apr 2020 17:23:49 -0400
Subject: [PATCH] Buffer Cache: Use vAddr instead of physical memory.

---
 src/core/memory.cpp                              | 115 ++++++++++++++
 src/core/memory.h                                |  10 ++
 src/video_core/buffer_cache/buffer_block.h       |  42 +++--
 src/video_core/buffer_cache/buffer_cache.h       | 144 +++++++++++-------
 src/video_core/buffer_cache/map_interval.h       |  12 +-
 src/video_core/memory_manager.h                  |   5 +
 .../renderer_opengl/gl_buffer_cache.cpp          |   8 +-
 .../renderer_opengl/gl_buffer_cache.h            |   4 +-
 .../renderer_opengl/gl_rasterizer.cpp            |   4 +-
 .../renderer_vulkan/vk_buffer_cache.cpp          |   8 +-
 .../renderer_vulkan/vk_buffer_cache.h            |   4 +-
 .../renderer_vulkan/vk_rasterizer.cpp            |   4 +-
 12 files changed, 254 insertions(+), 106 deletions(-)

diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 9ceb7fabcd..6061d37aeb 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -257,10 +257,59 @@ struct Memory::Impl {
         }
     }
 
+    void ReadBlockUnsafe(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer,
+                         const std::size_t size) {
+        const auto& page_table = process.VMManager().page_table;
+
+        std::size_t remaining_size = size;
+        std::size_t page_index = src_addr >> PAGE_BITS;
+        std::size_t page_offset = src_addr & PAGE_MASK;
+
+        while (remaining_size > 0) {
+            const std::size_t copy_amount =
+                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
+            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+
+            switch (page_table.attributes[page_index]) {
+            case Common::PageType::Unmapped: {
+                LOG_ERROR(HW_Memory,
+                          "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
+                          current_vaddr, src_addr, size);
+                std::memset(dest_buffer, 0, copy_amount);
+                break;
+            }
+            case Common::PageType::Memory: {
+                DEBUG_ASSERT(page_table.pointers[page_index]);
+
+                const u8* const src_ptr =
+                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+                std::memcpy(dest_buffer, src_ptr, copy_amount);
+                break;
+            }
+            case Common::PageType::RasterizerCachedMemory: {
+                const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
+                std::memcpy(dest_buffer, host_ptr, copy_amount);
+                break;
+            }
+            default:
+                UNREACHABLE();
+            }
+
+            page_index++;
+            page_offset = 0;
+            dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+            remaining_size -= copy_amount;
+        }
+    }
+
     void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
         ReadBlock(*system.CurrentProcess(), src_addr, dest_buffer, size);
     }
 
+    void ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+        ReadBlockUnsafe(*system.CurrentProcess(), src_addr, dest_buffer, size);
+    }
+
     void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer,
                     const std::size_t size) {
         const auto& page_table = process.VMManager().page_table;
@@ -305,10 +354,57 @@ struct Memory::Impl {
         }
     }
 
+    void WriteBlockUnsafe(const Kernel::Process& process, const VAddr dest_addr,
+                          const void* src_buffer, const std::size_t size) {
+        const auto& page_table = process.VMManager().page_table;
+        std::size_t remaining_size = size;
+        std::size_t page_index = dest_addr >> PAGE_BITS;
+        std::size_t page_offset = dest_addr & PAGE_MASK;
+
+        while (remaining_size > 0) {
+            const std::size_t copy_amount =
+                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
+            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+
+            switch (page_table.attributes[page_index]) {
+            case Common::PageType::Unmapped: {
+                LOG_ERROR(HW_Memory,
+                          "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
+                          current_vaddr, dest_addr, size);
+                break;
+            }
+            case Common::PageType::Memory: {
+                DEBUG_ASSERT(page_table.pointers[page_index]);
+
+                u8* const dest_ptr =
+                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+                std::memcpy(dest_ptr, src_buffer, copy_amount);
+                break;
+            }
+            case Common::PageType::RasterizerCachedMemory: {
+                u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
+                std::memcpy(host_ptr, src_buffer, copy_amount);
+                break;
+            }
+            default:
+                UNREACHABLE();
+            }
+
+            page_index++;
+            page_offset = 0;
+            src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
+            remaining_size -= copy_amount;
+        }
+    }
+
     void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
         WriteBlock(*system.CurrentProcess(), dest_addr, src_buffer, size);
     }
 
+    void WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
+        WriteBlockUnsafe(*system.CurrentProcess(), dest_addr, src_buffer, size);
+    }
+
     void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) {
         const auto& page_table = process.VMManager().page_table;
         std::size_t remaining_size = size;
@@ -696,6 +792,15 @@ void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
     impl->ReadBlock(src_addr, dest_buffer, size);
 }
 
+void Memory::ReadBlockUnsafe(const Kernel::Process& process, const VAddr src_addr,
+                             void* dest_buffer, const std::size_t size) {
+    impl->ReadBlockUnsafe(process, src_addr, dest_buffer, size);
+}
+
+void Memory::ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+    impl->ReadBlockUnsafe(src_addr, dest_buffer, size);
+}
+
 void Memory::WriteBlock(const Kernel::Process& process, VAddr dest_addr, const void* src_buffer,
                         std::size_t size) {
     impl->WriteBlock(process, dest_addr, src_buffer, size);
@@ -705,6 +810,16 @@ void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
     impl->WriteBlock(dest_addr, src_buffer, size);
 }
 
+void Memory::WriteBlockUnsafe(const Kernel::Process& process, VAddr dest_addr,
+                              const void* src_buffer, std::size_t size) {
+    impl->WriteBlockUnsafe(process, dest_addr, src_buffer, size);
+}
+
+void Memory::WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer,
+                              const std::size_t size) {
+    impl->WriteBlockUnsafe(dest_addr, src_buffer, size);
+}
+
 void Memory::ZeroBlock(const Kernel::Process& process, VAddr dest_addr, std::size_t size) {
     impl->ZeroBlock(process, dest_addr, size);
 }
diff --git a/src/core/memory.h b/src/core/memory.h
index 8913a9da42..97750f8512 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -294,6 +294,9 @@ public:
     void ReadBlock(const Kernel::Process& process, VAddr src_addr, void* dest_buffer,
                    std::size_t size);
 
+    void ReadBlockUnsafe(const Kernel::Process& process, VAddr src_addr, void* dest_buffer,
+                         std::size_t size);
+
     /**
      * Reads a contiguous block of bytes from the current process' address space.
      *
@@ -312,6 +315,8 @@ public:
      */
     void ReadBlock(VAddr src_addr, void* dest_buffer, std::size_t size);
 
+    void ReadBlockUnsafe(VAddr src_addr, void* dest_buffer, std::size_t size);
+
     /**
      * Writes a range of bytes into a given process' address space at the specified
      * virtual address.
@@ -335,6 +340,9 @@ public:
     void WriteBlock(const Kernel::Process& process, VAddr dest_addr, const void* src_buffer,
                     std::size_t size);
 
+    void WriteBlockUnsafe(const Kernel::Process& process, VAddr dest_addr, const void* src_buffer,
+                          std::size_t size);
+
     /**
      * Writes a range of bytes into the current process' address space at the specified
      * virtual address.
@@ -356,6 +364,8 @@ public:
      */
     void WriteBlock(VAddr dest_addr, const void* src_buffer, std::size_t size);
 
+    void WriteBlockUnsafe(VAddr dest_addr, const void* src_buffer, std::size_t size);
+
     /**
      * Fills the specified address range within a process' address space with zeroes.
     *
diff --git a/src/video_core/buffer_cache/buffer_block.h b/src/video_core/buffer_cache/buffer_block.h
index 4b91931821..e35ee0b673 100644
--- a/src/video_core/buffer_cache/buffer_block.h
+++ b/src/video_core/buffer_cache/buffer_block.h
@@ -15,37 +15,29 @@ namespace VideoCommon {
 
 class BufferBlock {
 public:
-    bool Overlaps(const CacheAddr start, const CacheAddr end) const {
-        return (cache_addr < end) && (cache_addr_end > start);
+    bool Overlaps(const VAddr start, const VAddr end) const {
+        return (cpu_addr < end) && (cpu_addr_end > start);
     }
 
-    bool IsInside(const CacheAddr other_start, const CacheAddr other_end) const {
-        return cache_addr <= other_start && other_end <= cache_addr_end;
+    bool IsInside(const VAddr other_start, const VAddr other_end) const {
+        return cpu_addr <= other_start && other_end <= cpu_addr_end;
     }
 
-    u8* GetWritableHostPtr() const {
-        return FromCacheAddr(cache_addr);
+    std::size_t GetOffset(const VAddr in_addr) {
+        return static_cast<std::size_t>(in_addr - cpu_addr);
     }
 
-    u8* GetWritableHostPtr(std::size_t offset) const {
-        return FromCacheAddr(cache_addr + offset);
+    VAddr GetCpuAddr() const {
+        return cpu_addr;
     }
 
-    std::size_t GetOffset(const CacheAddr in_addr) {
-        return static_cast<std::size_t>(in_addr - cache_addr);
+    VAddr GetCpuAddrEnd() const {
+        return cpu_addr_end;
     }
 
-    CacheAddr GetCacheAddr() const {
-        return cache_addr;
-    }
-
-    CacheAddr GetCacheAddrEnd() const {
-        return cache_addr_end;
-    }
-
-    void SetCacheAddr(const CacheAddr new_addr) {
-        cache_addr = new_addr;
-        cache_addr_end = new_addr + size;
+    void SetCpuAddr(const VAddr new_addr) {
+        cpu_addr = new_addr;
+        cpu_addr_end = new_addr + size;
     }
 
     std::size_t GetSize() const {
@@ -61,14 +53,14 @@ public:
     }
 
 protected:
-    explicit BufferBlock(CacheAddr cache_addr, const std::size_t size) : size{size} {
-        SetCacheAddr(cache_addr);
+    explicit BufferBlock(VAddr cpu_addr, const std::size_t size) : size{size} {
+        SetCpuAddr(cpu_addr);
     }
 
     ~BufferBlock() = default;
 
 private:
-    CacheAddr cache_addr{};
-    CacheAddr cache_addr_end{};
+    VAddr cpu_addr{};
+    VAddr cpu_addr_end{};
     std::size_t size{};
     u64 epoch{};
 };
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 186aca61d3..262d0fc6e3 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -37,28 +37,45 @@ public:
                                      bool is_written = false, bool use_fast_cbuf = false) {
         std::lock_guard lock{mutex};
 
-        auto& memory_manager = system.GPU().MemoryManager();
-        const auto host_ptr = memory_manager.GetPointer(gpu_addr);
-        if (!host_ptr) {
+        const std::optional<VAddr> cpu_addr_opt =
+            system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+
+        if (!cpu_addr_opt) {
             return {GetEmptyBuffer(size), 0};
         }
-        const auto cache_addr = ToCacheAddr(host_ptr);
+
+        VAddr cpu_addr = *cpu_addr_opt;
 
         // Cache management is a big overhead, so only cache entries with a given size.
         // TODO: Figure out which size is the best for given games.
         constexpr std::size_t max_stream_size = 0x800;
         if (use_fast_cbuf || size < max_stream_size) {
-            if (!is_written && !IsRegionWritten(cache_addr, cache_addr + size - 1)) {
+            if (!is_written && !IsRegionWritten(cpu_addr, cpu_addr + size - 1)) {
+                auto& memory_manager = system.GPU().MemoryManager();
                 if (use_fast_cbuf) {
-                    return ConstBufferUpload(host_ptr, size);
+                    if (Tegra::MemoryManager::IsGranularRange(gpu_addr, size)) {
+                        const auto host_ptr = memory_manager.GetPointer(gpu_addr);
+                        return ConstBufferUpload(host_ptr, size);
+                    } else {
+                        staging_buffer.resize(size);
+                        memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
+                        return ConstBufferUpload(staging_buffer.data(), size);
+                    }
                 } else {
-                    return StreamBufferUpload(host_ptr, size, alignment);
+                    if (Tegra::MemoryManager::IsGranularRange(gpu_addr, size)) {
+                        const auto host_ptr = memory_manager.GetPointer(gpu_addr);
+                        return StreamBufferUpload(host_ptr, size, alignment);
+                    } else {
+                        staging_buffer.resize(size);
+                        memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
+                        return StreamBufferUpload(staging_buffer.data(), size, alignment);
+                    }
                 }
             }
         }
 
-        auto block = GetBlock(cache_addr, size);
-        auto map = MapAddress(block, gpu_addr, cache_addr, size);
+        auto block = GetBlock(cpu_addr, size);
+        auto map = MapAddress(block, gpu_addr, cpu_addr, size);
         if (is_written) {
             map->MarkAsModified(true, GetModifiedTicks());
             if (!map->IsWritten()) {
@@ -71,7 +88,7 @@ public:
             }
         }
 
-        const u64 offset = static_cast<u64>(block->GetOffset(cache_addr));
+        const u64 offset = static_cast<u64>(block->GetOffset(cpu_addr));
 
         return {ToHandle(block), offset};
     }
@@ -112,7 +129,7 @@ public:
     }
 
     /// Write any cached resources overlapping the specified region back to memory
-    void FlushRegion(CacheAddr addr, std::size_t size) {
+    void FlushRegion(VAddr addr, std::size_t size) {
         std::lock_guard lock{mutex};
 
         std::vector<MapInterval> objects = GetMapsInRange(addr, size);
@@ -127,7 +144,7 @@ public:
     }
 
     /// Mark the specified region as being invalidated
-    void InvalidateRegion(CacheAddr addr, u64 size) {
+    void InvalidateRegion(VAddr addr, u64 size) {
         std::lock_guard lock{mutex};
 
         std::vector<MapInterval> objects = GetMapsInRange(addr, size);
@@ -152,7 +169,7 @@ protected:
 
     virtual void WriteBarrier() = 0;
 
-    virtual TBuffer CreateBlock(CacheAddr cache_addr, std::size_t size) = 0;
+    virtual TBuffer CreateBlock(VAddr cpu_addr, std::size_t size) = 0;
 
     virtual void UploadBlockData(const TBuffer& buffer, std::size_t offset, std::size_t size,
                                  const u8* data) = 0;
@@ -169,20 +186,17 @@ protected:
 
     /// Register an object into the cache
    void Register(const MapInterval& new_map, bool inherit_written = false) {
-        const CacheAddr cache_ptr = new_map->GetStart();
-        const std::optional<VAddr> cpu_addr =
-            system.GPU().MemoryManager().GpuToCpuAddress(new_map->GetGpuAddress());
-        if (!cache_ptr || !cpu_addr) {
+        const VAddr cpu_addr = new_map->GetStart();
+        if (!cpu_addr) {
             LOG_CRITICAL(HW_GPU, "Failed to register buffer with unmapped gpu_address 0x{:016x}",
                          new_map->GetGpuAddress());
             return;
         }
         const std::size_t size = new_map->GetEnd() - new_map->GetStart();
-        new_map->SetCpuAddress(*cpu_addr);
         new_map->MarkAsRegistered(true);
         const IntervalType interval{new_map->GetStart(), new_map->GetEnd()};
         mapped_addresses.insert({interval, new_map});
-        rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1);
+        rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1);
         if (inherit_written) {
             MarkRegionAsWritten(new_map->GetStart(), new_map->GetEnd() - 1);
             new_map->MarkAsWritten(true);
@@ -192,7 +206,7 @@ protected:
     /// Unregisters an object from the cache
     void Unregister(MapInterval& map) {
         const std::size_t size = map->GetEnd() - map->GetStart();
-        rasterizer.UpdatePagesCachedCount(map->GetCpuAddress(), size, -1);
+        rasterizer.UpdatePagesCachedCount(map->GetStart(), size, -1);
         map->MarkAsRegistered(false);
         if (map->IsWritten()) {
             UnmarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
@@ -202,32 +216,39 @@ protected:
     }
 
 private:
-    MapInterval CreateMap(const CacheAddr start, const CacheAddr end, const GPUVAddr gpu_addr) {
+    MapInterval CreateMap(const VAddr start, const VAddr end, const GPUVAddr gpu_addr) {
         return std::make_shared<MapIntervalBase>(start, end, gpu_addr);
     }
 
-    MapInterval MapAddress(const TBuffer& block, const GPUVAddr gpu_addr,
-                           const CacheAddr cache_addr, const std::size_t size) {
+    MapInterval MapAddress(const TBuffer& block, const GPUVAddr gpu_addr, const VAddr cpu_addr,
+                           const std::size_t size) {
 
-        std::vector<MapInterval> overlaps = GetMapsInRange(cache_addr, size);
+        std::vector<MapInterval> overlaps = GetMapsInRange(cpu_addr, size);
         if (overlaps.empty()) {
-            const CacheAddr cache_addr_end = cache_addr + size;
-            MapInterval new_map = CreateMap(cache_addr, cache_addr_end, gpu_addr);
-            u8* host_ptr = FromCacheAddr(cache_addr);
-            UploadBlockData(block, block->GetOffset(cache_addr), size, host_ptr);
+            auto& memory_manager = system.GPU().MemoryManager();
+            const VAddr cpu_addr_end = cpu_addr + size;
+            MapInterval new_map = CreateMap(cpu_addr, cpu_addr_end, gpu_addr);
+            if (Tegra::MemoryManager::IsGranularRange(gpu_addr, size)) {
+                u8* host_ptr = memory_manager.GetPointer(gpu_addr);
+                UploadBlockData(block, block->GetOffset(cpu_addr), size, host_ptr);
+            } else {
+                staging_buffer.resize(size);
+                memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
+                UploadBlockData(block, block->GetOffset(cpu_addr), size, staging_buffer.data());
+            }
             Register(new_map);
             return new_map;
         }
 
-        const CacheAddr cache_addr_end = cache_addr + size;
+        const VAddr cpu_addr_end = cpu_addr + size;
         if (overlaps.size() == 1) {
             MapInterval& current_map = overlaps[0];
-            if (current_map->IsInside(cache_addr, cache_addr_end)) {
+            if (current_map->IsInside(cpu_addr, cpu_addr_end)) {
                 return current_map;
             }
         }
-        CacheAddr new_start = cache_addr;
-        CacheAddr new_end = cache_addr_end;
+        VAddr new_start = cpu_addr;
+        VAddr new_end = cpu_addr_end;
         bool write_inheritance = false;
         bool modified_inheritance = false;
         // Calculate new buffer parameters
@@ -237,7 +258,7 @@ private:
             write_inheritance |= overlap->IsWritten();
             modified_inheritance |= overlap->IsModified();
         }
-        GPUVAddr new_gpu_addr = gpu_addr + new_start - cache_addr;
+        GPUVAddr new_gpu_addr = gpu_addr + new_start - cpu_addr;
         for (auto& overlap : overlaps) {
             Unregister(overlap);
         }
@@ -250,7 +271,7 @@ private:
         return new_map;
     }
 
-    void UpdateBlock(const TBuffer& block, CacheAddr start, CacheAddr end,
+    void UpdateBlock(const TBuffer& block, VAddr start, VAddr end,
                      std::vector<MapInterval>& overlaps) {
         const IntervalType base_interval{start, end};
         IntervalSet interval_set{};
@@ -262,13 +283,15 @@ private:
         for (auto& interval : interval_set) {
             std::size_t size = interval.upper() - interval.lower();
            if (size > 0) {
-                u8* host_ptr = FromCacheAddr(interval.lower());
-                UploadBlockData(block, block->GetOffset(interval.lower()), size, host_ptr);
+                staging_buffer.resize(size);
+                system.Memory().ReadBlockUnsafe(interval.lower(), staging_buffer.data(), size);
+                UploadBlockData(block, block->GetOffset(interval.lower()), size,
+                                staging_buffer.data());
             }
         }
     }
 
-    std::vector<MapInterval> GetMapsInRange(CacheAddr addr, std::size_t size) {
+    std::vector<MapInterval> GetMapsInRange(VAddr addr, std::size_t size) {
         if (size == 0) {
             return {};
         }
@@ -290,8 +313,9 @@ private:
     void FlushMap(MapInterval map) {
         std::size_t size = map->GetEnd() - map->GetStart();
         TBuffer block = blocks[map->GetStart() >> block_page_bits];
-        u8* host_ptr = FromCacheAddr(map->GetStart());
-        DownloadBlockData(block, block->GetOffset(map->GetStart()), size, host_ptr);
+        staging_buffer.resize(size);
+        DownloadBlockData(block, block->GetOffset(map->GetStart()), size, staging_buffer.data());
+        system.Memory().WriteBlockUnsafe(map->GetStart(), staging_buffer.data(), size);
         map->MarkAsModified(false, 0);
     }
 
@@ -316,14 +340,14 @@ private:
     TBuffer EnlargeBlock(TBuffer buffer) {
         const std::size_t old_size = buffer->GetSize();
         const std::size_t new_size = old_size + block_page_size;
-        const CacheAddr cache_addr = buffer->GetCacheAddr();
-        TBuffer new_buffer = CreateBlock(cache_addr, new_size);
+        const VAddr cpu_addr = buffer->GetCpuAddr();
+        TBuffer new_buffer = CreateBlock(cpu_addr, new_size);
         CopyBlock(buffer, new_buffer, 0, 0, old_size);
         buffer->SetEpoch(epoch);
         pending_destruction.push_back(buffer);
-        const CacheAddr cache_addr_end = cache_addr + new_size - 1;
-        u64 page_start = cache_addr >> block_page_bits;
-        const u64 page_end = cache_addr_end >> block_page_bits;
+        const VAddr cpu_addr_end = cpu_addr + new_size - 1;
+        u64 page_start = cpu_addr >> block_page_bits;
+        const u64 page_end = cpu_addr_end >> block_page_bits;
         while (page_start <= page_end) {
             blocks[page_start] = new_buffer;
             ++page_start;
@@ -334,9 +358,9 @@ private:
     TBuffer MergeBlocks(TBuffer first, TBuffer second) {
         const std::size_t size_1 = first->GetSize();
         const std::size_t size_2 = second->GetSize();
-        const CacheAddr first_addr = first->GetCacheAddr();
-        const CacheAddr second_addr = second->GetCacheAddr();
-        const CacheAddr new_addr = std::min(first_addr, second_addr);
+        const VAddr first_addr = first->GetCpuAddr();
+        const VAddr second_addr = second->GetCpuAddr();
+        const VAddr new_addr = std::min(first_addr, second_addr);
         const std::size_t new_size = size_1 + size_2;
         TBuffer new_buffer = CreateBlock(new_addr, new_size);
         CopyBlock(first, new_buffer, 0, new_buffer->GetOffset(first_addr), size_1);
@@ -345,9 +369,9 @@ private:
         second->SetEpoch(epoch);
         pending_destruction.push_back(first);
         pending_destruction.push_back(second);
-        const CacheAddr cache_addr_end = new_addr + new_size - 1;
+        const VAddr cpu_addr_end = new_addr + new_size - 1;
         u64 page_start = new_addr >> block_page_bits;
-        const u64 page_end = cache_addr_end >> block_page_bits;
+        const u64 page_end = cpu_addr_end >> block_page_bits;
         while (page_start <= page_end) {
             blocks[page_start] = new_buffer;
             ++page_start;
@@ -355,18 +379,18 @@ private:
         return new_buffer;
     }
 
-    TBuffer GetBlock(const CacheAddr cache_addr, const std::size_t size) {
+    TBuffer GetBlock(const VAddr cpu_addr, const std::size_t size) {
         TBuffer found{};
-        const CacheAddr cache_addr_end = cache_addr + size - 1;
-        u64 page_start = cache_addr >> block_page_bits;
-        const u64 page_end = cache_addr_end >> block_page_bits;
+        const VAddr cpu_addr_end = cpu_addr + size - 1;
+        u64 page_start = cpu_addr >> block_page_bits;
+        const u64 page_end = cpu_addr_end >> block_page_bits;
         while (page_start <= page_end) {
             auto it = blocks.find(page_start);
             if (it == blocks.end()) {
                 if (found) {
                     found = EnlargeBlock(found);
                 } else {
-                    const CacheAddr start_addr = (page_start << block_page_bits);
+                    const VAddr start_addr = (page_start << block_page_bits);
                     found = CreateBlock(start_addr, block_page_size);
                     blocks[page_start] = found;
                 }
@@ -386,7 +410,7 @@ private:
         return found;
     }
 
-    void MarkRegionAsWritten(const CacheAddr start, const CacheAddr end) {
+    void MarkRegionAsWritten(const VAddr start, const VAddr end) {
         u64 page_start = start >> write_page_bit;
         const u64 page_end = end >> write_page_bit;
         while (page_start <= page_end) {
@@ -400,7 +424,7 @@ private:
         }
     }
 
-    void UnmarkRegionAsWritten(const CacheAddr start, const CacheAddr end) {
+    void UnmarkRegionAsWritten(const VAddr start, const VAddr end) {
         u64 page_start = start >> write_page_bit;
         const u64 page_end = end >> write_page_bit;
         while (page_start <= page_end) {
@@ -416,7 +440,7 @@ private:
         }
     }
 
-    bool IsRegionWritten(const CacheAddr start, const CacheAddr end) const {
+    bool IsRegionWritten(const VAddr start, const VAddr end) const {
         u64 page_start = start >> write_page_bit;
         const u64 page_end = end >> write_page_bit;
         while (page_start <= page_end) {
@@ -440,8 +464,8 @@ private:
     u64 buffer_offset = 0;
     u64 buffer_offset_base = 0;
 
-    using IntervalSet = boost::icl::interval_set<CacheAddr>;
-    using IntervalCache = boost::icl::interval_map<CacheAddr, MapInterval>;
+    using IntervalSet = boost::icl::interval_set<VAddr>;
+    using IntervalCache = boost::icl::interval_map<VAddr, MapInterval>;
     using IntervalType = typename IntervalCache::interval_type;
     IntervalCache mapped_addresses;
 
@@ -456,6 +480,8 @@ private:
     u64 epoch = 0;
     u64 modified_ticks = 0;
 
+    std::vector<u8> staging_buffer;
+
     std::recursive_mutex mutex;
 };
 
diff --git a/src/video_core/buffer_cache/map_interval.h b/src/video_core/buffer_cache/map_interval.h
index 3a104d5cd3..b0956029d3 100644
--- a/src/video_core/buffer_cache/map_interval.h
+++ b/src/video_core/buffer_cache/map_interval.h
@@ -11,7 +11,7 @@ namespace VideoCommon {
 
 class MapIntervalBase {
 public:
-    MapIntervalBase(const CacheAddr start, const CacheAddr end, const GPUVAddr gpu_addr)
+    MapIntervalBase(const VAddr start, const VAddr end, const GPUVAddr gpu_addr)
         : start{start}, end{end}, gpu_addr{gpu_addr} {}
 
     void SetCpuAddress(VAddr new_cpu_addr) {
@@ -26,7 +26,7 @@ public:
         return gpu_addr;
     }
 
-    bool IsInside(const CacheAddr other_start, const CacheAddr other_end) const {
+    bool IsInside(const VAddr other_start, const VAddr other_end) const {
         return (start <= other_start && other_end <= end);
     }
 
@@ -46,11 +46,11 @@ public:
         return is_registered;
     }
 
-    CacheAddr GetStart() const {
+    VAddr GetStart() const {
         return start;
     }
 
-    CacheAddr GetEnd() const {
+    VAddr GetEnd() const {
         return end;
     }
 
@@ -76,8 +76,8 @@ public:
     }
 
 private:
-    CacheAddr start;
-    CacheAddr end;
+    VAddr start;
+    VAddr end;
     GPUVAddr gpu_addr;
     VAddr cpu_addr{};
     bool is_written{};
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 073bdb4916..f4ec77a3d1 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -97,6 +97,11 @@ public:
     void WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer, std::size_t size);
     void CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size);
 
+    static bool IsGranularRange(GPUVAddr gpu_addr, std::size_t size) {
+        const std::size_t page = (gpu_addr & page_mask) + size;
+        return page <= page_size;
+    }
+
 private:
     using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>;
     using VMAHandle = VMAMap::const_iterator;
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 0375fca178..4eb37a96c0 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -21,8 +21,8 @@
 using Maxwell = Tegra::Engines::Maxwell3D::Regs;
 
 MICROPROFILE_DEFINE(OpenGL_Buffer_Download, "OpenGL", "Buffer Download", MP_RGB(192, 192, 128));
 
-CachedBufferBlock::CachedBufferBlock(CacheAddr cache_addr, const std::size_t size)
-    : VideoCommon::BufferBlock{cache_addr, size} {
+CachedBufferBlock::CachedBufferBlock(VAddr cpu_addr, const std::size_t size)
+    : VideoCommon::BufferBlock{cpu_addr, size} {
     gl_buffer.Create();
     glNamedBufferData(gl_buffer.handle, static_cast<GLsizeiptr>(size), nullptr, GL_DYNAMIC_DRAW);
 }
@@ -47,8 +47,8 @@ OGLBufferCache::~OGLBufferCache() {
     glDeleteBuffers(static_cast<GLsizei>(std::size(cbufs)), std::data(cbufs));
 }
 
-Buffer OGLBufferCache::CreateBlock(CacheAddr cache_addr, std::size_t size) {
-    return std::make_shared<CachedBufferBlock>(cache_addr, size);
+Buffer OGLBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
+    return std::make_shared<CachedBufferBlock>(cpu_addr, size);
 }
 
 void OGLBufferCache::WriteBarrier() {
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index 8c7145443d..d94a112526 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -31,7 +31,7 @@ using GenericBufferCache = VideoCommon::BufferCache(size), BufferUsage | vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst,
@@ -68,8 +68,8 @@ VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::S
 
 VKBufferCache::~VKBufferCache() = default;
 
-Buffer VKBufferCache::CreateBlock(CacheAddr cache_addr, std::size_t size) {
-    return std::make_shared<CachedBufferBlock>(device, memory_manager, cache_addr, size);
+Buffer VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
+    return std::make_shared<CachedBufferBlock>(device, memory_manager, cpu_addr, size);
 }
 
 const vk::Buffer* VKBufferCache::ToHandle(const Buffer& buffer) {
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 3f38eed0c9..5082146180 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -30,7 +30,7 @@ class VKScheduler;
 class CachedBufferBlock final : public VideoCommon::BufferBlock {
 public:
     explicit CachedBufferBlock(const VKDevice& device, VKMemoryManager& memory_manager,
-                               CacheAddr cache_addr, std::size_t size);
+                               VAddr cpu_addr, std::size_t size);
     ~CachedBufferBlock();
 
     const vk::Buffer* GetHandle() const {
@@ -55,7 +55,7 @@ public:
 protected:
     void WriteBarrier() override {}
 
-    Buffer CreateBlock(CacheAddr cache_addr, std::size_t size) override;
+    Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
 
     const vk::Buffer* ToHandle(const Buffer& buffer) override;
 
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index cc76d96ea0..b6ba5de12b 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -501,7 +501,7 @@ void RasterizerVulkan::FlushRegion(VAddr addr, u64 size) {
     }
     CacheAddr cache_addr = ToCacheAddr(system.Memory().GetPointer(addr));
     texture_cache.FlushRegion(addr, size);
-    buffer_cache.FlushRegion(cache_addr, size);
+    buffer_cache.FlushRegion(addr, size);
     query_cache.FlushRegion(cache_addr, size);
 }
 
@@ -512,7 +512,7 @@ void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) {
     CacheAddr cache_addr = ToCacheAddr(system.Memory().GetPointer(addr));
     texture_cache.InvalidateRegion(addr, size);
     pipeline_cache.InvalidateRegion(cache_addr, size);
-    buffer_cache.InvalidateRegion(cache_addr, size);
+    buffer_cache.InvalidateRegion(addr, size);
     query_cache.InvalidateRegion(cache_addr, size);
 }
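
Note (not part of the patch): the recurring pattern above is that an upload now keys on the CPU virtual address and only dereferences a host pointer when the GPU range is "granular", i.e. it does not cross a GPU page boundary; otherwise the data is gathered through ReadBlockUnsafe into the new staging_buffer. The following is a minimal, illustrative sketch of that decision, using the names introduced by this patch (IsGranularRange, ReadBlockUnsafe, staging_buffer); FakeMemoryManager is a hypothetical stand-in for Tegra::MemoryManager reduced to a flat byte array.

// Illustrative sketch only, assuming the helpers introduced in this patch.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

using GPUVAddr = std::uint64_t;

constexpr std::size_t page_size = 0x1000;
constexpr GPUVAddr page_mask = page_size - 1;

// Mirrors Tegra::MemoryManager::IsGranularRange(): true when the range stays
// inside a single page, so one contiguous host pointer covers it.
bool IsGranularRange(GPUVAddr gpu_addr, std::size_t size) {
    return (gpu_addr & page_mask) + size <= page_size;
}

struct FakeMemoryManager {
    std::vector<std::uint8_t> backing = std::vector<std::uint8_t>(4 * page_size, 0xAB);

    std::uint8_t* GetPointer(GPUVAddr addr) { return backing.data() + addr; }

    // Stand-in for ReadBlockUnsafe: plain copy, no rasterizer bookkeeping.
    void ReadBlockUnsafe(GPUVAddr addr, void* dest, std::size_t size) {
        std::memcpy(dest, backing.data() + addr, size);
    }
};

int main() {
    FakeMemoryManager memory_manager;
    std::vector<std::uint8_t> staging_buffer;

    const GPUVAddr gpu_addr = page_size - 0x10; // starts 16 bytes before a page boundary
    const std::size_t size = 0x40;              // and therefore crosses into the next page

    const std::uint8_t* upload_src = nullptr;
    if (IsGranularRange(gpu_addr, size)) {
        // Fast path: upload straight from guest memory.
        upload_src = memory_manager.GetPointer(gpu_addr);
    } else {
        // Slow path: gather the range into the reusable staging buffer first.
        staging_buffer.resize(size);
        memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
        upload_src = staging_buffer.data();
    }
    std::cout << (upload_src == staging_buffer.data() ? "staged copy\n" : "direct pointer\n");
}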