// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "core/core.h"
#include "core/hle/kernel/k_page_heap.h"

namespace Kernel {
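
// KPageHeap implements a buddy-style page allocator: physical memory is tracked
// by one free-block bitmap per supported block size, and blocks are split and
// merged between adjacent sizes as pages are allocated and freed.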

void KPageHeap::Initialize(KPhysicalAddress address, size_t size,
                           KVirtualAddress management_address, size_t management_size,
                           const size_t* block_shifts, size_t num_block_shifts) {
    // Check our assumptions.
    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
    ASSERT(Common::IsAligned(size, PageSize));
    ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
    const KVirtualAddress management_end = management_address + management_size;

    // Set our members.
    m_heap_address = address;
    m_heap_size = size;
    m_num_blocks = num_block_shifts;

    // Setup bitmaps.
    m_management_data.resize(management_size / sizeof(u64));
    u64* cur_bitmap_storage{m_management_data.data()};
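
    // Each Block::Initialize() call consumes a slice of the management buffer
    // for its bitmap and returns a pointer just past the storage it claimed,
    // so the slices are carved out back-to-back.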
    for (size_t i = 0; i < num_block_shifts; i++) {
        const size_t cur_block_shift = block_shifts[i];
        const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
        cur_bitmap_storage = m_blocks[i].Initialize(m_heap_address, m_heap_size, cur_block_shift,
                                                    next_block_shift, cur_bitmap_storage);
    }

    // Ensure we didn't overextend our bounds.
    ASSERT(KVirtualAddress(cur_bitmap_storage) <= management_end);
}

size_t KPageHeap::GetNumFreePages() const {
    size_t num_free = 0;

    for (size_t i = 0; i < m_num_blocks; i++) {
        num_free += m_blocks[i].GetNumFreePages();
    }

    return num_free;
}

KPhysicalAddress KPageHeap::AllocateByLinearSearch(s32 index) {
    const size_t needed_size = m_blocks[index].GetSize();

    for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
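        // Try to pop a free block at this size; if none is available, fall
        // through to the next (larger) block size. When a larger block than
        // needed is popped, its unused tail is freed back to the heap.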
        if (const KPhysicalAddress addr = m_blocks[i].PopBlock(false); addr != 0) {
            if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
                this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
            }
            return addr;
        }
    }

    return 0;
}

KPhysicalAddress KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
    // Get the size and required alignment.
    const size_t needed_size = num_pages * PageSize;
    const size_t align_size = align_pages * PageSize;

    // Determine meta-alignment of our desired alignment size.
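    // align_size is expected to be a power of two, so countr_zero() yields its
    // log2; shifting by align_shift converts between byte offsets and counts of
    // aligned positions.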
    const size_t align_shift = std::countr_zero(align_size);

    // Decide on a block to allocate from.
    constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4;
    {
        // By default, we'll want to look at all blocks larger than our current one.
        s32 max_blocks = static_cast<s32>(m_num_blocks);

        // Determine the maximum block we should try to allocate from.
        size_t possible_alignments = 0;
        for (s32 i = index; i < max_blocks; ++i) {
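            // A free block of size S can host 1 + (S - needed_size) / align_size
            // aligned starting positions for an allocation of needed_size.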
            // Add the possible alignments from blocks at the current size.
            possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
                                   m_blocks[i].GetNumFreeBlocks();

            // If there are enough possible alignments, we don't need to look at larger blocks.
            if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) {
                max_blocks = i + 1;
                break;
            }
        }

        // If we have any possible alignments which require a larger block, we need to pick one.
        if (possible_alignments > 0 && index + 1 < max_blocks) {
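            // Pick uniformly among all aligned positions: weight each block size
            // by its alignment count, then map the random draw back to a size.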
            // Select a random alignment from the possibilities.
            const size_t rnd = m_rng.GenerateRandom(possible_alignments);

            // Determine which block corresponds to the random alignment we chose.
            possible_alignments = 0;
            for (s32 i = index; i < max_blocks; ++i) {
                // Add the possible alignments from blocks at the current size.
                possible_alignments +=
                    (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
                    m_blocks[i].GetNumFreeBlocks();

                // If the current block gets us to our random choice, use the current block.
                if (rnd < possible_alignments) {
                    index = i;
                    break;
                }
            }
        }
    }

    // Pop a block from the index we selected.
    if (KPhysicalAddress addr = m_blocks[index].PopBlock(true); addr != 0) {
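        // Slide the allocation to a random aligned offset inside the popped
        // block, freeing the leading and trailing remainders back to the heap.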
        // Determine how much size we have left over.
        if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size;
            leftover_size > 0) {
            // Determine how many valid alignments we can have.
            const size_t possible_alignments = 1 + (leftover_size >> align_shift);

            // Select a random valid alignment.
            const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift;

            // Free memory before the random offset.
            if (random_offset != 0) {
                this->Free(addr, random_offset / PageSize);
            }

            // Advance our block by the random offset.
            addr += random_offset;

            // Free memory after our allocated block.
            if (random_offset != leftover_size) {
                this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize);
            }
        }

        // Return the block we allocated.
        return addr;
    }

    return 0;
}

void KPageHeap::FreeBlock(KPhysicalAddress block, s32 index) {
    do {
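        // PushBlock() marks the block free at this size; if it merges the block
        // with its buddy, it returns the merged block so the loop retries at the
        // next larger size.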
        block = m_blocks[index++].PushBlock(block);
    } while (block != 0);
}

void KPageHeap::Free(KPhysicalAddress addr, size_t num_pages) {
    // Freeing no pages is a no-op.
    if (num_pages == 0) {
        return;
    }

    // Find the largest block size that we can free, and free as many as possible.
    s32 big_index = static_cast<s32>(m_num_blocks) - 1;
    const KPhysicalAddress start = addr;
    const KPhysicalAddress end = addr + num_pages * PageSize;
    KPhysicalAddress before_start = start;
    KPhysicalAddress before_end = start;
    KPhysicalAddress after_start = end;
    KPhysicalAddress after_end = end;
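
    // Split the range into maximal aligned "big" blocks in the middle, plus
    // leading ([before_start, before_end)) and trailing ([after_start, after_end))
    // remainders that get freed with smaller block sizes below.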
    while (big_index >= 0) {
        const size_t block_size = m_blocks[big_index].GetSize();
        const KPhysicalAddress big_start = Common::AlignUp(GetInteger(start), block_size);
        const KPhysicalAddress big_end = Common::AlignDown(GetInteger(end), block_size);
        if (big_start < big_end) {
            // Free as many big blocks as we can.
            for (auto block = big_start; block < big_end; block += block_size) {
                this->FreeBlock(block, big_index);
            }
            before_end = big_start;
            after_start = big_end;
            break;
        }
        big_index--;
    }
    ASSERT(big_index >= 0);

    // Free space before the big blocks.
    for (s32 i = big_index - 1; i >= 0; i--) {
        const size_t block_size = m_blocks[i].GetSize();
        while (before_start + block_size <= before_end) {
            before_end -= block_size;
            this->FreeBlock(before_end, i);
        }
    }

    // Free space after the big blocks.
    for (s32 i = big_index - 1; i >= 0; i--) {
        const size_t block_size = m_blocks[i].GetSize();
        while (after_start + block_size <= after_end) {
            this->FreeBlock(after_start, i);
            after_start += block_size;
        }
    }
}

size_t KPageHeap::CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
                                                  size_t num_block_shifts) {
    size_t overhead_size = 0;
    for (size_t i = 0; i < num_block_shifts; i++) {
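        // Sum the bitmap storage each block size needs to track region_size.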
        const size_t cur_block_shift = block_shifts[i];
        const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
        overhead_size += KPageHeap::Block::CalculateManagementOverheadSize(
            region_size, cur_block_shift, next_block_shift);
    }
    return Common::AlignUp(overhead_size, PageSize);
}

} // namespace Kernel