[memory] coalesce mappings of MultiLevelPageTable (they're redundant)

Signed-off-by: lizzie <lizzie@eden-emu.dev>
This commit is contained in:
lizzie 2026-04-14 00:37:59 +00:00 committed by crueter
parent e6ad51e3d5
commit 0b81e4e6f9

View file

@ -13,13 +13,13 @@
namespace Common { namespace Common {
template <typename BaseAddr> template <typename BaseAddr>
MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_, MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_, std::size_t first_level_bits_, std::size_t page_bits_)
std::size_t first_level_bits_, : address_space_bits{address_space_bits_}
std::size_t page_bits_) , first_level_bits{first_level_bits_}
: address_space_bits{address_space_bits_}, , page_bits{page_bits_}
first_level_bits{first_level_bits_}, page_bits{page_bits_} { {
if (page_bits == 0) { if (page_bits == 0) {
return; return;
} }
first_level_shift = address_space_bits - first_level_bits; first_level_shift = address_space_bits - first_level_bits;
first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr); first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr);
@ -30,12 +30,9 @@ MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bit
void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)}; void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)};
#else #else
void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)}; void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
if (base == MAP_FAILED)
if (base == MAP_FAILED) {
base = nullptr; base = nullptr;
}
#endif #endif
ASSERT(base); ASSERT(base);
base_ptr = reinterpret_cast<BaseAddr*>(base); base_ptr = reinterpret_cast<BaseAddr*>(base);
} }
@ -56,29 +53,21 @@ template <typename BaseAddr>
void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) { void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) {
const u64 new_start = start >> first_level_shift; const u64 new_start = start >> first_level_shift;
const u64 new_end = (start + size) >> first_level_shift; const u64 new_end = (start + size) >> first_level_shift;
for (u64 i = new_start; i <= new_end; i++) { for (u64 i = new_start; i <= new_end; i++)
if (!first_level_map[i]) { if (!first_level_map[i])
AllocateLevel(i); AllocateLevel(i);
}
}
} }
template <typename BaseAddr> template <typename BaseAddr>
void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 level) { void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 index) {
void* ptr = reinterpret_cast<char *>(base_ptr) + level * first_level_chunk_size; void* ptr = reinterpret_cast<char *>(base_ptr) + index * first_level_chunk_size;
#ifdef _WIN32 #ifdef _WIN32
void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)}; void* base = VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE);
#else
void* base{mmap(ptr, first_level_chunk_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)};
if (base == MAP_FAILED) {
base = nullptr;
}
#endif
ASSERT(base); ASSERT(base);
#else
first_level_map[level] = base; void* base = ptr;
#endif
first_level_map[index] = base;
} }
} // namespace Common } // namespace Common