Merge pull request #181 from shadps4-emu/new_memory

core: Rework memory manager

Commit: 4287bfcb91
@@ -20,7 +20,7 @@ std::string NativeErrorToString(int e) {
     DWORD res = FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
                                    FORMAT_MESSAGE_IGNORE_INSERTS,
-                               nullptr, e, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+                               nullptr, e, MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
                                reinterpret_cast<LPSTR>(&err_str), 1, nullptr);
     if (!res) {
         return "(FormatMessageA failed to format error)";

@@ -6,7 +6,6 @@
 #include "common/error.h"
 #include "core/address_space.h"
 #include "core/libraries/kernel/memory_management.h"
-#include "core/virtual_memory.h"

 #ifdef _WIN32
 #include <windows.h>

@@ -17,11 +16,40 @@
 namespace Core {

 static constexpr size_t BackingSize = SCE_KERNEL_MAIN_DMEM_SIZE;
-static constexpr size_t VirtualSize = USER_MAX - USER_MIN + 1;

 #ifdef _WIN32
 struct AddressSpace::Impl {
     Impl() : process{GetCurrentProcess()} {
+        // Allocate virtual address placeholder for our address space.
+        MEM_ADDRESS_REQUIREMENTS req{};
+        MEM_EXTENDED_PARAMETER param{};
+        req.LowestStartingAddress = reinterpret_cast<PVOID>(SYSTEM_MANAGED_MIN);
+        // The ending address must align to page boundary - 1
+        // https://stackoverflow.com/questions/54223343/virtualalloc2-with-memextendedparameteraddressrequirements-always-produces-error
+        req.HighestEndingAddress = reinterpret_cast<PVOID>(USER_MIN + UserSize - 1);
+        req.Alignment = 0;
+        param.Type = MemExtendedParameterAddressRequirements;
+        param.Pointer = &req;
+
+        // Typically, lower parts of system managed area is already reserved in windows.
+        // If reservation fails attempt again by reducing the area size a little bit.
+        // System managed is about 31GB in size so also cap the number of times we can reduce it
+        // to a reasonable amount.
+        static constexpr size_t ReductionOnFail = 1_GB;
+        static constexpr size_t MaxReductions = 10;
+        virtual_size = SystemSize + UserSize + ReductionOnFail;
+        for (u32 i = 0; i < MaxReductions && !virtual_base; i++) {
+            virtual_size -= ReductionOnFail;
+            virtual_base = static_cast<u8*>(VirtualAlloc2(process, NULL, virtual_size,
+                                                          MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
+                                                          PAGE_NOACCESS, &param, 1));
+        }
+        ASSERT_MSG(virtual_base, "Unable to reserve virtual address space!");
+
+        // Initialize placeholder tracker
+        const uintptr_t virtual_addr = reinterpret_cast<uintptr_t>(virtual_base);
+        placeholders.insert({virtual_addr, virtual_addr + virtual_size});
+
         // Allocate backing file that represents the total physical memory.
         backing_handle =
             CreateFileMapping2(INVALID_HANDLE_VALUE, nullptr, FILE_MAP_WRITE | FILE_MAP_READ,

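Note: the constructor above leans on the Windows placeholder API (VirtualAlloc2 / MapViewOfFile3, available on Windows 10 1803+). A minimal standalone sketch of the same reserve-then-replace pattern, assuming you link against mincore.lib or onecore.lib; this is an illustration, not the emulator's code:

    // placeholder_demo.cpp - reserve a placeholder, map into it, release it back.
    #include <windows.h>
    #include <cstdio>

    int main() {
        const SIZE_T size = 1ULL << 30; // 1 GiB of address space, no pages committed
        void* base = VirtualAlloc2(GetCurrentProcess(), nullptr, size,
                                   MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS,
                                   nullptr, 0);
        if (!base) {
            std::printf("reserve failed: %lu\n", GetLastError());
            return 1;
        }
        // Swap the placeholder for a committed private allocation, like the
        // else-branch of Impl::Map further down in this commit.
        void* ptr = VirtualAlloc2(GetCurrentProcess(), base, size,
                                  MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER,
                                  PAGE_READWRITE, nullptr, 0);
        std::printf("mapped at %p\n", ptr);
        // Give the pages back but keep the placeholder, so the range stays
        // reserved for future MEM_REPLACE_PLACEHOLDER mappings.
        VirtualFree(base, size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
        return 0;
    }

The retry loop in the constructor exists because the reservation fails when no hole of the requested size exists inside the address requirements, so the size is shrunk by 1_GB per attempt until the placeholder fits.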
@@ -35,21 +63,6 @@ struct AddressSpace::Impl {
         void* const ret = MapViewOfFile3(backing_handle, process, backing_base, 0, BackingSize,
                                          MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);
         ASSERT(ret == backing_base);
-        // Allocate virtual address placeholder for our address space.
-        MEM_ADDRESS_REQUIREMENTS req{};
-        MEM_EXTENDED_PARAMETER param{};
-        req.LowestStartingAddress = reinterpret_cast<PVOID>(USER_MIN);
-        req.HighestEndingAddress = reinterpret_cast<PVOID>(USER_MAX);
-        req.Alignment = 0;
-        param.Type = MemExtendedParameterAddressRequirements;
-        param.Pointer = &req;
-        virtual_base = static_cast<u8*>(VirtualAlloc2(process, nullptr, VirtualSize,
-                                                      MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
-                                                      PAGE_NOACCESS, &param, 1));
-        ASSERT(virtual_base);
-
-        const uintptr_t virtual_addr = reinterpret_cast<uintptr_t>(virtual_base);
-        placeholders.insert({virtual_addr, virtual_addr + VirtualSize});
     }

     ~Impl() {

@@ -71,7 +84,7 @@ struct AddressSpace::Impl {
         }
     }

-    void* MapUser(VAddr virtual_addr, PAddr phys_addr, size_t size, ULONG prot) {
+    void* Map(VAddr virtual_addr, PAddr phys_addr, size_t size, ULONG prot) {
         const auto it = placeholders.find(virtual_addr);
         ASSERT_MSG(it != placeholders.end(), "Cannot map already mapped region");
         ASSERT_MSG(virtual_addr >= it->lower() && virtual_addr + size <= it->upper(),

@@ -106,54 +119,25 @@ struct AddressSpace::Impl {
             ptr = MapViewOfFile3(backing_handle, process, reinterpret_cast<PVOID>(virtual_addr),
                                  phys_addr, size, MEM_REPLACE_PLACEHOLDER, prot, nullptr, 0);
         } else {
-            ptr = VirtualAlloc2(process, reinterpret_cast<PVOID>(virtual_addr), size,
-                                MEM_REPLACE_PLACEHOLDER, prot, nullptr, 0);
+            ptr =
+                VirtualAlloc2(process, reinterpret_cast<PVOID>(virtual_addr), size,
+                              MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER, prot, nullptr, 0);
         }
         ASSERT_MSG(ptr, "{}", Common::GetLastErrorMsg());
         return ptr;
     }

-    void* MapPrivate(VAddr virtual_addr, size_t size, u64 alignment, ULONG prot,
-                     bool no_commit = false) {
-        // Map a private allocation
-        PVOID addr = reinterpret_cast<PVOID>(virtual_addr);
-        MEM_ADDRESS_REQUIREMENTS req{};
-        MEM_EXTENDED_PARAMETER param{};
-        // req.LowestStartingAddress =
-        //     (virtual_addr == 0 ? reinterpret_cast<PVOID>(SYSTEM_MANAGED_MIN)
-        //                        : reinterpret_cast<PVOID>(virtual_addr));
-        req.HighestEndingAddress = reinterpret_cast<PVOID>(SYSTEM_MANAGED_MAX);
-        req.Alignment = alignment < 64_KB ? 0 : alignment;
-        param.Type = MemExtendedParameterAddressRequirements;
-        param.Pointer = &req;
-        ULONG alloc_type = MEM_RESERVE | (alignment > 2_MB ? MEM_LARGE_PAGES : 0);
-        if (!no_commit) {
-            alloc_type |= MEM_COMMIT;
-        }
-        // Check if the area has been reserved beforehand (typically for tesselation buffer)
-        // and in that case don't reserve it again as Windows complains.
-        if (virtual_addr) {
-            MEMORY_BASIC_INFORMATION info;
-            VirtualQuery(addr, &info, sizeof(info));
-            if (info.State == MEM_RESERVE) {
-                alloc_type &= ~MEM_RESERVE;
-            }
-        }
-        void* ptr{};
-        if (virtual_addr) {
-            ptr = VirtualAlloc2(process, addr, size, alloc_type, prot, NULL, 0);
-            ASSERT_MSG(ptr && VAddr(ptr) == virtual_addr, "{}", Common::GetLastErrorMsg());
-        } else {
-            ptr = VirtualAlloc2(process, nullptr, size, alloc_type, prot, &param, 1);
-            ASSERT_MSG(ptr, "{}", Common::GetLastErrorMsg());
-        }
-        return ptr;
-    }
-
-    void UnmapUser(VAddr virtual_addr, size_t size) {
-        const bool ret = UnmapViewOfFile2(process, reinterpret_cast<PVOID>(virtual_addr),
-                                          MEM_PRESERVE_PLACEHOLDER);
-        ASSERT_MSG(ret, "Unmap operation on virtual_addr={:#X} failed", virtual_addr);
+    void Unmap(VAddr virtual_addr, PAddr phys_addr, size_t size) {
+        bool ret;
+        if (phys_addr != -1) {
+            ret = UnmapViewOfFile2(process, reinterpret_cast<PVOID>(virtual_addr),
+                                   MEM_PRESERVE_PLACEHOLDER);
+        } else {
+            ret = VirtualFreeEx(process, reinterpret_cast<PVOID>(virtual_addr), size,
+                                MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
+        }
+        ASSERT_MSG(ret, "Unmap operation on virtual_addr={:#X} failed: {}", virtual_addr,
+                   Common::GetLastErrorMsg());

         // The unmap call will create a new placeholder region. We need to see if we can coalesce it
         // with neighbors.

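The manual coalescing below is needed because boost::icl::separate_interval_set joins intervals that overlap but, unlike a plain interval_set, never joins intervals that merely touch. A self-contained illustration of that behaviour (assumes Boost.ICL is available; not part of the commit):

    // icl_demo.cpp - touching intervals stay separate, overlapping ones merge.
    #include <boost/icl/separate_interval_set.hpp>
    #include <cstdint>
    #include <iostream>

    int main() {
        boost::icl::separate_interval_set<std::uintptr_t> set;
        set.insert({0x1000, 0x2000}); // [0x1000, 0x2000)
        set.insert({0x2000, 0x3000}); // touches, does not merge
        std::cout << boost::icl::interval_count(set) << '\n'; // prints 2
        set.insert({0x1800, 0x2800}); // overlaps both -> single interval
        std::cout << boost::icl::interval_count(set) << '\n'; // prints 1
        return 0;
    }

That is why the unmap path computes the union with any free neighbours itself before re-inserting a single placeholder interval.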
@@ -186,12 +170,6 @@ struct AddressSpace::Impl {
         placeholders.insert({placeholder_start, placeholder_end});
     }

-    void UnmapPrivate(VAddr virtual_addr, size_t size) {
-        const bool ret =
-            VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_addr), 0, MEM_RELEASE);
-        ASSERT_MSG(ret, "{}", Common::GetLastErrorMsg());
-    }
-
     void Protect(VAddr virtual_addr, size_t size, bool read, bool write, bool execute) {
         DWORD new_flags{};
         if (read && write) {

@@ -221,6 +199,7 @@ struct AddressSpace::Impl {
     HANDLE backing_handle{};
     u8* backing_base{};
     u8* virtual_base{};
+    size_t virtual_size{};
     boost::icl::separate_interval_set<uintptr_t> placeholders;
 };
 #else

@@ -239,22 +218,12 @@ struct AddressSpace::Impl {
         UNREACHABLE();
     }

-    void* MapUser(VAddr virtual_addr, PAddr phys_addr, size_t size, PosixPageProtection prot) {
+    void* Map(VAddr virtual_addr, PAddr phys_addr, size_t size, PosixPageProtection prot) {
         UNREACHABLE();
         return nullptr;
     }

-    void* MapPrivate(VAddr virtual_addr, size_t size, u64 alignment, PosixPageProtection prot,
-                     bool no_commit = false) {
-        UNREACHABLE();
-        return nullptr;
-    }
-
-    void UnmapUser(VAddr virtual_addr, size_t size) {
-        UNREACHABLE();
-    }
-
-    void UnmapPrivate(VAddr virtual_addr, size_t size) {
+    void Unmap(VAddr virtual_addr, PAddr phys_addr, size_t size) {
         UNREACHABLE();
     }

@@ -264,36 +233,30 @@ struct AddressSpace::Impl {

     u8* backing_base{};
     u8* virtual_base{};
+    size_t virtual_size{};
 };
 #endif

 AddressSpace::AddressSpace() : impl{std::make_unique<Impl>()} {
     virtual_base = impl->virtual_base;
     backing_base = impl->backing_base;
+    virtual_size = impl->virtual_size;
 }

 AddressSpace::~AddressSpace() = default;

-void* AddressSpace::Map(VAddr virtual_addr, size_t size, u64 alignment, PAddr phys_addr) {
-    if (virtual_addr >= USER_MIN) {
-        return impl->MapUser(virtual_addr, phys_addr, size, PAGE_READWRITE);
-    }
-    return impl->MapPrivate(virtual_addr, size, alignment, PAGE_READWRITE);
+void* AddressSpace::Map(VAddr virtual_addr, size_t size, u64 alignment, PAddr phys_addr,
+                        bool is_exec) {
+    return impl->Map(virtual_addr, phys_addr, size,
+                     is_exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE);
 }

-void AddressSpace::Unmap(VAddr virtual_addr, size_t size) {
-    if (virtual_addr >= USER_MIN) {
-        return impl->UnmapUser(virtual_addr, size);
-    }
-    return impl->UnmapPrivate(virtual_addr, size);
+void AddressSpace::Unmap(VAddr virtual_addr, size_t size, PAddr phys_addr) {
+    return impl->Unmap(virtual_addr, phys_addr, size);
 }

 void AddressSpace::Protect(VAddr virtual_addr, size_t size, MemoryPermission perms) {
     return impl->Protect(virtual_addr, size, true, true, true);
 }

-void* AddressSpace::Reserve(size_t size, u64 alignment) {
-    return impl->MapPrivate(0, size, alignment, PAGE_READWRITE, true);
-}
-
 } // namespace Core

@@ -14,9 +14,23 @@ enum class MemoryPermission : u32 {
     Write = 1 << 1,
     ReadWrite = Read | Write,
     Execute = 1 << 2,
+    ReadWriteExecute = Read | Write | Execute,
 };
 DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission)

+constexpr VAddr SYSTEM_RESERVED = 0x800000000ULL;
+constexpr VAddr CODE_BASE_OFFSET = 0x100000000ULL;
+constexpr VAddr SYSTEM_MANAGED_MIN = 0x0000040000ULL;
+constexpr VAddr SYSTEM_MANAGED_MAX = 0x07FFFFBFFFULL;
+constexpr VAddr USER_MIN = 0x1000000000ULL;
+constexpr VAddr USER_MAX = 0xFBFFFFFFFFULL;
+
+// User area size is normally larger than this. However games are unlikely to map to high
+// regions of that area, so by default we allocate a smaller virtual address space (about 1/4th).
+// to save space on page tables.
+static constexpr size_t UserSize = 1ULL << 38;
+static constexpr size_t SystemSize = USER_MIN - SYSTEM_MANAGED_MIN;
+
 /**
  * Represents the user virtual address space backed by a dmem memory block
  */

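For scale, a quick compile-time check of the constants introduced above (illustrative, with local typedefs standing in for the project's types):

    // layout_check.cpp
    #include <cstddef>
    #include <cstdint>

    using VAddr = std::uint64_t;
    constexpr VAddr SYSTEM_MANAGED_MIN = 0x0000040000ULL;
    constexpr VAddr USER_MIN = 0x1000000000ULL;
    constexpr VAddr USER_MAX = 0xFBFFFFFFFFULL;
    constexpr std::size_t UserSize = 1ULL << 38;
    constexpr std::size_t SystemSize = USER_MIN - SYSTEM_MANAGED_MIN;

    // 1 << 38 bytes = 256 GiB of host reservation for the guest user area...
    static_assert(UserSize == 256ULL * 1024 * 1024 * 1024);
    // ...which is roughly a quarter of the 944 GiB the guest could address.
    static_assert((USER_MAX - USER_MIN + 1) / UserSize == 3);

    int main() {}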
@@ -25,12 +39,15 @@ public:
     explicit AddressSpace();
     ~AddressSpace();

-    [[nodiscard]] u8* VirtualBase() noexcept {
-        return virtual_base;
+    [[nodiscard]] VAddr VirtualBase() noexcept {
+        return reinterpret_cast<VAddr>(virtual_base);
     }
     [[nodiscard]] const u8* VirtualBase() const noexcept {
         return virtual_base;
     }
+    [[nodiscard]] size_t VirtualSize() const noexcept {
+        return virtual_size;
+    }

     /**
      * @brief Maps memory to the specified virtual address.

@@ -42,20 +59,20 @@ public:
      * If zero is provided the mapping is considered as private.
      * @return A pointer to the mapped memory.
      */
-    void* Map(VAddr virtual_addr, size_t size, u64 alignment = 0, PAddr phys_addr = -1);
+    void* Map(VAddr virtual_addr, size_t size, u64 alignment = 0, PAddr phys_addr = -1,
+              bool exec = false);

     /// Unmaps specified virtual memory area.
-    void Unmap(VAddr virtual_addr, size_t size);
+    void Unmap(VAddr virtual_addr, size_t size, PAddr phys_addr);

     void Protect(VAddr virtual_addr, size_t size, MemoryPermission perms);

-    void* Reserve(size_t size, u64 alignment);
-
 private:
     struct Impl;
     std::unique_ptr<Impl> impl;
     u8* backing_base{};
     u8* virtual_base{};
+    size_t virtual_size{};
 };

 } // namespace Core

@@ -10,7 +10,6 @@
 #include "core/libraries/gnmdriver/gnmdriver.h"
 #include "core/libraries/libs.h"
 #include "core/libraries/videoout/video_out.h"
-#include "core/memory.h"
 #include "core/platform.h"
 #include "video_core/amdgpu/liverpool.h"
 #include "video_core/amdgpu/pm4_cmds.h"

@@ -40,10 +39,7 @@ struct AscQueueInfo {
     u32 ring_size_dw;
 };
 static VideoCore::SlotVector<AscQueueInfo> asc_queues{};
-static constexpr u32 TessellationFactorRingSize = 128_KB;
-static constexpr u32 TessellationFactorRingAlignment = 64_KB; // toolkit is using this alignment
-VAddr tessellation_factors_ring_addr{0};
+static constexpr VAddr tessellation_factors_ring_addr = 0xFF0000000ULL;

 static void DumpCommandList(std::span<const u32> cmd_list, const std::string& postfix) {
     using namespace Common::FS;

@@ -624,11 +620,6 @@ int PS4_SYSV_ABI sceGnmGetShaderStatus() {
 VAddr PS4_SYSV_ABI sceGnmGetTheTessellationFactorRingBufferBaseAddress() {
     LOG_TRACE(Lib_GnmDriver, "called");
     // Actual virtual buffer address is hardcoded in the driver to 0xff00'000
-    if (tessellation_factors_ring_addr == 0) {
-        auto* memory = Core::Memory::Instance();
-        tessellation_factors_ring_addr =
-            memory->Reserve(TessellationFactorRingSize, TessellationFactorRingAlignment);
-    }
     return tessellation_factors_ring_addr;
 }

@@ -55,6 +55,20 @@ s32 PS4_SYSV_ABI sceKernelAllocateMainDirectMemory(size_t len, size_t alignment,
                                           physAddrOut);
 }

+s32 PS4_SYSV_ABI sceKernelCheckedReleaseDirectMemory(u64 start, size_t len) {
+    LOG_INFO(Kernel_Vmm, "called start = {:#x}, len = {:#x}", start, len);
+    auto* memory = Core::Memory::Instance();
+    memory->Free(start, len);
+    return ORBIS_OK;
+}
+
+s32 PS4_SYSV_ABI sceKernelVirtualQuery(const void* addr, int flags, OrbisVirtualQueryInfo* info,
+                                       size_t infoSize) {
+    LOG_INFO(Kernel_Vmm, "called addr = {}, flags = {:#x}", fmt::ptr(addr), flags);
+    auto* memory = Core::Memory::Instance();
+    return memory->VirtualQuery(std::bit_cast<VAddr>(addr), flags, info);
+}
+
 int PS4_SYSV_ABI sceKernelMapNamedDirectMemory(void** addr, u64 len, int prot, int flags,
                                                s64 directMemoryStart, u64 alignment,
                                                const char* name) {

@@ -83,7 +97,7 @@ int PS4_SYSV_ABI sceKernelMapNamedDirectMemory(void** addr, u64 len, int prot, i
     const auto map_flags = static_cast<Core::MemoryMapFlags>(flags);
     auto* memory = Core::Memory::Instance();
     return memory->MapMemory(addr, in_addr, len, mem_prot, map_flags, Core::VMAType::Direct, "",
-                             directMemoryStart, alignment);
+                             false, directMemoryStart, alignment);
 }

 int PS4_SYSV_ABI sceKernelMapDirectMemory(void** addr, u64 len, int prot, int flags,

@@ -3,6 +3,7 @@

 #pragma once

+#include "common/bit_field.h"
 #include "common/types.h"

 constexpr u64 SCE_KERNEL_MAIN_DMEM_SIZE = 5376_MB; // ~ 6GB

@@ -36,6 +37,22 @@ struct OrbisQueryInfo {
     int memoryType;
 };

+struct OrbisVirtualQueryInfo {
+    uintptr_t start;
+    uintptr_t end;
+    size_t offset;
+    s32 protection;
+    s32 memory_type;
+    union {
+        BitField<0, 1, u32> is_flexible;
+        BitField<1, 1, u32> is_direct;
+        BitField<2, 1, u32> is_stack;
+        BitField<3, 1, u32> is_pooled;
+        BitField<4, 1, u32> is_commited;
+    };
+    std::array<char, 32> name;
+};
+
 u64 PS4_SYSV_ABI sceKernelGetDirectMemorySize();
 int PS4_SYSV_ABI sceKernelAllocateDirectMemory(s64 searchStart, s64 searchEnd, u64 len,
                                                u64 alignment, int memoryType, s64* physAddrOut);

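The anonymous union above overlays five BitField flags on one u32, so each is_* member reads a single bit of the same word. A plain-bitmask equivalent of that layout (illustrative; BitField itself lives in common/bit_field.h):

    // flags_demo.cpp
    #include <cstdint>
    #include <cstdio>

    int main() {
        std::uint32_t flags = 0;
        flags |= 1u << 1; // is_direct   occupies bit 1
        flags |= 1u << 4; // is_commited occupies bit 4
        std::printf("is_flexible=%u is_direct=%u is_commited=%u\n",
                    (flags >> 0) & 1u, (flags >> 1) & 1u, (flags >> 4) & 1u);
        return 0;
    }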
@@ -46,6 +63,9 @@ int PS4_SYSV_ABI sceKernelMapDirectMemory(void** addr, u64 len, int prot, int fl
                                           s64 directMemoryStart, u64 alignment);
 s32 PS4_SYSV_ABI sceKernelAllocateMainDirectMemory(size_t len, size_t alignment, int memoryType,
                                                    s64* physAddrOut);
+s32 PS4_SYSV_ABI sceKernelCheckedReleaseDirectMemory(u64 start, size_t len);
+s32 PS4_SYSV_ABI sceKernelVirtualQuery(const void* addr, int flags, OrbisVirtualQueryInfo* info,
+                                       size_t infoSize);
 s32 PS4_SYSV_ABI sceKernelMapNamedFlexibleMemory(void** addrInOut, std::size_t len, int prot,
                                                  int flags, const char* name);
 s32 PS4_SYSV_ABI sceKernelMapFlexibleMemory(void** addr_in_out, std::size_t len, int prot,

@@ -1,7 +1,6 @@
 // SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

-#include <algorithm>
 #include "common/alignment.h"
 #include "common/assert.h"
 #include "common/scope_exit.h"

@@ -13,49 +12,82 @@
 namespace Core {

 MemoryManager::MemoryManager() {
-    // Insert a virtual memory area that covers the user area.
-    const size_t user_size = USER_MAX - USER_MIN - 1;
-    vma_map.emplace(USER_MIN, VirtualMemoryArea{USER_MIN, user_size});
+    // Insert an area that covers direct memory physical block.
+    dmem_map.emplace(0, DirectMemoryArea{0, SCE_KERNEL_MAIN_DMEM_SIZE});

-    // Insert a virtual memory area that covers the system managed area.
-    const size_t sys_size = SYSTEM_MANAGED_MAX - SYSTEM_MANAGED_MIN - 1;
-    vma_map.emplace(SYSTEM_MANAGED_MIN, VirtualMemoryArea{SYSTEM_MANAGED_MIN, sys_size});
+    // Insert a virtual memory area that covers the entire area we manage.
+    const VAddr virtual_base = impl.VirtualBase();
+    const size_t virtual_size = impl.VirtualSize();
+    vma_map.emplace(virtual_base, VirtualMemoryArea{virtual_base, virtual_size});
 }

 MemoryManager::~MemoryManager() = default;

 PAddr MemoryManager::Allocate(PAddr search_start, PAddr search_end, size_t size, u64 alignment,
                               int memory_type) {
-    PAddr free_addr = search_start;
+    std::scoped_lock lk{mutex};

-    // Iterate through allocated blocked and find the next free position
-    for (const auto& block : allocations) {
-        const PAddr end = block.base + block.size;
-        free_addr = std::max(end, free_addr);
+    auto dmem_area = FindDmemArea(search_start);
+
+    const auto is_suitable = [&] {
+        return dmem_area->second.is_free && dmem_area->second.size >= size;
+    };
+    while (!is_suitable() && dmem_area->second.GetEnd() <= search_end) {
+        dmem_area++;
     }
+    ASSERT_MSG(is_suitable(), "Unable to find free direct memory area");

     // Align free position
+    PAddr free_addr = dmem_area->second.base;
     free_addr = alignment > 0 ? Common::AlignUp(free_addr, alignment) : free_addr;
-    ASSERT(free_addr >= search_start && free_addr + size <= search_end);

     // Add the allocated region to the list and commit its pages.
-    allocations.emplace_back(free_addr, size, memory_type);
+    auto& area = AddDmemAllocation(free_addr, size);
+    area.memory_type = memory_type;
+    area.is_free = false;
     return free_addr;
 }

 void MemoryManager::Free(PAddr phys_addr, size_t size) {
-    const auto it = std::ranges::find_if(allocations, [&](const auto& alloc) {
-        return alloc.base == phys_addr && alloc.size == size;
-    });
-    ASSERT(it != allocations.end());
+    std::scoped_lock lk{mutex};

-    // Free the ranges.
-    allocations.erase(it);
+    const auto dmem_area = FindDmemArea(phys_addr);
+    ASSERT(dmem_area != dmem_map.end() && dmem_area->second.base == phys_addr &&
+           dmem_area->second.size == size);
+
+    // Release any dmem mappings that reference this physical block.
+    std::vector<std::pair<VAddr, u64>> remove_list;
+    for (const auto& [addr, mapping] : vma_map) {
+        if (mapping.type != VMAType::Direct) {
+            continue;
+        }
+        if (mapping.phys_base <= phys_addr && phys_addr < mapping.phys_base + mapping.size) {
+            LOG_INFO(Kernel_Vmm, "Unmaping direct mapping {:#x} with size {:#x}", addr,
+                     mapping.size);
+            // Unmaping might erase from vma_map. We can't do it here.
+            remove_list.emplace_back(addr, mapping.size);
+        }
+    }
+    for (const auto& [addr, size] : remove_list) {
+        UnmapMemory(addr, size);
+    }
+
+    // Mark region as free and attempt to coalesce it with neighbours.
+    auto& area = dmem_area->second;
+    area.is_free = true;
+    area.memory_type = 0;
+    MergeAdjacent(dmem_map, dmem_area);
 }

 int MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, size_t size, MemoryProt prot,
                              MemoryMapFlags flags, VMAType type, std::string_view name,
-                             PAddr phys_addr, u64 alignment) {
+                             bool is_exec, PAddr phys_addr, u64 alignment) {
+    std::scoped_lock lk{mutex};
+
+    // When virtual addr is zero, force it to virtual_base. The guest cannot pass Fixed
+    // flag so we will take the branch that searches for free (or reserved) mappings.
+    virtual_addr = (virtual_addr == 0) ? impl.VirtualBase() : virtual_addr;
+
     VAddr mapped_addr = alignment > 0 ? Common::AlignUp(virtual_addr, alignment) : virtual_addr;
     SCOPE_EXIT {
         auto& new_vma = AddMapping(mapped_addr, size);

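Allocate now walks dmem_map in address order looking for the first free area large enough, instead of scanning a flat vector. Reduced to a standalone toy with the same shape (field names mirror DirectMemoryArea; this is not the emulator's code):

    // first_fit_demo.cpp
    #include <cstdint>
    #include <cstdio>
    #include <iterator>
    #include <map>

    struct Area {
        std::uint64_t base = 0;
        std::uint64_t size = 0;
        bool is_free = true;
        std::uint64_t GetEnd() const { return base + size; }
    };

    int main() {
        std::map<std::uint64_t, Area> dmem_map;
        dmem_map.emplace(0x0000, Area{0x0000, 0x4000, false}); // allocated
        dmem_map.emplace(0x4000, Area{0x4000, 0x10000});       // free

        const std::uint64_t size = 0x8000;
        const std::uint64_t search_end = 0x14000;
        auto it = std::prev(dmem_map.upper_bound(0)); // area containing the start
        while (!(it->second.is_free && it->second.size >= size) &&
               it->second.GetEnd() <= search_end) {
            ++it; // walk areas in address order until one fits
        }
        std::printf("first fit at %#llx\n",
                    static_cast<unsigned long long>(it->second.base));
        return 0;
    }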
@@ -65,18 +97,11 @@ int MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, size_t size, M
         new_vma.type = type;

         if (type == VMAType::Direct) {
+            new_vma.phys_base = phys_addr;
             MapVulkanMemory(mapped_addr, size);
         }
     };

-    // When virtual addr is zero let the address space manager pick the address.
-    // Alignment matters here as we let the OS pick the address.
-    if (virtual_addr == 0) {
-        *out_addr = impl.Map(virtual_addr, size, alignment);
-        mapped_addr = std::bit_cast<VAddr>(*out_addr);
-        return ORBIS_OK;
-    }
-
     // Fixed mapping means the virtual address must exactly match the provided one.
     if (True(flags & MemoryMapFlags::Fixed) && True(flags & MemoryMapFlags::NoOverwrite)) {
         // This should return SCE_KERNEL_ERROR_ENOMEM but shouldn't normally happen.

@@ -92,21 +117,28 @@ int MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, size_t size, M
             it++;
         }
         ASSERT(it != vma_map.end());
-        mapped_addr = alignment > 0 ? Common::AlignUp(it->second.base, alignment) : it->second.base;
+        const VAddr base = it->second.base;
+        mapped_addr = alignment > 0 ? Common::AlignUp(base, alignment) : base;
     }

     // Perform the mapping.
-    *out_addr = impl.Map(mapped_addr, size, alignment, phys_addr);
+    *out_addr = impl.Map(mapped_addr, size, alignment, phys_addr, is_exec);
     return ORBIS_OK;
 }

 void MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) {
+    std::scoped_lock lk{mutex};
+
     // TODO: Partial unmaps are technically supported by the guest.
     const auto it = vma_map.find(virtual_addr);
     ASSERT_MSG(it != vma_map.end() && it->first == virtual_addr,
                "Attempting to unmap partially mapped range");

-    if (it->second.type == VMAType::Direct) {
+    const auto type = it->second.type;
+    fmt::print("{}\n", u32(type));
+    std::fflush(stdout);
+    const PAddr phys_addr = type == VMAType::Direct ? it->second.phys_base : -1;
+    if (type == VMAType::Direct) {
         UnmapVulkanMemory(virtual_addr, size);
     }

@@ -115,13 +147,15 @@ void MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) {
     vma.type = VMAType::Free;
     vma.prot = MemoryProt::NoAccess;
     vma.phys_base = 0;
-    MergeAdjacent(it);
+    MergeAdjacent(vma_map, it);

     // Unmap the memory region.
-    impl.Unmap(virtual_addr, size);
+    impl.Unmap(virtual_addr, size, phys_addr);
 }

 int MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* prot) {
+    std::scoped_lock lk{mutex};
+
     const auto it = FindVMA(addr);
     const auto& vma = it->second;
     ASSERT_MSG(vma.type != VMAType::Free, "Provided address is not mapped");

@@ -132,18 +166,70 @@ int MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* pr
     return ORBIS_OK;
 }

-int MemoryManager::DirectMemoryQuery(PAddr addr, bool find_next,
-                                     Libraries::Kernel::OrbisQueryInfo* out_info) {
-    const auto it = std::ranges::find_if(allocations, [&](const DirectMemoryArea& alloc) {
-        return alloc.base <= addr && addr < alloc.base + alloc.size;
-    });
-    if (it == allocations.end()) {
-        return SCE_KERNEL_ERROR_EACCES;
+int MemoryManager::VirtualQuery(VAddr addr, int flags,
+                                Libraries::Kernel::OrbisVirtualQueryInfo* info) {
+    auto it = FindVMA(addr);
+    if (it->second.type == VMAType::Free && flags == 1) {
+        it++;
+    }
+    if (it->second.type == VMAType::Free) {
+        LOG_WARNING(Kernel_Vmm, "VirtualQuery on free memory region");
+        return ORBIS_KERNEL_ERROR_EACCES;
     }

-    out_info->start = it->base;
-    out_info->end = it->base + it->size;
-    out_info->memoryType = it->memory_type;
+    const auto& vma = it->second;
+    info->start = vma.base;
+    info->end = vma.base + vma.size;
+    info->is_flexible.Assign(vma.type == VMAType::Flexible);
+    info->is_direct.Assign(vma.type == VMAType::Direct);
+    info->is_commited.Assign(vma.type != VMAType::Free);
+    if (vma.type == VMAType::Direct) {
+        const auto dmem_it = FindDmemArea(vma.phys_base);
+        ASSERT(dmem_it != dmem_map.end());
+        info->memory_type = dmem_it->second.memory_type;
+    }
+
+    return ORBIS_OK;
+}
+
+int MemoryManager::DirectMemoryQuery(PAddr addr, bool find_next,
+                                     Libraries::Kernel::OrbisQueryInfo* out_info) {
+    std::scoped_lock lk{mutex};
+
+    auto dmem_area = FindDmemArea(addr);
+    if (dmem_area->second.is_free && find_next) {
+        dmem_area++;
+    }
+
+    if (dmem_area == dmem_map.end() || dmem_area->second.is_free) {
+        LOG_ERROR(Core, "Unable to find allocated direct memory region to query!");
+        return ORBIS_KERNEL_ERROR_EACCES;
+    }
+
+    const auto& area = dmem_area->second;
+    out_info->start = area.base;
+    out_info->end = area.GetEnd();
+    out_info->memoryType = area.memory_type;
+    return ORBIS_OK;
+}
+
+int MemoryManager::DirectQueryAvailable(PAddr search_start, PAddr search_end, size_t alignment,
+                                        PAddr* phys_addr_out, size_t* size_out) {
+    std::scoped_lock lk{mutex};
+
+    auto dmem_area = FindDmemArea(search_start);
+    PAddr paddr{};
+    size_t max_size{};
+    while (dmem_area != dmem_map.end() && dmem_area->second.GetEnd() <= search_end) {
+        if (dmem_area->second.size > max_size) {
+            paddr = dmem_area->second.base;
+            max_size = dmem_area->second.size;
+        }
+        dmem_area++;
+    }
+
+    *phys_addr_out = alignment > 0 ? Common::AlignUp(paddr, alignment) : paddr;
+    *size_out = max_size;
     return ORBIS_OK;
 }

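DirectQueryAvailable reports the largest block between search_start and search_end rather than the first one. The core of that scan as a standalone toy (illustrative; the toy adds an is_free check for clarity, which the commit's loop does not perform when comparing sizes):

    // largest_free_demo.cpp
    #include <cstdint>
    #include <cstdio>
    #include <map>

    struct Area {
        std::uint64_t base = 0;
        std::uint64_t size = 0;
        bool is_free = true;
    };

    int main() {
        std::map<std::uint64_t, Area> dmem_map;
        dmem_map.emplace(0x0000, Area{0x0000, 0x2000});        // free, 8 KiB
        dmem_map.emplace(0x2000, Area{0x2000, 0x1000, false}); // allocated
        dmem_map.emplace(0x3000, Area{0x3000, 0x8000});        // free, 32 KiB

        std::uint64_t paddr = 0, max_size = 0;
        for (const auto& [base, area] : dmem_map) {
            if (area.is_free && area.size > max_size) {
                paddr = area.base;
                max_size = area.size;
            }
        }
        std::printf("largest free block: %#llx, %#llx bytes\n",
                    static_cast<unsigned long long>(paddr),
                    static_cast<unsigned long long>(max_size));
        return 0;
    }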
@@ -178,6 +264,30 @@ VirtualMemoryArea& MemoryManager::AddMapping(VAddr virtual_addr, size_t size) {
     return vma_handle->second;
 }

+DirectMemoryArea& MemoryManager::AddDmemAllocation(PAddr addr, size_t size) {
+    auto dmem_handle = FindDmemArea(addr);
+    ASSERT_MSG(dmem_handle != dmem_map.end(), "Physical address not in dmem_map");
+
+    const DirectMemoryArea& area = dmem_handle->second;
+    ASSERT_MSG(area.is_free && area.base <= addr,
+               "Adding an allocation to already allocated region");
+
+    const PAddr start_in_area = addr - area.base;
+    const PAddr end_in_vma = start_in_area + size;
+    ASSERT_MSG(end_in_vma <= area.size, "Mapping cannot fit inside free region");
+
+    if (end_in_vma != area.size) {
+        // Split VMA at the end of the allocated region
+        Split(dmem_handle, end_in_vma);
+    }
+    if (start_in_area != 0) {
+        // Split VMA at the start of the allocated region
+        dmem_handle = Split(dmem_handle, start_in_area);
+    }
+
+    return dmem_handle->second;
+}
+
 MemoryManager::VMAHandle MemoryManager::Split(VMAHandle vma_handle, size_t offset_in_vma) {
     auto& old_vma = vma_handle->second;
     ASSERT(offset_in_vma < old_vma.size && offset_in_vma > 0);

@@ -193,24 +303,17 @@ MemoryManager::VMAHandle MemoryManager::Split(VMAHandle vma_handle, size_t offse
     return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
 }

-MemoryManager::VMAHandle MemoryManager::MergeAdjacent(VMAHandle iter) {
-    const auto next_vma = std::next(iter);
-    if (next_vma != vma_map.end() && iter->second.CanMergeWith(next_vma->second)) {
-        iter->second.size += next_vma->second.size;
-        vma_map.erase(next_vma);
-    }
+MemoryManager::DMemHandle MemoryManager::Split(DMemHandle dmem_handle, size_t offset_in_area) {
+    auto& old_area = dmem_handle->second;
+    ASSERT(offset_in_area < old_area.size && offset_in_area > 0);

-    if (iter != vma_map.begin()) {
-        auto prev_vma = std::prev(iter);
-        if (prev_vma->second.CanMergeWith(iter->second)) {
-            prev_vma->second.size += iter->second.size;
-            vma_map.erase(iter);
-            iter = prev_vma;
-        }
-    }
+    auto new_area = old_area;
+    old_area.size = offset_in_area;
+    new_area.base += offset_in_area;
+    new_area.size -= offset_in_area;

-    return iter;
-}
+    return dmem_map.emplace_hint(std::next(dmem_handle), new_area.base, new_area);
+};

 void MemoryManager::MapVulkanMemory(VAddr addr, size_t size) {
     return;

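Split carves one map entry into two at a byte offset: the original entry keeps the front part and a new entry is inserted for the tail. The same operation on a toy map (illustrative, not the emulator's code):

    // split_demo.cpp
    #include <cstdint>
    #include <cstdio>
    #include <iterator>
    #include <map>

    struct Area {
        std::uint64_t base = 0;
        std::uint64_t size = 0;
    };

    int main() {
        std::map<std::uint64_t, Area> areas;
        areas.emplace(0x0000, Area{0x0000, 0x10000});

        const std::uint64_t offset = 0x4000;
        auto it = areas.begin();
        Area new_area = it->second;  // copy, then shrink both halves
        it->second.size = offset;    // old entry keeps [base, base+offset)
        new_area.base += offset;     // new entry takes [base+offset, end)
        new_area.size -= offset;
        areas.emplace_hint(std::next(it), new_area.base, new_area);

        for (const auto& [base, a] : areas)
            std::printf("[%#llx, %#llx)\n", static_cast<unsigned long long>(base),
                        static_cast<unsigned long long>(a.base + a.size));
        return 0;
    }

AddDmemAllocation above splits the free area at the end offset first and then at the start, reassigning its handle to the returned tail, so it ends up holding exactly the allocated range.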
@@ -4,6 +4,7 @@
 #pragma once

 #include <functional>
+#include <mutex>
 #include <string_view>
 #include <vector>
 #include <boost/icl/split_interval_map.hpp>

@@ -11,6 +12,7 @@
 #include "common/singleton.h"
 #include "common/types.h"
 #include "core/address_space.h"
+#include "core/libraries/kernel/memory_management.h"
 #include "video_core/renderer_vulkan/vk_common.h"

 namespace Vulkan {

@@ -47,12 +49,28 @@ enum class VMAType : u32 {
     Flexible = 3,
     Pooled = 4,
     Stack = 5,
+    Code = 6,
 };

 struct DirectMemoryArea {
     PAddr base = 0;
     size_t size = 0;
     int memory_type = 0;
+    bool is_free = true;
+
+    PAddr GetEnd() const {
+        return base + size;
+    }
+
+    bool CanMergeWith(const DirectMemoryArea& next) const {
+        if (base + size != next.base) {
+            return false;
+        }
+        if (is_free != next.is_free) {
+            return false;
+        }
+        return true;
+    }
 };

 struct VirtualMemoryArea {

@@ -81,14 +99,10 @@ struct VirtualMemoryArea {
     }
 };

-constexpr VAddr SYSTEM_RESERVED = 0x800000000ULL;
-constexpr VAddr CODE_BASE_OFFSET = 0x100000000ULL;
-constexpr VAddr SYSTEM_MANAGED_MIN = 0x0000040000ULL;
-constexpr VAddr SYSTEM_MANAGED_MAX = 0x07FFFFBFFFULL;
-constexpr VAddr USER_MIN = 0x1000000000ULL;
-constexpr VAddr USER_MAX = 0xFBFFFFFFFFULL;
-
 class MemoryManager {
+    using DMemMap = std::map<PAddr, DirectMemoryArea>;
+    using DMemHandle = DMemMap::iterator;
+
     using VMAMap = std::map<VAddr, VirtualMemoryArea>;
     using VMAHandle = VMAMap::iterator;

|
@ -107,35 +121,57 @@ public:
|
||||||
|
|
||||||
int MapMemory(void** out_addr, VAddr virtual_addr, size_t size, MemoryProt prot,
|
int MapMemory(void** out_addr, VAddr virtual_addr, size_t size, MemoryProt prot,
|
||||||
MemoryMapFlags flags, VMAType type, std::string_view name = "",
|
MemoryMapFlags flags, VMAType type, std::string_view name = "",
|
||||||
PAddr phys_addr = -1, u64 alignment = 0);
|
bool is_exec = false, PAddr phys_addr = -1, u64 alignment = 0);
|
||||||
|
|
||||||
void UnmapMemory(VAddr virtual_addr, size_t size);
|
void UnmapMemory(VAddr virtual_addr, size_t size);
|
||||||
|
|
||||||
int QueryProtection(VAddr addr, void** start, void** end, u32* prot);
|
int QueryProtection(VAddr addr, void** start, void** end, u32* prot);
|
||||||
|
|
||||||
|
int VirtualQuery(VAddr addr, int flags, Libraries::Kernel::OrbisVirtualQueryInfo* info);
|
||||||
|
|
||||||
int DirectMemoryQuery(PAddr addr, bool find_next, Libraries::Kernel::OrbisQueryInfo* out_info);
|
int DirectMemoryQuery(PAddr addr, bool find_next, Libraries::Kernel::OrbisQueryInfo* out_info);
|
||||||
|
|
||||||
VAddr Reserve(size_t size, u64 alignment) {
|
int DirectQueryAvailable(PAddr search_start, PAddr search_end, size_t alignment,
|
||||||
return reinterpret_cast<VAddr>(impl.Reserve(size, alignment));
|
PAddr* phys_addr_out, size_t* size_out);
|
||||||
}
|
|
||||||
|
|
||||||
std::pair<vk::Buffer, size_t> GetVulkanBuffer(VAddr addr);
|
std::pair<vk::Buffer, size_t> GetVulkanBuffer(VAddr addr);
|
||||||
|
|
||||||
private:
|
private:
|
||||||
VMAHandle FindVMA(VAddr target) {
|
VMAHandle FindVMA(VAddr target) {
|
||||||
// Return first the VMA with base >= target.
|
return std::prev(vma_map.upper_bound(target));
|
||||||
const auto it = vma_map.lower_bound(target);
|
|
||||||
if (it != vma_map.end() && it->first == target) {
|
|
||||||
return it;
|
|
||||||
}
|
}
|
||||||
return std::prev(it);
|
|
||||||
|
DMemHandle FindDmemArea(PAddr target) {
|
||||||
|
return std::prev(dmem_map.upper_bound(target));
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Handle>
|
||||||
|
Handle MergeAdjacent(auto& handle_map, Handle iter) {
|
||||||
|
const auto next_vma = std::next(iter);
|
||||||
|
if (next_vma != handle_map.end() && iter->second.CanMergeWith(next_vma->second)) {
|
||||||
|
iter->second.size += next_vma->second.size;
|
||||||
|
handle_map.erase(next_vma);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (iter != handle_map.begin()) {
|
||||||
|
auto prev_vma = std::prev(iter);
|
||||||
|
if (prev_vma->second.CanMergeWith(iter->second)) {
|
||||||
|
prev_vma->second.size += iter->second.size;
|
||||||
|
handle_map.erase(iter);
|
||||||
|
iter = prev_vma;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return iter;
|
||||||
}
|
}
|
||||||
|
|
||||||
VirtualMemoryArea& AddMapping(VAddr virtual_addr, size_t size);
|
VirtualMemoryArea& AddMapping(VAddr virtual_addr, size_t size);
|
||||||
|
|
||||||
|
DirectMemoryArea& AddDmemAllocation(PAddr addr, size_t size);
|
||||||
|
|
||||||
VMAHandle Split(VMAHandle vma_handle, size_t offset_in_vma);
|
VMAHandle Split(VMAHandle vma_handle, size_t offset_in_vma);
|
||||||
|
|
||||||
VMAHandle MergeAdjacent(VMAHandle iter);
|
DMemHandle Split(DMemHandle dmem_handle, size_t offset_in_area);
|
||||||
|
|
||||||
void MapVulkanMemory(VAddr addr, size_t size);
|
void MapVulkanMemory(VAddr addr, size_t size);
|
||||||
|
|
||||||
|
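FindVMA's new one-liner relies on the invariant, established in the constructor, that vma_map always tiles the whole managed range with no gaps: upper_bound finds the first area starting strictly after the target, so its predecessor is the area containing it. A self-contained check of the idiom:

    // findvma_demo.cpp
    #include <cassert>
    #include <cstdint>
    #include <iterator>
    #include <map>

    int main() {
        // key = base address, value = size; the areas tile [0x0000, 0x3000).
        std::map<std::uint64_t, std::uint64_t> vma_map{
            {0x0000, 0x1000}, {0x1000, 0x1000}, {0x2000, 0x1000}};

        const std::uint64_t addr = 0x17FF;
        auto it = std::prev(vma_map.upper_bound(addr));
        assert(it->first == 0x1000);           // area base
        assert(addr < it->first + it->second); // addr lies inside the area
        return 0;
    }

The generic MergeAdjacent now serves both vma_map and dmem_map, which is why both VirtualMemoryArea and DirectMemoryArea provide a CanMergeWith predicate.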
@@ -143,8 +179,9 @@ private:

 private:
     AddressSpace impl;
-    std::vector<DirectMemoryArea> allocations;
+    DMemMap dmem_map;
     VMAMap vma_map;
+    std::recursive_mutex mutex;

     struct MappedMemory {
         vk::UniqueBuffer buffer;

@@ -7,6 +7,7 @@
 #include "common/logging/log.h"
 #include "common/string_util.h"
 #include "core/aerolib/aerolib.h"
+#include "core/memory.h"
 #include "core/module.h"
 #include "core/tls.h"
 #include "core/virtual_memory.h"

@@ -81,8 +82,11 @@ void Module::LoadModuleToMemory() {
     aligned_base_size = Common::AlignUp(base_size, BlockAlign);

     // Map module segments (and possible TLS trampolines)
-    base_virtual_addr = VirtualMemory::memory_alloc(LoadAddress, aligned_base_size + TrampolineSize,
-                                                    VirtualMemory::MemoryMode::ExecuteReadWrite);
+    auto* memory = Core::Memory::Instance();
+    void** out_addr = reinterpret_cast<void**>(&base_virtual_addr);
+    const auto name = file.filename().string();
+    memory->MapMemory(out_addr, LoadAddress, aligned_base_size + TrampolineSize,
+                      MemoryProt::CpuReadWrite, MemoryMapFlags::Fixed, VMAType::Code, name, true);
     LoadAddress += CODE_BASE_INCR * (1 + aligned_base_size / CODE_BASE_INCR);

     // Initialize trampoline generator.

@@ -20,6 +20,7 @@
 #include "core/libraries/libs.h"
 #include "core/libraries/videoout/video_out.h"
 #include "core/linker.h"
+#include "core/memory.h"
 #include "input/controller.h"
 #include "sdl_window.h"

|
@ -30,6 +31,8 @@ int main(int argc, char* argv[]) {
|
||||||
fmt::print("Usage: {} <elf or eboot.bin path>\n", argv[0]);
|
fmt::print("Usage: {} <elf or eboot.bin path>\n", argv[0]);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
// Initialize memory system as early as possible to reserve mappings.
|
||||||
|
[[maybe_unused]] const auto* memory = Core::Memory::Instance();
|
||||||
const auto config_dir = Common::FS::GetUserPath(Common::FS::PathType::UserDir);
|
const auto config_dir = Common::FS::GetUserPath(Common::FS::PathType::UserDir);
|
||||||
Config::load(config_dir / "config.toml");
|
Config::load(config_dir / "config.toml");
|
||||||
Common::Log::Initialize();
|
Common::Log::Initialize();
|
||||||
|
|