Make sure system managed memory is in correct location on macOS.
parent 685b0bfd5e · commit b3d97dcd89
@@ -558,6 +558,9 @@ target_link_libraries(shadps4 PRIVATE magic_enum::magic_enum fmt::fmt toml11::to
 target_link_libraries(shadps4 PRIVATE Boost::headers GPUOpen::VulkanMemoryAllocator sirit Vulkan::Headers xxHash::xxhash Zydis::Zydis glslang::SPIRV glslang::glslang SDL3::SDL3)
 
 if (APPLE)
+    # Reserve system-managed memory space.
+    target_link_options(shadps4 PRIVATE -Wl,-no_pie,-no_fixup_chains,-no_huge,-pagezero_size,0x400000,-segaddr,SYSTEM_MANAGED,0x400000,-image_base,0x10000000000)
+
     # Link MoltenVK for Vulkan support
     find_library(MOLTENVK MoltenVK REQUIRED)
     target_link_libraries(shadps4 PRIVATE ${MOLTENVK})
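These link options pin the address layout the emulator needs before any guest memory is mapped: -no_pie disables PIE so the fixed addresses below are honored at load time; -pagezero_size,0x400000 shrinks the default 4 GiB __PAGEZERO segment to 4 MiB so the guest range starting at SYSTEM_MANAGED_MIN (0x400000) becomes mappable; -segaddr,SYSTEM_MANAGED,0x400000 places the zerofill segment declared in address_space.cpp (next hunk) at that address; and -image_base,0x10000000000 moves the executable itself to 1 TiB, just above USER_MAX (0xFBFFFFFFFF), keeping host code out of the guest's address range. -no_fixup_chains and -no_huge presumably opt out of newer ld defaults that conflict with a fixed, non-PIE layout. The resulting segment placement can be inspected with otool -l shadps4.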
@@ -15,6 +15,11 @@
 #include <sys/mman.h>
 #endif
 
+#ifdef __APPLE__
+// Reserve space for the system-managed address space using a zerofill section.
+asm(".zerofill SYSTEM_MANAGED,SYSTEM_MANAGED,__system_managed,0x800000000");
+#endif
+
 namespace Core {
 
 static constexpr size_t BackingSize = SCE_KERNEL_MAIN_DMEM_SIZE;
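The .zerofill directive creates a named, zero-filled (BSS-like) section of 0x800000000 bytes (32 GiB) in the SYSTEM_MANAGED segment; combined with -segaddr from the CMake hunk above, the Mach-O loader itself reserves the whole system-managed range at 0x400000 before dyld or malloc can claim anything there. A minimal sketch, not from the commit, of how such a reservation is consumed at runtime (function name and parameters are illustrative):

    #include <sys/mman.h>
    #include <sys/types.h>
    #include <cstddef>
    #include <cstdint>

    // Illustrative helper: once the zerofill segment owns
    // [0x400000, 0x400000 + 0x800000000), guest pages can be placed inside it
    // with MAP_FIXED, which atomically replaces the zero-filled pages.
    void* map_guest_pages(std::uintptr_t guest_addr, std::size_t size, int backing_fd,
                          off_t offset) {
        return mmap(reinterpret_cast<void*>(guest_addr), size, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_FIXED, backing_fd, offset);
    }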
@@ -52,18 +57,38 @@ struct AddressSpace::Impl {
         // to a reasonable amount.
         static constexpr size_t ReductionOnFail = 1_GB;
         static constexpr size_t MaxReductions = 10;
-        virtual_size = SystemSize + UserSize + ReductionOnFail;
+        system_managed_size = SystemManagedSize;
+        system_reserved_size = SystemReservedSize + ReductionOnFail;
+        user_size = UserSize;
         for (u32 i = 0; i < MaxReductions && !virtual_base; i++) {
-            virtual_size -= ReductionOnFail;
-            virtual_base = static_cast<u8*>(VirtualAlloc2(process, NULL, virtual_size,
-                                                          MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
-                                                          PAGE_NOACCESS, &param, 1));
+            system_reserved_size -= ReductionOnFail;
+            virtual_base = static_cast<u8*>(
+                VirtualAlloc2(process, NULL, system_managed_size + system_reserved_size + user_size,
+                              MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, &param, 1));
         }
         ASSERT_MSG(virtual_base, "Unable to reserve virtual address space!");
 
+        system_managed_base = virtual_base;
+        system_reserved_base = virtual_base + system_managed_size;
+        user_base = system_reserved_base + system_reserved_size;
+
+        LOG_INFO(Kernel_Vmm, "System managed virtual memory region: {} - {}",
+                 fmt::ptr(system_managed_base),
+                 fmt::ptr(system_managed_base + system_managed_size - 1));
+        LOG_INFO(Kernel_Vmm, "System reserved virtual memory region: {} - {}",
+                 fmt::ptr(system_reserved_base),
+                 fmt::ptr(system_reserved_base + system_reserved_size - 1));
+        LOG_INFO(Kernel_Vmm, "User virtual memory region: {} - {}", fmt::ptr(user_base),
+                 fmt::ptr(user_base + user_size - 1));
+
         // Initializer placeholder tracker
-        const uintptr_t virtual_addr = reinterpret_cast<uintptr_t>(virtual_base);
-        placeholders.insert({virtual_addr, virtual_addr + virtual_size});
+        const uintptr_t system_managed_addr = reinterpret_cast<uintptr_t>(system_managed_base);
+        const uintptr_t system_reserved_addr = reinterpret_cast<uintptr_t>(system_reserved_base);
+        const uintptr_t user_addr = reinterpret_cast<uintptr_t>(user_base);
+        placeholders.insert({system_managed_addr, system_managed_addr + system_managed_size});
+        placeholders.insert({system_reserved_addr, system_reserved_addr + system_reserved_size});
+        placeholders.insert({user_addr, user_addr + user_size});
 
         // Allocate backing file that represents the total physical memory.
         backing_handle =
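For scale, the sizes summed in the VirtualAlloc2 call come from address_space.h later in this diff:

    #include <cstddef>

    // Constants as declared in address_space.h below; comments give rounded sizes.
    constexpr std::size_t SystemManagedSize = 0x07FFFFBFFFULL - 0x00000400000ULL + 1; // ~32 GiB
    constexpr std::size_t SystemReservedSize = 0xFFFFFFFFFULL - 0x800000000ULL + 1;   // 32 GiB
    constexpr std::size_t UserSize = 1ULL << 39;                                      // 512 GiB

So the first attempt reserves roughly 576 GiB of contiguous address space, and each failed attempt gives back 1 GiB of the system-reserved region, up to ten times.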
@@ -215,7 +240,12 @@ struct AddressSpace::Impl {
     HANDLE backing_handle{};
     u8* backing_base{};
     u8* virtual_base{};
-    size_t virtual_size{};
+    u8* system_managed_base{};
+    size_t system_managed_size{};
+    u8* system_reserved_base{};
+    size_t system_reserved_size{};
+    u8* user_base{};
+    size_t user_size{};
     boost::icl::separate_interval_set<uintptr_t> placeholders;
 };
 #else
@@ -244,29 +274,53 @@ enum PosixPageProtection {
 struct AddressSpace::Impl {
     Impl() {
         // Allocate virtual address placeholder for our address space.
-        void* hint_address = reinterpret_cast<void*>(SYSTEM_MANAGED_MIN);
+        system_managed_size = SystemManagedSize;
+        system_reserved_size = SystemReservedSize;
+        user_size = UserSize;
 #ifdef __APPLE__
-        constexpr int virtual_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
+        system_managed_base = reinterpret_cast<u8*>(
+            mmap(reinterpret_cast<void*>(SYSTEM_MANAGED_MIN), system_managed_size,
+                 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+                 -1, 0));
+        // Cannot guarantee enough space for these areas at the desired addresses, so not MAP_FIXED.
+        system_reserved_base = reinterpret_cast<u8*>(
+            mmap(reinterpret_cast<void*>(SYSTEM_RESERVED_MIN), system_reserved_size,
+                 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0));
+        user_base = reinterpret_cast<u8*>(mmap(reinterpret_cast<void*>(USER_MIN), user_size,
+                                               PROT_READ | PROT_WRITE,
+                                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0));
 #else
-        constexpr int virtual_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED;
+        const auto virtual_size = system_managed_size + system_reserved_size + user_size;
+        const auto virtual_base = reinterpret_cast<u8*>(
+            mmap(reinterpret_cast<void*>(SYSTEM_MANAGED_MIN), virtual_size, PROT_READ | PROT_WRITE,
+                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, -1, 0));
+        system_managed_base = virtual_base;
+        system_reserved_base = virtual_base + (SYSTEM_RESERVED_MIN - SYSTEM_MANAGED_MIN);
+        user_base = virtual_base + (USER_MIN - SYSTEM_MANAGED_MIN);
 #endif
-        virtual_size = SystemSize + UserSize;
-        virtual_base = reinterpret_cast<u8*>(
-            mmap(hint_address, virtual_size, PROT_READ | PROT_WRITE, virtual_flags, -1, 0));
-        if (virtual_base == MAP_FAILED) {
+        if (system_managed_base == MAP_FAILED || system_reserved_base == MAP_FAILED ||
+            user_base == MAP_FAILED) {
             LOG_CRITICAL(Kernel_Vmm, "mmap failed: {}", strerror(errno));
             throw std::bad_alloc{};
         }
 
-#ifndef __APPLE__
-        madvise(virtual_base, virtual_size, MADV_HUGEPAGE);
+        LOG_INFO(Kernel_Vmm, "System managed virtual memory region: {} - {}",
+                 fmt::ptr(system_managed_base),
+                 fmt::ptr(system_managed_base + system_managed_size - 1));
+        LOG_INFO(Kernel_Vmm, "System reserved virtual memory region: {} - {}",
+                 fmt::ptr(system_reserved_base),
+                 fmt::ptr(system_reserved_base + system_reserved_size - 1));
+        LOG_INFO(Kernel_Vmm, "User virtual memory region: {} - {}", fmt::ptr(user_base),
+                 fmt::ptr(user_base + user_size - 1));
 
-        backing_fd = memfd_create("BackingDmem", 0);
-        if (backing_fd < 0) {
-            LOG_CRITICAL(Kernel_Vmm, "memfd_create failed: {}", strerror(errno));
-            throw std::bad_alloc{};
-        }
-#else
+        const VAddr system_managed_addr = reinterpret_cast<VAddr>(system_managed_base);
+        const VAddr system_reserved_addr = reinterpret_cast<VAddr>(system_reserved_base);
+        const VAddr user_addr = reinterpret_cast<VAddr>(user_base);
+        m_free_regions.insert({system_managed_addr, system_managed_addr + system_managed_size});
+        m_free_regions.insert({system_reserved_addr, system_reserved_addr + system_reserved_size});
+        m_free_regions.insert({user_addr, user_addr + user_size});
 
+#ifdef __APPLE__
         const auto shm_path = fmt::format("/BackingDmem{}", getpid());
         backing_fd = shm_open(shm_path.c_str(), O_RDWR | O_CREAT | O_EXCL, 0600);
         if (backing_fd < 0) {
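The Apple path makes three separate reservations: the system-managed range can use MAP_FIXED because the linker-reserved zerofill segment guarantees nothing else lives there, while the system-reserved and user ranges are only requested as hints and the returned addresses become the region bases. A sketch of the hint semantics this relies on (illustrative, assuming standard POSIX mmap behavior):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    // A plain hint may be ignored by the kernel, so the returned address (not
    // the hint) must become the region base. MAP_FIXED would force the address
    // but silently replace anything already mapped there, which is only safe
    // for the range the linker reserved for us.
    void* reserve_near(std::uintptr_t hint, std::size_t size) {
        void* base = mmap(reinterpret_cast<void*>(hint), size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        return base; // may differ from hint; MAP_FAILED on error
    }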
@@ -274,6 +328,17 @@ struct AddressSpace::Impl {
             throw std::bad_alloc{};
         }
         shm_unlink(shm_path.c_str());
+#else
+        madvise(virtual_base, virtual_size, MADV_HUGEPAGE);
+
+        const VAddr start_addr = reinterpret_cast<VAddr>(virtual_base);
+        m_free_regions.insert({start_addr, start_addr + virtual_size});
+
+        backing_fd = memfd_create("BackingDmem", 0);
+        if (backing_fd < 0) {
+            LOG_CRITICAL(Kernel_Vmm, "memfd_create failed: {}", strerror(errno));
+            throw std::bad_alloc{};
+        }
 #endif
 
         // Defined to extend the file with zeros
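Backing memory also diverges per platform: macOS has no memfd_create, so a POSIX shared-memory object is created and immediately unlinked, leaving an anonymous object that lives exactly as long as its descriptor. A minimal sketch of that lifecycle, under an assumed object name:

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstddef>

    // The name is unlinked right away, so the object dies with the last
    // descriptor, mirroring memfd_create() semantics on Linux.
    int create_backing(std::size_t size) {
        int fd = shm_open("/BackingDmemSketch", O_RDWR | O_CREAT | O_EXCL, 0600);
        if (fd < 0) {
            return -1;
        }
        shm_unlink("/BackingDmemSketch");        // drop the name; fd keeps it alive
        ftruncate(fd, static_cast<off_t>(size)); // defined to extend the file with zeros
        return fd;
    }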
@@ -291,9 +356,6 @@ struct AddressSpace::Impl {
             LOG_CRITICAL(Kernel_Vmm, "mmap failed: {}", strerror(errno));
             throw std::bad_alloc{};
         }
-
-        const VAddr start_addr = reinterpret_cast<VAddr>(virtual_base);
-        m_free_regions.insert({start_addr, start_addr + virtual_size});
     }
 
     void* Map(VAddr virtual_addr, PAddr phys_addr, size_t size, PosixPageProtection prot,
@@ -346,16 +408,24 @@ struct AddressSpace::Impl {
 
     int backing_fd;
     u8* backing_base{};
-    u8* virtual_base{};
-    size_t virtual_size{};
+    u8* system_managed_base{};
+    size_t system_managed_size{};
+    u8* system_reserved_base{};
+    size_t system_reserved_size{};
+    u8* user_base{};
+    size_t user_size{};
     boost::icl::interval_set<VAddr> m_free_regions;
 };
 #endif
 
 AddressSpace::AddressSpace() : impl{std::make_unique<Impl>()} {
-    virtual_base = impl->virtual_base;
     backing_base = impl->backing_base;
-    virtual_size = impl->virtual_size;
+    system_managed_base = impl->system_managed_base;
+    system_managed_size = impl->system_managed_size;
+    system_reserved_base = impl->system_reserved_base;
+    system_reserved_size = impl->system_reserved_size;
+    user_base = impl->user_base;
+    user_size = impl->user_size;
 }
 
 AddressSpace::~AddressSpace() = default;
@@ -18,18 +18,21 @@ enum class MemoryPermission : u32 {
 };
 DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission)
 
-constexpr VAddr SYSTEM_RESERVED = 0x800000000ULL;
 constexpr VAddr CODE_BASE_OFFSET = 0x100000000ULL;
 
 constexpr VAddr SYSTEM_MANAGED_MIN = 0x00000400000ULL;
 constexpr VAddr SYSTEM_MANAGED_MAX = 0x07FFFFBFFFULL;
+constexpr VAddr SYSTEM_RESERVED_MIN = 0x800000000ULL;
+constexpr VAddr SYSTEM_RESERVED_MAX = 0xFFFFFFFFFULL;
 constexpr VAddr USER_MIN = 0x1000000000ULL;
 constexpr VAddr USER_MAX = 0xFBFFFFFFFFULL;
 
+static constexpr size_t SystemManagedSize = SYSTEM_MANAGED_MAX - SYSTEM_MANAGED_MIN + 1;
+static constexpr size_t SystemReservedSize = SYSTEM_RESERVED_MAX - SYSTEM_RESERVED_MIN + 1;
 // User area size is normally larger than this. However games are unlikely to map to high
 // regions of that area, so by default we allocate a smaller virtual address space (about 1/4th)
 // to save space on page tables.
 static constexpr size_t UserSize = 1ULL << 39;
-static constexpr size_t SystemSize = USER_MIN - SYSTEM_MANAGED_MIN;
 
 /**
  * Represents the user virtual address space backed by a dmem memory block
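Taken together, the constants give the guest this layout (sizes computed from the bounds above):

    Region           Range                          Size
    SYSTEM_MANAGED   0x0000400000 - 0x07FFFFBFFF    ~32 GiB
    SYSTEM_RESERVED  0x0800000000 - 0x0FFFFFFFFF    32 GiB
    USER             0x1000000000 - 0xFBFFFFFFFF    944 GiB addressable; 512 GiB (1ULL << 39) reserved by default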
@@ -39,14 +42,34 @@ public:
     explicit AddressSpace();
     ~AddressSpace();
 
-    [[nodiscard]] VAddr VirtualBase() noexcept {
-        return reinterpret_cast<VAddr>(virtual_base);
+    [[nodiscard]] VAddr SystemManagedVirtualBase() noexcept {
+        return reinterpret_cast<VAddr>(system_managed_base);
     }
-    [[nodiscard]] const u8* VirtualBase() const noexcept {
-        return virtual_base;
+    [[nodiscard]] const u8* SystemManagedVirtualBase() const noexcept {
+        return system_managed_base;
     }
-    [[nodiscard]] size_t VirtualSize() const noexcept {
-        return virtual_size;
+    [[nodiscard]] size_t SystemManagedVirtualSize() const noexcept {
+        return system_managed_size;
+    }
+
+    [[nodiscard]] VAddr SystemReservedVirtualBase() noexcept {
+        return reinterpret_cast<VAddr>(system_reserved_base);
+    }
+    [[nodiscard]] const u8* SystemReservedVirtualBase() const noexcept {
+        return system_reserved_base;
+    }
+    [[nodiscard]] size_t SystemReservedVirtualSize() const noexcept {
+        return system_reserved_size;
+    }
+
+    [[nodiscard]] VAddr UserVirtualBase() noexcept {
+        return reinterpret_cast<VAddr>(user_base);
+    }
+    [[nodiscard]] const u8* UserVirtualBase() const noexcept {
+        return user_base;
+    }
+    [[nodiscard]] size_t UserVirtualSize() const noexcept {
+        return user_size;
     }
 
     /**
@@ -74,8 +97,12 @@ private:
     struct Impl;
     std::unique_ptr<Impl> impl;
    u8* backing_base{};
-    u8* virtual_base{};
-    size_t virtual_size{};
+    u8* system_managed_base{};
+    size_t system_managed_size{};
+    u8* system_reserved_base{};
+    size_t system_reserved_size{};
+    u8* user_base{};
+    size_t user_size{};
 };
 
 } // namespace Core
@@ -17,12 +17,21 @@ MemoryManager::MemoryManager() {
     dmem_map.emplace(0, DirectMemoryArea{0, SCE_KERNEL_MAIN_DMEM_SIZE});
 
     // Insert a virtual memory area that covers the entire area we manage.
-    const VAddr virtual_base = impl.VirtualBase();
-    const size_t virtual_size = impl.VirtualSize();
-    vma_map.emplace(virtual_base, VirtualMemoryArea{virtual_base, virtual_size});
+    const VAddr system_managed_base = impl.SystemManagedVirtualBase();
+    const size_t system_managed_size = impl.SystemManagedVirtualSize();
+    const VAddr system_reserved_base = impl.SystemReservedVirtualBase();
+    const size_t system_reserved_size = impl.SystemReservedVirtualSize();
+    const VAddr user_base = impl.UserVirtualBase();
+    const size_t user_size = impl.UserVirtualSize();
+    vma_map.emplace(system_managed_base,
+                    VirtualMemoryArea{system_managed_base, system_managed_size});
+    vma_map.emplace(system_reserved_base,
+                    VirtualMemoryArea{system_reserved_base, system_reserved_size});
+    vma_map.emplace(user_base, VirtualMemoryArea{user_base, user_size});
 
     // Log initialization.
-    LOG_INFO(Kernel_Vmm, "Usable memory address space {}_GB", virtual_size >> 30);
+    LOG_INFO(Kernel_Vmm, "Usable memory address space: {}_GB",
+             (system_managed_size + system_reserved_size + user_size) >> 30);
 }
 
 MemoryManager::~MemoryManager() = default;
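With the constants from address_space.h, the new log line reports (system_managed_size + system_reserved_size + user_size) >> 30, roughly 32 + 32 + 512 = 576 GB, where the old message reported only the single contiguous virtual_size.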
@@ -112,7 +121,7 @@ int MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, size_t size, M
 
     // When virtual addr is zero, force it to virtual_base. The guest cannot pass Fixed
     // flag so we will take the branch that searches for free (or reserved) mappings.
-    virtual_addr = (virtual_addr == 0) ? impl.VirtualBase() : virtual_addr;
+    virtual_addr = (virtual_addr == 0) ? impl.SystemManagedVirtualBase() : virtual_addr;
     alignment = alignment > 0 ? alignment : 16_KB;
 
     VAddr mapped_addr = alignment > 0 ? Common::AlignUp(virtual_addr, alignment) : virtual_addr;
@@ -166,7 +175,7 @@ int MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, size_t size, MemoryProt prot,
                            MemoryMapFlags flags, uintptr_t fd, size_t offset) {
     if (virtual_addr == 0) {
-        virtual_addr = impl.VirtualBase();
+        virtual_addr = impl.SystemManagedVirtualBase();
     } else {
         LOG_INFO(Kernel_Vmm, "Virtual addr {:#x} with size {:#x}", virtual_addr, size);
     }
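In both call sites a guest address of zero now means "search from the lowest guest region", the system-managed base. A hypothetical call shape (the flags and type values beyond those visible in this diff are assumptions, not the project's confirmed API):

    // Passing virtual_addr == 0 lets the manager pick a free mapping starting
    // at the system-managed base; VMAType::Flexible is an assumed type value.
    void* out_addr{};
    memory->MapMemory(&out_addr, /*virtual_addr=*/0, /*size=*/16_KB,
                      MemoryProt::CpuReadWrite, MemoryMapFlags{}, VMAType::Flexible);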
@@ -132,9 +132,10 @@ public:
         return total_flexible_size - flexible_usage;
     }
 
-    /// Returns the offset of the mapped virtual memory base from where it usually would be mapped.
-    [[nodiscard]] u64 VirtualOffset() noexcept {
-        return impl.VirtualBase() - SYSTEM_MANAGED_MIN;
+    /// Returns the offset of the mapped virtual system managed memory base from where it usually
+    /// would be mapped.
+    [[nodiscard]] VAddr SystemReservedVirtualBase() noexcept {
+        return impl.SystemReservedVirtualBase();
     }
 
     PAddr Allocate(PAddr search_start, PAddr search_end, size_t size, u64 alignment,
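The VirtualOffset() helper (system-managed base minus SYSTEM_MANAGED_MIN) is dropped in favor of exposing the system-reserved base directly; its only caller in module.cpp (next hunks) switches to SystemReservedVirtualBase() plus a relative LoadOffset. The carried-over doc comment still describes an offset even though the accessor now returns a base.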
@@ -16,7 +16,7 @@ namespace Core {
 
 using EntryFunc = PS4_SYSV_ABI int (*)(size_t args, const void* argp, void* param);
 
-static u64 LoadAddress = SYSTEM_RESERVED + CODE_BASE_OFFSET;
+static u64 LoadOffset = CODE_BASE_OFFSET;
 static constexpr u64 CODE_BASE_INCR = 0x010000000u;
 
 static u64 GetAlignedSize(const elf_program_header& phdr) {
@@ -84,10 +84,10 @@ void Module::LoadModuleToMemory(u32& max_tls_index) {
 
     // Map module segments (and possible TLS trampolines)
     void** out_addr = reinterpret_cast<void**>(&base_virtual_addr);
-    memory->MapMemory(out_addr, memory->VirtualOffset() + LoadAddress,
+    memory->MapMemory(out_addr, memory->SystemReservedVirtualBase() + LoadOffset,
                       aligned_base_size + TrampolineSize, MemoryProt::CpuReadWrite,
                       MemoryMapFlags::Fixed, VMAType::Code, name, true);
-    LoadAddress += CODE_BASE_INCR * (1 + aligned_base_size / CODE_BASE_INCR);
+    LoadOffset += CODE_BASE_INCR * (1 + aligned_base_size / CODE_BASE_INCR);
 
     // Initialize trampoline generator.
     void* trampoline_addr = std::bit_cast<void*>(base_virtual_addr + aligned_base_size);
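Module placement is now expressed as an offset from the system-reserved base rather than an absolute SYSTEM_RESERVED address, so the first module still lands at base + CODE_BASE_OFFSET (0x100000000) and later modules advance in CODE_BASE_INCR steps. A worked example with hypothetical module sizes:

    // CODE_BASE_INCR = 0x10000000
    //   module A: aligned_base_size = 0x02340000
    //     LoadOffset += 0x10000000 * (1 + 0x02340000 / 0x10000000) = 0x10000000
    //   module B: aligned_base_size = 0x1A000000
    //     LoadOffset += 0x10000000 * (1 + 0x1A000000 / 0x10000000) = 0x20000000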