Merge 258e5f0ad9 into ad8373095a

commit 5cf1e678ea
@@ -1 +1 @@
-Subproject commit d59c84d388c805022e2bddea08aa41cbe7e43e55
+Subproject commit 4422273e8464d20b9d8dd403cbfc3049e09a5f23
@@ -1 +1 @@
-Subproject commit cc0bee4fd46ea1f5db147d63ea545208cc9e8405
+Subproject commit 26d403e46102269e5314199cd313e82e4e17d99a
@@ -1 +1 @@
-Subproject commit aabb091ae37068498751fd58202a9854408ecb0e
+Subproject commit a78d6bac84bc9f7c1ec3a797a84c4e9d1381a492
@@ -15,6 +15,7 @@
 #include <fcntl.h>
 #include <sys/mman.h>
 #endif
+#include "libraries/error_codes.h"

 #ifdef __APPLE__
 // Reserve space for the system address space using a zerofill section.
@@ -231,27 +232,36 @@ struct AddressSpace::Impl {

     void Protect(VAddr virtual_addr, size_t size, bool read, bool write, bool execute) {
         DWORD new_flags{};
-        if (read && write) {
+        if (read && write && execute) {
+            new_flags = PAGE_EXECUTE_READWRITE;
+        } else if (read && write) {
             new_flags = PAGE_READWRITE;
         } else if (read && !write) {
             new_flags = PAGE_READONLY;
-        } else if (!read && !write) {
+        } else if (execute && !read && !write) {
+            new_flags = PAGE_EXECUTE;
+        } else if (!read && !write && !execute) {
             new_flags = PAGE_NOACCESS;
         } else {
-            UNIMPLEMENTED_MSG("Protection flag combination read={} write={}", read, write);
+            LOG_CRITICAL(Common_Memory,
+                         "Unsupported protection flag combination for address {:#x}, size {}",
+                         virtual_addr, size);
+            return;
         }

-        const VAddr virtual_end = virtual_addr + size;
-        auto [it, end] = placeholders.equal_range({virtual_addr, virtual_end});
-        while (it != end) {
-            const size_t offset = std::max(it->lower(), virtual_addr);
-            const size_t protect_length = std::min(it->upper(), virtual_end) - offset;
-            DWORD old_flags{};
-            if (!VirtualProtect(virtual_base + offset, protect_length, new_flags, &old_flags)) {
-                LOG_CRITICAL(Common_Memory, "Failed to change virtual memory protect rules");
-            }
-            ++it;
-        }
+        DWORD old_flags{};
+        bool success =
+            VirtualProtect(reinterpret_cast<void*>(virtual_addr), size, new_flags, &old_flags);
+
+        if (!success) {
+            LOG_ERROR(Common_Memory,
+                      "Failed to change virtual memory protection for address {:#x}, size {}",
+                      virtual_addr, size);
+        }
+
+        // Use assert to ensure success in debug builds
+        assert(success && "Failed to change virtual memory protection");
     }

     HANDLE process{};
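One gap worth noting in the mapping above: read together with execute but without write falls through to the unsupported-combination branch, even though Windows has a dedicated protection constant for it. A minimal sketch of an exhaustive mapping (reviewer suggestion only, not part of this diff; the helper name is made up):

```cpp
// Sketch: exhaustive (read, write, execute) -> DWORD mapping for VirtualProtect.
// The PAGE_* constants are standard Win32; ToWindowsProt is a hypothetical name.
static DWORD ToWindowsProt(bool read, bool write, bool execute) {
    if (write) {
        // Windows has no write-only protection; writable pages are always readable.
        return execute ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
    }
    if (read) {
        return execute ? PAGE_EXECUTE_READ : PAGE_READONLY;
    }
    return execute ? PAGE_EXECUTE : PAGE_NOACCESS;
}
```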
@@ -493,7 +503,13 @@ void AddressSpace::Unmap(VAddr virtual_addr, size_t size, VAddr start_in_vma, VA
 }

 void AddressSpace::Protect(VAddr virtual_addr, size_t size, MemoryPermission perms) {
-    return impl->Protect(virtual_addr, size, true, true, true);
+    const bool read = True(perms & MemoryPermission::Read);
+
+    const bool write = True(perms & MemoryPermission::Write);
+
+    const bool execute = True(perms & MemoryPermission::Execute);
+
+    return impl->Protect(virtual_addr, size, read, write, execute);
 }

 } // namespace Core
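The wrapper above only decodes and forwards the permission flags; this change only touches the Windows (VirtualProtect) path of Impl::Protect. For comparison, a POSIX counterpart would be a thin wrapper over mprotect. This is a sketch under the assumption that the address and size are page-aligned, not code from this change:

```cpp
// Sketch: hypothetical POSIX implementation of Impl::Protect (not part of this diff).
// <sys/mman.h> is already included by this file on non-Windows builds.
void Protect(VAddr virtual_addr, size_t size, bool read, bool write, bool execute) {
    int flags = PROT_NONE;
    if (read) {
        flags |= PROT_READ;
    }
    if (write) {
        flags |= PROT_WRITE;
    }
    if (execute) {
        flags |= PROT_EXEC;
    }
    if (mprotect(reinterpret_cast<void*>(virtual_addr), size, flags) != 0) {
        LOG_ERROR(Common_Memory, "mprotect failed for address {:#x}, size {}", virtual_addr, size);
    }
}
```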
@@ -454,6 +454,8 @@ void LibKernel_Register(Core::Loader::SymbolsResolver* sym) {
     LIB_FUNCTION("F6e0kwo4cnk", "libkernel", 1, "libkernel", 1, 1, sceKernelTriggerUserEvent);
     LIB_FUNCTION("LJDwdSNTnDg", "libkernel", 1, "libkernel", 1, 1, sceKernelDeleteUserEvent);
     LIB_FUNCTION("mJ7aghmgvfc", "libkernel", 1, "libkernel", 1, 1, sceKernelGetEventId);
+    LIB_FUNCTION("9bfdLIyuwCY", "libkernel", 1, "libkernel", 1, 1, sceKernelMTypeProtect);
+    LIB_FUNCTION("vSMAm3cxYTY", "libkernel", 1, "libkernel", 1, 1, sceKernelMProtect);
     LIB_FUNCTION("23CPPI1tyBY", "libkernel", 1, "libkernel", 1, 1, sceKernelGetEventFilter);

     // misc
@@ -6,6 +6,7 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/singleton.h"
+#include "core/address_space.h"
 #include "core/libraries/error_codes.h"
 #include "core/libraries/kernel/memory_management.h"
 #include "core/linker.h"
@@ -216,6 +217,19 @@ int PS4_SYSV_ABI sceKernelQueryMemoryProtection(void* addr, void** start, void**
     return memory->QueryProtection(std::bit_cast<VAddr>(addr), start, end, prot);
 }

+int PS4_SYSV_ABI sceKernelMProtect(const void* addr, size_t size, int prot) {
+    Core::MemoryManager* memory_manager = Core::Memory::Instance();
+    Core::MemoryProt protection_flags = static_cast<Core::MemoryProt>(prot);
+    return memory_manager->Protect(std::bit_cast<VAddr>(addr), size, protection_flags);
+}
+
+int PS4_SYSV_ABI sceKernelMTypeProtect(const void* addr, size_t size, int mtype, int prot) {
+    Core::MemoryManager* memory_manager = Core::Memory::Instance();
+    Core::MemoryProt protection_flags = static_cast<Core::MemoryProt>(prot);
+    return memory_manager->MTypeProtect(std::bit_cast<VAddr>(addr), size,
+                                        static_cast<Core::VMAType>(mtype), protection_flags);
+}
+
 int PS4_SYSV_ABI sceKernelDirectMemoryQuery(u64 offset, int flags, OrbisQueryInfo* query_info,
                                             size_t infoSize) {
     LOG_WARNING(Kernel_Vmm, "called offset = {:#x}, flags = {:#x}", offset, flags);
@@ -282,6 +296,29 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, int numEn

             if (result == 0)
                 processed++;
+        } else if (entries[i].operation == MemoryOpTypes::ORBIS_KERNEL_MAP_OP_PROTECT) {
+            result = sceKernelMProtect(entries[i].start, entries[i].length, entries[i].protection);
+            LOG_INFO(Kernel_Vmm, "BatchMap: entry = {}, operation = {}, len = {:#x}, result = {}",
+                     i, entries[i].operation, entries[i].length, result);
+            if (result != ORBIS_OK) {
+                LOG_ERROR(Kernel_Vmm, "BatchMap: MProtect failed on entry {} with result {}", i,
+                          result);
+            }
+            if (result == 0) {
+                processed++;
+            }
+        } else if (entries[i].operation == MemoryOpTypes::ORBIS_KERNEL_MAP_OP_TYPE_PROTECT) {
+            result = sceKernelMTypeProtect(entries[i].start, entries[i].length, entries[i].type,
+                                           entries[i].protection);
+            LOG_INFO(Kernel_Vmm, "BatchMap: entry = {}, operation = {}, len = {:#x}, result = {}",
+                     i, entries[i].operation, entries[i].length, result);
+            if (result != ORBIS_OK) {
+                LOG_ERROR(Kernel_Vmm, "BatchMap: MTypeProtect failed on entry {} with result {}",
+                          i, result);
+            }
+            if (result == 0) {
+                processed++;
+            }
         } else if (entries[i].operation == MemoryOpTypes::ORBIS_KERNEL_MAP_OP_MAP_FLEXIBLE) {
             result = sceKernelMapNamedFlexibleMemory(&entries[i].start, entries[i].length,
                                                      entries[i].protection, flags, "");
@@ -296,6 +333,8 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, int numEn
             UNREACHABLE_MSG("called: Unimplemented Operation = {}", entries[i].operation);
         }
     }
+    LOG_INFO(Kernel_Vmm, "sceKernelBatchMap2 finished: processed = {}, result = {}", processed,
+             result);
     if (numEntriesOut != NULL) { // can be zero. do not return an error code.
         *numEntriesOut = processed;
     }
@@ -95,6 +95,10 @@ s32 PS4_SYSV_ABI sceKernelMapFlexibleMemory(void** addr_in_out, std::size_t len,
                                             int flags);
 int PS4_SYSV_ABI sceKernelQueryMemoryProtection(void* addr, void** start, void** end, u32* prot);
+
+int PS4_SYSV_ABI sceKernelMProtect(const void* addr, size_t size, int prot);
+
+int PS4_SYSV_ABI sceKernelMTypeProtect(const void* addr, size_t size, int mtype, int prot);
 int PS4_SYSV_ABI sceKernelDirectMemoryQuery(u64 offset, int flags, OrbisQueryInfo* query_info,
                                             size_t infoSize);
 s32 PS4_SYSV_ABI sceKernelAvailableFlexibleMemorySize(size_t* sizeOut);

@@ -7,6 +7,7 @@
 #include "core/libraries/error_codes.h"
 #include "core/libraries/kernel/memory_management.h"
 #include "core/memory.h"
+#include "video_core/renderer_vulkan/vk_instance.h"
 #include "video_core/renderer_vulkan/vk_rasterizer.h"

 namespace Core {
@@ -269,6 +270,118 @@ int MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* pr
     return ORBIS_OK;
 }

+int MemoryManager::Protect(VAddr addr, size_t size, MemoryProt prot) {
+    std::scoped_lock lk{mutex};
+
+    // Find the virtual memory area that contains the specified address range.
+    auto it = FindVMA(addr);
+    if (it == vma_map.end() || !it->second.Contains(addr, size)) {
+        LOG_ERROR(Core, "Address range not mapped");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    VirtualMemoryArea& vma = it->second;
+    if (vma.type == VMAType::Free) {
+        LOG_ERROR(Core, "Cannot change protection on free memory region");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    // Validate protection flags
+    constexpr static MemoryProt valid_flags = MemoryProt::NoAccess | MemoryProt::CpuRead |
+                                              MemoryProt::CpuReadWrite | MemoryProt::GpuRead |
+                                              MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;
+
+    MemoryProt invalid_flags = prot & ~valid_flags;
+    if (u32(invalid_flags) != 0 && u32(invalid_flags) != u32(MemoryProt::NoAccess)) {
+        LOG_ERROR(Core, "Invalid protection flags: prot = {:#x}, invalid flags = {:#x}", u32(prot),
+                  u32(invalid_flags));
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    // Change protection
+    vma.prot = prot;
+
+    // Set permissions
+    Core::MemoryPermission perms{};
+
+    if (True(prot & MemoryProt::CpuRead)) {
+        perms |= Core::MemoryPermission::Read;
+    }
+    if (True(prot & MemoryProt::CpuReadWrite)) {
+        perms |= Core::MemoryPermission::ReadWrite;
+    }
+    if (True(prot & MemoryProt::GpuRead)) {
+        perms |= Core::MemoryPermission::Read;
+    }
+    if (True(prot & MemoryProt::GpuWrite)) {
+        perms |= Core::MemoryPermission::Write;
+    }
+    if (True(prot & MemoryProt::GpuReadWrite)) {
+        perms |= Core::MemoryPermission::ReadWrite;
+    }
+
+    impl.Protect(addr, size, perms);
+
+    return ORBIS_OK;
+}
+
+int MemoryManager::MTypeProtect(VAddr addr, size_t size, VMAType mtype, MemoryProt prot) {
+    std::scoped_lock lk{mutex};
+
+    // Find the virtual memory area that contains the specified address range.
+    auto it = FindVMA(addr);
+    if (it == vma_map.end() || !it->second.Contains(addr, size)) {
+        LOG_ERROR(Core, "Address range not mapped");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    VirtualMemoryArea& vma = it->second;
+
+    if (vma.type == VMAType::Free) {
+        LOG_ERROR(Core, "Cannot change protection on free memory region");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    // Validate protection flags
+    constexpr static MemoryProt valid_flags = MemoryProt::NoAccess | MemoryProt::CpuRead |
+                                              MemoryProt::CpuReadWrite | MemoryProt::GpuRead |
+                                              MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;
+
+    MemoryProt invalid_flags = prot & ~valid_flags;
+    if (u32(invalid_flags) != 0 && u32(invalid_flags) != u32(MemoryProt::NoAccess)) {
+        LOG_ERROR(Core, "Invalid protection flags: prot = {:#x}, invalid flags = {:#x}", u32(prot),
+                  u32(invalid_flags));
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    // Change type and protection
+    vma.type = mtype;
+    vma.prot = prot;
+
+    // Set permissions
+    Core::MemoryPermission perms{};
+
+    if (True(prot & MemoryProt::CpuRead)) {
+        perms |= Core::MemoryPermission::Read;
+    }
+    if (True(prot & MemoryProt::CpuReadWrite)) {
+        perms |= Core::MemoryPermission::ReadWrite;
+    }
+    if (True(prot & MemoryProt::GpuRead)) {
+        perms |= Core::MemoryPermission::Read;
+    }
+    if (True(prot & MemoryProt::GpuWrite)) {
+        perms |= Core::MemoryPermission::Write;
+    }
+    if (True(prot & MemoryProt::GpuReadWrite)) {
+        perms |= Core::MemoryPermission::ReadWrite;
+    }
+
+    impl.Protect(addr, size, perms);
+
+    return ORBIS_OK;
+}
+
 int MemoryManager::VirtualQuery(VAddr addr, int flags,
                                 ::Libraries::Kernel::OrbisVirtualQueryInfo* info) {
     std::scoped_lock lk{mutex};
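Protect and MTypeProtect above duplicate the MemoryProt-to-MemoryPermission translation verbatim. A follow-up could hoist it into a shared helper along these lines (the helper name is hypothetical; the logic is copied from the two functions above):

```cpp
// Sketch: shared translation helper for the duplicated blocks above (not in this diff).
static Core::MemoryPermission ToMemoryPermission(MemoryProt prot) {
    Core::MemoryPermission perms{};
    if (True(prot & MemoryProt::CpuRead)) {
        perms |= Core::MemoryPermission::Read;
    }
    if (True(prot & MemoryProt::CpuReadWrite)) {
        perms |= Core::MemoryPermission::ReadWrite;
    }
    if (True(prot & MemoryProt::GpuRead)) {
        perms |= Core::MemoryPermission::Read;
    }
    if (True(prot & MemoryProt::GpuWrite)) {
        perms |= Core::MemoryPermission::Write;
    }
    if (True(prot & MemoryProt::GpuReadWrite)) {
        perms |= Core::MemoryPermission::ReadWrite;
    }
    return perms;
}
```

Both functions would then reduce to validation plus `impl.Protect(addr, size, ToMemoryPermission(prot));`.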
@@ -30,6 +30,7 @@ enum class MemoryProt : u32 {
     GpuWrite = 32,
     GpuReadWrite = 38,
 };
+DECLARE_ENUM_FLAG_OPERATORS(MemoryProt)

 enum class MemoryMapFlags : u32 {
     NoFlags = 0,
@@ -161,9 +162,14 @@ public:

     int QueryProtection(VAddr addr, void** start, void** end, u32* prot);

-    int VirtualQuery(VAddr addr, int flags, Libraries::Kernel::OrbisVirtualQueryInfo* info);
+    int Protect(VAddr addr, size_t size, MemoryProt prot);

-    int DirectMemoryQuery(PAddr addr, bool find_next, Libraries::Kernel::OrbisQueryInfo* out_info);
+    int MTypeProtect(VAddr addr, size_t size, VMAType mtype, MemoryProt prot);
+
+    int VirtualQuery(VAddr addr, int flags, ::Libraries::Kernel::OrbisVirtualQueryInfo* info);
+
+    int DirectMemoryQuery(PAddr addr, bool find_next,
+                          ::Libraries::Kernel::OrbisQueryInfo* out_info);

     int DirectQueryAvailable(PAddr search_start, PAddr search_end, size_t alignment,
                              PAddr* phys_addr_out, size_t* size_out);