Address review comments around memory and patches.

Parent: b3d97dcd89
Commit: f943ce2710

@@ -58,20 +58,28 @@ struct AddressSpace::Impl {
         static constexpr size_t ReductionOnFail = 1_GB;
         static constexpr size_t MaxReductions = 10;
 
-        system_managed_size = SystemManagedSize;
-        system_reserved_size = SystemReservedSize + ReductionOnFail;
-        user_size = UserSize;
-        for (u32 i = 0; i < MaxReductions && !virtual_base; i++) {
-            system_reserved_size -= ReductionOnFail;
-            virtual_base = static_cast<u8*>(
-                VirtualAlloc2(process, NULL, system_managed_size + system_reserved_size + user_size,
+        size_t reduction = 0;
+        for (u32 i = 0; i < MaxReductions; i++) {
+            virtual_base = static_cast<u8*>(VirtualAlloc2(
+                process, NULL, SystemManagedSize + SystemReservedSize + UserSize - reduction,
                 MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, &param, 1));
+            if (virtual_base) {
+                break;
+            }
+            reduction += ReductionOnFail;
         }
         ASSERT_MSG(virtual_base, "Unable to reserve virtual address space!");
 
         system_managed_base = virtual_base;
-        system_reserved_base = virtual_base + system_managed_size;
-        user_base = system_reserved_base + system_reserved_size;
+        system_managed_size = SystemManagedSize - reduction;
+        system_reserved_base =
+            virtual_base + (SYSTEM_RESERVED_MIN - SYSTEM_MANAGED_MIN) - reduction;
+        system_reserved_size = SystemReservedSize;
+        user_base = virtual_base + (USER_MIN - SYSTEM_MANAGED_MIN) - reduction;
+        user_size = UserSize;
+
+        ASSERT_MSG(user_base == reinterpret_cast<u8*>(USER_MIN),
+                   "Unexpected user address space location: {}", fmt::ptr(user_base));
 
         LOG_INFO(Kernel_Vmm, "System managed virtual memory region: {} - {}",
                  fmt::ptr(system_managed_base),
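Note: the hunk above replaces the old shrink-in-place retry with a loop that re-attempts the whole placeholder reservation at a smaller size. The standalone sketch below shows the same reduce-and-retry pattern against the Win32 VirtualAlloc2 API; the sizes, the empty extended-parameter list, and the main() harness are illustrative stand-ins, not the emulator's code (the real call constrains placement with a MEM_EXTENDED_PARAMETER and uses the emulator's region constants). Requires a Windows 10+ SDK and linking against onecore.lib or mincore.lib.

    // Reduce-and-retry placeholder reservation (sketch only).
    #include <windows.h>
    #include <cstdio>

    int main() {
        constexpr SIZE_T OneGiB = SIZE_T(1) << 30;
        constexpr SIZE_T RequestedSize = SIZE_T(64) << 30; // illustrative total size
        constexpr int MaxReductions = 10;

        void* base = nullptr;
        SIZE_T reduction = 0;
        for (int i = 0; i < MaxReductions; i++) {
            // Reserve a placeholder region; no extended parameters in this sketch.
            base = VirtualAlloc2(GetCurrentProcess(), nullptr, RequestedSize - reduction,
                                 MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, nullptr, 0);
            if (base) {
                break;
            }
            reduction += OneGiB; // shrink the request and try again
        }
        if (!base) {
            std::fprintf(stderr, "unable to reserve virtual address space\n");
            return 1;
        }
        std::printf("reserved %llu bytes at %p\n",
                    static_cast<unsigned long long>(RequestedSize - reduction), base);
        VirtualFree(base, 0, MEM_RELEASE);
        return 0;
    }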
@@ -277,23 +285,24 @@ struct AddressSpace::Impl {
         system_managed_size = SystemManagedSize;
         system_reserved_size = SystemReservedSize;
         user_size = UserSize;
+
+        constexpr int protection_flags = PROT_READ | PROT_WRITE;
+        constexpr int base_map_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
 #ifdef __APPLE__
         system_managed_base = reinterpret_cast<u8*>(
-            mmap(reinterpret_cast<void*>(SYSTEM_MANAGED_MIN), system_managed_size,
-                 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-                 -1, 0));
+            mmap(reinterpret_cast<void*>(SYSTEM_MANAGED_MIN), system_managed_size, protection_flags,
+                 base_map_flags | MAP_FIXED, -1, 0));
         // Cannot guarantee enough space for these areas at the desired addresses, so not MAP_FIXED.
         system_reserved_base = reinterpret_cast<u8*>(
             mmap(reinterpret_cast<void*>(SYSTEM_RESERVED_MIN), system_reserved_size,
-                 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0));
+                 protection_flags, base_map_flags, -1, 0));
         user_base = reinterpret_cast<u8*>(mmap(reinterpret_cast<void*>(USER_MIN), user_size,
-                                               PROT_READ | PROT_WRITE,
-                                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0));
+                                               protection_flags, base_map_flags, -1, 0));
 #else
         const auto virtual_size = system_managed_size + system_reserved_size + user_size;
-        const auto virtual_base = reinterpret_cast<u8*>(
-            mmap(reinterpret_cast<void*>(SYSTEM_MANAGED_MIN), virtual_size, PROT_READ | PROT_WRITE,
-                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, -1, 0));
+        const auto virtual_base =
+            reinterpret_cast<u8*>(mmap(reinterpret_cast<void*>(SYSTEM_MANAGED_MIN), virtual_size,
+                                       protection_flags, base_map_flags | MAP_FIXED, -1, 0));
         system_managed_base = virtual_base;
         system_reserved_base = virtual_base + (SYSTEM_RESERVED_MIN - SYSTEM_MANAGED_MIN);
         user_base = virtual_base + (USER_MIN - SYSTEM_MANAGED_MIN);
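For the non-Windows path, the hunk above only hoists the shared mmap flags into protection_flags and base_map_flags; on the non-Apple branch the whole range still comes from one mmap and the region bases are offsets into it. Below is a minimal sketch of that single-reservation layout, assuming placeholder sizes instead of the emulator's SYSTEM_MANAGED_MIN/SYSTEM_RESERVED_MIN/USER_MIN constants and with no fixed base address, so the offsets are simple size sums rather than the gap constants the real code uses.

    // Single mmap reservation carved into three sub-regions (sketch with placeholder sizes).
    #include <sys/mman.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr size_t system_managed_size = size_t(1) << 30;
        constexpr size_t system_reserved_size = size_t(1) << 30;
        constexpr size_t user_size = size_t(2) << 30;
        constexpr int protection_flags = PROT_READ | PROT_WRITE;
        constexpr int base_map_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;

        const size_t virtual_size = system_managed_size + system_reserved_size + user_size;
        void* mem = mmap(nullptr, virtual_size, protection_flags, base_map_flags, -1, 0);
        if (mem == MAP_FAILED) {
            return 1;
        }
        auto* virtual_base = static_cast<uint8_t*>(mem);

        // The three regions are just offsets into the one reservation.
        uint8_t* system_managed_base = virtual_base;
        uint8_t* system_reserved_base = virtual_base + system_managed_size;
        uint8_t* user_base = system_reserved_base + system_reserved_size;
        std::printf("managed=%p reserved=%p user=%p\n",
                    static_cast<void*>(system_managed_base),
                    static_cast<void*>(system_reserved_base),
                    static_cast<void*>(user_base));
        munmap(virtual_base, virtual_size);
        return 0;
    }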
@@ -331,9 +340,6 @@ struct AddressSpace::Impl {
 #else
         madvise(virtual_base, virtual_size, MADV_HUGEPAGE);
 
-        const VAddr start_addr = reinterpret_cast<VAddr>(virtual_base);
-        m_free_regions.insert({start_addr, start_addr + virtual_size});
-
         backing_fd = memfd_create("BackingDmem", 0);
         if (backing_fd < 0) {
             LOG_CRITICAL(Kernel_Vmm, "memfd_create failed: {}", strerror(errno));
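The backing file descriptor retained by the hunk above supplies the physical memory that later gets mapped into the reserved range. The following small, self-contained illustration of the memfd_create approach uses the "BackingDmem" name from the diff, but the size and the single MAP_SHARED view are illustrative; the real code maps views at fixed addresses inside its reservation.

    // memfd-backed memory (sketch). Linux-only; g++ defines _GNU_SOURCE, exposing memfd_create.
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cerrno>
    #include <cstring>
    #include <cstdio>

    int main() {
        constexpr size_t backing_size = size_t(256) << 20; // illustrative size
        const int backing_fd = memfd_create("BackingDmem", 0);
        if (backing_fd < 0) {
            std::fprintf(stderr, "memfd_create failed: %s\n", std::strerror(errno));
            return 1;
        }
        if (ftruncate(backing_fd, static_cast<off_t>(backing_size)) < 0) {
            close(backing_fd);
            return 1;
        }
        // Map a shared view of the backing memory; writes through one view are visible in others.
        void* view = mmap(nullptr, backing_size, PROT_READ | PROT_WRITE, MAP_SHARED, backing_fd, 0);
        if (view == MAP_FAILED) {
            close(backing_fd);
            return 1;
        }
        std::memset(view, 0, 4096);
        munmap(view, backing_size);
        close(backing_fd);
        return 0;
    }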
@@ -16,6 +16,8 @@
 #include <pthread.h>
 #endif
 
+using namespace Xbyak::util;
+
 namespace Core {
 
 static Xbyak::Reg ZydisToXbyakRegister(const ZydisRegister reg) {
@@ -54,7 +56,7 @@ static Xbyak::Address ZydisToXbyakMemoryOperand(const ZydisDecodedOperand& opera
         expression = expression + operand.mem.disp.value;
     }
 
-    return Xbyak::util::ptr[expression];
+    return ptr[expression];
 }
 
 static std::unique_ptr<Xbyak::Operand> ZydisToXbyakOperand(const ZydisDecodedOperand& operand) {
@@ -125,8 +127,8 @@ static void SaveRegisters(Xbyak::CodeGenerator& c, const std::initializer_list<X
     for (const auto& reg : regs) {
         const auto offset = reinterpret_cast<void*>(register_save_slots[index++] * sizeof(void*));
 
-        c.putSeg(Xbyak::util::gs);
-        c.mov(Xbyak::util::qword[offset], reg.cvt64());
+        c.putSeg(gs);
+        c.mov(qword[offset], reg.cvt64());
     }
 }
 
@@ -141,8 +143,8 @@ static void RestoreRegisters(Xbyak::CodeGenerator& c,
     for (const auto& reg : regs) {
         const auto offset = reinterpret_cast<void*>(register_save_slots[index++] * sizeof(void*));
 
-        c.putSeg(Xbyak::util::gs);
-        c.mov(reg.cvt64(), Xbyak::util::qword[offset]);
+        c.putSeg(gs);
+        c.mov(reg.cvt64(), qword[offset]);
     }
 }
 
@@ -277,20 +279,20 @@ static void GenerateTcbAccess(const ZydisDecodedOperand* operands, Xbyak::CodeGe
     const u32 tls_index = slot < TlsMinimumAvailable ? slot : slot - TlsMinimumAvailable;
 
     // Load the pointer to the table of TLS slots.
-    c.putSeg(Xbyak::util::gs);
-    c.mov(dst, Xbyak::util::ptr[reinterpret_cast<void*>(teb_offset)]);
+    c.putSeg(gs);
+    c.mov(dst, ptr[reinterpret_cast<void*>(teb_offset)]);
     // Load the pointer to our buffer.
-    c.mov(dst, Xbyak::util::qword[dst + tls_index * sizeof(LPVOID)]);
+    c.mov(dst, qword[dst + tls_index * sizeof(LPVOID)]);
 #elif defined(__APPLE__)
     // The following logic is based on the Darwin implementation of _os_tsd_get_direct, used by
     // pthread_getspecific https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L89-L96
-    c.putSeg(Xbyak::util::gs);
-    c.mov(dst, Xbyak::util::qword[reinterpret_cast<void*>(slot * sizeof(void*))]);
+    c.putSeg(gs);
+    c.mov(dst, qword[reinterpret_cast<void*>(slot * sizeof(void*))]);
 #else
     const auto src = ZydisToXbyakMemoryOperand(operands[1]);
 
     // Replace fs read with gs read.
-    c.putSeg(Xbyak::util::gs);
+    c.putSeg(gs);
     c.mov(dst, src);
 #endif
 }
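The patch hunks above only shorten the Xbyak qualifiers via the file-level using namespace Xbyak::util; the emitted code is unchanged. As a reference for how such a gs-relative access is generated, here is a minimal sketch, assuming the header-only Xbyak library on an x86-64 target; the 0x30 displacement is an arbitrary illustration rather than an offset the emulator uses, and the code is only generated, never executed.

    // Emitting a gs-relative qword load with Xbyak (sketch).
    #include <cstdio>
    #include <xbyak/xbyak.h>

    using namespace Xbyak::util;

    int main() {
        Xbyak::CodeGenerator c;
        c.putSeg(gs);                                     // gs segment-override prefix
        c.mov(rax, qword[reinterpret_cast<void*>(0x30)]); // mov rax, gs:[0x30]
        c.ret();
        std::printf("generated %zu bytes of code\n", c.getSize());
        return 0;
    }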