core: Implement new memory manager (#133)
* core: Implement new memory manager
* ci: Attempt to fix linux build
* code: Fix a few build errors
parent 67f6d8b2e4
commit 55855b4195
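Editor's note: a minimal orientation sketch, not part of the commit, of how an HLE kernel call now reaches the new manager. Names and signatures are taken from core/memory.h as added below; the size and mapping name are made up.

#include <bit>
#include "core/memory.h"

void SketchFlexibleMapping() {
    auto* memory = Core::Memory::Instance();
    void* out_addr = nullptr;
    // sceKernelMapFlexibleMemory: virtual_addr == 0 lets the manager pick a
    // system-managed address and records the region as VMAType::Flexible.
    memory->MapMemory(&out_addr, /*virtual_addr=*/0, 16 * 1024,
                      Core::MemoryProt::CpuReadWrite, Core::MemoryMapFlags::NoFlags,
                      Core::VMAType::Flexible, "sketch");
    // sceKernelMunmap: frees the range and merges the freed VMA with its neighbours.
    memory->UnmapMemory(std::bit_cast<VAddr>(out_addr), 16 * 1024);
}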
@@ -7,6 +7,7 @@ if [[ -z $GITHUB_WORKSPACE ]]; then
GITHUB_WORKSPACE="${PWD%/*}"
fi

export Qt6_DIR="/usr/lib/qt6"
export PATH="$Qt6_DIR/bin:$PATH"

# Prepare Tools for building the AppImage

@@ -12,11 +12,10 @@ on:
env:
# Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.)
BUILD_TYPE: Release
CLANG_VER: 17

jobs:
build:
runs-on: ubuntu-latest
runs-on: ubuntu-24.04

steps:
- uses: actions/checkout@v4

@@ -25,29 +24,10 @@ jobs:

- name: Install misc packages
run: >
sudo apt-get update && sudo apt install libx11-dev libgl1-mesa-glx mesa-common-dev libfuse2
libwayland-dev libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-icccm4 libxcb-image0-dev
libxcb-cursor-dev libxxhash-dev libvulkan-dev

- name: Install newer Clang
run: |
wget https://apt.llvm.org/llvm.sh
chmod +x ./llvm.sh
sudo ./llvm.sh ${{env.CLANG_VER}}

- name: Install Qt
uses: jurplel/install-qt-action@v3
with:
version: 6.7.0
host: linux
target: desktop
#arch: clang++-17
dir: ${{ runner.temp }}
#modules: qtcharts qt3d
setup-python: false
sudo apt-get update && sudo apt install libx11-dev libxext-dev libwayland-dev libfuse2 clang build-essential qt6-base-dev

- name: Configure CMake
run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DCMAKE_C_COMPILER=clang-${{env.CLANG_VER}} -DCMAKE_CXX_COMPILER=clang++-${{env.CLANG_VER}} -DENABLE_QT_GUI=ON
run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DENABLE_QT_GUI=ON

- name: Build
run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}} --parallel

@@ -13,11 +13,10 @@ on:
env:
# Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.)
BUILD_TYPE: Release
CLANG_VER: 17

jobs:
build:
runs-on: ubuntu-latest
runs-on: ubuntu-24.04

steps:
- uses: actions/checkout@v4

@@ -26,16 +25,10 @@ jobs:

- name: Install misc packages
run: >
sudo apt-get update && sudo apt install libx11-dev libgl1-mesa-glx mesa-common-dev libfuse2 libwayland-dev libxxhash-dev libvulkan-dev

- name: Install newer Clang
run: |
wget https://apt.llvm.org/llvm.sh
chmod +x ./llvm.sh
sudo ./llvm.sh ${{env.CLANG_VER}}
sudo apt-get update && sudo apt install libx11-dev libxext-dev libwayland-dev libfuse2 clang build-essential

- name: Configure CMake
run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DCMAKE_C_COMPILER=clang-${{env.CLANG_VER}} -DCMAKE_CXX_COMPILER=clang++-${{env.CLANG_VER}}
run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++

- name: Build
run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}} --parallel

@@ -101,11 +101,7 @@ set(GNM_LIB src/core/libraries/gnmdriver/gnmdriver.cpp
src/core/libraries/gnmdriver/gnmdriver.h
)

set(KERNEL_LIB src/core/libraries/kernel/memory/flexible_memory.cpp
src/core/libraries/kernel/memory/flexible_memory.h
src/core/libraries/kernel/memory/kernel_memory.cpp
src/core/libraries/kernel/memory/kernel_memory.h
src/core/libraries/kernel/cpu_management.cpp
set(KERNEL_LIB src/core/libraries/kernel/cpu_management.cpp
src/core/libraries/kernel/cpu_management.h
src/core/libraries/kernel/event_queue.cpp
src/core/libraries/kernel/event_queue.h

@@ -244,6 +240,8 @@ set(CORE src/core/aerolib/stubs.cpp
src/core/aerolib/stubs.h
src/core/aerolib/aerolib.cpp
src/core/aerolib/aerolib.h
src/core/address_space.cpp
src/core/address_space.h
src/core/crypto/crypto.cpp
src/core/crypto/crypto.h
src/core/crypto/keys.h

@@ -277,6 +275,8 @@ set(CORE src/core/aerolib/stubs.cpp
${MISC_LIBS}
src/core/linker.cpp
src/core/linker.h
src/core/memory.cpp
src/core/memory.h
src/core/tls.cpp
src/core/tls.h
src/core/virtual_memory.cpp

@@ -382,6 +382,7 @@ else()
src/main.cpp
src/sdl_window.h
src/sdl_window.cpp
src/common/scope_exit.h
)
endif()

@@ -414,8 +415,14 @@ if (WIN32)
endif()
# Target Windows 10 RS5
add_definitions(-DNTDDI_VERSION=0x0A000006 -D_WIN32_WINNT=0x0A00 -DWINVER=0x0A00)
# Increase stack,commit
# Increase stack commit area
target_link_options(shadps4 PRIVATE /STACK:0x200000,0x200000)
# Disable ASLR so we can reserve the user area
if (MSVC)
target_link_options(shadps4 PRIVATE /DYNAMICBASE:NO)
else()
target_link_options(shadps4 PRIVATE -Wl,--disable-dynamicbase)
endif()
endif()

if (WIN32)

@@ -1 +1 @@
Subproject commit dfb313f8357b8f6601fa7420be1a39a51ba86f77
Subproject commit 2dd57a940b6d1b733cbd1abbc3f842da476d3d48

@@ -8,6 +8,7 @@

void assert_fail_impl() {
Common::Log::Stop();
std::fflush(stdout);
Crash();
}

@@ -0,0 +1,79 @@
// SPDX-FileCopyrightText: 2014 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <utility>

namespace detail {
template <class F>
class ScopeGuard {
private:
F f;
bool active;

public:
constexpr ScopeGuard(F f_) : f(std::move(f_)), active(true) {}
constexpr ~ScopeGuard() {
if (active) {
f();
}
}
constexpr void Cancel() {
active = false;
}

constexpr ScopeGuard(ScopeGuard&& rhs) : f(std::move(rhs.f)), active(rhs.active) {
rhs.Cancel();
}

ScopeGuard& operator=(ScopeGuard&& rhs) = delete;
};

template <class F>
constexpr ScopeGuard<F> MakeScopeGuard(F f) {
return ScopeGuard<F>(std::move(f));
}

enum class ScopeGuardOnExit {};

template <typename F>
constexpr ScopeGuard<F> operator+(ScopeGuardOnExit, F&& f) {
return ScopeGuard<F>(std::forward<F>(f));
}

} // namespace detail

#define CONCATENATE_IMPL(s1, s2) s1##s2
#define CONCATENATE(s1, s2) CONCATENATE_IMPL(s1, s2)

#ifdef __COUNTER__
#define ANONYMOUS_VARIABLE(pref) CONCATENATE(pref, __COUNTER__)
#else
#define ANONYMOUS_VARIABLE(pref) CONCATENATE(pref, __LINE__)
#endif

/**
* This macro is similar to SCOPE_EXIT, except the object is caller managed. This is intended to be
* used when the caller might want to cancel the ScopeExit.
*/
#define SCOPE_GUARD detail::ScopeGuardOnExit() + [&]()

/**
* This macro allows you to conveniently specify a block of code that will run on scope exit. Handy
* for doing ad-hoc clean-up tasks in a function with multiple returns.
*
* Example usage:
* \code
* const int saved_val = g_foo;
* g_foo = 55;
* SCOPE_EXIT{ g_foo = saved_val; };
*
* if (Bar()) {
*     return 0;
* } else {
*     return 20;
* }
* \endcode
*/
#define SCOPE_EXIT auto ANONYMOUS_VARIABLE(SCOPE_EXIT_STATE_) = SCOPE_GUARD

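Editor's note: the header above documents SCOPE_EXIT with an example, but SCOPE_GUARD only gets a brief description. A hedged usage sketch, not part of the commit, showing the caller-managed variant and Cancel():

#include <cstdio>
#include "common/scope_exit.h"

bool OpenDataFile(const char* path, std::FILE** out) {
    std::FILE* f = std::fopen(path, "rb");
    if (f == nullptr) {
        return false;
    }
    // Caller-managed guard: closes the file on every early return below.
    auto guard = SCOPE_GUARD {
        std::fclose(f);
    };
    char magic[4];
    if (std::fread(magic, 1, sizeof(magic), f) != sizeof(magic)) {
        return false; // guard closes f here
    }
    guard.Cancel(); // success: ownership of f passes to the caller
    *out = f;
    return true;
}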
@@ -0,0 +1,274 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <boost/icl/separate_interval_set.hpp>
#include "common/assert.h"
#include "common/error.h"
#include "core/address_space.h"
#include "core/libraries/kernel/memory_management.h"
#include "core/virtual_memory.h"

#ifdef _WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#endif

namespace Core {

static constexpr size_t BackingSize = SCE_KERNEL_MAIN_DMEM_SIZE;
static constexpr size_t VirtualSize = USER_MAX - USER_MIN + 1;

#ifdef _WIN32
struct AddressSpace::Impl {
Impl() : process{GetCurrentProcess()} {
// Allocate backing file that represents the total physical memory.
backing_handle =
CreateFileMapping2(INVALID_HANDLE_VALUE, nullptr, FILE_MAP_WRITE | FILE_MAP_READ,
PAGE_READWRITE, SEC_COMMIT, BackingSize, nullptr, nullptr, 0);
ASSERT(backing_handle);
// Allocate a virtual memory for the backing file map as placeholder
backing_base = static_cast<u8*>(VirtualAlloc2(process, nullptr, BackingSize,
MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
PAGE_NOACCESS, nullptr, 0));
// Map backing placeholder. This will commit the pages
void* const ret = MapViewOfFile3(backing_handle, process, backing_base, 0, BackingSize,
MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);
ASSERT(ret == backing_base);
// Allocate virtual address placeholder for our address space.
MEM_ADDRESS_REQUIREMENTS req{};
MEM_EXTENDED_PARAMETER param{};
req.LowestStartingAddress = reinterpret_cast<PVOID>(USER_MIN);
req.HighestEndingAddress = reinterpret_cast<PVOID>(USER_MAX);
req.Alignment = 0;
param.Type = MemExtendedParameterAddressRequirements;
param.Pointer = &req;
virtual_base = static_cast<u8*>(VirtualAlloc2(process, nullptr, VirtualSize,
MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
PAGE_NOACCESS, &param, 1));
ASSERT(virtual_base);

const uintptr_t virtual_addr = reinterpret_cast<uintptr_t>(virtual_base);
placeholders.insert({virtual_addr, virtual_addr + VirtualSize});
}

~Impl() {
if (virtual_base) {
if (!VirtualFree(virtual_base, 0, MEM_RELEASE)) {
LOG_CRITICAL(Render, "Failed to free virtual memory");
}
}
if (backing_base) {
if (!UnmapViewOfFile2(process, backing_base, MEM_PRESERVE_PLACEHOLDER)) {
LOG_CRITICAL(Render, "Failed to unmap backing memory placeholder");
}
if (!VirtualFreeEx(process, backing_base, 0, MEM_RELEASE)) {
LOG_CRITICAL(Render, "Failed to free backing memory");
}
}
if (!CloseHandle(backing_handle)) {
LOG_CRITICAL(Render, "Failed to free backing memory file handle");
}
}

void* MapUser(VAddr virtual_addr, PAddr phys_addr, size_t size, ULONG prot) {
const auto it = placeholders.find(virtual_addr);
ASSERT_MSG(it != placeholders.end(), "Cannot map already mapped region");
ASSERT_MSG(virtual_addr >= it->lower() && virtual_addr + size <= it->upper(),
"Map range must be fully contained in a placeholder");

// Windows only allows splitting a placeholder into two.
// This means that if the map range is fully
// contained in the placeholder we need to perform two split operations,
// one at the start and one at the end.
const VAddr placeholder_start = it->lower();
const VAddr placeholder_end = it->upper();
const VAddr virtual_end = virtual_addr + size;

// If the placeholder doesn't exactly start at virtual_addr, split it at the start.
if (placeholder_start != virtual_addr) {
VirtualFreeEx(process, reinterpret_cast<LPVOID>(placeholder_start),
virtual_addr - placeholder_start, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
}

// If the placeholder doesn't exactly end at virtual_end, split it at the end.
if (placeholder_end != virtual_end) {
VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_end),
placeholder_end - virtual_end, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
}

// Remove the placeholder.
placeholders.erase({virtual_addr, virtual_end});

// Perform the map.
void* ptr = nullptr;
if (phys_addr) {
ptr = MapViewOfFile3(backing_handle, process, reinterpret_cast<PVOID>(virtual_addr),
phys_addr, size, MEM_REPLACE_PLACEHOLDER, prot, nullptr, 0);
} else {
ptr = VirtualAlloc2(process, reinterpret_cast<PVOID>(virtual_addr), size,
MEM_REPLACE_PLACEHOLDER, prot, nullptr, 0);
}
ASSERT(ptr);
return ptr;
}

void* MapPrivate(VAddr virtual_addr, size_t size, u64 alignment, ULONG prot) {
// Map a private allocation
MEM_ADDRESS_REQUIREMENTS req{};
MEM_EXTENDED_PARAMETER param{};
// req.LowestStartingAddress =
//     (virtual_addr == 0 ? reinterpret_cast<PVOID>(SYSTEM_MANAGED_MIN)
//                        : reinterpret_cast<PVOID>(virtual_addr));
req.HighestEndingAddress = reinterpret_cast<PVOID>(SYSTEM_MANAGED_MAX);
req.Alignment = alignment;
param.Type = MemExtendedParameterAddressRequirements;
param.Pointer = &req;
ULONG alloc_type = MEM_COMMIT | MEM_RESERVE | (alignment > 2_MB ? MEM_LARGE_PAGES : 0);
void* const ptr = VirtualAlloc2(process, nullptr, size, alloc_type, prot, &param, 1);
ASSERT_MSG(ptr, "{}", Common::GetLastErrorMsg());
return ptr;
}

void UnmapUser(VAddr virtual_addr, size_t size) {
const bool ret = UnmapViewOfFile2(process, reinterpret_cast<PVOID>(virtual_addr),
MEM_PRESERVE_PLACEHOLDER);
ASSERT_MSG(ret, "Unmap operation on virtual_addr={:#X} failed", virtual_addr);

// The unmap call will create a new placeholder region. We need to see if we can coalesce it
// with neighbors.
VAddr placeholder_start = virtual_addr;
VAddr placeholder_end = virtual_addr + size;

// Check if a placeholder exists right before us.
const auto left_it = placeholders.find(virtual_addr - 1);
if (left_it != placeholders.end()) {
ASSERT_MSG(left_it->upper() == virtual_addr,
"Left placeholder does not end at virtual_addr!");
placeholder_start = left_it->lower();
VirtualFreeEx(process, reinterpret_cast<LPVOID>(placeholder_start),
placeholder_end - placeholder_start,
MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
}

// Check if a placeholder exists right after us.
const auto right_it = placeholders.find(placeholder_end + 1);
if (right_it != placeholders.end()) {
ASSERT_MSG(right_it->lower() == placeholder_end,
"Right placeholder does not start at virtual_end!");
placeholder_end = right_it->upper();
VirtualFreeEx(process, reinterpret_cast<LPVOID>(placeholder_start),
placeholder_end - placeholder_start,
MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
}

// Insert the new placeholder.
placeholders.insert({placeholder_start, placeholder_end});
}

void UnmapPrivate(VAddr virtual_addr, size_t size) {
const bool ret =
VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_addr), 0, MEM_RELEASE);
ASSERT_MSG(ret, "{}", Common::GetLastErrorMsg());
}

void Protect(VAddr virtual_addr, size_t size, bool read, bool write, bool execute) {
DWORD new_flags{};
if (read && write) {
new_flags = PAGE_READWRITE;
} else if (read && !write) {
new_flags = PAGE_READONLY;
} else if (!read && !write) {
new_flags = PAGE_NOACCESS;
} else {
UNIMPLEMENTED_MSG("Protection flag combination read={} write={}", read, write);
}

const VAddr virtual_end = virtual_addr + size;
auto [it, end] = placeholders.equal_range({virtual_addr, virtual_end});
while (it != end) {
const size_t offset = std::max(it->lower(), virtual_addr);
const size_t protect_length = std::min(it->upper(), virtual_end) - offset;
DWORD old_flags{};
if (!VirtualProtect(virtual_base + offset, protect_length, new_flags, &old_flags)) {
LOG_CRITICAL(Common_Memory, "Failed to change virtual memory protect rules");
}
++it;
}
}

HANDLE process{};
HANDLE backing_handle{};
u8* backing_base{};
u8* virtual_base{};
boost::icl::separate_interval_set<uintptr_t> placeholders;
};
#else

enum PosixPageProtection {
PAGE_NOACCESS = 0,
PAGE_READONLY = PROT_READ,
PAGE_READWRITE = PROT_READ | PROT_WRITE,
PAGE_EXECUTE = PROT_EXEC,
PAGE_EXECUTE_READ = PROT_EXEC | PROT_READ,
PAGE_EXECUTE_READWRITE = PROT_EXEC | PROT_READ | PROT_WRITE
};

struct AddressSpace::Impl {
Impl() {
UNREACHABLE();
}

void* MapUser(VAddr virtual_addr, PAddr phys_addr, size_t size, PosixPageProtection prot) {
UNREACHABLE();
return nullptr;
}

void* MapPrivate(VAddr virtual_addr, size_t size, u64 alignment, PosixPageProtection prot) {
UNREACHABLE();
return nullptr;
}

void UnmapUser(VAddr virtual_addr, size_t size) {
UNREACHABLE();
}

void UnmapPrivate(VAddr virtual_addr, size_t size) {
UNREACHABLE();
}

void Protect(VAddr virtual_addr, size_t size, bool read, bool write, bool execute) {
UNREACHABLE();
}

u8* backing_base{};
u8* virtual_base{};
};
#endif

AddressSpace::AddressSpace() : impl{std::make_unique<Impl>()} {
virtual_base = impl->virtual_base;
backing_base = impl->backing_base;
}

AddressSpace::~AddressSpace() = default;

void* AddressSpace::Map(VAddr virtual_addr, size_t size, u64 alignment, PAddr phys_addr) {
if (virtual_addr >= USER_MIN) {
return impl->MapUser(virtual_addr, phys_addr, size, PAGE_READWRITE);
}
return impl->MapPrivate(virtual_addr, size, alignment, PAGE_READWRITE);
}

void AddressSpace::Unmap(VAddr virtual_addr, size_t size) {
if (virtual_addr >= USER_MIN) {
return impl->UnmapUser(virtual_addr, size);
}
return impl->UnmapPrivate(virtual_addr, size);
}

void AddressSpace::Protect(VAddr virtual_addr, size_t size, MemoryPermission perms) {
return impl->Protect(virtual_addr, size, true, true, true);
}

} // namespace Core

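Editor's note: a worked example of the two-split placeholder bookkeeping in MapUser() above, using made-up addresses; not part of the commit.

// Free placeholder:       [0x10'0000'0000, 0x10'0010'0000)
// Requested user mapping: virtual_addr = 0x10'0004'0000, size = 0x2'0000
VirtualFreeEx(process, reinterpret_cast<LPVOID>(0x10'0000'0000), 0x4'0000,
              MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER); // keep the front as a placeholder
VirtualFreeEx(process, reinterpret_cast<LPVOID>(0x10'0006'0000), 0xA'0000,
              MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER); // keep the tail as a placeholder
MapViewOfFile3(backing_handle, process, reinterpret_cast<PVOID>(0x10'0004'0000), phys_addr,
               0x2'0000, MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);
// UnmapUser() later turns the middle back into a placeholder and coalesces it with the
// free neighbours via MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS.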
@@ -0,0 +1,59 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <memory>
#include "common/enum.h"
#include "common/types.h"

namespace Core {

enum class MemoryPermission : u32 {
Read = 1 << 0,
Write = 1 << 1,
ReadWrite = Read | Write,
Execute = 1 << 2,
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission)

/**
* Represents the user virtual address space backed by a dmem memory block
*/
class AddressSpace {
public:
explicit AddressSpace();
~AddressSpace();

[[nodiscard]] u8* VirtualBase() noexcept {
return virtual_base;
}
[[nodiscard]] const u8* VirtualBase() const noexcept {
return virtual_base;
}

/**
* @brief Maps memory to the specified virtual address.
* @param virtual_addr The base address to place the mapping.
*        If zero is provided an address in system managed area is picked.
* @param size The size of the area to map.
* @param phys_addr The offset of the backing file handle to map.
*        The same backing region may be aliased into different virtual regions.
*        If zero is provided the mapping is considered as private.
* @return A pointer to the mapped memory.
*/
void* Map(VAddr virtual_addr, size_t size, u64 alignment = 0, PAddr phys_addr = 0);

/// Unmaps specified virtual memory area.
void Unmap(VAddr virtual_addr, size_t size);

void Protect(VAddr virtual_addr, size_t size, MemoryPermission perms);

private:
struct Impl;
std::unique_ptr<Impl> impl;
u8* backing_base{};
u8* virtual_base{};
};

} // namespace Core

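Editor's note: a minimal sketch, not part of the commit, of driving this interface directly. The USER_MIN constant comes from core/memory.h later in the diff, the dmem offset and size are illustrative, and on this commit only the Win32 backend is implemented (the POSIX Impl still hits UNREACHABLE()).

#include "core/address_space.h"
#include "core/memory.h" // Core::USER_MIN in this sketch

void SketchAddressSpace() {
    Core::AddressSpace addr_space;
    // Alias 64 KiB of the dmem backing file (offset 0x10000) into the user area.
    [[maybe_unused]] void* view =
        addr_space.Map(Core::USER_MIN, 64 * 1024, /*alignment=*/0, /*phys_addr=*/0x10000);
    addr_space.Protect(Core::USER_MIN, 64 * 1024, Core::MemoryPermission::ReadWrite);
    addr_space.Unmap(Core::USER_MIN, 64 * 1024);
}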
@@ -11,12 +11,12 @@
#include "core/libraries/kernel/event_queues.h"
#include "core/libraries/kernel/file_system.h"
#include "core/libraries/kernel/libkernel.h"
#include "core/libraries/kernel/memory/kernel_memory.h"
#include "core/libraries/kernel/memory_management.h"
#include "core/libraries/kernel/thread_management.h"
#include "core/libraries/kernel/time_management.h"
#include "core/libraries/libs.h"
#include "core/linker.h"
#include "core/memory.h"
#ifdef _WIN64
#include <io.h>
#include <windows.h>

@@ -43,7 +43,9 @@ static PS4_SYSV_ABI void stack_chk_fail() {
}

int PS4_SYSV_ABI sceKernelMunmap(void* addr, size_t len) {
LOG_ERROR(Kernel_Vmm, "(DUMMY) called");
LOG_INFO(Kernel_Vmm, "addr = {}, len = {:#x}", fmt::ptr(addr), len);
auto* memory = Core::Memory::Instance();
memory->UnmapMemory(std::bit_cast<VAddr>(addr), len);
return SCE_OK;
}

@@ -188,6 +190,8 @@ void LibKernel_Register(Core::Loader::SymbolsResolver* sym) {
LIB_FUNCTION("L-Q3LEjIbgA", "libkernel", 1, "libkernel", 1, 1, sceKernelMapDirectMemory);
LIB_FUNCTION("MBuItvba6z8", "libkernel", 1, "libkernel", 1, 1, sceKernelReleaseDirectMemory);
LIB_FUNCTION("cQke9UuBQOk", "libkernel", 1, "libkernel", 1, 1, sceKernelMunmap);
LIB_FUNCTION("mL8NDH86iQI", "libkernel", 1, "libkernel", 1, 1, sceKernelMapNamedFlexibleMemory);
LIB_FUNCTION("IWIBBdTHit4", "libkernel", 1, "libkernel", 1, 1, sceKernelMapFlexibleMemory);
// equeue
LIB_FUNCTION("D0OdFMjp46I", "libkernel", 1, "libkernel", 1, 1, sceKernelCreateEqueue);
LIB_FUNCTION("jpFjmgAC5AE", "libkernel", 1, "libkernel", 1, 1, sceKernelDeleteEqueue);

@@ -205,7 +209,6 @@ void LibKernel_Register(Core::Loader::SymbolsResolver* sym) {
Libraries::Kernel::fileSystemSymbolsRegister(sym);
Libraries::Kernel::timeSymbolsRegister(sym);
Libraries::Kernel::pthreadSymbolsRegister(sym);
Libraries::Kernel::RegisterKernelMemory(sym);

// temp
LIB_FUNCTION("NWtTN10cJzE", "libSceLibcInternalExt", 1, "libSceLibcInternal", 1, 1,

@@ -1,22 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "flexible_memory.h"

namespace Libraries::Kernel {
bool FlexibleMemory::Map(u64 virtual_addr, std::size_t len, int prot,
VirtualMemory::MemoryMode cpu_mode) {
std::scoped_lock lock{mutex};

AllocatedBlock block{};
block.map_virtual_addr = virtual_addr;
block.map_size = len;
block.prot = prot;
block.cpu_mode = cpu_mode;

allocated_blocks.push_back(block);
allocated_total += len;

return true;
}
} // namespace Libraries::Kernel

@@ -1,33 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <mutex>
#include <vector>
#include "common/types.h"
#include "core/virtual_memory.h"

namespace Libraries::Kernel {

class FlexibleMemory {
public:
struct AllocatedBlock {
u64 map_virtual_addr;
u64 map_size;
int prot;
VirtualMemory::MemoryMode cpu_mode;
};

FlexibleMemory(){};
virtual ~FlexibleMemory(){};

public:
bool Map(u64 virtual_addr, std::size_t len, int prot, VirtualMemory::MemoryMode cpu_mode);

private:
std::vector<AllocatedBlock> allocated_blocks;
u64 allocated_total = 0;
std::mutex mutex;
};
} // namespace Libraries::Kernel

@@ -1,78 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <common/assert.h>
#include <common/singleton.h>
#include <core/libraries/error_codes.h>
#include <core/libraries/libs.h>
#include <core/virtual_memory.h>
#include "flexible_memory.h"
#include "kernel_memory.h"

namespace Libraries::Kernel {

bool Is16KBMultiple(u64 n) {
return ((n % (16ull * 1024) == 0));
}
s32 PS4_SYSV_ABI sceKernelMapNamedFlexibleMemory(void** addr_in_out, std::size_t len, int prot,
int flags, const char* name) {

LOG_INFO(Kernel_Vmm, "len = {:#x}, prot = {:#x}, flags = {:#x}, name = {}", len, prot, flags,
name);

if (len == 0 || !Is16KBMultiple(len)) {
LOG_ERROR(Kernel_Vmm, "len is 0 or not 16kb multiple");
return ORBIS_KERNEL_ERROR_EINVAL;
}

static constexpr size_t MaxNameSize = 32;
if (std::strlen(name) > MaxNameSize) {
LOG_ERROR(Kernel_Vmm, "name exceeds 32 bytes!");
return ORBIS_KERNEL_ERROR_ENAMETOOLONG;
}

if (name == nullptr) {
LOG_ERROR(Kernel_Vmm, "name is invalid!");
return ORBIS_KERNEL_ERROR_EFAULT;
}

VirtualMemory::MemoryMode cpu_mode = VirtualMemory::MemoryMode::NoAccess;

switch (prot) {
case 0x3:
cpu_mode = VirtualMemory::MemoryMode::ReadWrite;
break;
default:
UNREACHABLE();
}

auto in_addr = reinterpret_cast<u64>(*addr_in_out);
auto out_addr = VirtualMemory::memory_alloc(in_addr, len, cpu_mode);
*addr_in_out = reinterpret_cast<void*>(out_addr);

auto* flexible_memory = Common::Singleton<FlexibleMemory>::Instance();

if (!flexible_memory->Map(out_addr, len, prot, cpu_mode)) {
UNREACHABLE();
}

if (out_addr == 0) {
LOG_ERROR(Kernel_Vmm, "Can't allocate address");
return ORBIS_KERNEL_ERROR_ENOMEM;
}
LOG_INFO(Kernel_Vmm, "in_addr = {:#x} out_addr = {:#x}", in_addr, out_addr);

return ORBIS_OK;
}

s32 PS4_SYSV_ABI sceKernelMapFlexibleMemory(void** addr_in_out, std::size_t len, int prot,
int flags) {
return sceKernelMapNamedFlexibleMemory(addr_in_out, len, prot, flags, "");
}

void RegisterKernelMemory(Core::Loader::SymbolsResolver* sym) {
LIB_FUNCTION("mL8NDH86iQI", "libkernel", 1, "libkernel", 1, 1, sceKernelMapNamedFlexibleMemory);
LIB_FUNCTION("IWIBBdTHit4", "libkernel", 1, "libkernel", 1, 1, sceKernelMapFlexibleMemory);
}

} // namespace Libraries::Kernel

@@ -1,18 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include "common/types.h"

namespace Core::Loader {
class SymbolsResolver;
}

namespace Libraries::Kernel {
s32 PS4_SYSV_ABI sceKernelMapNamedFlexibleMemory(void** addrInOut, std::size_t len, int prot,
int flags, const char* name);
s32 PS4_SYSV_ABI sceKernelMapFlexibleMemory(void** addr_in_out, std::size_t len, int prot,
int flags);
void RegisterKernelMemory(Core::Loader::SymbolsResolver* sym);
} // namespace Libraries::Kernel

@@ -3,13 +3,11 @@

#include <bit>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/singleton.h"
#include "core/libraries/error_codes.h"
#include "core/libraries/kernel/memory_management.h"
#include "core/libraries/kernel/physical_memory.h"
#include "core/virtual_memory.h"
#include "core/memory.h"

namespace Libraries::Kernel {

@@ -43,15 +41,10 @@ int PS4_SYSV_ABI sceKernelAllocateDirectMemory(s64 searchStart, s64 searchEnd, u
return SCE_KERNEL_ERROR_EINVAL;
}

u64 physical_addr = 0;
auto* physical_memory = Common::Singleton<PhysicalMemory>::Instance();
if (!physical_memory->Alloc(searchStart, searchEnd, len, alignment, &physical_addr,
memoryType)) {
LOG_CRITICAL(Kernel_Vmm, "Unable to allocate physical memory");
return SCE_KERNEL_ERROR_EAGAIN;
}
*physAddrOut = static_cast<s64>(physical_addr);
LOG_INFO(Kernel_Vmm, "physAddrOut = {:#x}", physical_addr);
auto* memory = Core::Memory::Instance();
PAddr phys_addr = memory->Allocate(searchStart, searchEnd, len, alignment, memoryType);
*physAddrOut = static_cast<s64>(phys_addr);
LOG_INFO(Kernel_Vmm, "physAddrOut = {:#x}", phys_addr);
return SCE_OK;
}

@@ -77,40 +70,48 @@ int PS4_SYSV_ABI sceKernelMapDirectMemory(void** addr, u64 len, int prot, int fl
}
}

VirtualMemory::MemoryMode cpu_mode = VirtualMemory::MemoryMode::NoAccess;

switch (prot) {
case 0x03:
cpu_mode = VirtualMemory::MemoryMode::ReadWrite;
break;
case 0x32:
case 0x33: // SCE_KERNEL_PROT_CPU_READ|SCE_KERNEL_PROT_CPU_WRITE|SCE_KERNEL_PROT_GPU_READ|SCE_KERNEL_PROT_GPU_ALL
cpu_mode = VirtualMemory::MemoryMode::ReadWrite;
break;
default:
UNREACHABLE();
const VAddr in_addr = reinterpret_cast<VAddr>(*addr);
const auto mem_prot = static_cast<Core::MemoryProt>(prot);
const auto map_flags = static_cast<Core::MemoryMapFlags>(flags);
auto* memory = Core::Memory::Instance();
return memory->MapMemory(addr, in_addr, len, mem_prot, map_flags, Core::VMAType::Direct, "",
directMemoryStart, alignment);
}

auto in_addr = reinterpret_cast<u64>(*addr);
u64 out_addr = 0;
s32 PS4_SYSV_ABI sceKernelMapNamedFlexibleMemory(void** addr_in_out, std::size_t len, int prot,
int flags, const char* name) {

if (flags == 0) {
out_addr = VirtualMemory::memory_alloc_aligned(in_addr, len, cpu_mode, alignment);
}
LOG_INFO(Kernel_Vmm, "in_addr = {:#x}, out_addr = {:#x}", in_addr, out_addr);

*addr = reinterpret_cast<void*>(out_addr); // return out_addr to first functions parameter

if (out_addr == 0) {
return SCE_KERNEL_ERROR_ENOMEM;
if (len == 0 || !Common::is16KBAligned(len)) {
LOG_ERROR(Kernel_Vmm, "len is 0 or not 16kb multiple");
return ORBIS_KERNEL_ERROR_EINVAL;
}

auto* physical_memory = Common::Singleton<PhysicalMemory>::Instance();
if (!physical_memory->Map(out_addr, directMemoryStart, len, prot, cpu_mode)) {
UNREACHABLE();
static constexpr size_t MaxNameSize = 32;
if (std::strlen(name) > MaxNameSize) {
LOG_ERROR(Kernel_Vmm, "name exceeds 32 bytes!");
return ORBIS_KERNEL_ERROR_ENAMETOOLONG;
}

return SCE_OK;
if (name == nullptr) {
LOG_ERROR(Kernel_Vmm, "name is invalid!");
return ORBIS_KERNEL_ERROR_EFAULT;
}

const VAddr in_addr = reinterpret_cast<VAddr>(*addr_in_out);
const auto mem_prot = static_cast<Core::MemoryProt>(prot);
const auto map_flags = static_cast<Core::MemoryMapFlags>(flags);
auto* memory = Core::Memory::Instance();
const int ret = memory->MapMemory(addr_in_out, in_addr, len, mem_prot, map_flags,
Core::VMAType::Flexible, name);

LOG_INFO(Kernel_Vmm, "addr = {}, len = {:#x}, prot = {:#x}, flags = {:#x}",
fmt::ptr(*addr_in_out), len, prot, flags);
return ret;
}

s32 PS4_SYSV_ABI sceKernelMapFlexibleMemory(void** addr_in_out, std::size_t len, int prot,
int flags) {
return sceKernelMapNamedFlexibleMemory(addr_in_out, len, prot, flags, "");
}

} // namespace Libraries::Kernel

@@ -35,5 +35,9 @@ int PS4_SYSV_ABI sceKernelAllocateDirectMemory(s64 searchStart, s64 searchEnd, u
u64 alignment, int memoryType, s64* physAddrOut);
int PS4_SYSV_ABI sceKernelMapDirectMemory(void** addr, u64 len, int prot, int flags,
s64 directMemoryStart, u64 alignment);
s32 PS4_SYSV_ABI sceKernelMapNamedFlexibleMemory(void** addrInOut, std::size_t len, int prot,
int flags, const char* name);
s32 PS4_SYSV_ABI sceKernelMapFlexibleMemory(void** addr_in_out, std::size_t len, int prot,
int flags);

} // namespace Libraries::Kernel

@@ -17,7 +17,7 @@ void* PS4_SYSV_ABI internal_memcpy(void* dest, const void* src, size_t n) {
return std::memcpy(dest, src, n);
}

int PS4_SYSV_ABI internal_memcpy_s(void* dest, rsize_t destsz, const void* src, rsize_t count) {
int PS4_SYSV_ABI internal_memcpy_s(void* dest, size_t destsz, const void* src, size_t count) {
#ifdef _WIN64
return memcpy_s(dest, destsz, src, count);
#else

@@ -26,7 +26,7 @@ int PS4_SYSV_ABI internal_memcpy_s(void* dest, rsize_t destsz, const void* src,
#endif
}

int PS4_SYSV_ABI internal_strcpy_s(char* dest, rsize_t dest_size, const char* src) {
int PS4_SYSV_ABI internal_strcpy_s(char* dest, size_t dest_size, const char* src) {
#ifdef _WIN64
return strcpy_s(dest, dest_size, src);
#else

|
|
@ -12,8 +12,8 @@ class SymbolsResolver;
|
|||
namespace Libraries::LibcInternal {
|
||||
void* PS4_SYSV_ABI internal_memset(void* s, int c, size_t n);
|
||||
void* PS4_SYSV_ABI internal_memcpy(void* dest, const void* src, size_t n);
|
||||
int PS4_SYSV_ABI internal_memcpy_s(void* dest, rsize_t destsz, const void* src, rsize_t count);
|
||||
int PS4_SYSV_ABI internal_strcpy_s(char* dest, rsize_t dest_size, const char* src);
|
||||
int PS4_SYSV_ABI internal_memcpy_s(void* dest, size_t destsz, const void* src, size_t count);
|
||||
int PS4_SYSV_ABI internal_strcpy_s(char* dest, size_t dest_size, const char* src);
|
||||
int PS4_SYSV_ABI internal_memcmp(const void* s1, const void* s2, size_t n);
|
||||
float PS4_SYSV_ABI internal_expf(float x);
|
||||
|
||||
|
|
|
@@ -0,0 +1,174 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <algorithm>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/scope_exit.h"
#include "core/libraries/error_codes.h"
#include "core/memory.h"

namespace Core {

MemoryManager::MemoryManager() {
// Insert a virtual memory area that covers the user area.
const size_t user_size = USER_MAX - USER_MIN - 1;
vma_map.emplace(USER_MIN, VirtualMemoryArea{USER_MIN, user_size});

// Insert a virtual memory area that covers the system managed area.
const size_t sys_size = SYSTEM_MANAGED_MAX - SYSTEM_MANAGED_MIN - 1;
vma_map.emplace(SYSTEM_MANAGED_MIN, VirtualMemoryArea{SYSTEM_MANAGED_MIN, sys_size});
}

MemoryManager::~MemoryManager() = default;

PAddr MemoryManager::Allocate(PAddr search_start, PAddr search_end, size_t size, u64 alignment,
int memory_type) {
PAddr free_addr = 0;

// Iterate through allocated blocks and find the next free position
for (const auto& block : allocations) {
const PAddr end = block.base + block.size;
free_addr = std::max(end, free_addr);
}

// Align free position
free_addr = Common::alignUp(free_addr, alignment);
ASSERT(free_addr >= search_start && free_addr + size <= search_end);

// Add the allocated region to the list and commit its pages.
allocations.emplace_back(free_addr, size, memory_type);
return free_addr;
}

void MemoryManager::Free(PAddr phys_addr, size_t size) {
const auto it = std::ranges::find_if(allocations, [&](const auto& alloc) {
return alloc.base == phys_addr && alloc.size == size;
});
ASSERT(it != allocations.end());

// Free the ranges.
allocations.erase(it);
}

int MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, size_t size, MemoryProt prot,
MemoryMapFlags flags, VMAType type, std::string_view name,
PAddr phys_addr, u64 alignment) {
VAddr mapped_addr = alignment > 0 ? Common::alignUp(virtual_addr, alignment) : virtual_addr;
SCOPE_EXIT {
auto& new_vma = AddMapping(mapped_addr, size);
new_vma.disallow_merge = True(flags & MemoryMapFlags::NoCoalesce);
new_vma.prot = prot;
new_vma.name = name;
new_vma.type = type;
};

// When virtual addr is zero let the address space manager pick the address.
// Alignment matters here as we let the OS pick the address.
if (virtual_addr == 0) {
*out_addr = impl.Map(virtual_addr, size, alignment);
mapped_addr = std::bit_cast<VAddr>(*out_addr);
return ORBIS_OK;
}

// Fixed mapping means the virtual address must exactly match the provided one.
if (True(flags & MemoryMapFlags::Fixed) && True(flags & MemoryMapFlags::NoOverwrite)) {
// This should return SCE_KERNEL_ERROR_ENOMEM but shouldn't normally happen.
const auto& vma = FindVMA(mapped_addr)->second;
const u32 remaining_size = vma.base + vma.size - mapped_addr;
ASSERT_MSG(vma.type == VMAType::Free && remaining_size >= size);
}

// Find the first free area starting with provided virtual address.
if (False(flags & MemoryMapFlags::Fixed)) {
auto it = FindVMA(mapped_addr);
while (it->second.type != VMAType::Free || it->second.size < size) {
it++;
}
ASSERT(it != vma_map.end());
if (alignment > 0) {
ASSERT_MSG(it->second.base % alignment == 0, "Free region base is not aligned");
}
mapped_addr = it->second.base;
}

// Perform the mapping.
*out_addr = impl.Map(mapped_addr, size);
return ORBIS_OK;
}

void MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) {
// TODO: Partial unmaps are technically supported by the guest.
const auto it = vma_map.find(virtual_addr);
ASSERT_MSG(it != vma_map.end() && it->first == virtual_addr,
"Attempting to unmap partially mapped range");

// Mark region as free and attempt to coalesce it with neighbours.
auto& vma = it->second;
vma.type = VMAType::Free;
vma.prot = MemoryProt::NoAccess;
vma.phys_base = 0;
MergeAdjacent(it);

// Unmap the memory region.
impl.Unmap(virtual_addr, size);
}

VirtualMemoryArea& MemoryManager::AddMapping(VAddr virtual_addr, size_t size) {
auto vma_handle = FindVMA(virtual_addr);
ASSERT_MSG(vma_handle != vma_map.end(), "Virtual address not in vm_map");

const VirtualMemoryArea& vma = vma_handle->second;
ASSERT_MSG(vma.type == VMAType::Free, "Adding a mapping to already mapped region");

const VAddr start_in_vma = virtual_addr - vma.base;
const VAddr end_in_vma = start_in_vma + size;
ASSERT_MSG(end_in_vma <= vma.size, "Mapping cannot fit inside free region");

if (end_in_vma != vma.size) {
// Split VMA at the end of the allocated region
Split(vma_handle, end_in_vma);
}
if (start_in_vma != 0) {
// Split VMA at the start of the allocated region
vma_handle = Split(vma_handle, start_in_vma);
}

return vma_handle->second;
}

MemoryManager::VMAHandle MemoryManager::Split(VMAHandle vma_handle, u32 offset_in_vma) {
auto& old_vma = vma_handle->second;
ASSERT(offset_in_vma < old_vma.size && offset_in_vma > 0);

auto new_vma = old_vma;
old_vma.size = offset_in_vma;
new_vma.base += offset_in_vma;
new_vma.size -= offset_in_vma;

if (new_vma.type == VMAType::Direct) {
new_vma.phys_base += offset_in_vma;
}
return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
}

MemoryManager::VMAHandle MemoryManager::MergeAdjacent(VMAHandle iter) {
const auto next_vma = std::next(iter);
if (next_vma != vma_map.end() && iter->second.CanMergeWith(next_vma->second)) {
iter->second.size += next_vma->second.size;
vma_map.erase(next_vma);
}

if (iter != vma_map.begin()) {
auto prev_vma = std::prev(iter);
if (prev_vma->second.CanMergeWith(iter->second)) {
prev_vma->second.size += iter->second.size;
vma_map.erase(iter);
iter = prev_vma;
}
}

return iter;
}

} // namespace Core

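Editor's note: a small worked trace, not part of the commit and with made-up sizes, of the Split()/MergeAdjacent() bookkeeping above.

// Initial state: one free VMA covering the user area.
//   { USER_MIN : Free, size = user_size }
// AddMapping(USER_MIN + 0x2000, 0x4000), as called from MapMemory(), splits twice:
//   { USER_MIN          : Free,     size = 0x2000 }
//   { USER_MIN + 0x2000 : Flexible, size = 0x4000 }   (type/prot filled in by the SCOPE_EXIT block)
//   { USER_MIN + 0x6000 : Free,     size = user_size - 0x6000 }
// UnmapMemory(USER_MIN + 0x2000, 0x4000) marks the middle VMA Free again and
// MergeAdjacent() folds the three entries back into a single free VMA.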
@@ -0,0 +1,128 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <string_view>
#include <vector>
#include <boost/icl/split_interval_map.hpp>
#include "common/enum.h"
#include "common/singleton.h"
#include "common/types.h"
#include "core/address_space.h"

namespace Core {

enum class MemoryProt : u32 {
NoAccess = 0,
CpuRead = 1,
CpuReadWrite = 2,
GpuRead = 16,
GpuWrite = 32,
GpuReadWrite = 38,
};

enum class MemoryMapFlags : u32 {
NoFlags = 0,
Fixed = 0x10,
NoOverwrite = 0x0080,
NoCoalesce = 0x400000,
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryMapFlags)

enum class VMAType : u32 {
Free = 0,
Reserved = 1,
Direct = 2,
Flexible = 3,
Pooled = 4,
Stack = 5,
};

struct DirectMemoryArea {
PAddr base = 0;
size_t size = 0;
int memory_type = 0;
};

struct VirtualMemoryArea {
VAddr base = 0;
size_t size = 0;
PAddr phys_base = 0;
VMAType type = VMAType::Free;
MemoryProt prot = MemoryProt::NoAccess;
bool disallow_merge = false;
std::string name = "";

bool CanMergeWith(const VirtualMemoryArea& next) const {
if (disallow_merge || next.disallow_merge) {
return false;
}
if (base + size != next.base) {
return false;
}
if (type == VMAType::Direct && phys_base + size != next.phys_base) {
return false;
}
if (prot != next.prot || type != next.type) {
return false;
}
return true;
}
};

constexpr VAddr SYSTEM_RESERVED = 0x800000000u;
constexpr VAddr CODE_BASE_OFFSET = 0x100000000u;
constexpr VAddr SYSTEM_MANAGED_MIN = 0x0000040000u;
constexpr VAddr SYSTEM_MANAGED_MAX = 0x07FFFFBFFFu;
constexpr VAddr USER_MIN = 0x1000000000u;
constexpr VAddr USER_MAX = 0xFBFFFFFFFFu;

class MemoryManager {
using VMAMap = std::map<VAddr, VirtualMemoryArea>;
using VMAHandle = VMAMap::iterator;

public:
explicit MemoryManager();
~MemoryManager();

PAddr Allocate(PAddr search_start, PAddr search_end, size_t size, u64 alignment,
int memory_type);

void Free(PAddr phys_addr, size_t size);

int MapMemory(void** out_addr, VAddr virtual_addr, size_t size, MemoryProt prot,
MemoryMapFlags flags, VMAType type, std::string_view name = "",
PAddr phys_addr = 0, u64 alignment = 0);

void UnmapMemory(VAddr virtual_addr, size_t size);

private:
bool HasOverlap(VAddr addr, size_t size) const {
return vma_map.find(addr) != vma_map.end();
}

VMAHandle FindVMA(VAddr target) {
// Return the first VMA with base >= target.
const auto it = vma_map.lower_bound(target);
if (it->first == target) {
return it;
}
return std::prev(it);
}

VirtualMemoryArea& AddMapping(VAddr virtual_addr, size_t size);

VMAHandle Split(VMAHandle vma_handle, u32 offset_in_vma);

VMAHandle MergeAdjacent(VMAHandle iter);

private:
AddressSpace impl;
std::vector<DirectMemoryArea> allocations;
VMAMap vma_map;
};

using Memory = Common::Singleton<MemoryManager>;

} // namespace Core

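Editor's note: a minimal sketch, not part of the commit, of the direct-memory path that memory_management.cpp now routes through this interface; the search range, size, and alignment values are illustrative.

#include "core/memory.h"

void SketchDirectMemory() {
    auto* memory = Core::Memory::Instance();
    // sceKernelAllocateDirectMemory: reserve a 64 KiB physical (dmem) range.
    const PAddr phys_addr = memory->Allocate(/*search_start=*/0, /*search_end=*/0x180000000,
                                             64 * 1024, /*alignment=*/64 * 1024,
                                             /*memory_type=*/0);
    // sceKernelMapDirectMemory: back a user virtual range with that dmem range.
    void* out_addr = nullptr;
    memory->MapMemory(&out_addr, Core::USER_MIN, 64 * 1024, Core::MemoryProt::CpuReadWrite,
                      Core::MemoryMapFlags::NoFlags, Core::VMAType::Direct, "",
                      phys_addr, /*alignment=*/64 * 1024);
}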
@@ -156,7 +156,7 @@ void PKGViewer::ProcessPKGInfo() {
if (isFlagSet(pkg_content_flag, flag.first)) {
if (!flagss.isEmpty())
flagss.append(", ");
flagss.append(flag.second);
flagss.append(QString::fromStdString(flag.second));
}
}

@@ -331,8 +331,8 @@ void RendererVulkan::Present(Frame* frame) {
.pSignalSemaphores = &present_ready,
};

try {
std::scoped_lock submit_lock{scheduler.submit_mutex};
try {
instance.GetGraphicsQueue().submit(submit_info, frame->present_done);
} catch (vk::DeviceLostError& err) {
LOG_CRITICAL(Render_Vulkan, "Device lost during present submit: {}", err.what());