// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/alignment.h"
#include "common/assert.h"
#include "common/debug.h"
#include "common/scope_exit.h"
#include "core/libraries/error_codes.h"
#include "core/libraries/kernel/memory_management.h"
#include "core/memory.h"
#include "video_core/renderer_vulkan/vk_instance.h"

namespace Core {

MemoryManager::MemoryManager() {
    // Insert an area that covers direct memory physical block.
    dmem_map.emplace(0, DirectMemoryArea{0, SCE_KERNEL_MAIN_DMEM_SIZE});

    // Insert a virtual memory area that covers the entire area we manage.
    const VAddr virtual_base = impl.VirtualBase();
    const size_t virtual_size = impl.VirtualSize();
    vma_map.emplace(virtual_base, VirtualMemoryArea{virtual_base, virtual_size});

    // Log initialization.
    LOG_INFO(Kernel_Vmm, "Usable memory address space {}_GB", virtual_size >> 30);
}

MemoryManager::~MemoryManager() = default;

PAddr MemoryManager::Allocate(PAddr search_start, PAddr search_end, size_t size, u64 alignment,
                              int memory_type) {
    std::scoped_lock lk{mutex};
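    // Walk the dmem map forward from search_start until a free area large enough is found.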
    auto dmem_area = FindDmemArea(search_start);

    const auto is_suitable = [&] {
        return dmem_area->second.is_free && dmem_area->second.size >= size;
    };
    while (!is_suitable() && dmem_area->second.GetEnd() <= search_end) {
        dmem_area++;
    }
    ASSERT_MSG(is_suitable(), "Unable to find free direct memory area");

    // Align free position
    PAddr free_addr = dmem_area->second.base;
    free_addr = alignment > 0 ? Common::AlignUp(free_addr, alignment) : free_addr;

    // Add the allocated region to the list and commit its pages.
    auto& area = AddDmemAllocation(free_addr, size);
    area.memory_type = memory_type;
    area.is_free = false;
    return free_addr;
}

void MemoryManager::Free(PAddr phys_addr, size_t size) {
    std::scoped_lock lk{mutex};
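    // Only whole allocations can be freed; the range must match an existing area's base and size.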
    const auto dmem_area = FindDmemArea(phys_addr);
    ASSERT(dmem_area != dmem_map.end() && dmem_area->second.base == phys_addr &&
           dmem_area->second.size == size);

    // Release any dmem mappings that reference this physical block.
    std::vector<std::pair<VAddr, u64>> remove_list;
    for (const auto& [addr, mapping] : vma_map) {
        if (mapping.type != VMAType::Direct) {
            continue;
        }
        if (mapping.phys_base <= phys_addr && phys_addr < mapping.phys_base + mapping.size) {
            LOG_INFO(Kernel_Vmm, "Unmapping direct mapping {:#x} with size {:#x}", addr,
                     mapping.size);
            // Unmapping might erase from vma_map. We can't do it here.
            remove_list.emplace_back(addr, mapping.size);
        }
    }
    for (const auto& [addr, size] : remove_list) {
        UnmapMemory(addr, size);
    }

    // Mark region as free and attempt to coalesce it with neighbours.
    auto& area = dmem_area->second;
    area.is_free = true;
    area.memory_type = 0;
    MergeAdjacent(dmem_map, dmem_area);
}

int MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, size_t size, MemoryProt prot,
                             MemoryMapFlags flags, VMAType type, std::string_view name,
                             bool is_exec, PAddr phys_addr, u64 alignment) {
    std::scoped_lock lk{mutex};
    if (type == VMAType::Flexible && flexible_usage + size > total_flexible_size) {
        return SCE_KERNEL_ERROR_ENOMEM;
    }

    // When virtual addr is zero, force it to virtual_base. The guest cannot pass the Fixed
    // flag, so we will take the branch that searches for free (or reserved) mappings.
    virtual_addr = (virtual_addr == 0) ? impl.VirtualBase() : virtual_addr;
    alignment = alignment > 0 ? alignment : 16_KB;

    VAddr mapped_addr = alignment > 0 ? Common::AlignUp(virtual_addr, alignment) : virtual_addr;
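    // Deferred until return: register the new VMA and update type-specific state
    // (physical base and Vulkan mapping for direct memory, flexible usage accounting).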
    SCOPE_EXIT {
        auto& new_vma = AddMapping(mapped_addr, size);
        new_vma.disallow_merge = True(flags & MemoryMapFlags::NoCoalesce);
        new_vma.prot = prot;
        new_vma.name = name;
        new_vma.type = type;

        if (type == VMAType::Direct) {
            new_vma.phys_base = phys_addr;
            MapVulkanMemory(mapped_addr, size);
        }
        if (type == VMAType::Flexible) {
            flexible_usage += size;
        }
    };

    // Fixed mapping means the virtual address must exactly match the provided one.
    if (True(flags & MemoryMapFlags::Fixed) && True(flags & MemoryMapFlags::NoOverwrite)) {
        // This should return SCE_KERNEL_ERROR_ENOMEM but shouldn't normally happen.
        const auto& vma = FindVMA(mapped_addr)->second;
        const size_t remaining_size = vma.base + vma.size - mapped_addr;
        ASSERT_MSG(vma.type == VMAType::Free && remaining_size >= size,
                   "Attempting to map over an already mapped region");
    }

    // Find the first free area starting with provided virtual address.
    if (False(flags & MemoryMapFlags::Fixed)) {
        auto it = FindVMA(mapped_addr);
        // If the VMA is free and contains the requested mapping we are done.
        if (it->second.type == VMAType::Free && it->second.Contains(virtual_addr, size)) {
            mapped_addr = virtual_addr;
        } else {
            // Search for the first free VMA that fits our mapping.
            while (it != vma_map.end() &&
                   (it->second.type != VMAType::Free || it->second.size < size)) {
                it++;
            }
            ASSERT(it != vma_map.end());
            const auto& vma = it->second;
            mapped_addr = alignment > 0 ? Common::AlignUp(vma.base, alignment) : vma.base;
        }
    }

    // Perform the mapping.
    *out_addr = impl.Map(mapped_addr, size, alignment, phys_addr, is_exec);
    TRACK_ALLOC(*out_addr, size, "VMEM");
    return ORBIS_OK;
}

int MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, size_t size, MemoryProt prot,
                           MemoryMapFlags flags, uintptr_t fd, size_t offset) {
    ASSERT(virtual_addr == 0);
    virtual_addr = impl.VirtualBase();
    const size_t size_aligned = Common::AlignUp(size, 16_KB);

    // Find first free area to map the file.
    auto it = FindVMA(virtual_addr);
    while (it != vma_map.end() &&
           (it->second.type != VMAType::Free || it->second.size < size_aligned)) {
        it++;
    }
    ASSERT(it != vma_map.end());

    // Map the file.
    const VAddr mapped_addr = it->second.base;
    impl.MapFile(mapped_addr, size, offset, fd);

    // Add virtual memory area
    auto& new_vma = AddMapping(mapped_addr, size_aligned);
    new_vma.disallow_merge = True(flags & MemoryMapFlags::NoCoalesce);
    new_vma.prot = prot;
    new_vma.name = "File";
    new_vma.fd = fd;
    new_vma.type = VMAType::File;

    *out_addr = std::bit_cast<void*>(mapped_addr);
    return ORBIS_OK;
}

void MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) {
    std::scoped_lock lk{mutex};

    // TODO: Partial unmaps are technically supported by the guest.
    const auto it = vma_map.find(virtual_addr);
    ASSERT_MSG(it != vma_map.end() && it->first == virtual_addr,
               "Attempting to unmap partially mapped range");
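    // Direct and file mappings are backed by host resources; remember that so the
    // host unmap below can release the backing too.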
    const auto type = it->second.type;
    const bool has_backing = type == VMAType::Direct || type == VMAType::File;
    if (type == VMAType::Direct) {
        UnmapVulkanMemory(virtual_addr, size);
    }
    if (type == VMAType::Flexible) {
        flexible_usage -= size;
    }

    // Mark region as free and attempt to coalesce it with neighbours.
    auto& vma = it->second;
    vma.type = VMAType::Free;
    vma.prot = MemoryProt::NoAccess;
    vma.phys_base = 0;
    MergeAdjacent(vma_map, it);

    // Unmap the memory region.
    impl.Unmap(virtual_addr, size, has_backing);
    TRACK_FREE(virtual_addr, "VMEM");
}

int MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* prot) {
    std::scoped_lock lk{mutex};

    const auto it = FindVMA(addr);
    const auto& vma = it->second;
    ASSERT_MSG(vma.type != VMAType::Free, "Provided address is not mapped");

    if (start != nullptr) {
        *start = reinterpret_cast<void*>(vma.base);
    }
    if (end != nullptr) {
        *end = reinterpret_cast<void*>(vma.base + vma.size);
    }
    if (prot != nullptr) {
        *prot = static_cast<u32>(vma.prot);
    }
    return ORBIS_OK;
}

int MemoryManager::VirtualQuery(VAddr addr, int flags,
                                Libraries::Kernel::OrbisVirtualQueryInfo* info) {
    std::scoped_lock lk{mutex};

    auto it = FindVMA(addr);
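    // A flags value of 1 requests find-next behaviour: if addr falls inside a free
    // region, advance to the following area instead of failing.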
    if (it->second.type == VMAType::Free && flags == 1) {
        it++;
    }
    if (it->second.type == VMAType::Free) {
        LOG_WARNING(Kernel_Vmm, "VirtualQuery on free memory region");
        return ORBIS_KERNEL_ERROR_EACCES;
    }

    const auto& vma = it->second;
    info->start = vma.base;
    info->end = vma.base + vma.size;
    info->is_flexible.Assign(vma.type == VMAType::Flexible);
    info->is_direct.Assign(vma.type == VMAType::Direct);
    info->is_commited.Assign(vma.type != VMAType::Free);
    if (vma.type == VMAType::Direct) {
        const auto dmem_it = FindDmemArea(vma.phys_base);
        ASSERT(dmem_it != dmem_map.end());
        info->memory_type = dmem_it->second.memory_type;
    }

    return ORBIS_OK;
}

int MemoryManager::DirectMemoryQuery(PAddr addr, bool find_next,
                                     Libraries::Kernel::OrbisQueryInfo* out_info) {
    std::scoped_lock lk{mutex};

    auto dmem_area = FindDmemArea(addr);
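    // With find_next set, skip over free areas until the first allocated one.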
    while (dmem_area != dmem_map.end() && dmem_area->second.is_free && find_next) {
        dmem_area++;
    }

    if (dmem_area == dmem_map.end() || dmem_area->second.is_free) {
        LOG_ERROR(Core, "Unable to find allocated direct memory region to query!");
        return ORBIS_KERNEL_ERROR_EACCES;
    }

    const auto& area = dmem_area->second;
    out_info->start = area.base;
    out_info->end = area.GetEnd();
    out_info->memoryType = area.memory_type;
    return ORBIS_OK;
}

int MemoryManager::DirectQueryAvailable(PAddr search_start, PAddr search_end, size_t alignment,
                                        PAddr* phys_addr_out, size_t* size_out) {
    std::scoped_lock lk{mutex};

    auto dmem_area = FindDmemArea(search_start);
    PAddr paddr{};
    size_t max_size{};
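    // Track the largest free area that ends within the search range.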
    while (dmem_area != dmem_map.end() && dmem_area->second.GetEnd() <= search_end) {
        if (dmem_area->second.is_free && dmem_area->second.size > max_size) {
            paddr = dmem_area->second.base;
            max_size = dmem_area->second.size;
        }
        dmem_area++;
    }

    *phys_addr_out = alignment > 0 ? Common::AlignUp(paddr, alignment) : paddr;
    *size_out = max_size;
    return ORBIS_OK;
}

std::pair<vk::Buffer, size_t> MemoryManager::GetVulkanBuffer(VAddr addr) {
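    // upper_bound returns the first mapping that starts after addr, so the previous
    // entry is the mapping that can contain it.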
    auto it = mapped_memories.upper_bound(addr);
    it = std::prev(it);
    ASSERT(it != mapped_memories.end() && it->first <= addr);
    return std::make_pair(*it->second.buffer, addr - it->first);
}

VirtualMemoryArea& MemoryManager::AddMapping(VAddr virtual_addr, size_t size) {
    auto vma_handle = FindVMA(virtual_addr);
    ASSERT_MSG(vma_handle != vma_map.end(), "Virtual address not in vm_map");

    const VirtualMemoryArea& vma = vma_handle->second;
    ASSERT_MSG(vma.type == VMAType::Free && vma.base <= virtual_addr,
               "Adding a mapping to already mapped region");
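    // Carve the requested range out of the containing free VMA by splitting at both ends.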
    const VAddr start_in_vma = virtual_addr - vma.base;
    const VAddr end_in_vma = start_in_vma + size;
    ASSERT_MSG(end_in_vma <= vma.size, "Mapping cannot fit inside free region");

    if (end_in_vma != vma.size) {
        // Split VMA at the end of the allocated region
        Split(vma_handle, end_in_vma);
    }
    if (start_in_vma != 0) {
        // Split VMA at the start of the allocated region
        vma_handle = Split(vma_handle, start_in_vma);
    }

    return vma_handle->second;
}

DirectMemoryArea& MemoryManager::AddDmemAllocation(PAddr addr, size_t size) {
    auto dmem_handle = FindDmemArea(addr);
    ASSERT_MSG(dmem_handle != dmem_map.end(), "Physical address not in dmem_map");

    const DirectMemoryArea& area = dmem_handle->second;
    ASSERT_MSG(area.is_free && area.base <= addr,
               "Adding an allocation to already allocated region");

    const PAddr start_in_area = addr - area.base;
    const PAddr end_in_area = start_in_area + size;
    ASSERT_MSG(end_in_area <= area.size, "Allocation cannot fit inside free region");

    if (end_in_area != area.size) {
        // Split the dmem area at the end of the allocated region
        Split(dmem_handle, end_in_area);
    }
    if (start_in_area != 0) {
        // Split the dmem area at the start of the allocated region
        dmem_handle = Split(dmem_handle, start_in_area);
    }

    return dmem_handle->second;
}

MemoryManager::VMAHandle MemoryManager::Split(VMAHandle vma_handle, size_t offset_in_vma) {
    auto& old_vma = vma_handle->second;
    ASSERT(offset_in_vma < old_vma.size && offset_in_vma > 0);
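    // The original VMA keeps [base, base + offset); the copy becomes [base + offset, end).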
    auto new_vma = old_vma;
    old_vma.size = offset_in_vma;
    new_vma.base += offset_in_vma;
    new_vma.size -= offset_in_vma;

    if (new_vma.type == VMAType::Direct) {
        new_vma.phys_base += offset_in_vma;
    }
    return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
}

MemoryManager::DMemHandle MemoryManager::Split(DMemHandle dmem_handle, size_t offset_in_area) {
    auto& old_area = dmem_handle->second;
    ASSERT(offset_in_area < old_area.size && offset_in_area > 0);

    auto new_area = old_area;
    old_area.size = offset_in_area;
    new_area.base += offset_in_area;
    new_area.size -= offset_in_area;

    return dmem_map.emplace_hint(std::next(dmem_handle), new_area.base, new_area);
}

void MemoryManager::MapVulkanMemory(VAddr addr, size_t size) {
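    // Early return: the host-pointer import path below is currently disabled and never runs.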
    return;
    const vk::Device device = instance->GetDevice();
    const auto memory_props = instance->GetPhysicalDevice().getMemoryProperties();
    void* host_pointer = reinterpret_cast<void*>(addr);
    const auto host_mem_props = device.getMemoryHostPointerPropertiesEXT(
        vk::ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT, host_pointer);
    ASSERT(host_mem_props.memoryTypeBits != 0);

    int mapped_memory_type = -1;
    auto find_mem_type_with_flag = [&](const vk::MemoryPropertyFlags flags) {
        u32 host_mem_types = host_mem_props.memoryTypeBits;
        while (host_mem_types != 0) {
            // Try to find a cached memory type
            mapped_memory_type = std::countr_zero(host_mem_types);
            host_mem_types -= (1 << mapped_memory_type);

            if ((memory_props.memoryTypes[mapped_memory_type].propertyFlags & flags) == flags) {
                return;
            }
        }

        mapped_memory_type = -1;
    };

    // First try to find a memory that is both coherent and cached
    find_mem_type_with_flag(vk::MemoryPropertyFlagBits::eHostCoherent |
                            vk::MemoryPropertyFlagBits::eHostCached);
    if (mapped_memory_type == -1) {
        // Then only coherent (lower performance)
        find_mem_type_with_flag(vk::MemoryPropertyFlagBits::eHostCoherent);
    }

    if (mapped_memory_type == -1) {
        LOG_CRITICAL(Render_Vulkan, "No coherent memory available for memory mapping");
        mapped_memory_type = std::countr_zero(host_mem_props.memoryTypeBits);
    }

    const vk::StructureChain alloc_info = {
        vk::MemoryAllocateInfo{
            .allocationSize = size,
            .memoryTypeIndex = static_cast<uint32_t>(mapped_memory_type),
        },
        vk::ImportMemoryHostPointerInfoEXT{
            .handleType = vk::ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT,
            .pHostPointer = host_pointer,
        },
    };

    const auto [it, new_memory] = mapped_memories.try_emplace(addr);
    ASSERT_MSG(new_memory, "Attempting to remap already mapped vulkan memory");

    auto& memory = it->second;
    memory.backing = device.allocateMemoryUnique(alloc_info.get());

    constexpr vk::BufferUsageFlags MapFlags =
        vk::BufferUsageFlagBits::eIndexBuffer | vk::BufferUsageFlagBits::eVertexBuffer |
        vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst |
        vk::BufferUsageFlagBits::eUniformBuffer | vk::BufferUsageFlagBits::eStorageBuffer;

    const vk::StructureChain buffer_info = {
        vk::BufferCreateInfo{
            .size = size,
            .usage = MapFlags,
            .sharingMode = vk::SharingMode::eExclusive,
        },
        vk::ExternalMemoryBufferCreateInfoKHR{
            .handleTypes = vk::ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT,
        }};
    memory.buffer = device.createBufferUnique(buffer_info.get());
    device.bindBufferMemory(*memory.buffer, *memory.backing, 0);
}

void MemoryManager::UnmapVulkanMemory(VAddr addr, size_t size) {
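    // Early return: nothing is inserted while MapVulkanMemory above is disabled, so
    // there is nothing to erase here.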
    return;
    const auto it = mapped_memories.find(addr);
    ASSERT(it != mapped_memories.end() && it->second.buffer_size == size);
    mapped_memories.erase(it);
}

} // namespace Core