// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/alignment.h"
#include "common/assert.h"
#include "common/debug.h"
#include "core/libraries/error_codes.h"
#include "core/libraries/kernel/memory_management.h"
#include "core/memory.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"

namespace Core {

MemoryManager::MemoryManager() {
    // Insert an area that covers direct memory physical block.
    dmem_map.emplace(0, DirectMemoryArea{0, SCE_KERNEL_MAIN_DMEM_SIZE});

    // Insert a virtual memory area that covers the entire area we manage.
    const VAddr system_managed_base = impl.SystemManagedVirtualBase();
    const size_t system_managed_size = impl.SystemManagedVirtualSize();
    const VAddr system_reserved_base = impl.SystemReservedVirtualBase();
    const size_t system_reserved_size = impl.SystemReservedVirtualSize();
    const VAddr user_base = impl.UserVirtualBase();
    const size_t user_size = impl.UserVirtualSize();
    vma_map.emplace(system_managed_base,
                    VirtualMemoryArea{system_managed_base, system_managed_size});
    vma_map.emplace(system_reserved_base,
                    VirtualMemoryArea{system_reserved_base, system_reserved_size});
    vma_map.emplace(user_base, VirtualMemoryArea{user_base, user_size});

    // Log initialization.
    LOG_INFO(Kernel_Vmm, "Usable memory address space: {}_GB",
             (system_managed_size + system_reserved_size + user_size) >> 30);
}

MemoryManager::~MemoryManager() = default;

PAddr MemoryManager::Allocate(PAddr search_start, PAddr search_end, size_t size, u64 alignment,
                              int memory_type) {
    std::scoped_lock lk{mutex};

    auto dmem_area = FindDmemArea(search_start);

    const auto is_suitable = [&] {
        return dmem_area->second.is_free && dmem_area->second.size >= size;
    };
    while (!is_suitable() && dmem_area->second.GetEnd() <= search_end) {
        dmem_area++;
    }
    ASSERT_MSG(is_suitable(), "Unable to find free direct memory area");

    // Align free position
    PAddr free_addr = dmem_area->second.base;
    free_addr = alignment > 0 ? Common::AlignUp(free_addr, alignment) : free_addr;

    // Add the allocated region to the list and commit its pages.
    auto& area = CarveDmemArea(free_addr, size)->second;
    area.memory_type = memory_type;
    area.is_free = false;
    return free_addr;
}

void MemoryManager::Free(PAddr phys_addr, size_t size) {
    std::scoped_lock lk{mutex};

    auto dmem_area = CarveDmemArea(phys_addr, size);
    ASSERT(dmem_area != dmem_map.end() && dmem_area->second.size >= size);

    // Release any dmem mappings that reference this physical block.
    std::vector<std::pair<VAddr, size_t>> remove_list;
    for (const auto& [addr, mapping] : vma_map) {
        if (mapping.type != VMAType::Direct) {
            continue;
        }
        if (mapping.phys_base <= phys_addr && phys_addr < mapping.phys_base + mapping.size) {
            auto vma_segment_start_addr = phys_addr - mapping.phys_base + addr;
            LOG_INFO(Kernel_Vmm, "Unmapping direct mapping {:#x} with size {:#x}",
                     vma_segment_start_addr, size);
            // Unmapping might erase from vma_map. We can't do it here.
            remove_list.emplace_back(vma_segment_start_addr, size);
        }
    }
    for (const auto& [addr, size] : remove_list) {
        UnmapMemory(addr, size);
    }

    // Mark region as free and attempt to coalesce it with neighbours.
    auto& area = dmem_area->second;
    area.is_free = true;
    area.memory_type = 0;
    MergeAdjacent(dmem_map, dmem_area);
}
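// Reserves a range of the virtual address space without committing backing memory.
// The carved-out VMA is marked Reserved with NoAccess protection so that a later
// Fixed mapping can claim it.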
int MemoryManager::Reserve(void** out_addr, VAddr virtual_addr, size_t size, MemoryMapFlags flags,
                           u64 alignment) {
    std::scoped_lock lk{mutex};

    virtual_addr = (virtual_addr == 0) ? impl.SystemManagedVirtualBase() : virtual_addr;
    alignment = alignment > 0 ? alignment : 16_KB;
    VAddr mapped_addr = alignment > 0 ? Common::AlignUp(virtual_addr, alignment) : virtual_addr;

    // Fixed mapping means the virtual address must exactly match the provided one.
    if (True(flags & MemoryMapFlags::Fixed)) {
        const auto& vma = FindVMA(mapped_addr)->second;
        // If the VMA is mapped, unmap the region first.
        if (vma.IsMapped()) {
            UnmapMemory(mapped_addr, size);
        }
        const size_t remaining_size = vma.base + vma.size - mapped_addr;
        ASSERT_MSG(vma.type == VMAType::Free && remaining_size >= size);
    }

    // Find the first free area starting with provided virtual address.
    if (False(flags & MemoryMapFlags::Fixed)) {
        mapped_addr = SearchFree(mapped_addr, size, alignment);
    }

    // Add virtual memory area
    const auto new_vma_handle = CarveVMA(mapped_addr, size);
    auto& new_vma = new_vma_handle->second;
    new_vma.disallow_merge = True(flags & MemoryMapFlags::NoCoalesce);
    new_vma.prot = MemoryProt::NoAccess;
    new_vma.name = "";
    new_vma.type = VMAType::Reserved;
    MergeAdjacent(vma_map, new_vma_handle);

    *out_addr = std::bit_cast<void*>(mapped_addr);
    return ORBIS_OK;
}

int MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, size_t size, MemoryProt prot,
                             MemoryMapFlags flags, VMAType type, std::string_view name,
                             bool is_exec, PAddr phys_addr, u64 alignment) {
    std::scoped_lock lk{mutex};

    // Certain games perform flexible mappings on loop to determine
    // the available flexible memory size. Questionable but we need to handle this.
    if (type == VMAType::Flexible && flexible_usage + size > total_flexible_size) {
        return SCE_KERNEL_ERROR_ENOMEM;
    }

    // When virtual addr is zero, force it to virtual_base. The guest cannot pass Fixed
    // flag so we will take the branch that searches for free (or reserved) mappings.
    virtual_addr = (virtual_addr == 0) ? impl.SystemManagedVirtualBase() : virtual_addr;
    alignment = alignment > 0 ? alignment : 16_KB;
    VAddr mapped_addr = alignment > 0 ? Common::AlignUp(virtual_addr, alignment) : virtual_addr;

    // Fixed mapping means the virtual address must exactly match the provided one.
    if (True(flags & MemoryMapFlags::Fixed)) {
        // This should return SCE_KERNEL_ERROR_ENOMEM but shouldn't normally happen.
        const auto& vma = FindVMA(mapped_addr)->second;
        const size_t remaining_size = vma.base + vma.size - mapped_addr;
        ASSERT_MSG(!vma.IsMapped() && remaining_size >= size);
    }

    // Find the first free area starting with provided virtual address.
    if (False(flags & MemoryMapFlags::Fixed)) {
        mapped_addr = SearchFree(mapped_addr, size, alignment);
    }

    // Perform the mapping.
    *out_addr = impl.Map(mapped_addr, size, alignment, phys_addr, is_exec);
    TRACK_ALLOC(*out_addr, size, "VMEM");

    auto& new_vma = CarveVMA(mapped_addr, size)->second;
    new_vma.disallow_merge = True(flags & MemoryMapFlags::NoCoalesce);
    new_vma.prot = prot;
    new_vma.name = name;
    new_vma.type = type;
    new_vma.is_exec = is_exec;

    if (type == VMAType::Direct) {
        new_vma.phys_base = phys_addr;
        rasterizer->MapMemory(mapped_addr, size);
    }
    if (type == VMAType::Flexible) {
        flexible_usage += size;
    }

    return ORBIS_OK;
}
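// Maps a host file into guest memory. The requested size is rounded up to 16_KB
// granularity for the VMA bookkeeping, while the file itself is mapped with the
// original size and offset.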
int MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, size_t size, MemoryProt prot,
                           MemoryMapFlags flags, uintptr_t fd, size_t offset) {
    VAddr mapped_addr = (virtual_addr == 0) ? impl.SystemManagedVirtualBase() : virtual_addr;
    const size_t size_aligned = Common::AlignUp(size, 16_KB);

    // Find first free area to map the file.
    if (False(flags & MemoryMapFlags::Fixed)) {
        mapped_addr = SearchFree(mapped_addr, size_aligned, 1);
    }

    if (True(flags & MemoryMapFlags::Fixed)) {
        const auto& vma = FindVMA(virtual_addr)->second;
        const size_t remaining_size = vma.base + vma.size - virtual_addr;
        ASSERT_MSG(!vma.IsMapped() && remaining_size >= size);
    }

    // Map the file.
    impl.MapFile(mapped_addr, size, offset, std::bit_cast<u32>(prot), fd);

    // Add virtual memory area
    auto& new_vma = CarveVMA(mapped_addr, size_aligned)->second;
    new_vma.disallow_merge = True(flags & MemoryMapFlags::NoCoalesce);
    new_vma.prot = prot;
    new_vma.name = "File";
    new_vma.fd = fd;
    new_vma.type = VMAType::File;

    *out_addr = std::bit_cast<void*>(mapped_addr);
    return ORBIS_OK;
}

void MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) {
    std::scoped_lock lk{mutex};

    const auto it = FindVMA(virtual_addr);
    const auto& vma_base = it->second;
    ASSERT_MSG(vma_base.Contains(virtual_addr, size),
               "Existing mapping does not contain requested unmap range");

    const auto vma_base_addr = vma_base.base;
    const auto vma_base_size = vma_base.size;
    const auto phys_base = vma_base.phys_base;
    const bool is_exec = vma_base.is_exec;
    const auto start_in_vma = virtual_addr - vma_base_addr;
    const auto type = vma_base.type;
    const bool has_backing = type == VMAType::Direct || type == VMAType::File;
    if (type == VMAType::Direct) {
        rasterizer->UnmapMemory(virtual_addr, size);
    }
    if (type == VMAType::Flexible) {
        flexible_usage -= size;
    }

    // Mark region as free and attempt to coalesce it with neighbours.
    const auto new_it = CarveVMA(virtual_addr, size);
    auto& vma = new_it->second;
    vma.type = VMAType::Free;
    vma.prot = MemoryProt::NoAccess;
    vma.phys_base = 0;
    vma.disallow_merge = false;
    vma.name = "";
    MergeAdjacent(vma_map, new_it);

    // Unmap the memory region.
    impl.Unmap(vma_base_addr, vma_base_size, start_in_vma, start_in_vma + size, phys_base, is_exec,
               has_backing);
    TRACK_FREE(virtual_addr, "VMEM");
}

int MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* prot) {
    std::scoped_lock lk{mutex};

    const auto it = FindVMA(addr);
    const auto& vma = it->second;
    ASSERT_MSG(vma.type != VMAType::Free, "Provided address is not mapped");

    if (start != nullptr) {
        *start = reinterpret_cast<void*>(vma.base);
    }
    if (end != nullptr) {
        *end = reinterpret_cast<void*>(vma.base + vma.size);
    }
    if (prot != nullptr) {
        *prot = static_cast<u32>(vma.prot);
    }
    return ORBIS_OK;
}

int MemoryManager::VirtualQuery(VAddr addr, int flags,
                                ::Libraries::Kernel::OrbisVirtualQueryInfo* info) {
    std::scoped_lock lk{mutex};

    auto it = FindVMA(addr);
    if (it->second.type == VMAType::Free && flags == 1) {
        it++;
    }
    if (it->second.type == VMAType::Free) {
        LOG_WARNING(Kernel_Vmm, "VirtualQuery on free memory region");
        return ORBIS_KERNEL_ERROR_EACCES;
    }

    const auto& vma = it->second;
    info->start = vma.base;
    info->end = vma.base + vma.size;
    info->is_flexible.Assign(vma.type == VMAType::Flexible);
    info->is_direct.Assign(vma.type == VMAType::Direct);
    info->is_commited.Assign(vma.type != VMAType::Free);
    vma.name.copy(info->name.data(), std::min(info->name.size(), vma.name.size()));
    if (vma.type == VMAType::Direct) {
        const auto dmem_it = FindDmemArea(vma.phys_base);
        ASSERT(dmem_it != dmem_map.end());
        info->offset = vma.phys_base;
        info->memory_type = dmem_it->second.memory_type;
    }

    return ORBIS_OK;
}
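// Looks up the direct memory area containing addr and reports its bounds and memory type.
// With find_next set, free areas are skipped until the next allocated one is found.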
int MemoryManager::DirectMemoryQuery(PAddr addr, bool find_next,
                                     ::Libraries::Kernel::OrbisQueryInfo* out_info) {
    std::scoped_lock lk{mutex};

    auto dmem_area = FindDmemArea(addr);
    while (dmem_area != dmem_map.end() && dmem_area->second.is_free && find_next) {
        dmem_area++;
    }

    if (dmem_area == dmem_map.end() || dmem_area->second.is_free) {
        LOG_ERROR(Core, "Unable to find allocated direct memory region to query!");
        return ORBIS_KERNEL_ERROR_EACCES;
    }

    const auto& area = dmem_area->second;
    out_info->start = area.base;
    out_info->end = area.GetEnd();
    out_info->memoryType = area.memory_type;
    return ORBIS_OK;
}

int MemoryManager::DirectQueryAvailable(PAddr search_start, PAddr search_end, size_t alignment,
                                        PAddr* phys_addr_out, size_t* size_out) {
    std::scoped_lock lk{mutex};

    auto dmem_area = FindDmemArea(search_start);
    PAddr paddr{};
    size_t max_size{};
    while (dmem_area != dmem_map.end() && dmem_area->second.GetEnd() <= search_end) {
        if (dmem_area->second.size > max_size) {
            paddr = dmem_area->second.base;
            max_size = dmem_area->second.size;
        }
        dmem_area++;
    }

    *phys_addr_out = alignment > 0 ? Common::AlignUp(paddr, alignment) : paddr;
    *size_out = max_size;
    return ORBIS_OK;
}

void MemoryManager::NameVirtualRange(VAddr virtual_addr, size_t size, std::string_view name) {
    auto it = FindVMA(virtual_addr);

    ASSERT_MSG(it->second.Contains(virtual_addr, size),
               "Range provided is not fully contained in vma");
    it->second.name = name;
}
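// Walks the VMA map starting at virtual_addr and returns the first aligned address
// inside a free VMA that can hold a mapping of the requested size.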
VAddr MemoryManager::SearchFree(VAddr virtual_addr, size_t size, u32 alignment) {
    // If the requested address is below the mapped range, start search from the lowest address
    auto min_search_address = impl.SystemManagedVirtualBase();
    if (virtual_addr < min_search_address) {
        virtual_addr = min_search_address;
    }

    auto it = FindVMA(virtual_addr);
    ASSERT_MSG(it != vma_map.end(), "Specified mapping address was not found!");

    // If the VMA is free and contains the requested mapping we are done.
    if (it->second.IsFree() && it->second.Contains(virtual_addr, size)) {
        return virtual_addr;
    }

    // Search for the first free VMA that fits our mapping.
    const auto is_suitable = [&] {
        if (!it->second.IsFree()) {
            return false;
        }
        const auto& vma = it->second;
        virtual_addr = Common::AlignUp(vma.base, alignment);
        // Sometimes the alignment itself might be larger than the VMA.
        if (virtual_addr > vma.base + vma.size) {
            return false;
        }
        const size_t remaining_size = vma.base + vma.size - virtual_addr;
        return remaining_size >= size;
    };
    while (!is_suitable()) {
        it++;
    }
    return virtual_addr;
}

MemoryManager::VMAHandle MemoryManager::CarveVMA(VAddr virtual_addr, size_t size) {
    auto vma_handle = FindVMA(virtual_addr);
    ASSERT_MSG(vma_handle != vma_map.end(), "Virtual address not in vm_map");

    const VirtualMemoryArea& vma = vma_handle->second;
    ASSERT_MSG(vma.base <= virtual_addr, "Adding a mapping to already mapped region");

    const VAddr start_in_vma = virtual_addr - vma.base;
    const VAddr end_in_vma = start_in_vma + size;
    ASSERT_MSG(end_in_vma <= vma.size, "Mapping cannot fit inside free region");

    if (end_in_vma != vma.size) {
        // Split VMA at the end of the allocated region
        Split(vma_handle, end_in_vma);
    }
    if (start_in_vma != 0) {
        // Split VMA at the start of the allocated region
        vma_handle = Split(vma_handle, start_in_vma);
    }

    return vma_handle;
}

MemoryManager::DMemHandle MemoryManager::CarveDmemArea(PAddr addr, size_t size) {
    auto dmem_handle = FindDmemArea(addr);
    ASSERT_MSG(dmem_handle != dmem_map.end(), "Physical address not in dmem_map");

    const DirectMemoryArea& area = dmem_handle->second;
    ASSERT_MSG(area.base <= addr, "Adding an allocation to already allocated region");

    const PAddr start_in_area = addr - area.base;
    const PAddr end_in_vma = start_in_area + size;
    ASSERT_MSG(end_in_vma <= area.size, "Mapping cannot fit inside free region");

    if (end_in_vma != area.size) {
        // Split VMA at the end of the allocated region
        Split(dmem_handle, end_in_vma);
    }
    if (start_in_area != 0) {
        // Split VMA at the start of the allocated region
        dmem_handle = Split(dmem_handle, start_in_area);
    }

    return dmem_handle;
}

MemoryManager::VMAHandle MemoryManager::Split(VMAHandle vma_handle, size_t offset_in_vma) {
    auto& old_vma = vma_handle->second;
    ASSERT(offset_in_vma < old_vma.size && offset_in_vma > 0);

    auto new_vma = old_vma;
    old_vma.size = offset_in_vma;
    new_vma.base += offset_in_vma;
    new_vma.size -= offset_in_vma;

    if (new_vma.type == VMAType::Direct) {
        new_vma.phys_base += offset_in_vma;
    }
    return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
}

MemoryManager::DMemHandle MemoryManager::Split(DMemHandle dmem_handle, size_t offset_in_area) {
    auto& old_area = dmem_handle->second;
    ASSERT(offset_in_area < old_area.size && offset_in_area > 0);

    auto new_area = old_area;
    old_area.size = offset_in_area;
    new_area.base += offset_in_area;
    new_area.size -= offset_in_area;

    return dmem_map.emplace_hint(std::next(dmem_handle), new_area.base, new_area);
}

int MemoryManager::GetDirectMemoryType(PAddr addr, int* directMemoryTypeOut,
                                       void** directMemoryStartOut, void** directMemoryEndOut) {
    std::scoped_lock lk{mutex};

    auto dmem_area = FindDmemArea(addr);

    if (dmem_area == dmem_map.end() || dmem_area->second.is_free) {
        LOG_ERROR(Core, "Unable to find allocated direct memory region to check type!");
        return ORBIS_KERNEL_ERROR_ENOENT;
    }

    const auto& area = dmem_area->second;
    *directMemoryStartOut = reinterpret_cast<void*>(area.base);
    *directMemoryEndOut = reinterpret_cast<void*>(area.GetEnd());
    *directMemoryTypeOut = area.memory_type;
    return ORBIS_OK;
}

} // namespace Core