diff --git a/src/common/config.cpp b/src/common/config.cpp
index b7d99bbf..0d117c21 100644
--- a/src/common/config.cpp
+++ b/src/common/config.cpp
@@ -21,6 +21,8 @@ static bool isShowSplash = false;
 static bool isNullGpu = false;
 static bool shouldDumpShaders = false;
 static bool shouldDumpPM4 = false;
+static bool vkValidation = false;
+static bool vkValidationSync = false;
 
 bool isLleLibc() {
     return isLibc;
 }
@@ -69,6 +71,14 @@ bool dumpPM4() {
     return shouldDumpPM4;
 }
 
+bool vkValidationEnabled() {
+    return vkValidation;
+}
+
+bool vkValidationSyncEnabled() {
+    return vkValidationSync;
+}
+
 void load(const std::filesystem::path& path) {
     // If the configuration file does not exist, create it and return
     std::error_code error;
@@ -110,6 +120,15 @@ void load(const std::filesystem::path& path) {
             shouldDumpPM4 = toml::find_or(gpu, "dumpPM4", false);
         }
     }
+    if (data.contains("Vulkan")) {
+        const auto vkResult = toml::expect<toml::value>(data.at("Vulkan"));
+        if (vkResult.is_ok()) {
+            auto vk = vkResult.unwrap();
+
+            vkValidation = toml::find_or(vk, "validation", true);
+            vkValidationSync = toml::find_or(vk, "validation_sync", true);
+        }
+    }
     if (data.contains("Debug")) {
         auto debugResult = toml::expect<toml::value>(data.at("Debug"));
         if (debugResult.is_ok()) {
@@ -156,6 +175,8 @@ void save(const std::filesystem::path& path) {
     data["GPU"]["nullGpu"] = isNullGpu;
     data["GPU"]["dumpShaders"] = shouldDumpShaders;
     data["GPU"]["dumpPM4"] = shouldDumpPM4;
+    data["Vulkan"]["validation"] = vkValidation;
+    data["Vulkan"]["validation_sync"] = vkValidationSync;
     data["Debug"]["DebugDump"] = isDebugDump;
     data["LLE"]["libc"] = isLibc;
diff --git a/src/common/config.h b/src/common/config.h
index 53925379..8da3c6de 100644
--- a/src/common/config.h
+++ b/src/common/config.h
@@ -25,4 +25,7 @@ bool nullGpu();
 bool dumpShaders();
 bool dumpPM4();
 
+bool vkValidationEnabled();
+bool vkValidationSyncEnabled();
+
 }; // namespace Config
diff --git a/src/video_core/amdgpu/resource.h b/src/video_core/amdgpu/resource.h
index e9b7a553..58e54118 100644
--- a/src/video_core/amdgpu/resource.h
+++ b/src/video_core/amdgpu/resource.h
@@ -86,11 +86,27 @@ constexpr std::string_view NameOf(ImageType type) {
 }
 
 enum class TilingMode : u32 {
+    Depth_MicroTiled = 0x5u,
     Display_Linear = 0x8u,
     Display_MacroTiled = 0xAu,
     Texture_MicroTiled = 0xDu,
 };
 
+constexpr std::string_view NameOf(TilingMode type) {
+    switch (type) {
+    case TilingMode::Depth_MicroTiled:
+        return "Depth_MicroTiled";
+    case TilingMode::Display_Linear:
+        return "Display_Linear";
+    case TilingMode::Display_MacroTiled:
+        return "Display_MacroTiled";
+    case TilingMode::Texture_MicroTiled:
+        return "Texture_MicroTiled";
+    default:
+        return "Unknown";
+    }
+}
+
 struct Image {
     union {
         BitField<0, 38, u64> base_address;
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 72ee6c9b..572316af 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -62,8 +62,8 @@ bool CanBlitToSwapchain(const vk::PhysicalDevice physical_device, vk::Format for
 }
 
 RendererVulkan::RendererVulkan(Frontend::WindowSDL& window_, AmdGpu::Liverpool* liverpool)
-    : window{window_}, instance{window, Config::getGpuId()}, scheduler{instance},
-      swapchain{instance, window}, texture_cache{instance, scheduler} {
+    : window{window_}, instance{window, Config::getGpuId(), Config::vkValidationEnabled()},
+      scheduler{instance}, swapchain{instance, window}, texture_cache{instance, scheduler} {
     rasterizer =
         std::make_unique<Rasterizer>(instance, scheduler, texture_cache, liverpool);
     const u32 num_images = swapchain.GetImageCount();
     const vk::Device device = instance.GetDevice();
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 66fee434..6e81a7c9 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -83,15 +83,6 @@ ComputePipeline::~ComputePipeline() = default;
 
 void ComputePipeline::BindResources(Core::MemoryManager* memory, StreamBuffer& staging,
                                     VideoCore::TextureCache& texture_cache) const {
-    static constexpr u64 MinUniformAlignment = 64;
-
-    const auto map_staging = [&](auto src, size_t size) {
-        const auto [data, offset, _] = staging.Map(size, MinUniformAlignment);
-        std::memcpy(data, reinterpret_cast<const void*>(src), size);
-        staging.Commit(size);
-        return offset;
-    };
-
     // Bind resource buffers and textures.
     boost::container::static_vector buffer_infos;
     boost::container::static_vector image_infos;
@@ -103,7 +94,8 @@ void ComputePipeline::BindResources(Core::MemoryManager* memory, StreamBuffer& s
         const u32 size = vsharp.GetSize();
         const VAddr addr = vsharp.base_address.Value();
         texture_cache.OnCpuWrite(addr);
-        const u32 offset = map_staging(addr, size);
+        const u32 offset =
+            staging.Copy(addr, size, buffer.is_storage ? 4 : instance.UniformMinAlignment());
         // const auto [vk_buffer, offset] = memory->GetVulkanBuffer(addr);
         buffer_infos.emplace_back(staging.Handle(), offset, size);
         set_writes.push_back({
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 8c78a857..c92bf7fe 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -1,6 +1,7 @@
 // SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
+#include <algorithm>
 #include 
 #include 
@@ -277,38 +278,7 @@ void GraphicsPipeline::BuildDescSetLayout() {
 
 void GraphicsPipeline::BindResources(Core::MemoryManager* memory, StreamBuffer& staging,
                                      VideoCore::TextureCache& texture_cache) const {
-    static constexpr u64 MinUniformAlignment = 64;
-
-    const auto map_staging = [&](auto src, size_t size) {
-        const auto [data, offset, _] = staging.Map(size, MinUniformAlignment);
-        std::memcpy(data, reinterpret_cast<const void*>(src), size);
-        staging.Commit(size);
-        return offset;
-    };
-
-    std::array buffers;
-    std::array offsets;
-    VAddr base_address = 0;
-    u32 start_offset = 0;
-
-    // Bind vertex buffer.
-    const auto& vs_info = stages[0];
-    const size_t num_buffers = vs_info.vs_inputs.size();
-    for (u32 i = 0; i < num_buffers; ++i) {
-        const auto& input = vs_info.vs_inputs[i];
-        const auto buffer = vs_info.ReadUd<AmdGpu::Buffer>(input.sgpr_base, input.dword_offset);
-        if (i == 0) {
-            start_offset = map_staging(buffer.base_address.Value(), buffer.GetSize());
-            base_address = buffer.base_address;
-        }
-        buffers[i] = staging.Handle();
-        offsets[i] = start_offset + buffer.base_address - base_address;
-    }
-
-    const auto cmdbuf = scheduler.CommandBuffer();
-    if (num_buffers > 0) {
-        cmdbuf.bindVertexBuffers(0, num_buffers, buffers.data(), offsets.data());
-    }
+    BindVertexBuffers(staging);
 
     // Bind resource buffers and textures.
     boost::container::static_vector buffer_infos;
@@ -320,7 +290,8 @@ void GraphicsPipeline::BindResources(Core::MemoryManager* memory, StreamBuffer&
     for (const auto& buffer : stage.buffers) {
         const auto vsharp = stage.ReadUd<AmdGpu::Buffer>(buffer.sgpr_base, buffer.dword_offset);
         const u32 size = vsharp.GetSize();
-        const u32 offset = map_staging(vsharp.base_address.Value(), size);
+        const u32 offset = staging.Copy(vsharp.base_address.Value(), size,
+                                        buffer.is_storage ? 4 : instance.UniformMinAlignment());
         buffer_infos.emplace_back(staging.Handle(), offset, size);
         set_writes.push_back({
             .dstSet = VK_NULL_HANDLE,
@@ -337,7 +308,7 @@ void GraphicsPipeline::BindResources(Core::MemoryManager* memory, StreamBuffer&
         const auto tsharp = stage.ReadUd<AmdGpu::Image>(image.sgpr_base, image.dword_offset);
         const auto& image_view = texture_cache.FindImageView(tsharp);
         image_infos.emplace_back(VK_NULL_HANDLE, *image_view.image_view,
-                                 vk::ImageLayout::eGeneral);
+                                 vk::ImageLayout::eShaderReadOnlyOptimal);
         set_writes.push_back({
             .dstSet = VK_NULL_HANDLE,
             .dstBinding = binding++,
@@ -364,9 +335,76 @@ void GraphicsPipeline::BindResources(Core::MemoryManager* memory, StreamBuffer&
     }
 
     if (!set_writes.empty()) {
+        const auto cmdbuf = scheduler.CommandBuffer();
         cmdbuf.pushDescriptorSetKHR(vk::PipelineBindPoint::eGraphics, *pipeline_layout, 0,
                                     set_writes);
     }
 }
 
+void GraphicsPipeline::BindVertexBuffers(StreamBuffer& staging) const {
+    const auto& vs_info = stages[0];
+    if (vs_info.vs_inputs.empty()) {
+        return;
+    }
+
+    std::array host_buffers;
+    std::array host_offsets;
+    boost::container::static_vector guest_buffers;
+
+    struct BufferRange {
+        VAddr base_address;
+        VAddr end_address;
+        u64 offset; // offset in the mapped memory
+
+        size_t GetSize() const {
+            return end_address - base_address;
+        }
+    };
+
+    // Calculate buffers memory overlaps
+    std::vector<BufferRange> ranges{};
+    for (const auto& input : vs_info.vs_inputs) {
+        const auto& buffer = guest_buffers.emplace_back(
+            vs_info.ReadUd<AmdGpu::Buffer>(input.sgpr_base, input.dword_offset));
+        ranges.emplace_back(buffer.base_address.Value(),
+                            buffer.base_address.Value() + buffer.GetSize());
+    }
+    std::ranges::sort(ranges, [](const BufferRange& lhv, const BufferRange& rhv) {
+        return lhv.base_address < rhv.base_address;
+    });
+
+    boost::container::static_vector ranges_merged{ranges[0]};
+    for (auto range : ranges) {
+        auto& prev_range = ranges_merged.back();
+        if (prev_range.end_address < range.base_address) {
+            ranges_merged.emplace_back(range);
+        } else {
+            prev_range.end_address = std::max(prev_range.end_address, range.end_address);
+        }
+    }
+
+    // Map buffers
+    for (auto& range : ranges_merged) {
+        range.offset = staging.Copy(range.base_address, range.GetSize(), 4);
+    }
+
+    // Bind vertex buffers
+    const size_t num_buffers = guest_buffers.size();
+    for (u32 i = 0; i < num_buffers; ++i) {
+        const auto& buffer = guest_buffers[i];
+        const auto& host_buffer = std::ranges::find_if(
+            ranges_merged.cbegin(), ranges_merged.cend(), [&](const BufferRange& range) {
+                return buffer.base_address >= range.base_address &&
+                       buffer.base_address < range.end_address;
+            });
+        assert(host_buffer != ranges_merged.cend());
+
+        host_buffers[i] = staging.Handle();
+        host_offsets[i] = host_buffer->offset + buffer.base_address - host_buffer->base_address;
+    }
+
+    if (num_buffers > 0) {
+        const auto cmdbuf = scheduler.CommandBuffer();
+        cmdbuf.bindVertexBuffers(0, num_buffers, host_buffers.data(), host_offsets.data());
+    }
+}
+
 } // namespace Vulkan
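Note: the interval merge introduced in `GraphicsPipeline::BindVertexBuffers` above is the subtlest part of this change. The sketch below is a simplified, self-contained illustration of the same idea, not the emulator's code: it uses plain integers and `std::vector` instead of `AmdGpu::Buffer`/`StreamBuffer`, and every name in it is made up for the example. Overlapping or adjacent guest ranges are coalesced so each byte is staged at most once, and every vertex buffer is then bound at its merged range's staging offset plus its distance from that range's start.

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Simplified stand-in for a guest vertex-buffer descriptor.
struct GuestBuffer {
    std::uint64_t base_address;
    std::uint64_t size;
};

struct Range {
    std::uint64_t base_address;
    std::uint64_t end_address;
    std::uint64_t staging_offset = 0; // where the merged range was copied in the staging buffer
};

// Merge overlapping/adjacent guest ranges so each region of guest memory is uploaded once.
// Assumes `buffers` is non-empty.
std::vector<Range> MergeRanges(const std::vector<GuestBuffer>& buffers) {
    std::vector<Range> ranges;
    for (const auto& b : buffers) {
        ranges.push_back({b.base_address, b.base_address + b.size});
    }
    std::ranges::sort(ranges, {}, &Range::base_address);

    std::vector<Range> merged{ranges.front()};
    for (const auto& r : ranges) {
        auto& prev = merged.back();
        if (prev.end_address < r.base_address) {
            merged.push_back(r); // disjoint: start a new upload
        } else {
            prev.end_address = std::max(prev.end_address, r.end_address); // overlap: extend
        }
    }
    return merged;
}

// A buffer's binding offset = staging offset of its merged range + distance from the range start.
std::uint64_t BindingOffset(const GuestBuffer& b, const std::vector<Range>& merged) {
    const auto it = std::ranges::find_if(merged, [&](const Range& r) {
        return b.base_address >= r.base_address && b.base_address < r.end_address;
    });
    assert(it != merged.end());
    return it->staging_offset + (b.base_address - it->base_address);
}

int main() {
    // Two interleaved attribute streams sharing one allocation, plus a separate buffer.
    std::vector<GuestBuffer> buffers{{0x1000, 0x100}, {0x1080, 0x100}, {0x4000, 0x40}};
    auto merged = MergeRanges(buffers); // -> [0x1000, 0x1180) and [0x4000, 0x4040)

    // Pretend the staging copies landed back-to-back.
    std::uint64_t offset = 0;
    for (auto& r : merged) {
        r.staging_offset = offset;
        offset += r.end_address - r.base_address;
    }

    assert(BindingOffset(buffers[1], merged) == 0x80);  // inside the first merged range
    assert(BindingOffset(buffers[2], merged) == 0x180); // start of the second copy
}
```

Merging matters because games commonly interleave several vertex attribute streams inside a single allocation; without it, each stream would be staged as its own copy even though the underlying bytes overlap.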
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index 02c1fb5a..e93ea6f4 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -75,6 +75,7 @@ public:
 
 private:
     void BuildDescSetLayout();
+    void BindVertexBuffers(StreamBuffer& staging) const;
 
 private:
     const Instance& instance;
diff --git a/src/video_core/renderer_vulkan/vk_instance.cpp b/src/video_core/renderer_vulkan/vk_instance.cpp
index dc10ec6c..355d2603 100644
--- a/src/video_core/renderer_vulkan/vk_instance.cpp
+++ b/src/video_core/renderer_vulkan/vk_instance.cpp
@@ -40,10 +40,13 @@ Instance::Instance(bool enable_validation, bool dump_command_buffers)
                              dump_command_buffers)},
       physical_devices{instance->enumeratePhysicalDevices()} {}
 
-Instance::Instance(Frontend::WindowSDL& window, s32 physical_device_index)
-    : instance{CreateInstance(dl, window.getWindowInfo().type, true, false)},
-      debug_callback{CreateDebugCallback(*instance)},
+Instance::Instance(Frontend::WindowSDL& window, s32 physical_device_index,
+                   bool enable_validation /*= false*/)
+    : instance{CreateInstance(dl, window.getWindowInfo().type, enable_validation, false)},
       physical_devices{instance->enumeratePhysicalDevices()} {
+    if (enable_validation) {
+        debug_callback = CreateDebugCallback(*instance);
+    }
     const std::size_t num_physical_devices = static_cast<std::size_t>(physical_devices.size());
     ASSERT_MSG(num_physical_devices > 0, "No physical devices found");
diff --git a/src/video_core/renderer_vulkan/vk_instance.h b/src/video_core/renderer_vulkan/vk_instance.h
index 28af5405..797eb886 100644
--- a/src/video_core/renderer_vulkan/vk_instance.h
+++ b/src/video_core/renderer_vulkan/vk_instance.h
@@ -18,7 +18,8 @@ namespace Vulkan {
 
 class Instance {
 public:
     explicit Instance(bool validation = false, bool dump_command_buffers = false);
-    explicit Instance(Frontend::WindowSDL& window, s32 physical_device_index);
+    explicit Instance(Frontend::WindowSDL& window, s32 physical_device_index,
+                      bool enable_validation = false);
     ~Instance();
 
     /// Returns a formatted string for the driver version
@@ -200,7 +201,7 @@ private:
     vk::PhysicalDeviceProperties properties;
     vk::PhysicalDeviceFeatures features;
     vk::DriverIdKHR driver_id;
-    vk::UniqueDebugUtilsMessengerEXT debug_callback;
+    vk::UniqueDebugUtilsMessengerEXT debug_callback{};
     std::string vendor_name;
     VmaAllocator allocator{};
     vk::Queue present_queue;
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 3a14a02e..11cd5419 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -170,12 +170,7 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() {
 
         // Set module name to hash in renderdoc
         const auto name = fmt::format("{}_{:#x}", stage, hash);
-        const vk::DebugUtilsObjectNameInfoEXT name_info = {
-            .objectType = vk::ObjectType::eShaderModule,
-            .objectHandle = std::bit_cast<u64>(stages[i]),
-            .pObjectName = name.c_str(),
-        };
-        instance.GetDevice().setDebugUtilsObjectNameEXT(name_info);
+        Vulkan::SetObjectName(instance.GetDevice(), stages[i], name);
 
         if (Config::dumpShaders()) {
             DumpShader(spv_code, hash, stage, "spv");
diff --git a/src/video_core/renderer_vulkan/vk_platform.cpp b/src/video_core/renderer_vulkan/vk_platform.cpp
index 5cc890f6..90b3f4a8 100644
--- a/src/video_core/renderer_vulkan/vk_platform.cpp
+++ b/src/video_core/renderer_vulkan/vk_platform.cpp
@@ -15,12 +15,16 @@
 #include 
 
 #include "common/assert.h"
+#include "common/config.h"
 #include "common/logging/log.h"
"sdl_window.h" #include "video_core/renderer_vulkan/vk_platform.h" namespace Vulkan { +static const char* const VALIDATION_LAYER_NAME = "VK_LAYER_KHRONOS_validation"; +static const char* const API_DUMP_LAYER_NAME = "VK_LAYER_LUNARG_api_dump"; + static VKAPI_ATTR VkBool32 VKAPI_CALL DebugUtilsCallback( VkDebugUtilsMessageSeverityFlagBitsEXT severity, VkDebugUtilsMessageTypeFlagsEXT type, const VkDebugUtilsMessengerCallbackDataEXT* callback_data, void* user_data) { @@ -179,7 +183,7 @@ vk::UniqueInstance CreateInstance(vk::DynamicLoader& dl, Frontend::WindowSystemT VK_VERSION_MAJOR(available_version), VK_VERSION_MINOR(available_version))); } - const auto extensions = GetInstanceExtensions(window_type, enable_validation); + const auto extensions = GetInstanceExtensions(window_type, true); const vk::ApplicationInfo application_info = { .pApplicationName = "shadPS4", @@ -193,21 +197,37 @@ vk::UniqueInstance CreateInstance(vk::DynamicLoader& dl, Frontend::WindowSystemT std::array layers; if (enable_validation) { - layers[num_layers++] = "VK_LAYER_KHRONOS_validation"; + layers[num_layers++] = VALIDATION_LAYER_NAME; } if (dump_command_buffers) { - layers[num_layers++] = "VK_LAYER_LUNARG_api_dump"; + layers[num_layers++] = API_DUMP_LAYER_NAME; } - vk::InstanceCreateInfo instance_ci = { - .pApplicationInfo = &application_info, - .enabledLayerCount = num_layers, - .ppEnabledLayerNames = layers.data(), - .enabledExtensionCount = static_cast(extensions.size()), - .ppEnabledExtensionNames = extensions.data(), + vk::Bool32 enable_sync = + enable_validation && Config::vkValidationSyncEnabled() ? vk::True : vk::False; + vk::LayerSettingEXT layer_set = { + .pLayerName = VALIDATION_LAYER_NAME, + .pSettingName = "validate_sync", + .type = vk::LayerSettingTypeEXT::eBool32, + .valueCount = 1, + .pValues = &enable_sync, }; - auto instance = vk::createInstanceUnique(instance_ci); + vk::StructureChain instance_ci_chain = { + vk::InstanceCreateInfo{ + .pApplicationInfo = &application_info, + .enabledLayerCount = num_layers, + .ppEnabledLayerNames = layers.data(), + .enabledExtensionCount = static_cast(extensions.size()), + .ppEnabledExtensionNames = extensions.data(), + }, + vk::LayerSettingsCreateInfoEXT{ + .settingCount = 1, + .pSettings = &layer_set, + }, + }; + + auto instance = vk::createInstanceUnique(instance_ci_chain.get()); VULKAN_HPP_DEFAULT_DISPATCHER.init(*instance); diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp index cad4daea..86a03a03 100644 --- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp +++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp @@ -231,4 +231,12 @@ void StreamBuffer::WaitPendingOperations(u64 requested_upper_bound) { } } +u64 StreamBuffer::Copy(VAddr src, size_t size, size_t alignment /*= 0*/) { + static const u64 MinUniformAlignment = instance.UniformMinAlignment(); + const auto [data, offset, _] = Map(size, MinUniformAlignment); + std::memcpy(data, reinterpret_cast(src), size); + Commit(size); + return offset; +} + } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h index 637f03d0..f7957ac0 100644 --- a/src/video_core/renderer_vulkan/vk_stream_buffer.h +++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h @@ -40,6 +40,9 @@ public: /// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy. 
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
index cad4daea..86a03a03 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
@@ -231,4 +231,12 @@ void StreamBuffer::WaitPendingOperations(u64 requested_upper_bound) {
     }
 }
 
+u64 StreamBuffer::Copy(VAddr src, size_t size, size_t alignment /*= 0*/) {
+    const auto [data, offset, _] = Map(size, alignment);
+    std::memcpy(data, reinterpret_cast<const void*>(src), size);
+    Commit(size);
+    return offset;
+}
+
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h
index 637f03d0..f7957ac0 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.h
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h
@@ -40,6 +40,9 @@ public:
     /// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy.
     void Commit(u64 size);
 
+    /// Maps and commits a memory region with user-provided data
+    u64 Copy(VAddr src, size_t size, size_t alignment = 0);
+
     vk::Buffer Handle() const noexcept {
         return buffer;
     }
diff --git a/src/video_core/texture_cache/image.cpp b/src/video_core/texture_cache/image.cpp
index 7aa3062b..9bf3ec0a 100644
--- a/src/video_core/texture_cache/image.cpp
+++ b/src/video_core/texture_cache/image.cpp
@@ -88,6 +88,7 @@ ImageInfo::ImageInfo(const Libraries::VideoOut::BufferAttributeGroup& group) noe
 ImageInfo::ImageInfo(const AmdGpu::Liverpool::ColorBuffer& buffer,
                      const AmdGpu::Liverpool::CbDbExtent& hint /*= {}*/) noexcept {
     is_tiled = buffer.IsTiled();
+    tiling_mode = buffer.GetTilingMode();
     pixel_format = LiverpoolToVK::SurfaceFormat(buffer.info.format, buffer.NumFormat());
     type = vk::ImageType::e2D;
     size.width = hint.Valid() ? hint.width : buffer.Pitch();
@@ -186,7 +187,6 @@ Image::Image(const Vulkan::Instance& instance_, Vulkan::Scheduler& scheduler_,
     if (info.is_tiled) {
         ImageViewInfo view_info;
         view_info.format = DemoteImageFormatForDetiling(info.pixel_format);
-        view_info.used_for_detiling = true;
         view_for_detiler.emplace(*instance, view_info, image);
     }
 
@@ -214,10 +214,11 @@ void Image::Transit(vk::ImageLayout dst_layout, vk::Flags ds
     }};
 
     // Adjust pipieline stage
-    vk::PipelineStageFlagBits dst_pl_stage = (dst_mask == vk::AccessFlagBits::eTransferRead ||
-                                              dst_mask == vk::AccessFlagBits::eTransferWrite)
-                                                 ? vk::PipelineStageFlagBits::eTransfer
-                                                 : vk::PipelineStageFlagBits::eAllGraphics;
+    vk::PipelineStageFlags dst_pl_stage =
+        (dst_mask == vk::AccessFlagBits::eTransferRead ||
+         dst_mask == vk::AccessFlagBits::eTransferWrite)
+            ? vk::PipelineStageFlagBits::eTransfer
+            : vk::PipelineStageFlagBits::eAllGraphics | vk::PipelineStageFlagBits::eComputeShader;
     const auto cmdbuf = scheduler->CommandBuffer();
     cmdbuf.pipelineBarrier(pl_stage, dst_pl_stage, vk::DependencyFlagBits::eByRegion, {}, {},
                            barrier);
diff --git a/src/video_core/texture_cache/image_view.h b/src/video_core/texture_cache/image_view.h
index aa4ec8ee..ae1f9ba0 100644
--- a/src/video_core/texture_cache/image_view.h
+++ b/src/video_core/texture_cache/image_view.h
@@ -24,7 +24,6 @@ struct ImageViewInfo {
     vk::Format format = vk::Format::eR8G8B8A8Unorm;
     SubresourceRange range;
     vk::ComponentMapping mapping{};
-    bool used_for_detiling = false;
 
     auto operator<=>(const ImageViewInfo&) const = default;
 };
diff --git a/src/video_core/texture_cache/texture_cache.cpp b/src/video_core/texture_cache/texture_cache.cpp
index 39f89878..5c371112 100644
--- a/src/video_core/texture_cache/texture_cache.cpp
+++ b/src/video_core/texture_cache/texture_cache.cpp
@@ -146,10 +146,10 @@ ImageView& TextureCache::RegisterImageView(Image& image, const ImageViewInfo& vi
     }
 
     // All tiled images are created with storage usage flag. This makes set of formats (e.g. sRGB)
-    // impossible to use. However, during view creation, if an image isn't used as storage and not a
-    // target for the detiler, we can temporary remove its storage bit.
+    // impossible to use. However, during view creation, if an image isn't used as storage we can
+    // temporarily remove its storage bit.
     std::optional<vk::ImageUsageFlags> usage_override;
-    if (!image.info.is_storage && !view_info.used_for_detiling) {
+    if (!image.info.is_storage) {
         usage_override = image.info.usage & ~vk::ImageUsageFlagBits::eStorage;
     }
@@ -163,6 +163,12 @@ ImageView& TextureCache::RegisterImageView(Image& image, const ImageViewInfo& vi
 
 ImageView& TextureCache::FindImageView(const AmdGpu::Image& desc) {
     Image& image = FindImage(ImageInfo{desc}, desc.Address());
+    if (image.info.is_storage) {
+        image.Transit(vk::ImageLayout::eGeneral, vk::AccessFlagBits::eShaderWrite);
+    } else {
+        image.Transit(vk::ImageLayout::eShaderReadOnlyOptimal, vk::AccessFlagBits::eShaderRead);
+    }
+
     const ImageViewInfo view_info{desc};
     return RegisterImageView(image, view_info);
 }
@@ -172,6 +178,10 @@ ImageView& TextureCache::RenderTarget(const AmdGpu::Liverpool::ColorBuffer& buff
     const ImageInfo info{buffer, hint};
     auto& image = FindImage(info, buffer.Address());
+    image.Transit(vk::ImageLayout::eColorAttachmentOptimal,
+                  vk::AccessFlagBits::eColorAttachmentWrite |
+                      vk::AccessFlagBits::eColorAttachmentRead);
+
     ImageViewInfo view_info;
     view_info.format = info.pixel_format;
     return RegisterImageView(image, view_info);
 }
@@ -184,12 +194,7 @@ void TextureCache::RefreshImage(Image& image) {
     {
         if (!tile_manager.TryDetile(image)) {
             // Upload data to the staging buffer.
-            const auto& [data, offset, _] = staging.Map(image.info.guest_size_bytes, 4);
-            const u8* image_data = reinterpret_cast<const u8*>(image.cpu_addr);
-            std::memcpy(data, image_data, image.info.guest_size_bytes);
-            staging.Commit(image.info.guest_size_bytes);
-
-            const auto cmdbuf = scheduler.CommandBuffer();
+            const auto offset = staging.Copy(image.cpu_addr, image.info.guest_size_bytes, 4);
             image.Transit(vk::ImageLayout::eTransferDstOptimal, vk::AccessFlagBits::eTransferWrite);
 
             // Copy to the image.
@@ -207,6 +212,7 @@ void TextureCache::RefreshImage(Image& image) {
             .imageExtent = {image.info.size.width, image.info.size.height, 1},
         };
 
+        const auto cmdbuf = scheduler.CommandBuffer();
         cmdbuf.copyBufferToImage(staging.Handle(), image.image,
                                  vk::ImageLayout::eTransferDstOptimal, image_copy);
     }
diff --git a/src/video_core/texture_cache/tile_manager.cpp b/src/video_core/texture_cache/tile_manager.cpp
index 54cbc5da..3fff9c11 100644
--- a/src/video_core/texture_cache/tile_manager.cpp
+++ b/src/video_core/texture_cache/tile_manager.cpp
@@ -222,12 +222,7 @@ TileManager::TileManager(const Vulkan::Instance& instance, Vulkan::Scheduler& sc
 
         // Set module debug name
        auto module_name = magic_enum::enum_name(static_cast(pl_id));
-        const vk::DebugUtilsObjectNameInfoEXT name_info = {
-            .objectType = vk::ObjectType::eShaderModule,
-            .objectHandle = std::bit_cast<u64>(module),
-            .pObjectName = module_name.data(),
-        };
-        instance.GetDevice().setDebugUtilsObjectNameEXT(name_info);
+        Vulkan::SetObjectName(instance.GetDevice(), module, module_name);
 
         const vk::PipelineShaderStageCreateInfo shader_ci = {
             .stage = vk::ShaderStageFlagBits::eCompute,
@@ -299,21 +294,17 @@ bool TileManager::TryDetile(Image& image) {
     const auto* detiler = GetDetiler(image);
     if (!detiler) {
-        LOG_ERROR(Render_Vulkan, "Unsupported tiled image: {} {}",
-                  vk::to_string(image.info.pixel_format), static_cast<u32>(image.info.tiling_mode));
+        LOG_ERROR(Render_Vulkan, "Unsupported tiled image: {} ({})",
+                  vk::to_string(image.info.pixel_format), NameOf(image.info.tiling_mode));
         return false;
     }
 
-    const auto& [data, offset, _] = staging.Map(image.info.guest_size_bytes, 4);
-    const u8* image_data = reinterpret_cast<const u8*>(image.cpu_addr);
-    std::memcpy(data, image_data, image.info.guest_size_bytes);
-    staging.Commit(image.info.guest_size_bytes);
+    const auto offset = staging.Copy(image.cpu_addr, image.info.guest_size_bytes, 4);
+    image.Transit(vk::ImageLayout::eGeneral, vk::AccessFlagBits::eShaderWrite);
 
     auto cmdbuf = scheduler.CommandBuffer();
     cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, *detiler->pl);
 
-    image.Transit(vk::ImageLayout::eGeneral, vk::AccessFlagBits::eShaderWrite);
-
     const vk::DescriptorBufferInfo input_buffer_info{
         .buffer = staging.Handle(),
         .offset = offset,