renderer_vulkan: fix for vertex buffer mapping offset

psucien 2024-06-06 21:41:59 +02:00
parent 3faeba8f0c
commit 6814d5f108
1 changed file with 63 additions and 21 deletions


@@ -1,6 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <boost/container/small_vector.hpp>
#include <boost/container/static_vector.hpp>
@@ -286,28 +287,69 @@ void GraphicsPipeline::BindResources(Core::MemoryManager* memory, StreamBuffer&
return offset;
};
std::array<vk::Buffer, MaxVertexBufferCount> buffers;
std::array<vk::DeviceSize, MaxVertexBufferCount> offsets;
VAddr base_address = 0;
u32 start_offset = 0;
// Bind vertex buffer.
const auto& vs_info = stages[0];
const size_t num_buffers = vs_info.vs_inputs.size();
for (u32 i = 0; i < num_buffers; ++i) {
const auto& input = vs_info.vs_inputs[i];
const auto buffer = vs_info.ReadUd<AmdGpu::Buffer>(input.sgpr_base, input.dword_offset);
if (i == 0) {
start_offset = map_staging(buffer.base_address.Value(), buffer.GetSize());
base_address = buffer.base_address;
}
buffers[i] = staging.Handle();
offsets[i] = start_offset + buffer.base_address - base_address;
}
const auto cmdbuf = scheduler.CommandBuffer();
const auto& vs_info = stages[0];
if (!vs_info.vs_inputs.empty()) {
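// Strategy: gather the guest address range of every vertex buffer, merge overlapping
// ranges, map each merged range into the staging buffer once, then rebase each
// buffer's bind offset onto its containing range's mapping.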
std::array<vk::Buffer, MaxVertexBufferCount> host_buffers;
std::array<vk::DeviceSize, MaxVertexBufferCount> host_offsets;
boost::container::static_vector<AmdGpu::Buffer, MaxVertexBufferCount> guest_buffers;
struct BufferRange {
VAddr base_address;
VAddr end_address;
u64 offset; // offset in the mapped memory
size_t GetSize() const {
return end_address - base_address;
}
};
// Collect the guest memory range of each vertex buffer
std::vector<BufferRange> ranges{};
for (const auto& input : vs_info.vs_inputs) {
const auto& buffer = guest_buffers.emplace_back(
vs_info.ReadUd<AmdGpu::Buffer>(input.sgpr_base, input.dword_offset));
ranges.emplace_back(buffer.base_address.Value(),
buffer.base_address.Value() + buffer.GetSize());
}
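// Sort by base address so overlapping ranges become adjacent and can be merged in one pass.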
std::ranges::sort(ranges, [](const BufferRange& lhs, const BufferRange& rhs) {
return lhs.base_address < rhs.base_address;
});
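// Merge overlapping ranges so each guest region is mapped only once; buffers that
// share memory then alias the same staging allocation.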
boost::container::static_vector<BufferRange, MaxVertexBufferCount> ranges_merged{ranges[0]};
for (const auto& range : ranges) {
auto& prev_range = ranges_merged.back();
if (prev_range.end_address < range.base_address) {
ranges_merged.emplace_back(range);
} else {
prev_range.end_address = std::max(prev_range.end_address, range.end_address);
}
}
// Map each merged range into the staging buffer
for (auto& range : ranges_merged) {
range.offset = map_staging(range.base_address, range.GetSize());
}
// Bind vertex buffers
const size_t num_buffers = guest_buffers.size();
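// Each buffer lies entirely within exactly one merged range; find it and compute
// the buffer's offset relative to that range's staging mapping.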
for (u32 i = 0; i < num_buffers; ++i) {
const auto& buffer = guest_buffers[i];
const auto host_buffer = std::ranges::find_if(
ranges_merged.cbegin(), ranges_merged.cend(), [&](const BufferRange& range) {
return buffer.base_address >= range.base_address &&
buffer.base_address < range.end_address;
});
assert(host_buffer != ranges_merged.cend());
host_buffers[i] = staging.Handle();
host_offsets[i] = host_buffer->offset + buffer.base_address - host_buffer->base_address;
}
if (num_buffers > 0) {
cmdbuf.bindVertexBuffers(0, num_buffers, buffers.data(), offsets.data());
cmdbuf.bindVertexBuffers(0, num_buffers, host_buffers.data(), host_offsets.data());
}
}
// Bind resource buffers and textures.