Compare commits

...

3 Commits

Author             SHA1         Message                                                     Date
IndecisiveTurtle   0dc05cfff2   vk_pipeline_cache: Remove some unnecessary checks           2024-08-20 23:58:11 +03:00
IndecisiveTurtle   3789dbe8e5   renderer_vulkan: Reduce number of compiled shaders          2024-08-19 19:52:50 +03:00
IndecisiveTurtle   68c465813c   cfg: Add one more divergence case (seen in RDR shaders)     2024-08-18 00:07:24 +03:00
10 changed files with 125 additions and 86 deletions

View File

@@ -143,6 +143,7 @@ int PS4_SYSV_ABI sceKernelGettimeofday(OrbisKernelTimeval* tp) {
         return ORBIS_KERNEL_ERROR_EFAULT;
     }
+#ifdef _WIN64
     auto now = std::chrono::system_clock::now();
     auto duration = now.time_since_epoch();
     auto seconds = std::chrono::duration_cast<std::chrono::seconds>(duration);
@@ -150,6 +151,12 @@ int PS4_SYSV_ABI sceKernelGettimeofday(OrbisKernelTimeval* tp) {
     tp->tv_sec = seconds.count();
     tp->tv_usec = microsecs.count();
+#else
+    timeval tv;
+    gettimeofday(&tv, nullptr);
+    tp->tv_sec = tv.tv_sec;
+    tp->tv_usec = tv.tv_usec;
+#endif
     return ORBIS_OK;
 }
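The hunk above wraps the existing std::chrono path in a Win64 guard and adds a POSIX branch for other hosts. A self-contained sketch of the same split, with illustrative names that are not part of the emulator:

#include <chrono>
#ifndef _WIN64
#include <sys/time.h>
#endif

struct HostTimeval {
    long long sec;
    long long usec;
};

HostTimeval HostTimeOfDay() {
#ifdef _WIN64
    // Windows: derive seconds/microseconds from the system clock epoch offset.
    using namespace std::chrono;
    const auto us = duration_cast<microseconds>(system_clock::now().time_since_epoch());
    return {us.count() / 1'000'000, us.count() % 1'000'000};
#else
    // Everything else: POSIX gettimeofday already reports wall-clock time in this shape.
    timeval tv{};
    gettimeofday(&tv, nullptr);
    return {tv.tv_sec, tv.tv_usec};
#endif
}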

View File

@@ -37,6 +37,7 @@ static IR::Condition MakeCondition(Opcode opcode) {
         return IR::Condition::Execnz;
     case Opcode::S_AND_SAVEEXEC_B64:
     case Opcode::S_ANDN2_B64:
+    case Opcode::V_CMPX_NE_U32:
         return IR::Condition::Execnz;
     default:
         return IR::Condition::True;
@@ -93,7 +94,7 @@ void CFG::EmitDivergenceLabels() {
               // While this instruction does not save EXEC it is often used paired
               // with SAVEEXEC to mask the threads that didn't pass the condition
               // of initial branch.
-              inst.opcode == Opcode::S_ANDN2_B64;
+              inst.opcode == Opcode::S_ANDN2_B64 || inst.opcode == Opcode::V_CMPX_NE_U32;
    };
    const auto is_close_scope = [](const GcnInst& inst) {
        // Closing an EXEC scope can be either a branch instruction
@@ -187,7 +188,7 @@ void CFG::LinkBlocks() {
        const auto end_inst{block.end_inst};
        // Handle divergence block inserted here.
        if (end_inst.opcode == Opcode::S_AND_SAVEEXEC_B64 ||
-           end_inst.opcode == Opcode::S_ANDN2_B64) {
+           end_inst.opcode == Opcode::S_ANDN2_B64 || end_inst.opcode == Opcode::V_CMPX_NE_U32) {
            // Blocks are stored ordered by address in the set
            auto next_it = std::next(it);
            auto* target_block = &(*next_it);
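Background on the new case: GCN's V_CMPX_* instructions write the per-lane comparison result directly into EXEC, so a V_CMPX_NE_U32 can open a divergent region on its own, without an explicit SAVEEXEC. A minimal sketch of the predicate the pass now applies (GcnInst and Opcode are the diff's types; the function name is illustrative):

// Sketch only: an instruction that overwrites EXEC opens a divergence scope, and the scope
// is treated as closed once the saved mask is restored (see is_close_scope in the diff).
bool OpensDivergenceScope(const GcnInst& inst) {
    return inst.opcode == Opcode::S_AND_SAVEEXEC_B64 || // saves EXEC, then narrows it
           inst.opcode == Opcode::S_ANDN2_B64 ||        // masks lanes that failed the branch
           inst.opcode == Opcode::V_CMPX_NE_U32;        // compare result lands in EXEC itself
}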

View File

@@ -12,18 +12,19 @@
 namespace Vulkan {
 ComputePipeline::ComputePipeline(const Instance& instance_, Scheduler& scheduler_,
-                                 vk::PipelineCache pipeline_cache, const Shader::Info* info_,
-                                 u64 compute_key_, vk::ShaderModule module)
-    : instance{instance_}, scheduler{scheduler_}, compute_key{compute_key_}, info{*info_} {
+                                 vk::PipelineCache pipeline_cache, u64 compute_key_,
+                                 const Program* program)
+    : instance{instance_}, scheduler{scheduler_}, compute_key{compute_key_},
+      info{&program->pgm.info} {
     const vk::PipelineShaderStageCreateInfo shader_ci = {
         .stage = vk::ShaderStageFlagBits::eCompute,
-        .module = module,
+        .module = program->module,
         .pName = "main",
     };
     u32 binding{};
     boost::container::small_vector<vk::DescriptorSetLayoutBinding, 32> bindings;
-    for (const auto& buffer : info.buffers) {
+    for (const auto& buffer : info->buffers) {
         bindings.push_back({
             .binding = binding++,
             .descriptorType = buffer.is_storage ? vk::DescriptorType::eStorageBuffer
@@ -32,7 +33,7 @@ ComputePipeline::ComputePipeline(const Instance& instance_, Scheduler& scheduler
             .stageFlags = vk::ShaderStageFlagBits::eCompute,
         });
     }
-    for (const auto& image : info.images) {
+    for (const auto& image : info->images) {
         bindings.push_back({
             .binding = binding++,
             .descriptorType = image.is_storage ? vk::DescriptorType::eStorageImage
@@ -41,7 +42,7 @@ ComputePipeline::ComputePipeline(const Instance& instance_, Scheduler& scheduler
             .stageFlags = vk::ShaderStageFlagBits::eCompute,
         });
     }
-    for (const auto& sampler : info.samplers) {
+    for (const auto& sampler : info->samplers) {
         bindings.push_back({
             .binding = binding++,
             .descriptorType = vk::DescriptorType::eSampler,
@@ -96,8 +97,8 @@ bool ComputePipeline::BindResources(VideoCore::BufferCache& buffer_cache,
     Shader::PushData push_data{};
     u32 binding{};
-    for (const auto& buffer : info.buffers) {
-        const auto vsharp = buffer.GetVsharp(info);
+    for (const auto& buffer : info->buffers) {
+        const auto vsharp = buffer.GetVsharp(*info);
         const VAddr address = vsharp.base_address;
         // Most of the time when a metadata is updated with a shader it gets cleared. It means we
         // can skip the whole dispatch and update the tracked state instead. Also, it is not
@@ -139,9 +140,9 @@ bool ComputePipeline::BindResources(VideoCore::BufferCache& buffer_cache,
         });
     }
-    for (const auto& image_desc : info.images) {
+    for (const auto& image_desc : info->images) {
         const auto tsharp =
-            info.ReadUd<AmdGpu::Image>(image_desc.sgpr_base, image_desc.dword_offset);
+            info->ReadUd<AmdGpu::Image>(image_desc.sgpr_base, image_desc.dword_offset);
         VideoCore::ImageInfo image_info{tsharp};
         VideoCore::ImageViewInfo view_info{tsharp, image_desc.is_storage};
         const auto& image_view = texture_cache.FindTexture(image_info, view_info);
@@ -161,8 +162,8 @@ bool ComputePipeline::BindResources(VideoCore::BufferCache& buffer_cache,
             LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a CS shader (texture)");
         }
     }
-    for (const auto& sampler : info.samplers) {
-        const auto ssharp = sampler.GetSsharp(info);
+    for (const auto& sampler : info->samplers) {
+        const auto ssharp = sampler.GetSsharp(*info);
         const auto vk_sampler = texture_cache.GetSampler(ssharp);
         image_infos.emplace_back(vk_sampler, VK_NULL_HANDLE, vk::ImageLayout::eGeneral);
         set_writes.push_back({

View File

@@ -3,6 +3,7 @@
 #pragma once
+#include "shader_recompiler/ir/program.h"
 #include "shader_recompiler/runtime_info.h"
 #include "video_core/renderer_vulkan/vk_common.h"
@@ -16,11 +17,18 @@ namespace Vulkan {
 class Instance;
 class Scheduler;
+struct Program {
+    Shader::IR::Program pgm;
+    std::vector<u32> spv;
+    vk::ShaderModule module;
+    u32 end_binding;
+};
+
 class ComputePipeline {
 public:
     explicit ComputePipeline(const Instance& instance, Scheduler& scheduler,
-                             vk::PipelineCache pipeline_cache, const Shader::Info* info,
-                             u64 compute_key, vk::ShaderModule module);
+                             vk::PipelineCache pipeline_cache, u64 compute_key,
+                             const Program* program);
     ~ComputePipeline();
     [[nodiscard]] vk::Pipeline Handle() const noexcept {
@@ -37,7 +45,7 @@ private:
     vk::UniquePipelineLayout pipeline_layout;
     vk::UniqueDescriptorSetLayout desc_layout;
     u64 compute_key;
-    Shader::Info info{};
+    const Shader::Info* info;
 };
 } // namespace Vulkan
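The Program struct above is the unit the cache now deduplicates: the recompiled IR (with its Shader::Info), the emitted SPIR-V, the compiled vk::ShaderModule, and the binding counter reached after SPIR-V emission. A hedged sketch of how a pipeline consumes it (the constructor shape comes from the diff; the helper itself is illustrative):

// Illustrative only: pipelines keep non-owning pointers into the cached Program, so two
// pipelines built from the same Program share one vk::ShaderModule and one Shader::Info.
vk::PipelineShaderStageCreateInfo MakeComputeStage(const Program* program) {
    return vk::PipelineShaderStageCreateInfo{
        .stage = vk::ShaderStageFlagBits::eCompute,
        .module = program->module, // compiled once by the pipeline cache, reused here
        .pName = "main",
    };
}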

View File

@@ -19,15 +19,14 @@
 GraphicsPipeline::GraphicsPipeline(const Instance& instance_, Scheduler& scheduler_,
                                    const GraphicsPipelineKey& key_,
                                    vk::PipelineCache pipeline_cache,
-                                   std::span<const Shader::Info*, MaxShaderStages> infos,
-                                   std::array<vk::ShaderModule, MaxShaderStages> modules)
+                                   std::span<const Program*, MaxShaderStages> programs)
     : instance{instance_}, scheduler{scheduler_}, key{key_} {
     const vk::Device device = instance.GetDevice();
     for (u32 i = 0; i < MaxShaderStages; i++) {
-        if (!infos[i]) {
+        if (!programs[i]) {
             continue;
         }
-        stages[i] = *infos[i];
+        stages[i] = &programs[i]->pgm.info;
     }
     BuildDescSetLayout();
@@ -49,14 +48,14 @@ GraphicsPipeline::GraphicsPipeline(const Instance& instance_, Scheduler& schedul
     boost::container::static_vector<vk::VertexInputBindingDescription, 32> bindings;
     boost::container::static_vector<vk::VertexInputAttributeDescription, 32> attributes;
     const auto& vs_info = stages[u32(Shader::Stage::Vertex)];
-    for (const auto& input : vs_info.vs_inputs) {
+    for (const auto& input : vs_info->vs_inputs) {
         if (input.instance_step_rate == Shader::Info::VsInput::InstanceIdType::OverStepRate0 ||
             input.instance_step_rate == Shader::Info::VsInput::InstanceIdType::OverStepRate1) {
             // Skip attribute binding as the data will be pulled by shader
             continue;
         }
-        const auto buffer = vs_info.ReadUd<AmdGpu::Buffer>(input.sgpr_base, input.dword_offset);
+        const auto buffer = vs_info->ReadUd<AmdGpu::Buffer>(input.sgpr_base, input.dword_offset);
         attributes.push_back({
             .location = input.binding,
             .binding = input.binding,
@@ -184,21 +183,21 @@ GraphicsPipeline::GraphicsPipeline(const Instance& instance_, Scheduler& schedul
         .maxDepthBounds = key.depth_bounds_max,
     };
-    u32 shader_count{};
     auto stage = u32(Shader::Stage::Vertex);
-    std::array<vk::PipelineShaderStageCreateInfo, MaxShaderStages> shader_stages;
-    shader_stages[shader_count++] = vk::PipelineShaderStageCreateInfo{
+    boost::container::static_vector<vk::PipelineShaderStageCreateInfo, MaxShaderStages>
+        shader_stages;
+    shader_stages.emplace_back(vk::PipelineShaderStageCreateInfo{
         .stage = vk::ShaderStageFlagBits::eVertex,
-        .module = modules[stage],
+        .module = programs[stage]->module,
         .pName = "main",
-    };
+    });
     stage = u32(Shader::Stage::Fragment);
-    if (modules[stage]) {
-        shader_stages[shader_count++] = vk::PipelineShaderStageCreateInfo{
+    if (programs[stage]) {
+        shader_stages.emplace_back(vk::PipelineShaderStageCreateInfo{
             .stage = vk::ShaderStageFlagBits::eFragment,
-            .module = modules[stage],
+            .module = programs[stage]->module,
             .pName = "main",
-        };
+        });
     }
     const auto it = std::ranges::find(key.color_formats, vk::Format::eUndefined);
@@ -271,7 +270,7 @@ GraphicsPipeline::GraphicsPipeline(const Instance& instance_, Scheduler& schedul
     const vk::GraphicsPipelineCreateInfo pipeline_info = {
         .pNext = &pipeline_rendering_ci,
-        .stageCount = shader_count,
+        .stageCount = static_cast<u32>(shader_stages.size()),
         .pStages = shader_stages.data(),
         .pVertexInputState = &vertex_input_info,
         .pInputAssemblyState = &input_assembly,
@@ -297,8 +296,11 @@ GraphicsPipeline::~GraphicsPipeline() = default;
 void GraphicsPipeline::BuildDescSetLayout() {
     u32 binding{};
     boost::container::small_vector<vk::DescriptorSetLayoutBinding, 32> bindings;
-    for (const auto& stage : stages) {
-        for (const auto& buffer : stage.buffers) {
+    for (const auto* stage : stages) {
+        if (!stage) {
+            continue;
+        }
+        for (const auto& buffer : stage->buffers) {
             bindings.push_back({
                 .binding = binding++,
                 .descriptorType = buffer.is_storage ? vk::DescriptorType::eStorageBuffer
@@ -307,7 +309,7 @@ void GraphicsPipeline::BuildDescSetLayout() {
                 .stageFlags = vk::ShaderStageFlagBits::eVertex | vk::ShaderStageFlagBits::eFragment,
             });
         }
-        for (const auto& image : stage.images) {
+        for (const auto& image : stage->images) {
             bindings.push_back({
                 .binding = binding++,
                 .descriptorType = image.is_storage ? vk::DescriptorType::eStorageImage
@@ -316,7 +318,7 @@
                 .stageFlags = vk::ShaderStageFlagBits::eVertex | vk::ShaderStageFlagBits::eFragment,
             });
         }
-        for (const auto& sampler : stage.samplers) {
+        for (const auto& sampler : stage->samplers) {
             bindings.push_back({
                 .binding = binding++,
                 .descriptorType = vk::DescriptorType::eSampler,
@@ -343,13 +345,16 @@ void GraphicsPipeline::BindResources(const Liverpool::Regs& regs,
     Shader::PushData push_data{};
     u32 binding{};
-    for (const auto& stage : stages) {
-        if (stage.uses_step_rates) {
+    for (const auto* stage : stages) {
+        if (!stage) {
+            continue;
+        }
+        if (stage->uses_step_rates) {
            push_data.step0 = regs.vgt_instance_step_rate_0;
            push_data.step1 = regs.vgt_instance_step_rate_1;
        }
-        for (const auto& buffer : stage.buffers) {
-            const auto vsharp = buffer.GetVsharp(stage);
+        for (const auto& buffer : stage->buffers) {
+            const auto vsharp = buffer.GetVsharp(*stage);
             if (vsharp) {
                 const VAddr address = vsharp.base_address;
                 if (texture_cache.IsMeta(address)) {
@@ -382,9 +387,9 @@ void GraphicsPipeline::BindResources(const Liverpool::Regs& regs,
         }
         boost::container::static_vector<AmdGpu::Image, 16> tsharps;
-        for (const auto& image_desc : stage.images) {
+        for (const auto& image_desc : stage->images) {
             const auto& tsharp = tsharps.emplace_back(
-                stage.ReadUd<AmdGpu::Image>(image_desc.sgpr_base, image_desc.dword_offset));
+                stage->ReadUd<AmdGpu::Image>(image_desc.sgpr_base, image_desc.dword_offset));
             VideoCore::ImageInfo image_info{tsharp};
             VideoCore::ImageViewInfo view_info{tsharp, image_desc.is_storage};
             const auto& image_view = texture_cache.FindTexture(image_info, view_info);
@@ -404,8 +409,8 @@ void GraphicsPipeline::BindResources(const Liverpool::Regs& regs,
                 LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a PS shader (texture)");
             }
         }
-        for (const auto& sampler : stage.samplers) {
-            auto ssharp = sampler.GetSsharp(stage);
+        for (const auto& sampler : stage->samplers) {
+            auto ssharp = sampler.GetSsharp(*stage);
             if (sampler.disable_aniso) {
                 const auto& tsharp = tsharps[sampler.associated_image];
                 if (tsharp.base_level == 0 && tsharp.last_level == 0) {
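One side effect of dropping the modules array is that the stage list no longer needs a manual shader_count: a size-tracking container keeps stageCount consistent with whatever was actually pushed. A standalone sketch of that pattern under assumed names (not the emulator's code):

#include <cstdint>
#include <boost/container/static_vector.hpp>
#include <vulkan/vulkan.hpp>

constexpr std::size_t MaxStages = 5;
using StageVector = boost::container::static_vector<vk::PipelineShaderStageCreateInfo, MaxStages>;

void FillStages(vk::ShaderModule vs, vk::ShaderModule fs, StageVector& stages,
                vk::GraphicsPipelineCreateInfo& info) {
    stages.push_back(vk::PipelineShaderStageCreateInfo{
        .stage = vk::ShaderStageFlagBits::eVertex, .module = vs, .pName = "main"});
    if (fs) { // the fragment stage is optional, e.g. for depth-only passes
        stages.push_back(vk::PipelineShaderStageCreateInfo{
            .stage = vk::ShaderStageFlagBits::eFragment, .module = fs, .pName = "main"});
    }
    info.stageCount = static_cast<std::uint32_t>(stages.size()); // cannot drift from the pushes
    info.pStages = stages.data();
}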

View File

@@ -3,9 +3,9 @@
 #include <xxhash.h>
 #include "common/types.h"
-#include "shader_recompiler/runtime_info.h"
 #include "video_core/renderer_vulkan/liverpool_to_vk.h"
 #include "video_core/renderer_vulkan/vk_common.h"
+#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
 namespace VideoCore {
 class BufferCache;
@@ -58,8 +58,7 @@ class GraphicsPipeline {
 public:
     explicit GraphicsPipeline(const Instance& instance, Scheduler& scheduler,
                               const GraphicsPipelineKey& key, vk::PipelineCache pipeline_cache,
-                              std::span<const Shader::Info*, MaxShaderStages> infos,
-                              std::array<vk::ShaderModule, MaxShaderStages> modules);
+                              std::span<const Program*, MaxShaderStages> programs);
     ~GraphicsPipeline();
     void BindResources(const Liverpool::Regs& regs, VideoCore::BufferCache& buffer_cache,
@@ -74,7 +73,7 @@ public:
     }
     const Shader::Info& GetStage(Shader::Stage stage) const noexcept {
-        return stages[u32(stage)];
+        return *stages[u32(stage)];
     }
     bool IsEmbeddedVs() const noexcept {
@@ -99,7 +98,7 @@ private:
     vk::UniquePipeline pipeline;
     vk::UniquePipelineLayout pipeline_layout;
     vk::UniqueDescriptorSetLayout desc_layout;
-    std::array<Shader::Info, MaxShaderStages> stages{};
+    std::array<const Shader::Info*, MaxShaderStages> stages{};
     GraphicsPipelineKey key;
 };

View File

@@ -20,6 +20,10 @@ namespace Vulkan {
 using Shader::VsOutput;
+[[nodiscard]] inline u64 HashCombine(const u64 seed, const u64 hash) {
+    return seed ^ (hash + 0x9e3779b9 + (seed << 6) + (seed >> 2));
+}
 void BuildVsOutputs(Shader::Info& info, const AmdGpu::Liverpool::VsOutputControl& ctl) {
     const auto add_output = [&](VsOutput x, VsOutput y, VsOutput z, VsOutput w) {
         if (x != VsOutput::None || y != VsOutput::None || z != VsOutput::None ||
@@ -246,23 +250,14 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() {
     }
     u32 binding{};
-    std::array<Shader::IR::Program, MaxShaderStages> programs;
-    std::array<const Shader::Info*, MaxShaderStages> infos{};
     for (u32 i = 0; i < MaxShaderStages; i++) {
         if (!graphics_key.stage_hashes[i]) {
-            stages[i] = VK_NULL_HANDLE;
+            programs[i] = nullptr;
             continue;
         }
         auto* pgm = regs.ProgramForStage(i);
         const auto code = pgm->Code();
-        const auto it = module_map.find(graphics_key.stage_hashes[i]);
-        if (it != module_map.end()) {
-            stages[i] = *it->second;
-            continue;
-        }
         // Dump shader code if requested.
         const auto stage = Shader::Stage{i};
         const u64 hash = graphics_key.stage_hashes[i];
@@ -273,39 +268,56 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() {
         block_pool.ReleaseContents();
         inst_pool.ReleaseContents();
-        if (stage != Shader::Stage::Compute && stage != Shader::Stage::Fragment &&
-            stage != Shader::Stage::Vertex) {
+        if (stage != Shader::Stage::Fragment && stage != Shader::Stage::Vertex) {
             LOG_ERROR(Render_Vulkan, "Unsupported shader stage {}. PL creation skipped.", stage);
             return {};
         }
+        const u64 lookup_hash = HashCombine(hash, binding);
+        auto it = program_cache.find(lookup_hash);
+        if (it != program_cache.end()) {
+            const Program* program = it.value().get();
+            ASSERT(program->pgm.info.stage == stage);
+            programs[i] = program;
+            binding = program->end_binding;
+            continue;
+        }
         // Recompile shader to IR.
         try {
+            auto program = std::make_unique<Program>();
+            block_pool.ReleaseContents();
+            inst_pool.ReleaseContents();
             LOG_INFO(Render_Vulkan, "Compiling {} shader {:#x}", stage, hash);
             Shader::Info info = MakeShaderInfo(stage, pgm->user_data, regs);
             info.pgm_base = pgm->Address<uintptr_t>();
             info.pgm_hash = hash;
-            programs[i] =
+            program->pgm =
                 Shader::TranslateProgram(inst_pool, block_pool, code, std::move(info), profile);
             // Compile IR to SPIR-V
-            auto spv_code = Shader::Backend::SPIRV::EmitSPIRV(profile, programs[i], binding);
+            program->spv = Shader::Backend::SPIRV::EmitSPIRV(profile, program->pgm, binding);
             if (Config::dumpShaders()) {
-                DumpShader(spv_code, hash, stage, "spv");
+                DumpShader(program->spv, hash, stage, "spv");
             }
-            stages[i] = CompileSPV(spv_code, instance.GetDevice());
-            infos[i] = &programs[i].info;
+            // Compile module and set name to hash in renderdoc
+            program->end_binding = binding;
+            program->module = CompileSPV(program->spv, instance.GetDevice());
+            const auto name = fmt::format("{}_{:#x}", stage, hash);
+            Vulkan::SetObjectName(instance.GetDevice(), program->module, name);
+            // Cache program
+            const auto [it, _] = program_cache.emplace(lookup_hash, std::move(program));
+            programs[i] = it.value().get();
         } catch (const Shader::Exception& e) {
             UNREACHABLE_MSG("{}", e.what());
         }
-        // Set module name to hash in renderdoc
-        const auto name = fmt::format("{}_{:#x}", stage, hash);
-        Vulkan::SetObjectName(instance.GetDevice(), stages[i], name);
     }
     return std::make_unique<GraphicsPipeline>(instance, scheduler, graphics_key, *pipeline_cache,
-                                              infos, stages);
+                                              programs);
 }
 std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline() {
@@ -322,26 +334,31 @@ std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline() {
     // Recompile shader to IR.
     try {
+        auto program = std::make_unique<Program>();
         LOG_INFO(Render_Vulkan, "Compiling cs shader {:#x}", compute_key);
         Shader::Info info =
             MakeShaderInfo(Shader::Stage::Compute, cs_pgm.user_data, liverpool->regs);
         info.pgm_base = cs_pgm.Address<uintptr_t>();
         info.pgm_hash = compute_key;
-        auto program =
+        program->pgm =
             Shader::TranslateProgram(inst_pool, block_pool, code, std::move(info), profile);
         // Compile IR to SPIR-V
         u32 binding{};
-        const auto spv_code = Shader::Backend::SPIRV::EmitSPIRV(profile, program, binding);
+        program->spv = Shader::Backend::SPIRV::EmitSPIRV(profile, program->pgm, binding);
         if (Config::dumpShaders()) {
-            DumpShader(spv_code, compute_key, Shader::Stage::Compute, "spv");
+            DumpShader(program->spv, compute_key, Shader::Stage::Compute, "spv");
        }
-        const auto module = CompileSPV(spv_code, instance.GetDevice());
-        // Set module name to hash in renderdoc
+        // Compile module and set name to hash in renderdoc
+        program->module = CompileSPV(program->spv, instance.GetDevice());
         const auto name = fmt::format("cs_{:#x}", compute_key);
-        Vulkan::SetObjectName(instance.GetDevice(), module, name);
-        return std::make_unique<ComputePipeline>(instance, scheduler, *pipeline_cache,
-                                                 &program.info, compute_key, module);
+        Vulkan::SetObjectName(instance.GetDevice(), program->module, name);
+        // Cache program
+        const auto [it, _] = program_cache.emplace(compute_key, std::move(program));
+        return std::make_unique<ComputePipeline>(instance, scheduler, *pipeline_cache, compute_key,
+                                                 it.value().get());
     } catch (const Shader::Exception& e) {
         UNREACHABLE_MSG("{}", e.what());
         return nullptr;
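Putting the pieces of this file together: the shader bytecode hash is mixed with the current descriptor-binding offset via the boost-style HashCombine added at the top, and the result keys whole Program objects rather than bare vk::ShaderModule handles, so a cache hit skips translation, SPIR-V emission, and module creation in one step. A self-contained sketch of that lookup-or-compile flow, using std::unordered_map as a stand-in for tsl::robin_map and an invented Compile() helper:

#include <cstdint>
#include <memory>
#include <unordered_map>
#include <vector>

struct Program { // stand-in for the diff's Program: IR + SPIR-V + module + end_binding
    std::vector<std::uint32_t> spv;
    std::uint32_t end_binding{};
};

inline std::uint64_t HashCombine(std::uint64_t seed, std::uint64_t hash) {
    return seed ^ (hash + 0x9e3779b9 + (seed << 6) + (seed >> 2)); // boost-style mix
}

std::unique_ptr<Program> Compile(std::uint64_t /*bytecode_hash*/, std::uint32_t& binding) {
    auto program = std::make_unique<Program>();
    binding += 4; // illustrative: emission advances the shared binding counter
    program->end_binding = binding;
    return program; // real code would translate GCN, emit SPIR-V and build a vk::ShaderModule
}

std::unordered_map<std::uint64_t, std::unique_ptr<Program>> program_cache;

const Program* GetOrCompile(std::uint64_t bytecode_hash, std::uint32_t& binding) {
    // The start binding is part of the key: the same shader emitted at a different
    // descriptor offset yields different SPIR-V, so it must be cached as a separate entry.
    const std::uint64_t key = HashCombine(bytecode_hash, binding);
    if (const auto it = program_cache.find(key); it != program_cache.end()) {
        binding = it->second->end_binding; // fast-forward the counter exactly as a compile would
        return it->second.get();
    }
    const auto [it, inserted] = program_cache.emplace(key, Compile(bytecode_hash, binding));
    return it->second.get();
}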

View File

@@ -5,6 +5,7 @@
 #include <tsl/robin_map.h>
 #include "shader_recompiler/ir/basic_block.h"
+#include "shader_recompiler/ir/program.h"
 #include "shader_recompiler/profile.h"
 #include "video_core/renderer_vulkan/vk_compute_pipeline.h"
 #include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
@@ -43,10 +44,10 @@ private:
     AmdGpu::Liverpool* liverpool;
     vk::UniquePipelineCache pipeline_cache;
     vk::UniquePipelineLayout pipeline_layout;
-    tsl::robin_map<size_t, vk::UniqueShaderModule> module_map;
-    std::array<vk::ShaderModule, MaxShaderStages> stages{};
+    tsl::robin_map<size_t, std::unique_ptr<Program>> program_cache;
     tsl::robin_map<size_t, std::unique_ptr<ComputePipeline>> compute_pipelines;
     tsl::robin_map<GraphicsPipelineKey, std::unique_ptr<GraphicsPipeline>> graphics_pipelines;
+    std::array<const Program*, MaxShaderStages> programs{};
     Shader::Profile profile{};
     GraphicsPipelineKey graphics_key{};
     u64 compute_key{};

View File

@@ -153,7 +153,7 @@ void Rasterizer::BeginRendering() {
         };
         texture_cache.TouchMeta(htile_address, false);
         state.has_depth = true;
-        state.has_stencil = image.info.usage.stencil;
+        state.has_stencil = regs.depth_control.stencil_enable;
     }
     scheduler.BeginRendering(state);
 }

View File

@@ -249,11 +249,11 @@ struct DetilerParams {
     u32 sizes[14];
 };
-static constexpr size_t StreamBufferSize = 128_MB;
+static constexpr size_t StreamBufferSize = 1_GB;
 TileManager::TileManager(const Vulkan::Instance& instance, Vulkan::Scheduler& scheduler)
     : instance{instance}, scheduler{scheduler},
-      stream_buffer{instance, scheduler, MemoryUsage::Stream, StreamBufferSize} {
+      stream_buffer{instance, scheduler, MemoryUsage::Upload, StreamBufferSize} {
     static const std::array detiler_shaders{
         HostShaders::DETILE_M8X1_COMP, HostShaders::DETILE_M8X2_COMP,
         HostShaders::DETILE_M32X1_COMP, HostShaders::DETILE_M32X2_COMP,