video: Import new shader recompiler + display a triangle (#142)

This commit is contained in:
TheTurtle 2024-05-22 01:35:12 +03:00 committed by GitHub
parent 8cf64a33b2
commit 8730968385
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
103 changed files with 17793 additions and 729 deletions

3
.gitmodules vendored
View File

@@ -56,3 +56,6 @@
[submodule "externals/zydis"]
path = externals/zydis
url = https://github.com/zyantific/zydis.git
[submodule "externals/sirit"]
path = externals/sirit
url = https://github.com/raphaelthegreat/sirit

View File

@@ -218,6 +218,8 @@ set(COMMON src/common/logging/backend.cpp
src/common/io_file.h
src/common/error.cpp
src/common/error.h
src/common/scope_exit.h
src/common/func_traits.h
src/common/native_clock.cpp
src/common/native_clock.h
src/common/path_util.cpp
@@ -286,6 +288,75 @@ set(CORE src/core/aerolib/stubs.cpp
src/core/virtual_memory.h
)
set(SHADER_RECOMPILER src/shader_recompiler/exception.h
src/shader_recompiler/object_pool.h
src/shader_recompiler/profile.h
src/shader_recompiler/recompiler.cpp
src/shader_recompiler/recompiler.h
src/shader_recompiler/runtime_info.h
src/shader_recompiler/backend/spirv/emit_spirv.cpp
src/shader_recompiler/backend/spirv/emit_spirv.h
src/shader_recompiler/backend/spirv/emit_spirv_bitwise_conversion.cpp
src/shader_recompiler/backend/spirv/emit_spirv_composite.cpp
src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
src/shader_recompiler/backend/spirv/emit_spirv_convert.cpp
src/shader_recompiler/backend/spirv/emit_spirv_floating_point.cpp
src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
src/shader_recompiler/backend/spirv/emit_spirv_integer.cpp
src/shader_recompiler/backend/spirv/emit_spirv_logical.cpp
src/shader_recompiler/backend/spirv/emit_spirv_select.cpp
src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
src/shader_recompiler/backend/spirv/emit_spirv_undefined.cpp
src/shader_recompiler/backend/spirv/spirv_emit_context.cpp
src/shader_recompiler/backend/spirv/spirv_emit_context.h
src/shader_recompiler/frontend/translate/data_share.cpp
src/shader_recompiler/frontend/translate/export.cpp
src/shader_recompiler/frontend/translate/flat_memory.cpp
src/shader_recompiler/frontend/translate/scalar_alu.cpp
src/shader_recompiler/frontend/translate/scalar_memory.cpp
src/shader_recompiler/frontend/translate/translate.cpp
src/shader_recompiler/frontend/translate/translate.h
src/shader_recompiler/frontend/translate/vector_alu.cpp
src/shader_recompiler/frontend/translate/vector_interpolation.cpp
src/shader_recompiler/frontend/translate/vector_memory.cpp
src/shader_recompiler/frontend/control_flow_graph.cpp
src/shader_recompiler/frontend/control_flow_graph.h
src/shader_recompiler/frontend/decode.cpp
src/shader_recompiler/frontend/decode.h
src/shader_recompiler/frontend/format.cpp
src/shader_recompiler/frontend/instruction.cpp
src/shader_recompiler/frontend/instruction.h
src/shader_recompiler/frontend/opcodes.h
src/shader_recompiler/frontend/structured_control_flow.cpp
src/shader_recompiler/frontend/structured_control_flow.h
src/shader_recompiler/ir/passes/ssa_rewrite_pass.cpp
src/shader_recompiler/ir/passes/resource_tracking_pass.cpp
src/shader_recompiler/ir/passes/constant_propogation_pass.cpp
src/shader_recompiler/ir/passes/passes.h
src/shader_recompiler/ir/abstract_syntax_list.h
src/shader_recompiler/ir/attribute.cpp
src/shader_recompiler/ir/attribute.h
src/shader_recompiler/ir/basic_block.cpp
src/shader_recompiler/ir/basic_block.h
src/shader_recompiler/ir/condition.h
src/shader_recompiler/ir/ir_emitter.cpp
src/shader_recompiler/ir/ir_emitter.h
src/shader_recompiler/ir/microinstruction.cpp
src/shader_recompiler/ir/opcodes.cpp
src/shader_recompiler/ir/opcodes.h
src/shader_recompiler/ir/opcodes.inc
src/shader_recompiler/ir/post_order.cpp
src/shader_recompiler/ir/post_order.h
src/shader_recompiler/ir/program.cpp
src/shader_recompiler/ir/program.h
src/shader_recompiler/ir/reg.h
src/shader_recompiler/ir/type.cpp
src/shader_recompiler/ir/type.h
src/shader_recompiler/ir/value.cpp
src/shader_recompiler/ir/value.h
)
set(VIDEO_CORE src/video_core/amdgpu/liverpool.cpp
src/video_core/amdgpu/liverpool.h
src/video_core/amdgpu/pixel_format.cpp
@@ -293,18 +364,26 @@ set(VIDEO_CORE src/video_core/amdgpu/liverpool.cpp
src/video_core/amdgpu/pm4_cmds.h
src/video_core/amdgpu/pm4_opcodes.h
src/video_core/amdgpu/resource.h
src/video_core/renderer_vulkan/liverpool_to_vk.cpp
src/video_core/renderer_vulkan/liverpool_to_vk.h
src/video_core/renderer_vulkan/renderer_vulkan.cpp
src/video_core/renderer_vulkan/renderer_vulkan.h
src/video_core/renderer_vulkan/vk_common.cpp
src/video_core/renderer_vulkan/vk_common.h
src/video_core/renderer_vulkan/vk_descriptor_update_queue.cpp
src/video_core/renderer_vulkan/vk_descriptor_update_queue.h
src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
src/video_core/renderer_vulkan/vk_graphics_pipeline.h
src/video_core/renderer_vulkan/vk_instance.cpp
src/video_core/renderer_vulkan/vk_instance.h
src/video_core/renderer_vulkan/vk_master_semaphore.cpp
src/video_core/renderer_vulkan/vk_master_semaphore.h
src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
src/video_core/renderer_vulkan/vk_pipeline_cache.h
src/video_core/renderer_vulkan/vk_platform.cpp
src/video_core/renderer_vulkan/vk_platform.h
src/video_core/renderer_vulkan/vk_rasterizer.cpp
src/video_core/renderer_vulkan/vk_rasterizer.h
src/video_core/renderer_vulkan/vk_resource_pool.cpp
src/video_core/renderer_vulkan/vk_resource_pool.h
src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -371,6 +450,7 @@ if (ENABLE_QT_GUI)
${QT_GUI}
${COMMON}
${CORE}
${SHADER_RECOMPILER}
${VIDEO_CORE}
src/sdl_window.h
src/sdl_window.cpp
@@ -381,18 +461,18 @@ else()
${INPUT}
${COMMON}
${CORE}
${SHADER_RECOMPILER}
${VIDEO_CORE}
src/main.cpp
src/sdl_window.h
src/sdl_window.cpp
src/common/scope_exit.h
)
endif()
create_target_directory_groups(shadps4)
target_link_libraries(shadps4 PRIVATE magic_enum::magic_enum fmt::fmt toml11::toml11 tsl::robin_map xbyak)
target_link_libraries(shadps4 PRIVATE discord-rpc boost vma vulkan-headers xxhash Zydis SPIRV glslang SDL3-shared)
target_link_libraries(shadps4 PRIVATE discord-rpc boost vma sirit vulkan-headers xxhash Zydis SPIRV glslang SDL3-shared)
if (NOT ENABLE_QT_GUI)
target_link_libraries(shadps4 PRIVATE SDL3-shared)

View File

@@ -87,3 +87,6 @@ if (WIN32)
add_subdirectory(winpthreads EXCLUDE_FROM_ALL)
target_include_directories(winpthreads INTERFACE winpthreads/include)
endif()
# sirit
add_subdirectory(sirit EXCLUDE_FROM_ALL)

2
externals/boost vendored

@@ -1 +1 @@
Subproject commit 2dd57a940b6d1b733cbd1abbc3f842da476d3d48
Subproject commit 87b7817119982e8ad6068855fae31b11590514be

1
externals/sirit vendored Submodule

@@ -0,0 +1 @@
Subproject commit 9c12a07e62dfa404727e7fc85dd83bba84cc830d

View File

@@ -14,6 +14,7 @@ void assert_fail_impl() {
[[noreturn]] void unreachable_impl() {
Common::Log::Stop();
std::fflush(stdout);
Crash();
throw std::runtime_error("Unreachable code");
}

View File

@@ -4,6 +4,7 @@
#pragma once
#include <type_traits>
#include "common/types.h"
#define DECLARE_ENUM_FLAG_OPERATORS(type) \
[[nodiscard]] constexpr type operator|(type a, type b) noexcept { \
@@ -58,3 +59,103 @@
using T = std::underlying_type_t<type>; \
return static_cast<T>(key) == 0; \
}
namespace Common {
template <typename T>
class Flags {
public:
using IntType = std::underlying_type_t<T>;
Flags() {}
Flags(IntType t) : m_bits(t) {}
template <typename... Tx>
Flags(T f, Tx... fx) {
this->set(f, fx...);
}
template <typename... Tx>
void set(Tx... fx) {
m_bits |= bits(fx...);
}
void set(Flags flags) {
m_bits |= flags.m_bits;
}
template <typename... Tx>
void clr(Tx... fx) {
m_bits &= ~bits(fx...);
}
void clr(Flags flags) {
m_bits &= ~flags.m_bits;
}
template <typename... Tx>
bool any(Tx... fx) const {
return (m_bits & bits(fx...)) != 0;
}
template <typename... Tx>
bool all(Tx... fx) const {
const IntType mask = bits(fx...);
return (m_bits & mask) == mask;
}
bool test(T f) const {
return this->any(f);
}
bool isClear() const {
return m_bits == 0;
}
void clrAll() {
m_bits = 0;
}
u32 raw() const {
return m_bits;
}
Flags operator&(const Flags& other) const {
return Flags(m_bits & other.m_bits);
}
Flags operator|(const Flags& other) const {
return Flags(m_bits | other.m_bits);
}
Flags operator^(const Flags& other) const {
return Flags(m_bits ^ other.m_bits);
}
bool operator==(const Flags& other) const {
return m_bits == other.m_bits;
}
bool operator!=(const Flags& other) const {
return m_bits != other.m_bits;
}
private:
IntType m_bits = 0;
static IntType bit(T f) {
return IntType(1) << static_cast<IntType>(f);
}
template <typename... Tx>
static IntType bits(T f, Tx... fx) {
return bit(f) | bits(fx...);
}
static IntType bits() {
return 0;
}
};
} // namespace Common
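
A minimal usage sketch for the Common::Flags helper added above, assuming the header being extended in this hunk is common/enum.h; SomeCap and the driver below are illustrative only, not part of the commit.

// Illustrative only: SomeCap is a made-up enum used to exercise Common::Flags.
#include <cstdio>
#include "common/enum.h" // assumed path of the header extended above

enum class SomeCap : u32 { Compute, Graphics, Transfer };

int main() {
    Common::Flags<SomeCap> caps{SomeCap::Compute, SomeCap::Transfer}; // bits 0 and 2
    caps.set(SomeCap::Graphics);                                      // add bit 1
    caps.clr(SomeCap::Transfer);                                      // drop bit 2
    std::printf("any(Graphics) = %d\n", caps.any(SomeCap::Graphics)); // 1
    std::printf("all(Compute, Transfer) = %d\n",
                caps.all(SomeCap::Compute, SomeCap::Transfer));       // 0
    std::printf("raw = 0x%x\n", static_cast<unsigned>(caps.raw()));   // 0x3
    return 0;
}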

34
src/common/func_traits.h Normal file
View File

@@ -0,0 +1,34 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <tuple>
namespace Common {
template <class Func>
struct FuncTraits {};
template <class ReturnType_, class... Args>
struct FuncTraits<ReturnType_ (*)(Args...)> {
using ReturnType = ReturnType_;
static constexpr size_t NUM_ARGS = sizeof...(Args);
template <size_t I>
using ArgType = std::tuple_element_t<I, std::tuple<Args...>>;
};
template <typename Func>
struct LambdaTraits : LambdaTraits<decltype(&std::remove_reference_t<Func>::operator())> {};
template <typename ReturnType, typename LambdaType, typename... Args>
struct LambdaTraits<ReturnType (LambdaType::*)(Args...) const> {
template <size_t I>
using ArgType = std::tuple_element_t<I, std::tuple<Args...>>;
static constexpr size_t NUM_ARGS{sizeof...(Args)};
};
} // namespace Common
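
A short sketch of what the new Common::FuncTraits and LambdaTraits expose at compile time; Sample, FuncTraitsDemo, and the lambda are illustrative names, not part of the commit.

#include <cstddef>
#include <type_traits>
#include "common/func_traits.h" // added by this commit

int Sample(float, const char*); // illustrative free function, declaration only

void FuncTraitsDemo() {
    using Traits = Common::FuncTraits<decltype(&Sample)>;
    static_assert(Traits::NUM_ARGS == 2);
    static_assert(std::is_same_v<Traits::ReturnType, int>);
    static_assert(std::is_same_v<Traits::ArgType<0>, float>);
    static_assert(std::is_same_v<Traits::ArgType<1>, const char*>);

    // LambdaTraits inspects the call operator of a (non-generic) lambda.
    [[maybe_unused]] auto lambda = [](std::size_t phi_arg, int) { return phi_arg; };
    using LTraits = Common::LambdaTraits<decltype(lambda)>;
    static_assert(LTraits::NUM_ARGS == 2);
    static_assert(std::is_same_v<LTraits::ArgType<0>, std::size_t>);
}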

View File

@@ -12,6 +12,9 @@
#include "video_core/amdgpu/pm4_cmds.h"
#include "video_core/renderer_vulkan/renderer_vulkan.h"
extern Frontend::WindowSDL* g_window;
std::unique_ptr<Vulkan::RendererVulkan> renderer;
namespace Libraries::GnmDriver {
using namespace AmdGpu;
@@ -1912,6 +1915,7 @@ int PS4_SYSV_ABI Func_F916890425496553() {
void RegisterlibSceGnmDriver(Core::Loader::SymbolsResolver* sym) {
liverpool = std::make_unique<AmdGpu::Liverpool>();
renderer = std::make_unique<Vulkan::RendererVulkan>(*g_window, liverpool.get());
LIB_FUNCTION("b0xyllnVY-I", "libSceGnmDriver", 1, "libSceGnmDriver", 1, 1, sceGnmAddEqEvent);
LIB_FUNCTION("b08AgtPlHPg", "libSceGnmDriver", 1, "libSceGnmDriver", 1, 1,

View File

@@ -10,7 +10,7 @@
#include "video_core/renderer_vulkan/renderer_vulkan.h"
extern Frontend::WindowSDL* g_window;
extern std::unique_ptr<Vulkan::RendererVulkan> renderer;
namespace Libraries::VideoOut {
@@ -41,8 +41,6 @@ VideoOutDriver::VideoOutDriver(u32 width, u32 height) {
main_port.resolution.fullHeight = height;
main_port.resolution.paneWidth = width;
main_port.resolution.paneHeight = height;
renderer = std::make_unique<Vulkan::RendererVulkan>(*g_window);
}
VideoOutDriver::~VideoOutDriver() = default;

View File

@@ -10,8 +10,7 @@
namespace Vulkan {
struct Frame;
class RendererVulkan;
} // namespace Vulkan
}
namespace Libraries::VideoOut {
@@ -84,7 +83,6 @@ private:
std::condition_variable_any submit_cond;
std::condition_variable done_cond;
std::queue<Request> requests;
std::unique_ptr<Vulkan::RendererVulkan> renderer;
bool is_neo{};
};

View File

@@ -7,11 +7,9 @@
#include "common/logging/log.h"
#include "common/singleton.h"
#include "common/types.h"
#include "magic_enum.hpp"
#include <functional>
#include <mutex>
#include <optional>
#include <unordered_map>
#include <queue>
@@ -81,7 +79,7 @@ private:
std::queue<IrqHandler> one_time_subscribers{};
std::mutex m_lock{};
};
std::array<IrqContext, magic_enum::enum_count<InterruptId>()> irq_contexts{};
std::array<IrqContext, 8> irq_contexts{};
};
using IrqC = Common::Singleton<IrqController>;

View File

@@ -0,0 +1,18 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/types.h"
namespace Shader::Backend {
struct Bindings {
u32 unified{};
u32 uniform_buffer{};
u32 storage_buffer{};
u32 texture{};
u32 image{};
};
} // namespace Shader::Backend

View File

@@ -0,0 +1,285 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <span>
#include <type_traits>
#include <utility>
#include <vector>
#include "common/func_traits.h"
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
#include "shader_recompiler/ir/basic_block.h"
#include "shader_recompiler/ir/program.h"
namespace Shader::Backend::SPIRV {
namespace {
template <auto func, typename... Args>
void SetDefinition(EmitContext& ctx, IR::Inst* inst, Args... args) {
inst->SetDefinition<Id>(func(ctx, std::forward<Args>(args)...));
}
template <typename ArgType>
ArgType Arg(EmitContext& ctx, const IR::Value& arg) {
if constexpr (std::is_same_v<ArgType, Id>) {
return ctx.Def(arg);
} else if constexpr (std::is_same_v<ArgType, const IR::Value&>) {
return arg;
} else if constexpr (std::is_same_v<ArgType, u32>) {
return arg.U32();
} else if constexpr (std::is_same_v<ArgType, IR::Attribute>) {
return arg.Attribute();
} else if constexpr (std::is_same_v<ArgType, IR::ScalarReg>) {
return arg.ScalarReg();
} else if constexpr (std::is_same_v<ArgType, IR::VectorReg>) {
return arg.VectorReg();
}
}
template <auto func, bool is_first_arg_inst, size_t... I>
void Invoke(EmitContext& ctx, IR::Inst* inst, std::index_sequence<I...>) {
using Traits = Common::FuncTraits<decltype(func)>;
if constexpr (std::is_same_v<typename Traits::ReturnType, Id>) {
if constexpr (is_first_arg_inst) {
SetDefinition<func>(
ctx, inst, inst,
Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
} else {
SetDefinition<func>(
ctx, inst, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
}
} else {
if constexpr (is_first_arg_inst) {
func(ctx, inst, Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
} else {
func(ctx, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
}
}
}
template <auto func>
void Invoke(EmitContext& ctx, IR::Inst* inst) {
using Traits = Common::FuncTraits<decltype(func)>;
static_assert(Traits::NUM_ARGS >= 1, "Insufficient arguments");
if constexpr (Traits::NUM_ARGS == 1) {
Invoke<func, false>(ctx, inst, std::make_index_sequence<0>{});
} else {
using FirstArgType = typename Traits::template ArgType<1>;
static constexpr bool is_first_arg_inst = std::is_same_v<FirstArgType, IR::Inst*>;
using Indices = std::make_index_sequence<Traits::NUM_ARGS - (is_first_arg_inst ? 2 : 1)>;
Invoke<func, is_first_arg_inst>(ctx, inst, Indices{});
}
}
void EmitInst(EmitContext& ctx, IR::Inst* inst) {
switch (inst->GetOpcode()) {
#define OPCODE(name, result_type, ...) \
case IR::Opcode::name: \
return Invoke<&Emit##name>(ctx, inst);
#include "shader_recompiler/ir/opcodes.inc"
#undef OPCODE
}
throw LogicError("Invalid opcode {}", inst->GetOpcode());
}
Id TypeId(const EmitContext& ctx, IR::Type type) {
switch (type) {
case IR::Type::U1:
return ctx.U1[1];
case IR::Type::U32:
return ctx.U32[1];
default:
throw NotImplementedException("Phi node type {}", type);
}
}
void Traverse(EmitContext& ctx, IR::Program& program) {
IR::Block* current_block{};
for (const IR::AbstractSyntaxNode& node : program.syntax_list) {
switch (node.type) {
case IR::AbstractSyntaxNode::Type::Block: {
const Id label{node.data.block->Definition<Id>()};
if (current_block) {
ctx.OpBranch(label);
}
current_block = node.data.block;
ctx.AddLabel(label);
for (IR::Inst& inst : node.data.block->Instructions()) {
EmitInst(ctx, &inst);
}
break;
}
case IR::AbstractSyntaxNode::Type::If: {
const Id if_label{node.data.if_node.body->Definition<Id>()};
const Id endif_label{node.data.if_node.merge->Definition<Id>()};
ctx.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone);
ctx.OpBranchConditional(ctx.Def(node.data.if_node.cond), if_label, endif_label);
break;
}
case IR::AbstractSyntaxNode::Type::Loop: {
const Id body_label{node.data.loop.body->Definition<Id>()};
const Id continue_label{node.data.loop.continue_block->Definition<Id>()};
const Id endloop_label{node.data.loop.merge->Definition<Id>()};
ctx.OpLoopMerge(endloop_label, continue_label, spv::LoopControlMask::MaskNone);
ctx.OpBranch(body_label);
break;
}
case IR::AbstractSyntaxNode::Type::Break: {
const Id break_label{node.data.break_node.merge->Definition<Id>()};
const Id skip_label{node.data.break_node.skip->Definition<Id>()};
ctx.OpBranchConditional(ctx.Def(node.data.break_node.cond), break_label, skip_label);
break;
}
case IR::AbstractSyntaxNode::Type::EndIf:
if (current_block) {
ctx.OpBranch(node.data.end_if.merge->Definition<Id>());
}
break;
case IR::AbstractSyntaxNode::Type::Repeat: {
Id cond{ctx.Def(node.data.repeat.cond)};
const Id loop_header_label{node.data.repeat.loop_header->Definition<Id>()};
const Id merge_label{node.data.repeat.merge->Definition<Id>()};
ctx.OpBranchConditional(cond, loop_header_label, merge_label);
break;
}
case IR::AbstractSyntaxNode::Type::Return:
ctx.OpReturn();
break;
case IR::AbstractSyntaxNode::Type::Unreachable:
ctx.OpUnreachable();
break;
}
if (node.type != IR::AbstractSyntaxNode::Type::Block) {
current_block = nullptr;
}
}
}
Id DefineMain(EmitContext& ctx, IR::Program& program) {
const Id void_function{ctx.TypeFunction(ctx.void_id)};
const Id main{ctx.OpFunction(ctx.void_id, spv::FunctionControlMask::MaskNone, void_function)};
for (IR::Block* const block : program.blocks) {
block->SetDefinition(ctx.OpLabel());
}
Traverse(ctx, program);
ctx.OpFunctionEnd();
return main;
}
void DefineEntryPoint(const IR::Program& program, EmitContext& ctx, Id main) {
const std::span interfaces(ctx.interfaces.data(), ctx.interfaces.size());
spv::ExecutionModel execution_model{};
switch (program.stage) {
case Stage::Compute: {
// const std::array<u32, 3> workgroup_size{program.workgroup_size};
// execution_model = spv::ExecutionModel::GLCompute;
// ctx.AddExecutionMode(main, spv::ExecutionMode::LocalSize, workgroup_size[0],
// workgroup_size[1], workgroup_size[2]);
break;
}
case Stage::Vertex:
execution_model = spv::ExecutionModel::Vertex;
break;
case Stage::Fragment:
execution_model = spv::ExecutionModel::Fragment;
if (ctx.profile.lower_left_origin_mode) {
ctx.AddExecutionMode(main, spv::ExecutionMode::OriginLowerLeft);
} else {
ctx.AddExecutionMode(main, spv::ExecutionMode::OriginUpperLeft);
}
// if (program.info.stores_frag_depth) {
// ctx.AddExecutionMode(main, spv::ExecutionMode::DepthReplacing);
// }
break;
default:
throw NotImplementedException("Stage {}", u32(program.stage));
}
ctx.AddEntryPoint(execution_model, main, "main", interfaces);
}
void PatchPhiNodes(IR::Program& program, EmitContext& ctx) {
auto inst{program.blocks.front()->begin()};
size_t block_index{0};
ctx.PatchDeferredPhi([&](size_t phi_arg) {
if (phi_arg == 0) {
++inst;
if (inst == program.blocks[block_index]->end() ||
inst->GetOpcode() != IR::Opcode::Phi) {
do {
++block_index;
inst = program.blocks[block_index]->begin();
} while (inst->GetOpcode() != IR::Opcode::Phi);
}
}
return ctx.Def(inst->Arg(phi_arg));
});
}
} // Anonymous namespace
std::vector<u32> EmitSPIRV(const Profile& profile, IR::Program& program, Bindings& bindings) {
EmitContext ctx{profile, program, bindings};
const Id main{DefineMain(ctx, program)};
DefineEntryPoint(program, ctx, main);
if (program.stage == Stage::Vertex) {
ctx.AddExtension("SPV_KHR_shader_draw_parameters");
ctx.AddCapability(spv::Capability::DrawParameters);
}
PatchPhiNodes(program, ctx);
return ctx.Assemble();
}
Id EmitPhi(EmitContext& ctx, IR::Inst* inst) {
const size_t num_args{inst->NumArgs()};
boost::container::small_vector<Id, 32> blocks;
blocks.reserve(num_args);
for (size_t index = 0; index < num_args; ++index) {
blocks.push_back(inst->PhiBlock(index)->Definition<Id>());
}
// The type of a phi instruction is stored in its flags
const Id result_type{TypeId(ctx, inst->Flags<IR::Type>())};
return ctx.DeferredOpPhi(result_type, std::span(blocks.data(), blocks.size()));
}
void EmitVoid(EmitContext&) {}
Id EmitIdentity(EmitContext& ctx, const IR::Value& value) {
throw NotImplementedException("Forward identity declaration");
}
Id EmitConditionRef(EmitContext& ctx, const IR::Value& value) {
throw NotImplementedException("Forward identity declaration");
}
void EmitReference(EmitContext&) {}
void EmitPhiMove(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetZeroFromOp(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetSignFromOp(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetCarryFromOp(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetOverflowFromOp(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitSetVcc(EmitContext& ctx) {
throw LogicError("Unreachable instruction");
}
void EmitGetVcc(EmitContext& ctx) {
throw LogicError("Unreachable instruction");
}
} // namespace Shader::Backend::SPIRV
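
The EmitInst switch above is generated from opcodes.inc, and the Invoke/Arg helpers use Common::FuncTraits to marshal each instruction's operands into the matching Emit* handler's parameter types. A stripped-down, self-contained analogue of that dispatch pattern (a sketch with a made-up MiniInst, not the real emitter) looks like this:

#include <cstddef>
#include <cstdio>
#include <utility>
#include "common/func_traits.h"

// Hypothetical mini-IR: an instruction is an opcode plus raw integer operands.
enum class Op { Add, Neg };
struct MiniInst {
    Op op;
    int args[2];
};

int EmitAdd(int a, int b) { return a + b; }
int EmitNeg(int a) { return -a; }

// Expand the handler's arity from FuncTraits and pull that many operands out of
// the instruction, mirroring the Invoke/Arg helpers above.
template <auto func, std::size_t... I>
int Invoke(const MiniInst& inst, std::index_sequence<I...>) {
    return func(inst.args[I]...);
}

template <auto func>
int Invoke(const MiniInst& inst) {
    using Traits = Common::FuncTraits<decltype(func)>;
    return Invoke<func>(inst, std::make_index_sequence<Traits::NUM_ARGS>{});
}

int EmitInst(const MiniInst& inst) {
    switch (inst.op) {
    case Op::Add:
        return Invoke<&EmitAdd>(inst);
    case Op::Neg:
        return Invoke<&EmitNeg>(inst);
    }
    return 0;
}

int main() {
    std::printf("%d\n", EmitInst({Op::Add, {2, 3}})); // 5
    std::printf("%d\n", EmitInst({Op::Neg, {7, 0}})); // -7
    return 0;
}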

View File

@@ -0,0 +1,21 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <vector>
#include "shader_recompiler/backend/bindings.h"
#include "shader_recompiler/ir/program.h"
#include "shader_recompiler/profile.h"
namespace Shader::Backend::SPIRV {
[[nodiscard]] std::vector<u32> EmitSPIRV(const Profile& profile, IR::Program& program,
Bindings& bindings);
[[nodiscard]] inline std::vector<u32> EmitSPIRV(const Profile& profile, IR::Program& program) {
Bindings binding;
return EmitSPIRV(profile, program, binding);
}
} // namespace Shader::Backend::SPIRV
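
A minimal sketch of calling the overloads declared above, assuming the caller already has a Shader::Profile and a translated IR::Program in hand; CompileToSpirv is a placeholder name, not part of the commit.

#include <vector>
#include "common/types.h"
#include "shader_recompiler/backend/spirv/emit_spirv.h"

std::vector<u32> CompileToSpirv(const Shader::Profile& profile, Shader::IR::Program& program) {
    // The convenience overload above default-constructs the Bindings; pass a
    // Bindings& explicitly if the caller needs to see the binding counters.
    return Shader::Backend::SPIRV::EmitSPIRV(profile, program);
}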

View File

@@ -0,0 +1,57 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
namespace Shader::Backend::SPIRV {
void EmitBitCastU16F16(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitBitCastU32F32(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.U32[1], value);
}
void EmitBitCastU64F64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitBitCastF16U16(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitBitCastF32U32(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.F32[1], value);
}
void EmitBitCastF64U64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitPackUint2x32(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.U64, value);
}
Id EmitUnpackUint2x32(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.U32[2], value);
}
Id EmitPackFloat2x16(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.U32[1], value);
}
Id EmitUnpackFloat2x16(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.F16[2], value);
}
Id EmitPackHalf2x16(EmitContext& ctx, Id value) {
return ctx.OpPackHalf2x16(ctx.U32[1], value);
}
Id EmitUnpackHalf2x16(EmitContext& ctx, Id value) {
return ctx.OpUnpackHalf2x16(ctx.F32[2], value);
}
} // namespace Shader::Backend::SPIRV
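
For reference, the Pack/Unpack emitters above are pure bitcasts between equally sized types; the CPU-side counterpart of EmitPackUint2x32 / EmitUnpackUint2x32 is just std::bit_cast (C++20), sketched below.

#include <array>
#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
    // PackUint2x32: two 32-bit words reinterpreted as one 64-bit value.
    const std::array<std::uint32_t, 2> words{0xDEADBEEFu, 0x12345678u};
    const auto packed = std::bit_cast<std::uint64_t>(words);
    // UnpackUint2x32: the inverse bitcast recovers the original words.
    const auto unpacked = std::bit_cast<std::array<std::uint32_t, 2>>(packed);
    std::printf("%#llx -> {%#x, %#x}\n", static_cast<unsigned long long>(packed),
                static_cast<unsigned>(unpacked[0]), static_cast<unsigned>(unpacked[1]));
    return 0;
}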

View File

@@ -0,0 +1,153 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
namespace Shader::Backend::SPIRV {
Id EmitCompositeConstructU32x2(EmitContext& ctx, Id e1, Id e2) {
return ctx.OpCompositeConstruct(ctx.U32[2], e1, e2);
}
Id EmitCompositeConstructU32x3(EmitContext& ctx, Id e1, Id e2, Id e3) {
return ctx.OpCompositeConstruct(ctx.U32[3], e1, e2, e3);
}
Id EmitCompositeConstructU32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4) {
return ctx.OpCompositeConstruct(ctx.U32[4], e1, e2, e3, e4);
}
Id EmitCompositeExtractU32x2(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.U32[1], composite, index);
}
Id EmitCompositeExtractU32x3(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.U32[1], composite, index);
}
Id EmitCompositeExtractU32x4(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.U32[1], composite, index);
}
Id EmitCompositeInsertU32x2(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.U32[2], object, composite, index);
}
Id EmitCompositeInsertU32x3(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.U32[3], object, composite, index);
}
Id EmitCompositeInsertU32x4(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.U32[4], object, composite, index);
}
Id EmitCompositeConstructF16x2(EmitContext& ctx, Id e1, Id e2) {
return ctx.OpCompositeConstruct(ctx.F16[2], e1, e2);
}
Id EmitCompositeConstructF16x3(EmitContext& ctx, Id e1, Id e2, Id e3) {
return ctx.OpCompositeConstruct(ctx.F16[3], e1, e2, e3);
}
Id EmitCompositeConstructF16x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4) {
return ctx.OpCompositeConstruct(ctx.F16[4], e1, e2, e3, e4);
}
Id EmitCompositeExtractF16x2(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.F16[1], composite, index);
}
Id EmitCompositeExtractF16x3(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.F16[1], composite, index);
}
Id EmitCompositeExtractF16x4(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.F16[1], composite, index);
}
Id EmitCompositeInsertF16x2(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F16[2], object, composite, index);
}
Id EmitCompositeInsertF16x3(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F16[3], object, composite, index);
}
Id EmitCompositeInsertF16x4(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F16[4], object, composite, index);
}
Id EmitCompositeConstructF32x2(EmitContext& ctx, Id e1, Id e2) {
return ctx.OpCompositeConstruct(ctx.F32[2], e1, e2);
}
Id EmitCompositeConstructF32x3(EmitContext& ctx, Id e1, Id e2, Id e3) {
return ctx.OpCompositeConstruct(ctx.F32[3], e1, e2, e3);
}
Id EmitCompositeConstructF32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4) {
return ctx.OpCompositeConstruct(ctx.F32[4], e1, e2, e3, e4);
}
Id EmitCompositeExtractF32x2(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.F32[1], composite, index);
}
Id EmitCompositeExtractF32x3(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.F32[1], composite, index);
}
Id EmitCompositeExtractF32x4(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.F32[1], composite, index);
}
Id EmitCompositeInsertF32x2(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F32[2], object, composite, index);
}
Id EmitCompositeInsertF32x3(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F32[3], object, composite, index);
}
Id EmitCompositeInsertF32x4(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F32[4], object, composite, index);
}
void EmitCompositeConstructF64x2(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitCompositeConstructF64x3(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitCompositeConstructF64x4(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitCompositeExtractF64x2(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitCompositeExtractF64x3(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitCompositeExtractF64x4(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitCompositeInsertF64x2(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F64[2], object, composite, index);
}
Id EmitCompositeInsertF64x3(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F64[3], object, composite, index);
}
Id EmitCompositeInsertF64x4(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F64[4], object, composite, index);
}
} // namespace Shader::Backend::SPIRV
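
One semantic note worth a sketch: OpCompositeInsert, used by the EmitCompositeInsert* helpers above, returns a new composite with a single element replaced rather than mutating its input; a CPU-side analogue with a 4-wide vector:

#include <array>
#include <cstdio>

using F32x4 = std::array<float, 4>;

// Analogue of EmitCompositeInsertF32x4: copy the composite, overwrite one lane,
// return the copy; the source value is left untouched (SSA-style).
F32x4 CompositeInsert(F32x4 composite, float object, unsigned index) {
    composite[index] = object;
    return composite;
}

int main() {
    const F32x4 v{1.0f, 2.0f, 3.0f, 4.0f};
    const F32x4 w = CompositeInsert(v, 9.0f, 2);
    std::printf("%g %g %g %g\n", w[0], w[1], w[2], w[3]); // 1 2 9 4
    return 0;
}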

View File

@@ -0,0 +1,103 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
namespace Shader::Backend::SPIRV {
namespace {
Id OutputAttrPointer(EmitContext& ctx, IR::Attribute attr, u32 element) {
if (IR::IsParam(attr)) {
const u32 index{u32(attr) - u32(IR::Attribute::Param0)};
const auto& info{ctx.output_params.at(index).at(element)};
if (info.num_components == 1) {
return info.id;
} else {
const u32 index_element{element - info.first_element};
return ctx.OpAccessChain(ctx.output_f32, info.id, ctx.ConstU32(index_element));
}
}
switch (attr) {
case IR::Attribute::Position0:
return ctx.OpAccessChain(ctx.output_f32, ctx.output_position, ctx.ConstU32(element));
case IR::Attribute::RenderTarget0:
return ctx.OpAccessChain(ctx.output_f32, ctx.frag_color[0], ctx.ConstU32(element));
default:
throw NotImplementedException("Write attribute {}", attr);
}
}
} // Anonymous namespace
void EmitGetScalarRegister(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitSetScalarRegister(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetVectorRegister(EmitContext& ctx) {
throw LogicError("Unreachable instruction");
}
void EmitSetVectorRegister(EmitContext& ctx) {
throw LogicError("Unreachable instruction");
}
void EmitSetGotoVariable(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetGotoVariable(EmitContext&) {
throw LogicError("Unreachable instruction");
}
Id EmitReadConst(EmitContext& ctx) {
throw LogicError("Unreachable instruction");
}
Id EmitReadConstBuffer(EmitContext& ctx, const IR::Value& binding, const IR::Value& addr,
const IR::Value& offset) {
throw LogicError("Unreachable instruction");
}
Id EmitReadConstBufferF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& addr,
const IR::Value& offset) {
throw LogicError("Unreachable instruction");
}
Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
const u32 element{static_cast<u32>(attr) % 4};
if (IR::IsParam(attr)) {
const u32 index{u32(attr) - u32(IR::Attribute::Param0)};
const auto& param{ctx.input_params.at(index)};
if (!ValidId(param.id)) {
// Attribute is disabled or varying component is not written
return ctx.ConstF32(element == 3 ? 1.0f : 0.0f);
}
const Id pointer{ctx.OpAccessChain(param.pointer_type, param.id, ctx.ConstU32(element))};
return ctx.OpLoad(param.component_type, pointer);
}
throw NotImplementedException("Read attribute {}", attr);
}
Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, Id) {
switch (attr) {
case IR::Attribute::VertexId:
return ctx.OpLoad(ctx.U32[1], ctx.vertex_index);
default:
throw NotImplementedException("Read U32 attribute {}", attr);
}
}
void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, u32 element) {
if (attr == IR::Attribute::Param0) {
return;
}
const Id pointer{OutputAttrPointer(ctx, attr, element)};
ctx.OpStore(pointer, value);
}
} // namespace Shader::Backend::SPIRV

View File

@@ -0,0 +1,262 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
namespace Shader::Backend::SPIRV {
namespace {
Id ExtractU16(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpUConvert(ctx.U16, value);
} else {
return ctx.OpBitFieldUExtract(ctx.U32[1], value, ctx.u32_zero_value, ctx.ConstU32(16u));
}
}
Id ExtractS16(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpSConvert(ctx.S16, value);
} else {
return ctx.OpBitFieldSExtract(ctx.U32[1], value, ctx.u32_zero_value, ctx.ConstU32(16u));
}
}
Id ExtractU8(EmitContext& ctx, Id value) {
if (ctx.profile.support_int8) {
return ctx.OpUConvert(ctx.U8, value);
} else {
return ctx.OpBitFieldUExtract(ctx.U32[1], value, ctx.u32_zero_value, ctx.ConstU32(8u));
}
}
Id ExtractS8(EmitContext& ctx, Id value) {
if (ctx.profile.support_int8) {
return ctx.OpSConvert(ctx.S8, value);
} else {
return ctx.OpBitFieldSExtract(ctx.U32[1], value, ctx.u32_zero_value, ctx.ConstU32(8u));
}
}
} // Anonymous namespace
Id EmitConvertS16F16(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpSConvert(ctx.U32[1], ctx.OpConvertFToS(ctx.U16, value));
} else {
return ExtractS16(ctx, ctx.OpConvertFToS(ctx.U32[1], value));
}
}
Id EmitConvertS16F32(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpSConvert(ctx.U32[1], ctx.OpConvertFToS(ctx.U16, value));
} else {
return ExtractS16(ctx, ctx.OpConvertFToS(ctx.U32[1], value));
}
}
Id EmitConvertS16F64(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpSConvert(ctx.U32[1], ctx.OpConvertFToS(ctx.U16, value));
} else {
return ExtractS16(ctx, ctx.OpConvertFToS(ctx.U32[1], value));
}
}
Id EmitConvertS32F16(EmitContext& ctx, Id value) {
return ctx.OpConvertFToS(ctx.U32[1], value);
}
Id EmitConvertS32F32(EmitContext& ctx, Id value) {
if (ctx.profile.has_broken_signed_operations) {
return ctx.OpBitcast(ctx.U32[1], ctx.OpConvertFToS(ctx.S32[1], value));
} else {
return ctx.OpConvertFToS(ctx.U32[1], value);
}
}
Id EmitConvertS32F64(EmitContext& ctx, Id value) {
return ctx.OpConvertFToS(ctx.U32[1], value);
}
Id EmitConvertS64F16(EmitContext& ctx, Id value) {
return ctx.OpConvertFToS(ctx.U64, value);
}
Id EmitConvertS64F32(EmitContext& ctx, Id value) {
return ctx.OpConvertFToS(ctx.U64, value);
}
Id EmitConvertS64F64(EmitContext& ctx, Id value) {
return ctx.OpConvertFToS(ctx.U64, value);
}
Id EmitConvertU16F16(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpUConvert(ctx.U32[1], ctx.OpConvertFToU(ctx.U16, value));
} else {
return ExtractU16(ctx, ctx.OpConvertFToU(ctx.U32[1], value));
}
}
Id EmitConvertU16F32(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpUConvert(ctx.U32[1], ctx.OpConvertFToU(ctx.U16, value));
} else {
return ExtractU16(ctx, ctx.OpConvertFToU(ctx.U32[1], value));
}
}
Id EmitConvertU16F64(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpUConvert(ctx.U32[1], ctx.OpConvertFToU(ctx.U16, value));
} else {
return ExtractU16(ctx, ctx.OpConvertFToU(ctx.U32[1], value));
}
}
Id EmitConvertU32F16(EmitContext& ctx, Id value) {
return ctx.OpConvertFToU(ctx.U32[1], value);
}
Id EmitConvertU32F32(EmitContext& ctx, Id value) {
return ctx.OpConvertFToU(ctx.U32[1], value);
}
Id EmitConvertU32F64(EmitContext& ctx, Id value) {
return ctx.OpConvertFToU(ctx.U32[1], value);
}
Id EmitConvertU64F16(EmitContext& ctx, Id value) {
return ctx.OpConvertFToU(ctx.U64, value);
}
Id EmitConvertU64F32(EmitContext& ctx, Id value) {
return ctx.OpConvertFToU(ctx.U64, value);
}
Id EmitConvertU64F64(EmitContext& ctx, Id value) {
return ctx.OpConvertFToU(ctx.U64, value);
}
Id EmitConvertU64U32(EmitContext& ctx, Id value) {
return ctx.OpUConvert(ctx.U64, value);
}
Id EmitConvertU32U64(EmitContext& ctx, Id value) {
return ctx.OpUConvert(ctx.U32[1], value);
}
Id EmitConvertF16F32(EmitContext& ctx, Id value) {
return ctx.OpFConvert(ctx.F16[1], value);
}
Id EmitConvertF32F16(EmitContext& ctx, Id value) {
return ctx.OpFConvert(ctx.F32[1], value);
}
Id EmitConvertF32F64(EmitContext& ctx, Id value) {
return ctx.OpFConvert(ctx.F32[1], value);
}
Id EmitConvertF64F32(EmitContext& ctx, Id value) {
return ctx.OpFConvert(ctx.F64[1], value);
}
Id EmitConvertF16S8(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F16[1], ExtractS8(ctx, value));
}
Id EmitConvertF16S16(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F16[1], ExtractS16(ctx, value));
}
Id EmitConvertF16S32(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F16[1], value);
}
Id EmitConvertF16S64(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F16[1], value);
}
Id EmitConvertF16U8(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F16[1], ExtractU8(ctx, value));
}
Id EmitConvertF16U16(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F16[1], ExtractU16(ctx, value));
}
Id EmitConvertF16U32(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F16[1], value);
}
Id EmitConvertF16U64(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F16[1], value);
}
Id EmitConvertF32S8(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F32[1], ExtractS8(ctx, value));
}
Id EmitConvertF32S16(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F32[1], ExtractS16(ctx, value));
}
Id EmitConvertF32S32(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F32[1], value);
}
Id EmitConvertF32S64(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F32[1], value);
}
Id EmitConvertF32U8(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F32[1], ExtractU8(ctx, value));
}
Id EmitConvertF32U16(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F32[1], ExtractU16(ctx, value));
}
Id EmitConvertF32U32(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F32[1], value);
}
Id EmitConvertF32U64(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F32[1], value);
}
Id EmitConvertF64S8(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F64[1], ExtractS8(ctx, value));
}
Id EmitConvertF64S16(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F64[1], ExtractS16(ctx, value));
}
Id EmitConvertF64S32(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F64[1], value);
}
Id EmitConvertF64S64(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F64[1], value);
}
Id EmitConvertF64U8(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F64[1], ExtractU8(ctx, value));
}
Id EmitConvertF64U16(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F64[1], ExtractU16(ctx, value));
}
Id EmitConvertF64U32(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F64[1], value);
}
Id EmitConvertF64U64(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F64[1], value);
}
} // namespace Shader::Backend::SPIRV
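
ExtractU16/ExtractS16 above fall back to OpBitFieldUExtract/OpBitFieldSExtract (offset 0, count 16) when 16-bit integers are unavailable; the scalar arithmetic those instructions perform is roughly the following sketch.

#include <cstdint>
#include <cstdio>

// Low 16 bits, zero-extended: what OpBitFieldUExtract(value, 0, 16) yields.
std::uint32_t ExtractU16(std::uint32_t value) {
    return value & 0xFFFFu;
}

// Low 16 bits, sign-extended: what OpBitFieldSExtract(value, 0, 16) yields.
std::uint32_t ExtractS16(std::uint32_t value) {
    const auto low = static_cast<std::int16_t>(value & 0xFFFFu);
    return static_cast<std::uint32_t>(static_cast<std::int32_t>(low));
}

int main() {
    std::printf("%#x %#x\n", static_cast<unsigned>(ExtractU16(0xABCD1234u)),
                static_cast<unsigned>(ExtractS16(0xABCD9234u))); // 0x1234 0xffff9234
    return 0;
}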

View File

@@ -0,0 +1,355 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
namespace Shader::Backend::SPIRV {
Id EmitFPAbs16(EmitContext& ctx, Id value) {
return ctx.OpFAbs(ctx.F16[1], value);
}
Id EmitFPAbs32(EmitContext& ctx, Id value) {
return ctx.OpFAbs(ctx.F32[1], value);
}
Id EmitFPAbs64(EmitContext& ctx, Id value) {
return ctx.OpFAbs(ctx.F64[1], value);
}
Id EmitFPAdd16(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
return ctx.OpFAdd(ctx.F16[1], a, b);
}
Id EmitFPAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
return ctx.OpFAdd(ctx.F32[1], a, b);
}
Id EmitFPAdd64(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
return ctx.OpFAdd(ctx.F64[1], a, b);
}
Id EmitFPFma16(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c) {
return ctx.OpFma(ctx.F16[1], a, b, c);
}
Id EmitFPFma32(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c) {
return ctx.OpFma(ctx.F32[1], a, b, c);
}
Id EmitFPFma64(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c) {
return ctx.OpFma(ctx.F64[1], a, b, c);
}
Id EmitFPMax32(EmitContext& ctx, Id a, Id b) {
return ctx.OpFMax(ctx.F32[1], a, b);
}
Id EmitFPMax64(EmitContext& ctx, Id a, Id b) {
return ctx.OpFMax(ctx.F64[1], a, b);
}
Id EmitFPMin32(EmitContext& ctx, Id a, Id b) {
return ctx.OpFMin(ctx.F32[1], a, b);
}
Id EmitFPMin64(EmitContext& ctx, Id a, Id b) {
return ctx.OpFMin(ctx.F64[1], a, b);
}
Id EmitFPMul16(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
return ctx.OpFMul(ctx.F16[1], a, b);
}
Id EmitFPMul32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
return ctx.OpFMul(ctx.F32[1], a, b);
}
Id EmitFPMul64(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
return ctx.OpFMul(ctx.F64[1], a, b);
}
Id EmitFPNeg16(EmitContext& ctx, Id value) {
return ctx.OpFNegate(ctx.F16[1], value);
}
Id EmitFPNeg32(EmitContext& ctx, Id value) {
return ctx.OpFNegate(ctx.F32[1], value);
}
Id EmitFPNeg64(EmitContext& ctx, Id value) {
return ctx.OpFNegate(ctx.F64[1], value);
}
Id EmitFPSin(EmitContext& ctx, Id value) {
return ctx.OpSin(ctx.F32[1], value);
}
Id EmitFPCos(EmitContext& ctx, Id value) {
return ctx.OpCos(ctx.F32[1], value);
}
Id EmitFPExp2(EmitContext& ctx, Id value) {
return ctx.OpExp2(ctx.F32[1], value);
}
Id EmitFPLog2(EmitContext& ctx, Id value) {
return ctx.OpLog2(ctx.F32[1], value);
}
Id EmitFPRecip32(EmitContext& ctx, Id value) {
return ctx.OpFDiv(ctx.F32[1], ctx.ConstF32(1.0f), value);
}
Id EmitFPRecip64(EmitContext& ctx, Id value) {
return ctx.OpFDiv(ctx.F64[1], ctx.Constant(ctx.F64[1], 1.0f), value);
}
Id EmitFPRecipSqrt32(EmitContext& ctx, Id value) {
return ctx.OpInverseSqrt(ctx.F32[1], value);
}
Id EmitFPRecipSqrt64(EmitContext& ctx, Id value) {
return ctx.OpInverseSqrt(ctx.F64[1], value);
}
Id EmitFPSqrt(EmitContext& ctx, Id value) {
return ctx.OpSqrt(ctx.F32[1], value);
}
Id EmitFPSaturate16(EmitContext& ctx, Id value) {
const Id zero{ctx.Constant(ctx.F16[1], u16{0})};
const Id one{ctx.Constant(ctx.F16[1], u16{0x3c00})};
return ctx.OpFClamp(ctx.F16[1], value, zero, one);
}
Id EmitFPSaturate32(EmitContext& ctx, Id value) {
const Id zero{ctx.ConstF32(f32{0.0})};
const Id one{ctx.ConstF32(f32{1.0})};
return ctx.OpFClamp(ctx.F32[1], value, zero, one);
}
Id EmitFPSaturate64(EmitContext& ctx, Id value) {
const Id zero{ctx.Constant(ctx.F64[1], f64{0.0})};
const Id one{ctx.Constant(ctx.F64[1], f64{1.0})};
return ctx.OpFClamp(ctx.F64[1], value, zero, one);
}
Id EmitFPClamp16(EmitContext& ctx, Id value, Id min_value, Id max_value) {
return ctx.OpFClamp(ctx.F16[1], value, min_value, max_value);
}
Id EmitFPClamp32(EmitContext& ctx, Id value, Id min_value, Id max_value) {
return ctx.OpFClamp(ctx.F32[1], value, min_value, max_value);
}
Id EmitFPClamp64(EmitContext& ctx, Id value, Id min_value, Id max_value) {
return ctx.OpFClamp(ctx.F64[1], value, min_value, max_value);
}
Id EmitFPRoundEven16(EmitContext& ctx, Id value) {
return ctx.OpRoundEven(ctx.F16[1], value);
}
Id EmitFPRoundEven32(EmitContext& ctx, Id value) {
return ctx.OpRoundEven(ctx.F32[1], value);
}
Id EmitFPRoundEven64(EmitContext& ctx, Id value) {
return ctx.OpRoundEven(ctx.F64[1], value);
}
Id EmitFPFloor16(EmitContext& ctx, Id value) {
return ctx.OpFloor(ctx.F16[1], value);
}
Id EmitFPFloor32(EmitContext& ctx, Id value) {
return ctx.OpFloor(ctx.F32[1], value);
}
Id EmitFPFloor64(EmitContext& ctx, Id value) {
return ctx.OpFloor(ctx.F64[1], value);
}
Id EmitFPCeil16(EmitContext& ctx, Id value) {
return ctx.OpCeil(ctx.F16[1], value);
}
Id EmitFPCeil32(EmitContext& ctx, Id value) {
return ctx.OpCeil(ctx.F32[1], value);
}
Id EmitFPCeil64(EmitContext& ctx, Id value) {
return ctx.OpCeil(ctx.F64[1], value);
}
Id EmitFPTrunc16(EmitContext& ctx, Id value) {
return ctx.OpTrunc(ctx.F16[1], value);
}
Id EmitFPTrunc32(EmitContext& ctx, Id value) {
return ctx.OpTrunc(ctx.F32[1], value);
}
Id EmitFPTrunc64(EmitContext& ctx, Id value) {
return ctx.OpTrunc(ctx.F64[1], value);
}
Id EmitFPOrdEqual16(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdEqual32(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdEqual64(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordEqual16(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordEqual32(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordEqual64(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdNotEqual16(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdNotEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdNotEqual32(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdNotEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdNotEqual64(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdNotEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordNotEqual16(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordNotEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordNotEqual32(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordNotEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordNotEqual64(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordNotEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdLessThan16(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdLessThan(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdLessThan32(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdLessThan(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdLessThan64(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdLessThan(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordLessThan16(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordLessThan(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordLessThan32(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordLessThan(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordLessThan64(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordLessThan(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdGreaterThan16(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdGreaterThan(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdGreaterThan32(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdGreaterThan(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdGreaterThan64(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdGreaterThan(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordGreaterThan16(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordGreaterThan(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordGreaterThan32(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordGreaterThan(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordGreaterThan64(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordGreaterThan(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdLessThanEqual16(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdLessThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdLessThanEqual32(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdLessThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdLessThanEqual64(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdLessThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordLessThanEqual16(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordLessThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordLessThanEqual32(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordLessThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordLessThanEqual64(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordLessThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdGreaterThanEqual16(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdGreaterThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdGreaterThanEqual32(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdGreaterThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPOrdGreaterThanEqual64(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFOrdGreaterThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordGreaterThanEqual16(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordGreaterThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordGreaterThanEqual32(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordGreaterThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPUnordGreaterThanEqual64(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpFUnordGreaterThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitFPIsNan16(EmitContext& ctx, Id value) {
return ctx.OpIsNan(ctx.U1[1], value);
}
Id EmitFPIsNan32(EmitContext& ctx, Id value) {
return ctx.OpIsNan(ctx.U1[1], value);
}
Id EmitFPIsNan64(EmitContext& ctx, Id value) {
return ctx.OpIsNan(ctx.U1[1], value);
}
} // namespace Shader::Backend::SPIRV

View File

@@ -0,0 +1,66 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
namespace Shader::Backend::SPIRV {
Id EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
Id bias_lc, const IR::Value& offset) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
Id lod, const IR::Value& offset) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
Id coords, Id dref, Id bias_lc, const IR::Value& offset) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
Id coords, Id dref, Id lod, const IR::Value& offset) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
const IR::Value& offset, const IR::Value& offset2) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
const IR::Value& offset, const IR::Value& offset2, Id dref) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id offset,
Id lod, Id ms) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitImageQueryDimensions(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id lod,
const IR::Value& skip_mips_val) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitImageQueryLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitImageGradient(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
Id derivatives, const IR::Value& offset, Id lod_clamp) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitImageWrite(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id color) {
throw NotImplementedException("SPIR-V Instruction");
}
} // namespace Shader::Backend::SPIRV

View File

@@ -0,0 +1,335 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <sirit/sirit.h>
#include "common/types.h"
namespace Shader::IR {
enum class Attribute : u64;
enum class Patch : u64;
class Inst;
class Value;
} // namespace Shader::IR
namespace Shader::Backend::SPIRV {
using Sirit::Id;
class EmitContext;
// Microinstruction emitters
Id EmitPhi(EmitContext& ctx, IR::Inst* inst);
void EmitVoid(EmitContext& ctx);
Id EmitIdentity(EmitContext& ctx, const IR::Value& value);
Id EmitConditionRef(EmitContext& ctx, const IR::Value& value);
void EmitReference(EmitContext&);
void EmitPhiMove(EmitContext&);
void EmitJoin(EmitContext& ctx);
void EmitBarrier(EmitContext& ctx);
void EmitWorkgroupMemoryBarrier(EmitContext& ctx);
void EmitDeviceMemoryBarrier(EmitContext& ctx);
void EmitGetVcc(EmitContext& ctx);
void EmitSetVcc(EmitContext& ctx);
void EmitPrologue(EmitContext& ctx);
void EmitEpilogue(EmitContext& ctx);
void EmitGetScalarRegister(EmitContext& ctx);
void EmitSetScalarRegister(EmitContext& ctx);
void EmitGetVectorRegister(EmitContext& ctx);
void EmitSetVectorRegister(EmitContext& ctx);
void EmitSetGotoVariable(EmitContext& ctx);
void EmitGetGotoVariable(EmitContext& ctx);
void EmitSetScc(EmitContext& ctx);
Id EmitReadConst(EmitContext& ctx);
Id EmitReadConstBuffer(EmitContext& ctx, const IR::Value& handle, const IR::Value& index,
const IR::Value& offset);
Id EmitReadConstBufferF32(EmitContext& ctx, const IR::Value& handle, const IR::Value& index,
const IR::Value& offset);
Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex);
Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, Id vertex);
void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, u32 element);
void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, Id value);
void EmitSetSampleMask(EmitContext& ctx, Id value);
void EmitSetFragDepth(EmitContext& ctx, Id value);
Id EmitWorkgroupId(EmitContext& ctx);
Id EmitLocalInvocationId(EmitContext& ctx);
Id EmitInvocationId(EmitContext& ctx);
Id EmitInvocationInfo(EmitContext& ctx);
Id EmitSampleId(EmitContext& ctx);
Id EmitUndefU1(EmitContext& ctx);
Id EmitUndefU8(EmitContext& ctx);
Id EmitUndefU16(EmitContext& ctx);
Id EmitUndefU32(EmitContext& ctx);
Id EmitUndefU64(EmitContext& ctx);
Id EmitReadSharedU8(EmitContext& ctx, Id offset);
Id EmitReadSharedS8(EmitContext& ctx, Id offset);
Id EmitReadSharedU16(EmitContext& ctx, Id offset);
Id EmitReadSharedS16(EmitContext& ctx, Id offset);
Id EmitReadSharedU32(EmitContext& ctx, Id offset);
Id EmitReadSharedU64(EmitContext& ctx, Id offset);
void EmitWriteSharedU8(EmitContext& ctx, Id offset, Id value);
void EmitWriteSharedU16(EmitContext& ctx, Id offset, Id value);
void EmitWriteSharedU32(EmitContext& ctx, Id offset, Id value);
void EmitWriteSharedU64(EmitContext& ctx, Id offset, Id value);
void EmitWriteSharedU128(EmitContext& ctx, Id offset, Id value);
Id EmitCompositeConstructU32x2(EmitContext& ctx, Id e1, Id e2);
Id EmitCompositeConstructU32x3(EmitContext& ctx, Id e1, Id e2, Id e3);
Id EmitCompositeConstructU32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4);
Id EmitCompositeExtractU32x2(EmitContext& ctx, Id composite, u32 index);
Id EmitCompositeExtractU32x3(EmitContext& ctx, Id composite, u32 index);
Id EmitCompositeExtractU32x4(EmitContext& ctx, Id composite, u32 index);
Id EmitCompositeInsertU32x2(EmitContext& ctx, Id composite, Id object, u32 index);
Id EmitCompositeInsertU32x3(EmitContext& ctx, Id composite, Id object, u32 index);
Id EmitCompositeInsertU32x4(EmitContext& ctx, Id composite, Id object, u32 index);
Id EmitCompositeConstructF16x2(EmitContext& ctx, Id e1, Id e2);
Id EmitCompositeConstructF16x3(EmitContext& ctx, Id e1, Id e2, Id e3);
Id EmitCompositeConstructF16x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4);
Id EmitCompositeExtractF16x2(EmitContext& ctx, Id composite, u32 index);
Id EmitCompositeExtractF16x3(EmitContext& ctx, Id composite, u32 index);
Id EmitCompositeExtractF16x4(EmitContext& ctx, Id composite, u32 index);
Id EmitCompositeInsertF16x2(EmitContext& ctx, Id composite, Id object, u32 index);
Id EmitCompositeInsertF16x3(EmitContext& ctx, Id composite, Id object, u32 index);
Id EmitCompositeInsertF16x4(EmitContext& ctx, Id composite, Id object, u32 index);
Id EmitCompositeConstructF32x2(EmitContext& ctx, Id e1, Id e2);
Id EmitCompositeConstructF32x3(EmitContext& ctx, Id e1, Id e2, Id e3);
Id EmitCompositeConstructF32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4);
Id EmitCompositeExtractF32x2(EmitContext& ctx, Id composite, u32 index);
Id EmitCompositeExtractF32x3(EmitContext& ctx, Id composite, u32 index);
Id EmitCompositeExtractF32x4(EmitContext& ctx, Id composite, u32 index);
Id EmitCompositeInsertF32x2(EmitContext& ctx, Id composite, Id object, u32 index);
Id EmitCompositeInsertF32x3(EmitContext& ctx, Id composite, Id object, u32 index);
Id EmitCompositeInsertF32x4(EmitContext& ctx, Id composite, Id object, u32 index);
void EmitCompositeConstructF64x2(EmitContext& ctx);
void EmitCompositeConstructF64x3(EmitContext& ctx);
void EmitCompositeConstructF64x4(EmitContext& ctx);
void EmitCompositeExtractF64x2(EmitContext& ctx);
void EmitCompositeExtractF64x3(EmitContext& ctx);
void EmitCompositeExtractF64x4(EmitContext& ctx);
Id EmitCompositeInsertF64x2(EmitContext& ctx, Id composite, Id object, u32 index);
Id EmitCompositeInsertF64x3(EmitContext& ctx, Id composite, Id object, u32 index);
Id EmitCompositeInsertF64x4(EmitContext& ctx, Id composite, Id object, u32 index);
Id EmitSelectU1(EmitContext& ctx, Id cond, Id true_value, Id false_value);
Id EmitSelectU8(EmitContext& ctx, Id cond, Id true_value, Id false_value);
Id EmitSelectU16(EmitContext& ctx, Id cond, Id true_value, Id false_value);
Id EmitSelectU32(EmitContext& ctx, Id cond, Id true_value, Id false_value);
Id EmitSelectU64(EmitContext& ctx, Id cond, Id true_value, Id false_value);
Id EmitSelectF16(EmitContext& ctx, Id cond, Id true_value, Id false_value);
Id EmitSelectF32(EmitContext& ctx, Id cond, Id true_value, Id false_value);
Id EmitSelectF64(EmitContext& ctx, Id cond, Id true_value, Id false_value);
void EmitBitCastU16F16(EmitContext& ctx);
Id EmitBitCastU32F32(EmitContext& ctx, Id value);
void EmitBitCastU64F64(EmitContext& ctx);
void EmitBitCastF16U16(EmitContext&);
Id EmitBitCastF32U32(EmitContext& ctx, Id value);
void EmitBitCastF64U64(EmitContext& ctx);
Id EmitPackUint2x32(EmitContext& ctx, Id value);
Id EmitUnpackUint2x32(EmitContext& ctx, Id value);
Id EmitPackFloat2x16(EmitContext& ctx, Id value);
Id EmitUnpackFloat2x16(EmitContext& ctx, Id value);
Id EmitPackHalf2x16(EmitContext& ctx, Id value);
Id EmitUnpackHalf2x16(EmitContext& ctx, Id value);
Id EmitFPAbs16(EmitContext& ctx, Id value);
Id EmitFPAbs32(EmitContext& ctx, Id value);
Id EmitFPAbs64(EmitContext& ctx, Id value);
Id EmitFPAdd16(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
Id EmitFPAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
Id EmitFPAdd64(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
Id EmitFPFma16(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c);
Id EmitFPFma32(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c);
Id EmitFPFma64(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c);
Id EmitFPMax32(EmitContext& ctx, Id a, Id b);
Id EmitFPMax64(EmitContext& ctx, Id a, Id b);
Id EmitFPMin32(EmitContext& ctx, Id a, Id b);
Id EmitFPMin64(EmitContext& ctx, Id a, Id b);
Id EmitFPMul16(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
Id EmitFPMul32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
Id EmitFPMul64(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
Id EmitFPNeg16(EmitContext& ctx, Id value);
Id EmitFPNeg32(EmitContext& ctx, Id value);
Id EmitFPNeg64(EmitContext& ctx, Id value);
Id EmitFPSin(EmitContext& ctx, Id value);
Id EmitFPCos(EmitContext& ctx, Id value);
Id EmitFPExp2(EmitContext& ctx, Id value);
Id EmitFPLog2(EmitContext& ctx, Id value);
Id EmitFPRecip32(EmitContext& ctx, Id value);
Id EmitFPRecip64(EmitContext& ctx, Id value);
Id EmitFPRecipSqrt32(EmitContext& ctx, Id value);
Id EmitFPRecipSqrt64(EmitContext& ctx, Id value);
Id EmitFPSqrt(EmitContext& ctx, Id value);
Id EmitFPSaturate16(EmitContext& ctx, Id value);
Id EmitFPSaturate32(EmitContext& ctx, Id value);
Id EmitFPSaturate64(EmitContext& ctx, Id value);
Id EmitFPClamp16(EmitContext& ctx, Id value, Id min_value, Id max_value);
Id EmitFPClamp32(EmitContext& ctx, Id value, Id min_value, Id max_value);
Id EmitFPClamp64(EmitContext& ctx, Id value, Id min_value, Id max_value);
Id EmitFPRoundEven16(EmitContext& ctx, Id value);
Id EmitFPRoundEven32(EmitContext& ctx, Id value);
Id EmitFPRoundEven64(EmitContext& ctx, Id value);
Id EmitFPFloor16(EmitContext& ctx, Id value);
Id EmitFPFloor32(EmitContext& ctx, Id value);
Id EmitFPFloor64(EmitContext& ctx, Id value);
Id EmitFPCeil16(EmitContext& ctx, Id value);
Id EmitFPCeil32(EmitContext& ctx, Id value);
Id EmitFPCeil64(EmitContext& ctx, Id value);
Id EmitFPTrunc16(EmitContext& ctx, Id value);
Id EmitFPTrunc32(EmitContext& ctx, Id value);
Id EmitFPTrunc64(EmitContext& ctx, Id value);
Id EmitFPOrdEqual16(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdEqual32(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdEqual64(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordEqual16(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordEqual32(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordEqual64(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdNotEqual16(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdNotEqual32(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdNotEqual64(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordNotEqual16(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordNotEqual32(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordNotEqual64(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdLessThan16(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdLessThan32(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdLessThan64(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordLessThan16(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordLessThan32(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordLessThan64(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdGreaterThan16(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdGreaterThan32(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdGreaterThan64(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordGreaterThan16(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordGreaterThan32(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordGreaterThan64(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdLessThanEqual16(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdLessThanEqual32(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdLessThanEqual64(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordLessThanEqual16(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordLessThanEqual32(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordLessThanEqual64(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdGreaterThanEqual16(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdGreaterThanEqual32(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPOrdGreaterThanEqual64(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordGreaterThanEqual16(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordGreaterThanEqual32(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPUnordGreaterThanEqual64(EmitContext& ctx, Id lhs, Id rhs);
Id EmitFPIsNan16(EmitContext& ctx, Id value);
Id EmitFPIsNan32(EmitContext& ctx, Id value);
Id EmitFPIsNan64(EmitContext& ctx, Id value);
Id EmitIAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
Id EmitIAdd64(EmitContext& ctx, Id a, Id b);
Id EmitISub32(EmitContext& ctx, Id a, Id b);
Id EmitISub64(EmitContext& ctx, Id a, Id b);
Id EmitIMul32(EmitContext& ctx, Id a, Id b);
Id EmitSDiv32(EmitContext& ctx, Id a, Id b);
Id EmitUDiv32(EmitContext& ctx, Id a, Id b);
Id EmitINeg32(EmitContext& ctx, Id value);
Id EmitINeg64(EmitContext& ctx, Id value);
Id EmitIAbs32(EmitContext& ctx, Id value);
Id EmitShiftLeftLogical32(EmitContext& ctx, Id base, Id shift);
Id EmitShiftLeftLogical64(EmitContext& ctx, Id base, Id shift);
Id EmitShiftRightLogical32(EmitContext& ctx, Id base, Id shift);
Id EmitShiftRightLogical64(EmitContext& ctx, Id base, Id shift);
Id EmitShiftRightArithmetic32(EmitContext& ctx, Id base, Id shift);
Id EmitShiftRightArithmetic64(EmitContext& ctx, Id base, Id shift);
Id EmitBitwiseAnd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
Id EmitBitwiseOr32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
Id EmitBitwiseXor32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
Id EmitBitFieldInsert(EmitContext& ctx, Id base, Id insert, Id offset, Id count);
Id EmitBitFieldSExtract(EmitContext& ctx, IR::Inst* inst, Id base, Id offset, Id count);
Id EmitBitFieldUExtract(EmitContext& ctx, IR::Inst* inst, Id base, Id offset, Id count);
Id EmitBitReverse32(EmitContext& ctx, Id value);
Id EmitBitCount32(EmitContext& ctx, Id value);
Id EmitBitwiseNot32(EmitContext& ctx, Id value);
Id EmitFindSMsb32(EmitContext& ctx, Id value);
Id EmitFindUMsb32(EmitContext& ctx, Id value);
Id EmitSMin32(EmitContext& ctx, Id a, Id b);
Id EmitUMin32(EmitContext& ctx, Id a, Id b);
Id EmitSMax32(EmitContext& ctx, Id a, Id b);
Id EmitUMax32(EmitContext& ctx, Id a, Id b);
Id EmitSClamp32(EmitContext& ctx, IR::Inst* inst, Id value, Id min, Id max);
Id EmitUClamp32(EmitContext& ctx, IR::Inst* inst, Id value, Id min, Id max);
Id EmitSLessThan(EmitContext& ctx, Id lhs, Id rhs);
Id EmitULessThan(EmitContext& ctx, Id lhs, Id rhs);
Id EmitIEqual(EmitContext& ctx, Id lhs, Id rhs);
Id EmitSLessThanEqual(EmitContext& ctx, Id lhs, Id rhs);
Id EmitULessThanEqual(EmitContext& ctx, Id lhs, Id rhs);
Id EmitSGreaterThan(EmitContext& ctx, Id lhs, Id rhs);
Id EmitUGreaterThan(EmitContext& ctx, Id lhs, Id rhs);
Id EmitINotEqual(EmitContext& ctx, Id lhs, Id rhs);
Id EmitSGreaterThanEqual(EmitContext& ctx, Id lhs, Id rhs);
Id EmitUGreaterThanEqual(EmitContext& ctx, Id lhs, Id rhs);
Id EmitLogicalOr(EmitContext& ctx, Id a, Id b);
Id EmitLogicalAnd(EmitContext& ctx, Id a, Id b);
Id EmitLogicalXor(EmitContext& ctx, Id a, Id b);
Id EmitLogicalNot(EmitContext& ctx, Id value);
Id EmitConvertS16F16(EmitContext& ctx, Id value);
Id EmitConvertS16F32(EmitContext& ctx, Id value);
Id EmitConvertS16F64(EmitContext& ctx, Id value);
Id EmitConvertS32F16(EmitContext& ctx, Id value);
Id EmitConvertS32F32(EmitContext& ctx, Id value);
Id EmitConvertS32F64(EmitContext& ctx, Id value);
Id EmitConvertS64F16(EmitContext& ctx, Id value);
Id EmitConvertS64F32(EmitContext& ctx, Id value);
Id EmitConvertS64F64(EmitContext& ctx, Id value);
Id EmitConvertU16F16(EmitContext& ctx, Id value);
Id EmitConvertU16F32(EmitContext& ctx, Id value);
Id EmitConvertU16F64(EmitContext& ctx, Id value);
Id EmitConvertU32F16(EmitContext& ctx, Id value);
Id EmitConvertU32F32(EmitContext& ctx, Id value);
Id EmitConvertU32F64(EmitContext& ctx, Id value);
Id EmitConvertU64F16(EmitContext& ctx, Id value);
Id EmitConvertU64F32(EmitContext& ctx, Id value);
Id EmitConvertU64F64(EmitContext& ctx, Id value);
Id EmitConvertU64U32(EmitContext& ctx, Id value);
Id EmitConvertU32U64(EmitContext& ctx, Id value);
Id EmitConvertF16F32(EmitContext& ctx, Id value);
Id EmitConvertF32F16(EmitContext& ctx, Id value);
Id EmitConvertF32F64(EmitContext& ctx, Id value);
Id EmitConvertF64F32(EmitContext& ctx, Id value);
Id EmitConvertF16S8(EmitContext& ctx, Id value);
Id EmitConvertF16S16(EmitContext& ctx, Id value);
Id EmitConvertF16S32(EmitContext& ctx, Id value);
Id EmitConvertF16S64(EmitContext& ctx, Id value);
Id EmitConvertF16U8(EmitContext& ctx, Id value);
Id EmitConvertF16U16(EmitContext& ctx, Id value);
Id EmitConvertF16U32(EmitContext& ctx, Id value);
Id EmitConvertF16U64(EmitContext& ctx, Id value);
Id EmitConvertF32S8(EmitContext& ctx, Id value);
Id EmitConvertF32S16(EmitContext& ctx, Id value);
Id EmitConvertF32S32(EmitContext& ctx, Id value);
Id EmitConvertF32S64(EmitContext& ctx, Id value);
Id EmitConvertF32U8(EmitContext& ctx, Id value);
Id EmitConvertF32U16(EmitContext& ctx, Id value);
Id EmitConvertF32U32(EmitContext& ctx, Id value);
Id EmitConvertF32U64(EmitContext& ctx, Id value);
Id EmitConvertF64S8(EmitContext& ctx, Id value);
Id EmitConvertF64S16(EmitContext& ctx, Id value);
Id EmitConvertF64S32(EmitContext& ctx, Id value);
Id EmitConvertF64S64(EmitContext& ctx, Id value);
Id EmitConvertF64U8(EmitContext& ctx, Id value);
Id EmitConvertF64U16(EmitContext& ctx, Id value);
Id EmitConvertF64U32(EmitContext& ctx, Id value);
Id EmitConvertF64U64(EmitContext& ctx, Id value);
Id EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
Id bias_lc, const IR::Value& offset);
Id EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
Id lod, const IR::Value& offset);
Id EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
Id coords, Id dref, Id bias_lc, const IR::Value& offset);
Id EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
Id coords, Id dref, Id lod, const IR::Value& offset);
Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
const IR::Value& offset, const IR::Value& offset2);
Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
const IR::Value& offset, const IR::Value& offset2, Id dref);
Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id offset,
Id lod, Id ms);
Id EmitImageQueryDimensions(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id lod,
const IR::Value& skip_mips);
Id EmitImageQueryLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords);
Id EmitImageGradient(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
Id derivatives, const IR::Value& offset, Id lod_clamp);
Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords);
void EmitImageWrite(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id color);
} // namespace Shader::Backend::SPIRV

View File

@ -0,0 +1,262 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
namespace Shader::Backend::SPIRV {
namespace {
void SetZeroFlag(EmitContext& ctx, IR::Inst* inst, Id result) {
// IR::Inst* const zero{inst->GetAssociatedPseudoOperation(IR::Opcode::GetZeroFromOp)};
// if (!zero) {
// return;
// }
// zero->SetDefinition(ctx.OpIEqual(ctx.U1[1], result, ctx.u32_zero_value));
// zero->Invalidate();
}
void SetSignFlag(EmitContext& ctx, IR::Inst* inst, Id result) {
// IR::Inst* const sign{inst->GetAssociatedPseudoOperation(IR::Opcode::GetSignFromOp)};
// if (!sign) {
// return;
// }
// sign->SetDefinition(ctx.OpSLessThan(ctx.U1[1], result, ctx.u32_zero_value));
// sign->Invalidate();
}
} // Anonymous namespace
Id EmitIAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
return ctx.OpIAdd(ctx.U32[1], a, b);
// Id result{};
// if (IR::Inst* const carry{inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp)}) {
// const Id carry_type{ctx.TypeStruct(ctx.U32[1], ctx.U32[1])};
// const Id carry_result{ctx.OpIAddCarry(carry_type, a, b)};
// result = ctx.OpCompositeExtract(ctx.U32[1], carry_result, 0U);
// const Id carry_value{ctx.OpCompositeExtract(ctx.U32[1], carry_result, 1U)};
// carry->SetDefinition(ctx.OpINotEqual(ctx.U1[1], carry_value, ctx.u32_zero_value));
// carry->Invalidate();
//} else {
// result = ctx.OpIAdd(ctx.U32[1], a, b);
//}
// SetZeroFlag(ctx, inst, result);
// SetSignFlag(ctx, inst, result);
// if (IR::Inst * overflow{inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp)}) {
// // https://stackoverflow.com/questions/55468823/how-to-detect-integer-overflow-in-c
// constexpr u32 s32_max{static_cast<u32>(std::numeric_limits<s32>::max())};
// const Id is_positive{ctx.OpSGreaterThanEqual(ctx.U1[1], a, ctx.u32_zero_value)};
// const Id sub_a{ctx.OpISub(ctx.U32[1], ctx.Const(s32_max), a)};
// const Id positive_test{ctx.OpSGreaterThan(ctx.U1[1], b, sub_a)};
// const Id negative_test{ctx.OpSLessThan(ctx.U1[1], b, sub_a)};
// const Id carry_flag{ctx.OpSelect(ctx.U1[1], is_positive, positive_test, negative_test)};
// overflow->SetDefinition(carry_flag);
// overflow->Invalidate();
//}
// return result;
}
Id EmitIAdd64(EmitContext& ctx, Id a, Id b) {
return ctx.OpIAdd(ctx.U64, a, b);
}
Id EmitISub32(EmitContext& ctx, Id a, Id b) {
return ctx.OpISub(ctx.U32[1], a, b);
}
Id EmitISub64(EmitContext& ctx, Id a, Id b) {
return ctx.OpISub(ctx.U64, a, b);
}
Id EmitIMul32(EmitContext& ctx, Id a, Id b) {
return ctx.OpIMul(ctx.U32[1], a, b);
}
Id EmitSDiv32(EmitContext& ctx, Id a, Id b) {
return ctx.OpSDiv(ctx.U32[1], a, b);
}
Id EmitUDiv32(EmitContext& ctx, Id a, Id b) {
return ctx.OpUDiv(ctx.U32[1], a, b);
}
Id EmitINeg32(EmitContext& ctx, Id value) {
return ctx.OpSNegate(ctx.U32[1], value);
}
Id EmitINeg64(EmitContext& ctx, Id value) {
return ctx.OpSNegate(ctx.U64, value);
}
Id EmitIAbs32(EmitContext& ctx, Id value) {
return ctx.OpSAbs(ctx.U32[1], value);
}
Id EmitShiftLeftLogical32(EmitContext& ctx, Id base, Id shift) {
return ctx.OpShiftLeftLogical(ctx.U32[1], base, shift);
}
Id EmitShiftLeftLogical64(EmitContext& ctx, Id base, Id shift) {
return ctx.OpShiftLeftLogical(ctx.U64, base, shift);
}
Id EmitShiftRightLogical32(EmitContext& ctx, Id base, Id shift) {
return ctx.OpShiftRightLogical(ctx.U32[1], base, shift);
}
Id EmitShiftRightLogical64(EmitContext& ctx, Id base, Id shift) {
return ctx.OpShiftRightLogical(ctx.U64, base, shift);
}
Id EmitShiftRightArithmetic32(EmitContext& ctx, Id base, Id shift) {
return ctx.OpShiftRightArithmetic(ctx.U32[1], base, shift);
}
Id EmitShiftRightArithmetic64(EmitContext& ctx, Id base, Id shift) {
return ctx.OpShiftRightArithmetic(ctx.U64, base, shift);
}
Id EmitBitwiseAnd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
const Id result{ctx.OpBitwiseAnd(ctx.U32[1], a, b)};
SetZeroFlag(ctx, inst, result);
SetSignFlag(ctx, inst, result);
return result;
}
Id EmitBitwiseOr32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
const Id result{ctx.OpBitwiseOr(ctx.U32[1], a, b)};
SetZeroFlag(ctx, inst, result);
SetSignFlag(ctx, inst, result);
return result;
}
Id EmitBitwiseXor32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
const Id result{ctx.OpBitwiseXor(ctx.U32[1], a, b)};
SetZeroFlag(ctx, inst, result);
SetSignFlag(ctx, inst, result);
return result;
}
Id EmitBitFieldInsert(EmitContext& ctx, Id base, Id insert, Id offset, Id count) {
return ctx.OpBitFieldInsert(ctx.U32[1], base, insert, offset, count);
}
Id EmitBitFieldSExtract(EmitContext& ctx, IR::Inst* inst, Id base, Id offset, Id count) {
const Id result{ctx.OpBitFieldSExtract(ctx.U32[1], base, offset, count)};
SetZeroFlag(ctx, inst, result);
SetSignFlag(ctx, inst, result);
return result;
}
Id EmitBitFieldUExtract(EmitContext& ctx, IR::Inst* inst, Id base, Id offset, Id count) {
const Id result{ctx.OpBitFieldUExtract(ctx.U32[1], base, offset, count)};
SetZeroFlag(ctx, inst, result);
SetSignFlag(ctx, inst, result);
return result;
}
Id EmitBitReverse32(EmitContext& ctx, Id value) {
return ctx.OpBitReverse(ctx.U32[1], value);
}
Id EmitBitCount32(EmitContext& ctx, Id value) {
return ctx.OpBitCount(ctx.U32[1], value);
}
Id EmitBitwiseNot32(EmitContext& ctx, Id value) {
return ctx.OpNot(ctx.U32[1], value);
}
Id EmitFindSMsb32(EmitContext& ctx, Id value) {
return ctx.OpFindSMsb(ctx.U32[1], value);
}
Id EmitFindUMsb32(EmitContext& ctx, Id value) {
return ctx.OpFindUMsb(ctx.U32[1], value);
}
Id EmitSMin32(EmitContext& ctx, Id a, Id b) {
return ctx.OpSMin(ctx.U32[1], a, b);
}
Id EmitUMin32(EmitContext& ctx, Id a, Id b) {
return ctx.OpUMin(ctx.U32[1], a, b);
}
Id EmitSMax32(EmitContext& ctx, Id a, Id b) {
return ctx.OpSMax(ctx.U32[1], a, b);
}
Id EmitUMax32(EmitContext& ctx, Id a, Id b) {
return ctx.OpUMax(ctx.U32[1], a, b);
}
Id EmitSClamp32(EmitContext& ctx, IR::Inst* inst, Id value, Id min, Id max) {
Id result{};
if (ctx.profile.has_broken_spirv_clamp) {
value = ctx.OpBitcast(ctx.S32[1], value);
min = ctx.OpBitcast(ctx.S32[1], min);
max = ctx.OpBitcast(ctx.S32[1], max);
result = ctx.OpSMax(ctx.S32[1], ctx.OpSMin(ctx.S32[1], value, max), min);
result = ctx.OpBitcast(ctx.U32[1], result);
} else {
result = ctx.OpSClamp(ctx.U32[1], value, min, max);
}
SetZeroFlag(ctx, inst, result);
SetSignFlag(ctx, inst, result);
return result;
}
Id EmitUClamp32(EmitContext& ctx, IR::Inst* inst, Id value, Id min, Id max) {
Id result{};
if (ctx.profile.has_broken_spirv_clamp) {
result = ctx.OpUMax(ctx.U32[1], ctx.OpUMin(ctx.U32[1], value, max), min);
} else {
result = ctx.OpUClamp(ctx.U32[1], value, min, max);
}
SetZeroFlag(ctx, inst, result);
SetSignFlag(ctx, inst, result);
return result;
}
Id EmitSLessThan(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpSLessThan(ctx.U1[1], lhs, rhs);
}
Id EmitULessThan(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpULessThan(ctx.U1[1], lhs, rhs);
}
Id EmitIEqual(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpIEqual(ctx.U1[1], lhs, rhs);
}
Id EmitSLessThanEqual(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpSLessThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitULessThanEqual(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpULessThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitSGreaterThan(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpSGreaterThan(ctx.U1[1], lhs, rhs);
}
Id EmitUGreaterThan(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpUGreaterThan(ctx.U1[1], lhs, rhs);
}
Id EmitINotEqual(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpINotEqual(ctx.U1[1], lhs, rhs);
}
Id EmitSGreaterThanEqual(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpSGreaterThanEqual(ctx.U1[1], lhs, rhs);
}
Id EmitUGreaterThanEqual(EmitContext& ctx, Id lhs, Id rhs) {
return ctx.OpUGreaterThanEqual(ctx.U1[1], lhs, rhs);
}
} // namespace Shader::Backend::SPIRV

View File

@ -0,0 +1,25 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
namespace Shader::Backend::SPIRV {
Id EmitLogicalOr(EmitContext& ctx, Id a, Id b) {
return ctx.OpLogicalOr(ctx.U1[1], a, b);
}
Id EmitLogicalAnd(EmitContext& ctx, Id a, Id b) {
return ctx.OpLogicalAnd(ctx.U1[1], a, b);
}
Id EmitLogicalXor(EmitContext& ctx, Id a, Id b) {
return ctx.OpLogicalNotEqual(ctx.U1[1], a, b);
}
Id EmitLogicalNot(EmitContext& ctx, Id value) {
return ctx.OpLogicalNot(ctx.U1[1], value);
}
} // namespace Shader::Backend::SPIRV

View File

@ -0,0 +1,41 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
namespace Shader::Backend::SPIRV {
Id EmitSelectU1(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
return ctx.OpSelect(ctx.U1[1], cond, true_value, false_value);
}
Id EmitSelectU8(EmitContext&, Id, Id, Id) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitSelectU16(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
return ctx.OpSelect(ctx.U16, cond, true_value, false_value);
}
Id EmitSelectU32(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
return ctx.OpSelect(ctx.U32[1], cond, true_value, false_value);
}
Id EmitSelectU64(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
return ctx.OpSelect(ctx.U64, cond, true_value, false_value);
}
Id EmitSelectF16(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
return ctx.OpSelect(ctx.F16[1], cond, true_value, false_value);
}
Id EmitSelectF32(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
return ctx.OpSelect(ctx.F32[1], cond, true_value, false_value);
}
Id EmitSelectF64(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
return ctx.OpSelect(ctx.F64[1], cond, true_value, false_value);
}
} // namespace Shader::Backend::SPIRV

View File

@ -0,0 +1,21 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
namespace Shader::Backend::SPIRV {
void EmitPrologue(EmitContext& ctx) {}
void EmitEpilogue(EmitContext& ctx) {}
void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream) {
throw NotImplementedException("Geometry streams");
}
void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
throw NotImplementedException("Geometry streams");
}
} // namespace Shader::Backend::SPIRV

View File

@ -0,0 +1,29 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
namespace Shader::Backend::SPIRV {
Id EmitUndefU1(EmitContext& ctx) {
return ctx.OpUndef(ctx.U1[1]);
}
Id EmitUndefU8(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitUndefU16(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitUndefU32(EmitContext& ctx) {
return ctx.OpUndef(ctx.U32[1]);
}
Id EmitUndefU64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
} // namespace Shader::Backend::SPIRV

View File

@ -0,0 +1,136 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <boost/container/static_vector.hpp>
#include <fmt/format.h>
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
namespace Shader::Backend::SPIRV {
namespace {
std::string_view StageName(Stage stage) {
switch (stage) {
case Stage::Vertex:
return "vs";
case Stage::TessellationControl:
return "tcs";
case Stage::TessellationEval:
return "tes";
case Stage::Geometry:
return "gs";
case Stage::Fragment:
return "fs";
case Stage::Compute:
return "cs";
}
throw InvalidArgument("Invalid stage {}", u32(stage));
}
template <typename... Args>
void Name(EmitContext& ctx, Id object, std::string_view format_str, Args&&... args) {
ctx.Name(object, fmt::format(fmt::runtime(format_str), StageName(ctx.stage),
std::forward<Args>(args)...)
.c_str());
}
} // Anonymous namespace
EmitContext::EmitContext(const Profile& profile_, IR::Program& program, Bindings& bindings)
: Sirit::Module(profile_.supported_spirv), profile{profile_}, stage{program.stage} {
u32& uniform_binding{bindings.unified};
u32& storage_binding{bindings.unified};
u32& texture_binding{bindings.unified};
u32& image_binding{bindings.unified};
AddCapability(spv::Capability::Shader);
DefineArithmeticTypes();
DefineInterfaces(program);
}
EmitContext::~EmitContext() = default;
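// Resolves an IR value to a SPIR-V id: non-immediates use the id recorded on their defining
// instruction, while immediates are materialized as constants of the matching type.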
Id EmitContext::Def(const IR::Value& value) {
if (!value.IsImmediate()) {
return value.InstRecursive()->Definition<Id>();
}
switch (value.Type()) {
case IR::Type::Void:
return Id{};
case IR::Type::U1:
return value.U1() ? true_value : false_value;
case IR::Type::U32:
return ConstU32(value.U32());
case IR::Type::U64:
return Constant(U64, value.U64());
case IR::Type::F32:
return ConstF32(value.F32());
case IR::Type::F64:
return Constant(F64[1], value.F64());
default:
throw NotImplementedException("Immediate type {}", value.Type());
}
}
void EmitContext::DefineArithmeticTypes() {
void_id = Name(TypeVoid(), "void_id");
U1[1] = Name(TypeBool(), "bool_id");
// F16[1] = Name(TypeFloat(16), "f16_id");
F32[1] = Name(TypeFloat(32), "f32_id");
// F64[1] = Name(TypeFloat(64), "f64_id");
S32[1] = Name(TypeSInt(32), "i32_id");
U32[1] = Name(TypeUInt(32), "u32_id");
// U8 = Name(TypeUInt(8), "u8");
// S8 = Name(TypeSInt(8), "s8");
// U16 = Name(TypeUInt(16), "u16_id");
// S16 = Name(TypeSInt(16), "s16_id");
// U64 = Name(TypeUInt(64), "u64_id");
for (u32 i = 2; i <= 4; i++) {
// F16[i] = Name(TypeVector(F16[1], i), fmt::format("f16vec{}_id", i));
F32[i] = Name(TypeVector(F32[1], i), fmt::format("f32vec{}_id", i));
// F64[i] = Name(TypeVector(F64[1], i), fmt::format("f64vec{}_id", i));
S32[i] = Name(TypeVector(S32[1], i), fmt::format("i32vec{}_id", i));
U32[i] = Name(TypeVector(U32[1], i), fmt::format("u32vec{}_id", i));
U1[i] = Name(TypeVector(U1[1], i), fmt::format("bvec{}_id", i));
}
true_value = ConstantTrue(U1[1]);
false_value = ConstantFalse(U1[1]);
u32_zero_value = ConstU32(0U);
f32_zero_value = ConstF32(0.0f);
output_f32 = Name(TypePointer(spv::StorageClass::Output, F32[1]), "output_f32");
output_u32 = Name(TypePointer(spv::StorageClass::Output, U32[1]), "output_u32");
}
void EmitContext::DefineInterfaces(const IR::Program& program) {
DefineInputs(program);
DefineOutputs(program);
}
void EmitContext::DefineInputs(const IR::Program& program) {
switch (stage) {
case Stage::Vertex:
vertex_index = DefineVariable(U32[1], spv::BuiltIn::VertexIndex, spv::StorageClass::Input);
base_vertex = DefineVariable(U32[1], spv::BuiltIn::BaseVertex, spv::StorageClass::Input);
break;
default:
break;
}
}
void EmitContext::DefineOutputs(const IR::Program& program) {
switch (stage) {
case Stage::Vertex:
output_position = DefineVariable(F32[4], spv::BuiltIn::Position, spv::StorageClass::Output);
break;
case Stage::Fragment:
frag_color[0] = DefineOutput(F32[4], 0);
Name(frag_color[0], fmt::format("frag_color{}", 0));
interfaces.push_back(frag_color[0]);
break;
default:
break;
}
}
} // namespace Shader::Backend::SPIRV

View File

@ -0,0 +1,169 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <sirit/sirit.h>
#include "shader_recompiler/backend/bindings.h"
#include "shader_recompiler/ir/program.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/runtime_info.h"
namespace Shader::Backend::SPIRV {
using Sirit::Id;
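/// One-based helper table: [1] holds the scalar type id, [2]..[4] hold the 2-4 component vector ids.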
struct VectorIds {
[[nodiscard]] Id& operator[](u32 index) {
return ids[index - 1];
}
[[nodiscard]] const Id& operator[](u32 index) const {
return ids[index - 1];
}
std::array<Id, 4> ids;
};
class EmitContext final : public Sirit::Module {
public:
explicit EmitContext(const Profile& profile, IR::Program& program, Bindings& binding);
~EmitContext();
Id Def(const IR::Value& value);
[[nodiscard]] Id DefineInput(Id type, u32 location) {
const Id input_id{DefineVar(type, spv::StorageClass::Input)};
Decorate(input_id, spv::Decoration::Location, location);
return input_id;
}
[[nodiscard]] Id DefineOutput(Id type, std::optional<u32> location = std::nullopt) {
const Id output_id{DefineVar(type, spv::StorageClass::Output)};
if (location) {
Decorate(output_id, spv::Decoration::Location, *location);
}
return output_id;
}
[[nodiscard]] Id DefineUniformConst(Id type, u32 set, u32 binding, bool readonly = false) {
const Id uniform_id{DefineVar(type, spv::StorageClass::UniformConstant)};
Decorate(uniform_id, spv::Decoration::DescriptorSet, set);
Decorate(uniform_id, spv::Decoration::Binding, binding);
if (readonly) {
Decorate(uniform_id, spv::Decoration::NonWritable);
}
return uniform_id;
}
template <bool global = true>
[[nodiscard]] Id DefineVar(Id type, spv::StorageClass storage_class) {
const Id pointer_type_id{TypePointer(storage_class, type)};
return global ? AddGlobalVariable(pointer_type_id, storage_class)
: AddLocalVariable(pointer_type_id, storage_class);
}
[[nodiscard]] Id DefineVariable(Id type, std::optional<spv::BuiltIn> builtin,
spv::StorageClass storage_class) {
const Id id{DefineVar(type, storage_class)};
if (builtin) {
Decorate(id, spv::Decoration::BuiltIn, *builtin);
}
interfaces.push_back(id);
return id;
}
[[nodiscard]] Id ConstU32(u32 value) {
return Constant(U32[1], value);
}
template <typename... Args>
[[nodiscard]] Id ConstU32(Args&&... values) {
constexpr u32 size = static_cast<u32>(sizeof...(values));
static_assert(size >= 2);
const std::array constituents{Constant(U32[1], values)...};
const Id type = size <= 4 ? U32[size] : TypeArray(U32[1], ConstU32(size));
return ConstantComposite(type, constituents);
}
[[nodiscard]] Id ConstS32(s32 value) {
return Constant(S32[1], value);
}
template <typename... Args>
[[nodiscard]] Id ConstS32(Args&&... values) {
constexpr u32 size = static_cast<u32>(sizeof...(values));
static_assert(size >= 2);
const std::array constituents{Constant(S32[1], values)...};
const Id type = size <= 4 ? S32[size] : TypeArray(S32[1], ConstU32(size));
return ConstantComposite(type, constituents);
}
[[nodiscard]] Id ConstF32(f32 value) {
return Constant(F32[1], value);
}
template <typename... Args>
[[nodiscard]] Id ConstF32(Args... values) {
constexpr u32 size = static_cast<u32>(sizeof...(values));
static_assert(size >= 2);
const std::array constituents{Constant(F32[1], values)...};
const Id type = size <= 4 ? F32[size] : TypeArray(F32[1], ConstU32(size));
return ConstantComposite(type, constituents);
}
const Profile& profile;
Stage stage{};
Id void_id{};
Id U8{};
Id S8{};
Id U16{};
Id S16{};
Id U64{};
VectorIds F16{};
VectorIds F32{};
VectorIds F64{};
VectorIds S32{};
VectorIds U32{};
VectorIds U1{};
Id true_value{};
Id false_value{};
Id u32_zero_value{};
Id f32_zero_value{};
Id output_u32{};
Id output_f32{};
boost::container::small_vector<Id, 16> interfaces;
Id output_position{};
Id vertex_index{};
Id base_vertex{};
std::array<Id, 8> frag_color{};
struct InputParamInfo {
Id id;
Id pointer_type;
Id component_type;
};
std::array<InputParamInfo, 32> input_params{};
struct ParamElementInfo {
Id id{};
u32 first_element{};
u32 num_components{};
};
std::array<std::array<ParamElementInfo, 4>, 32> output_params{};
private:
void DefineArithmeticTypes();
void DefineInterfaces(const IR::Program& program);
void DefineInputs(const IR::Program& program);
void DefineOutputs(const IR::Program& program);
};
} // namespace Shader::Backend::SPIRV
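A brief usage sketch (not part of the commit) of the constant helpers above; the variadic overloads pick the vector type from the tables and emit a constant composite, while the single-argument forms emit plain scalar constants:

// Hypothetical snippet, assuming ctx is an EmitContext:
const Id zero{ctx.ConstU32(0U)};                         // scalar u32 constant
const Id clear_color{ctx.ConstF32(0.f, 0.f, 0.f, 1.f)};  // f32vec4 constant composite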

View File

@ -0,0 +1,64 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <exception>
#include <string>
#include <utility>
#include <fmt/format.h>
namespace Shader {
class Exception : public std::exception {
public:
explicit Exception(std::string message) noexcept : err_message{std::move(message)} {}
[[nodiscard]] const char* what() const noexcept override {
return err_message.c_str();
}
void Prepend(std::string_view prepend) {
err_message.insert(0, prepend);
}
void Append(std::string_view append) {
err_message += append;
}
private:
std::string err_message;
};
class LogicError : public Exception {
public:
template <typename... Args>
explicit LogicError(const char* message, Args&&... args)
: Exception{fmt::format(fmt::runtime(message), std::forward<Args>(args)...)} {}
};
class RuntimeError : public Exception {
public:
template <typename... Args>
explicit RuntimeError(const char* message, Args&&... args)
: Exception{fmt::format(fmt::runtime(message), std::forward<Args>(args)...)} {}
};
class NotImplementedException : public Exception {
public:
template <typename... Args>
explicit NotImplementedException(const char* message, Args&&... args)
: Exception{fmt::format(fmt::runtime(message), std::forward<Args>(args)...)} {
Append(" is not implemented");
}
};
class InvalidArgument : public Exception {
public:
template <typename... Args>
explicit InvalidArgument(const char* message, Args&&... args)
: Exception{fmt::format(fmt::runtime(message), std::forward<Args>(args)...)} {}
};
} // namespace Shader
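A small illustration (not in the commit) of how the helpers above compose messages; NotImplementedException formats its arguments and then appends the fixed suffix:

// Hypothetical snippet:
try {
    throw Shader::NotImplementedException("Geometry streams");
} catch (const Shader::Exception& e) {
    // e.what() returns "Geometry streams is not implemented"
}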

View File

@ -0,0 +1,209 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include "common/assert.h"
#include "shader_recompiler/frontend/control_flow_graph.h"
namespace Shader::Gcn {
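// Transparent comparator used to look up blocks in the intrusive set by start address
// (see the blocks.find(address, Compare{}) call in CFG::LinkBlocks below).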
struct Compare {
bool operator()(const Block& lhs, u32 rhs) const noexcept {
return lhs.begin < rhs;
}
bool operator()(u32 lhs, const Block& rhs) const noexcept {
return lhs < rhs.begin;
}
bool operator()(const Block& lhs, const Block& rhs) const noexcept {
return lhs.begin < rhs.begin;
}
};
static IR::Condition MakeCondition(Opcode opcode) {
switch (opcode) {
case Opcode::S_CBRANCH_SCC0:
return IR::Condition::Scc0;
case Opcode::S_CBRANCH_SCC1:
return IR::Condition::Scc1;
case Opcode::S_CBRANCH_VCCZ:
return IR::Condition::Vccz;
case Opcode::S_CBRANCH_VCCNZ:
return IR::Condition::Vccnz;
case Opcode::S_CBRANCH_EXECZ:
return IR::Condition::Execz;
case Opcode::S_CBRANCH_EXECNZ:
return IR::Condition::Execnz;
default:
return IR::Condition::True;
}
}
CFG::CFG(ObjectPool<Block>& block_pool_, std::span<const GcnInst> inst_list_)
: block_pool{block_pool_}, inst_list{inst_list_} {
index_to_pc.resize(inst_list.size());
EmitLabels();
EmitBlocks();
LinkBlocks();
}
void CFG::EmitLabels() {
// Always set a label at entry point.
u32 pc = 0;
labels.push_back(pc);
const auto add_label = [this](u32 address) {
const auto it = std::ranges::find(labels, address);
if (it == labels.end()) {
labels.push_back(address);
}
};
// Iterate instruction list and add labels to branch targets.
for (u32 i = 0; i < inst_list.size(); i++) {
index_to_pc[i] = pc;
const GcnInst inst = inst_list[i];
if (inst.IsUnconditionalBranch()) {
const u32 target = inst.BranchTarget(pc);
add_label(target);
} else if (inst.IsConditionalBranch()) {
const u32 true_label = inst.BranchTarget(pc);
const u32 false_label = pc + inst.length;
add_label(true_label);
add_label(false_label);
} else if (inst.opcode == Opcode::S_ENDPGM) {
const u32 next_label = pc + inst.length;
add_label(next_label);
}
pc += inst.length;
}
// Sort labels to make sure block insertion is correct.
std::ranges::sort(labels);
}
void CFG::EmitBlocks() {
const auto get_index = [this](Label label) -> size_t {
if (label == 0) {
return 0ULL;
}
const auto it_index = std::ranges::lower_bound(index_to_pc, label);
ASSERT(it_index != index_to_pc.end() || label > index_to_pc.back());
return std::distance(index_to_pc.begin(), std::prev(it_index));
};
for (auto it = labels.begin(); it != labels.end(); it++) {
const Label start = *it;
const auto next_it = std::next(it);
const bool is_last = next_it == labels.end();
if (is_last) {
// Last label is special.
return;
}
const Label end = *next_it;
const size_t end_index = get_index(end);
const auto& end_inst = inst_list[end_index];
// Insert block between the labels using the last instruction
// as an indicator for branching type.
Block* block = block_pool.Create();
block->begin = start;
block->end = end;
block->begin_index = get_index(start);
block->end_index = end_index;
block->end_inst = end_inst;
block->cond = MakeCondition(end_inst.opcode);
blocks.insert(*block);
}
}
void CFG::LinkBlocks() {
const auto get_block = [this](u32 address) {
const auto it = blocks.find(address, Compare{});
ASSERT_MSG(it != blocks.end() && it->begin == address, "Invalid block address");
return &*it;
};
for (auto& block : blocks) {
const auto end_inst{block.end_inst};
// If the block doesn't end with a branch we simply
// need to link with the next block.
if (!end_inst.IsTerminateInstruction()) {
block.branch_true = get_block(block.end);
block.end_class = EndClass::Branch;
continue;
}
// Find the branch targets from the instruction and link the blocks.
// Note: Block end address is one instruction after end_inst.
const u32 branch_pc = block.end - end_inst.length;
const u32 target_pc = end_inst.BranchTarget(branch_pc);
if (end_inst.IsUnconditionalBranch()) {
block.branch_true = get_block(target_pc);
block.end_class = EndClass::Branch;
} else if (end_inst.IsConditionalBranch()) {
block.branch_true = get_block(target_pc);
block.branch_false = get_block(block.end);
block.end_class = EndClass::Branch;
} else {
// Exit blocks don't link to anything.
block.end_class = EndClass::Exit;
}
}
}
std::string CFG::Dot() const {
int node_uid{0};
const auto name_of = [](const Block& block) { return fmt::format("\"{:#x}\"", block.begin); };
std::string dot{"digraph shader {\n"};
dot += fmt::format("\tsubgraph cluster_{} {{\n", 0);
dot += fmt::format("\t\tnode [style=filled];\n");
for (const Block& block : blocks) {
const std::string name{name_of(block)};
const auto add_branch = [&](Block* branch, bool add_label) {
dot += fmt::format("\t\t{}->{}", name, name_of(*branch));
if (add_label && block.cond != IR::Condition::True &&
block.cond != IR::Condition::False) {
dot += fmt::format(" [label=\"{}\"]", block.cond);
}
dot += '\n';
};
dot += fmt::format("\t\t{};\n", name);
switch (block.end_class) {
case EndClass::Branch:
if (block.cond != IR::Condition::False) {
add_branch(block.branch_true, true);
}
if (block.cond != IR::Condition::True) {
add_branch(block.branch_false, false);
}
break;
case EndClass::Exit:
dot += fmt::format("\t\t{}->N{};\n", name, node_uid);
dot +=
fmt::format("\t\tN{} [label=\"Exit\"][shape=square][style=stripped];\n", node_uid);
++node_uid;
break;
// case EndClass::Kill:
// dot += fmt::format("\t\t{}->N{};\n", name, node_uid);
// dot += fmt::format("\t\tN{} [label=\"Kill\"][shape=square][style=stripped];\n",
// node_uid);
// ++node_uid;
// break;
}
}
dot += "\t\tlabel = \"main\";\n\t}\n";
if (blocks.empty()) {
dot += "Start;\n";
} else {
dot += fmt::format("\tStart -> {};\n", name_of(*blocks.begin()));
}
dot += fmt::format("\tStart [shape=diamond];\n");
dot += "}\n";
return dot;
}
} // namespace Shader::Gcn
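A hedged usage sketch (not part of the commit) of the class above: given an already-decoded instruction span, the constructor runs EmitLabels, EmitBlocks and LinkBlocks, and Dot() produces a Graphviz dump that is handy when debugging block splitting. The DumpCfg helper is hypothetical:

// Sketch, assumed to live inside namespace Shader::Gcn:
std::string DumpCfg(std::span<const GcnInst> inst_list) {
    ObjectPool<Block> block_pool;   // backing storage for the CFG nodes
    CFG cfg{block_pool, inst_list}; // labels -> blocks -> links
    return cfg.Dot();               // "digraph shader { ... }"
}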

View File

@ -0,0 +1,66 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <span>
#include <string>
#include <boost/container/small_vector.hpp>
#include <boost/intrusive/set.hpp>
#include "common/types.h"
#include "shader_recompiler/frontend/instruction.h"
#include "shader_recompiler/ir/condition.h"
#include "shader_recompiler/object_pool.h"
namespace Shader::Gcn {
using Hook =
boost::intrusive::set_base_hook<boost::intrusive::link_mode<boost::intrusive::normal_link>>;
enum class EndClass {
Branch, ///< Block ends with a (un)conditional branch.
Exit, ///< Block ends with an exit instruction.
};
/// A block represents a linear range of instructions.
struct Block : Hook {
[[nodiscard]] bool Contains(u32 pc) const noexcept;
bool operator<(const Block& rhs) const noexcept {
return begin < rhs.begin;
}
u32 begin;
u32 end;
u32 begin_index;
u32 end_index;
IR::Condition cond{};
GcnInst end_inst{};
EndClass end_class{};
Block* branch_true{};
Block* branch_false{};
};
class CFG {
using Label = u32;
public:
explicit CFG(ObjectPool<Block>& block_pool, std::span<const GcnInst> inst_list);
[[nodiscard]] std::string Dot() const;
private:
void EmitLabels();
void EmitBlocks();
void LinkBlocks();
public:
ObjectPool<Block>& block_pool;
std::span<const GcnInst> inst_list;
std::vector<u32> index_to_pc;
boost::container::small_vector<Label, 16> labels;
boost::intrusive::set<Block> blocks;
};
} // namespace Shader::Gcn

File diff suppressed because it is too large.

View File

@ -0,0 +1,97 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "shader_recompiler/frontend/instruction.h"
namespace Shader::Gcn {
struct InstFormat {
InstClass inst_class = InstClass::Undefined;
InstCategory inst_category = InstCategory::Undefined;
u32 src_count = 0;
u32 dst_count = 0;
ScalarType src_type = ScalarType::Undefined;
ScalarType dst_type = ScalarType::Undefined;
};
InstEncoding GetInstructionEncoding(u32 token);
u32 GetEncodingLength(InstEncoding encoding);
InstFormat InstructionFormat(InstEncoding encoding, u32 opcode);
Opcode DecodeOpcode(u32 token);
class GcnCodeSlice {
public:
GcnCodeSlice(const u32* ptr, const u32* end) : m_ptr(ptr), m_end(end) {}
GcnCodeSlice(const GcnCodeSlice& other) = default;
~GcnCodeSlice() = default;
u32 at(u32 id) const {
return m_ptr[id];
}
u32 readu32() {
return *(m_ptr++);
}
u64 readu64() {
const u64 value = *(u64*)m_ptr;
m_ptr += 2;
return value;
}
bool atEnd() const {
return m_ptr == m_end;
}
private:
const u32* m_ptr{};
const u32* m_end{};
};
class GcnDecodeContext {
public:
GcnInst decodeInstruction(GcnCodeSlice& code);
private:
uint32_t getEncodingLength(InstEncoding encoding);
uint32_t getOpMapOffset(InstEncoding encoding);
uint32_t mapEncodingOp(InstEncoding encoding, Opcode opcode);
void updateInstructionMeta(InstEncoding encoding);
uint32_t getMimgModifier(Opcode opcode);
void repairOperandType();
OperandField getOperandField(uint32_t code);
void decodeInstruction32(InstEncoding encoding, GcnCodeSlice& code);
void decodeInstruction64(InstEncoding encoding, GcnCodeSlice& code);
void decodeLiteralConstant(InstEncoding encoding, GcnCodeSlice& code);
// 32 bits encodings
void decodeInstructionSOP1(uint32_t hexInstruction);
void decodeInstructionSOPP(uint32_t hexInstruction);
void decodeInstructionSOPC(uint32_t hexInstruction);
void decodeInstructionSOPK(uint32_t hexInstruction);
void decodeInstructionSOP2(uint32_t hexInstruction);
void decodeInstructionVOP1(uint32_t hexInstruction);
void decodeInstructionVOPC(uint32_t hexInstruction);
void decodeInstructionVOP2(uint32_t hexInstruction);
void decodeInstructionSMRD(uint32_t hexInstruction);
void decodeInstructionVINTRP(uint32_t hexInstruction);
// 64 bits encodings
void decodeInstructionVOP3(uint64_t hexInstruction);
void decodeInstructionMUBUF(uint64_t hexInstruction);
void decodeInstructionMTBUF(uint64_t hexInstruction);
void decodeInstructionMIMG(uint64_t hexInstruction);
void decodeInstructionDS(uint64_t hexInstruction);
void decodeInstructionEXP(uint64_t hexInstruction);
private:
GcnInst m_instruction;
};
} // namespace Shader::Gcn
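A hedged sketch (not in the commit) of how the decoder is likely driven: wrap the raw dwords in a GcnCodeSlice and decode until the slice is exhausted. The DecodeProgram name and its parameters are assumptions:

// Sketch, assumed to live inside namespace Shader::Gcn:
void DecodeProgram(const u32* code, const u32* end) {
    GcnCodeSlice slice{code, end};
    GcnDecodeContext decoder;
    while (!slice.atEnd()) {
        const GcnInst inst = decoder.decodeInstruction(slice); // advances the slice past the decoded dwords
        // ... hand inst to the CFG builder / translator ...
    }
}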

File diff suppressed because it is too large.

View File

@ -0,0 +1,50 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "shader_recompiler/frontend/instruction.h"
namespace Shader::Gcn {
u32 GcnInst::BranchTarget(u32 pc) const {
const s16 simm = static_cast<s16>(control.sopp.simm * 4);
const u32 target = pc + simm + 4;
return target;
}
bool GcnInst::IsTerminateInstruction() const {
return IsUnconditionalBranch() || IsConditionalBranch() || IsFork() ||
opcode == Opcode::S_ENDPGM;
}
bool GcnInst::IsUnconditionalBranch() const {
return opcode == Opcode::S_BRANCH;
}
bool GcnInst::IsFork() const {
return opcode == Opcode::S_CBRANCH_I_FORK || opcode == Opcode::S_CBRANCH_G_FORK ||
opcode == Opcode::S_CBRANCH_JOIN;
}
bool GcnInst::IsConditionalBranch() const {
switch (opcode) {
case Opcode::S_CBRANCH_SCC0:
case Opcode::S_CBRANCH_SCC1:
case Opcode::S_CBRANCH_VCCZ:
case Opcode::S_CBRANCH_VCCNZ:
case Opcode::S_CBRANCH_EXECZ:
case Opcode::S_CBRANCH_EXECNZ:
return true;
case Opcode::S_CBRANCH_CDBGSYS:
case Opcode::S_CBRANCH_CDBGUSER:
case Opcode::S_CBRANCH_CDBGSYS_OR_USER:
case Opcode::S_CBRANCH_CDBGSYS_AND_USER:
UNIMPLEMENTED();
return true;
default:
break;
}
return false;
}
} // namespace Shader::Gcn
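For reference (not in the commit): the target arithmetic above works in bytes. simm is the signed 16-bit word offset from the SOPP encoding, scaled to bytes by the *4, and the extra +4 steps over the 4-byte branch instruction itself, so the offset is relative to the following instruction. A hypothetical example:

// S_BRANCH at pc = 0x40 with simm16 = -2 (0xFFFE):
//   simm   = -2 * 4 = -8 bytes
//   target = 0x40 + (-8) + 4 = 0x3C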

View File

@ -0,0 +1,208 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <limits>
#include "common/bit_field.h"
#include "shader_recompiler/frontend/opcodes.h"
namespace Shader::Gcn {
constexpr u32 GcnMaxSrcCount = 4;
constexpr u32 GcnMaxDstCount = 2;
enum OperandFieldRange {
ScalarGPRMin = 0,
ScalarGPRMax = 103,
SignedConstIntPosMin = 129,
SignedConstIntPosMax = 192,
SignedConstIntNegMin = 193,
SignedConstIntNegMax = 208,
ConstFloatMin = 240,
VectorGPRMin = 256,
VectorGPRMax = 511
};
/// These are applied after loading an operand register.
struct InputModifiers {
bool neg = false;
bool abs = false;
};
/// These are applied before storing an operand register.
struct OutputModifiers {
bool clamp = false;
float multiplier = std::numeric_limits<float>::quiet_NaN();
};
struct InstOperand {
OperandField field = OperandField::Undefined;
ScalarType type = ScalarType::Undefined;
InputModifiers input_modifier = {};
OutputModifiers output_modifier = {};
u32 code = 0xFFFFFFFF;
};
struct Operand {
OperandField field = OperandField::Undefined;
ScalarType type = ScalarType::Undefined;
union {
InputModifiers input_modifier = {};
OutputModifiers output_modifier;
};
u32 code = 0xFFFFFFFF;
};
struct InstSOPK {
u16 simm;
};
struct InstSOPP {
u16 simm;
};
struct InstVOP3 {
Operand vdst;
Operand src0;
Operand src1;
Operand src2;
};
struct SMRD {
u8 offset;
bool imm;
u8 sbase;
};
struct InstControlSOPK {
BitField<0, 16, u32> simm;
};
struct InstControlSOPP {
BitField<0, 16, u32> simm;
};
struct InstControlVOP3 {
u64 : 8;
u64 abs : 3;
u64 clmp : 1;
u64 : 47;
u64 omod : 2;
u64 neg : 3;
};
struct InstControlSMRD {
u32 offset : 8;
u32 imm : 1;
u32 count : 5;
u32 : 18;
};
struct InstControlMUBUF {
u64 offset : 12;
u64 offen : 1;
u64 idxen : 1;
u64 glc : 1;
u64 : 1;
u64 lds : 1;
u64 : 37;
u64 slc : 1;
u64 tfe : 1;
u64 count : 3;
u64 size : 5;
};
struct InstControlMTBUF {
u64 offset : 12;
u64 offen : 1;
u64 idxen : 1;
u64 glc : 1;
u64 : 4;
u64 dfmt : 4;
u64 nfmt : 3;
u64 : 28;
u64 slc : 1;
u64 tfe : 1;
u64 count : 3;
u64 size : 5;
};
struct InstControlMIMG {
u64 : 8;
u64 dmask : 4;
u64 unrm : 1;
u64 glc : 1;
u64 da : 1;
u64 r128 : 1;
u64 tfe : 1;
u64 lwe : 1;
u64 : 7;
u64 slc : 1;
u64 mod : 32;
u64 : 6;
};
struct InstControlDS {
u64 offset0 : 8;
u64 offset1 : 8;
u64 : 1;
u64 gds : 1;
u64 dual : 1;
u64 sign : 1;
u64 relative : 1;
u64 stride : 1;
u64 size : 4;
u64 : 38;
};
struct InstControlVINTRP {
u32 : 8;
u32 chan : 2;
u32 attr : 6;
u32 : 16;
};
struct InstControlEXP {
u64 en : 4;
u64 target : 6;
u64 compr : 1;
u64 done : 1;
u64 vm : 1;
u64 reserved : 51;
};
union InstControl {
InstControlSOPK sopk;
InstControlSOPP sopp;
InstControlVOP3 vop3;
InstControlSMRD smrd;
InstControlMUBUF mubuf;
InstControlMTBUF mtbuf;
InstControlMIMG mimg;
InstControlDS ds;
InstControlVINTRP vintrp;
InstControlEXP exp;
};
struct GcnInst {
Opcode opcode;
InstEncoding encoding;
InstClass inst_class;
InstCategory category;
InstControl control;
u32 length;
u32 src_count;
u32 dst_count;
std::array<InstOperand, GcnMaxSrcCount> src;
std::array<InstOperand, GcnMaxDstCount> dst;
u32 BranchTarget(u32 pc) const;
bool IsTerminateInstruction() const;
bool IsUnconditionalBranch() const;
bool IsConditionalBranch() const;
bool IsFork() const;
};
} // namespace Shader::Gcn

View File

@ -0,0 +1,10 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
namespace Shader::Gcn {
void Translate();
} // namespace Shader::Gcn

File diff suppressed because it is too large.

View File

@ -0,0 +1,829 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <boost/intrusive/list.hpp>
#include <fmt/format.h>
#include "shader_recompiler/frontend/structured_control_flow.h"
#include "shader_recompiler/frontend/translate/translate.h"
#include "shader_recompiler/ir/ir_emitter.h"
namespace Shader::Gcn {
namespace {
struct Statement;
// Use normal_link because we are not guaranteed to destroy the tree in order
using ListBaseHook =
boost::intrusive::list_base_hook<boost::intrusive::link_mode<boost::intrusive::normal_link>>;
using Tree = boost::intrusive::list<Statement,
// Allow using Statement without a definition
boost::intrusive::base_hook<ListBaseHook>,
// Avoid linear complexity on splice, size is never called
boost::intrusive::constant_time_size<false>>;
using Node = Tree::iterator;
enum class StatementType {
Code,
Goto,
Label,
If,
Loop,
Break,
Return,
Kill,
Unreachable,
Function,
Identity,
Not,
Or,
SetVariable,
Variable,
};
bool HasChildren(StatementType type) {
switch (type) {
case StatementType::If:
case StatementType::Loop:
case StatementType::Function:
return true;
default:
return false;
}
}
struct Goto {};
struct Label {};
struct If {};
struct Loop {};
struct Break {};
struct Return {};
struct Kill {};
struct Unreachable {};
struct FunctionTag {};
struct Identity {};
struct Not {};
struct Or {};
struct SetVariable {};
struct Variable {};
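// Tagged-union tree node: the `type` field selects which members of the unions below are active.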
struct Statement : ListBaseHook {
Statement(const Block* block_, Statement* up_)
: block{block_}, up{up_}, type{StatementType::Code} {}
Statement(Goto, Statement* cond_, Node label_, Statement* up_)
: label{label_}, cond{cond_}, up{up_}, type{StatementType::Goto} {}
Statement(Label, u32 id_, Statement* up_) : id{id_}, up{up_}, type{StatementType::Label} {}
Statement(If, Statement* cond_, Tree&& children_, Statement* up_)
: children{std::move(children_)}, cond{cond_}, up{up_}, type{StatementType::If} {}
Statement(Loop, Statement* cond_, Tree&& children_, Statement* up_)
: children{std::move(children_)}, cond{cond_}, up{up_}, type{StatementType::Loop} {}
Statement(Break, Statement* cond_, Statement* up_)
: cond{cond_}, up{up_}, type{StatementType::Break} {}
Statement(Return, Statement* up_) : up{up_}, type{StatementType::Return} {}
Statement(Kill, Statement* up_) : up{up_}, type{StatementType::Kill} {}
Statement(Unreachable, Statement* up_) : up{up_}, type{StatementType::Unreachable} {}
Statement(FunctionTag) : children{}, type{StatementType::Function} {}
Statement(Identity, IR::Condition cond_, Statement* up_)
: guest_cond{cond_}, up{up_}, type{StatementType::Identity} {}
Statement(Not, Statement* op_, Statement* up_) : op{op_}, up{up_}, type{StatementType::Not} {}
Statement(Or, Statement* op_a_, Statement* op_b_, Statement* up_)
: op_a{op_a_}, op_b{op_b_}, up{up_}, type{StatementType::Or} {}
Statement(SetVariable, u32 id_, Statement* op_, Statement* up_)
: op{op_}, id{id_}, up{up_}, type{StatementType::SetVariable} {}
Statement(Variable, u32 id_, Statement* up_)
: id{id_}, up{up_}, type{StatementType::Variable} {}
~Statement() {
if (HasChildren(type)) {
std::destroy_at(&children);
}
}
union {
const Block* block;
Node label;
Tree children;
IR::Condition guest_cond;
Statement* op;
Statement* op_a;
u32 location;
s32 branch_offset;
};
union {
Statement* cond;
Statement* op_b;
u32 id;
};
Statement* up{};
StatementType type;
};
std::string DumpExpr(const Statement* stmt) {
switch (stmt->type) {
case StatementType::Identity:
return fmt::format("{}", stmt->guest_cond);
case StatementType::Not:
return fmt::format("!{}", DumpExpr(stmt->op));
case StatementType::Or:
return fmt::format("{} || {}", DumpExpr(stmt->op_a), DumpExpr(stmt->op_b));
case StatementType::Variable:
return fmt::format("goto_L{}", stmt->id);
default:
return "<invalid type>";
}
}
[[maybe_unused]] std::string DumpTree(const Tree& tree, u32 indentation = 0) {
std::string ret;
std::string indent(indentation, ' ');
for (auto stmt = tree.begin(); stmt != tree.end(); ++stmt) {
switch (stmt->type) {
case StatementType::Code:
ret += fmt::format("{} Block {:04x} -> {:04x} (0x{:016x});\n", indent,
stmt->block->begin, stmt->block->end,
reinterpret_cast<uintptr_t>(stmt->block));
break;
case StatementType::Goto:
ret += fmt::format("{} if ({}) goto L{};\n", indent, DumpExpr(stmt->cond),
stmt->label->id);
break;
case StatementType::Label:
ret += fmt::format("{}L{}:\n", indent, stmt->id);
break;
case StatementType::If:
ret += fmt::format("{} if ({}) {{\n", indent, DumpExpr(stmt->cond));
ret += DumpTree(stmt->children, indentation + 4);
ret += fmt::format("{} }}\n", indent);
break;
case StatementType::Loop:
ret += fmt::format("{} do {{\n", indent);
ret += DumpTree(stmt->children, indentation + 4);
ret += fmt::format("{} }} while ({});\n", indent, DumpExpr(stmt->cond));
break;
case StatementType::Break:
ret += fmt::format("{} if ({}) break;\n", indent, DumpExpr(stmt->cond));
break;
case StatementType::Return:
ret += fmt::format("{} return;\n", indent);
break;
case StatementType::Kill:
ret += fmt::format("{} kill;\n", indent);
break;
case StatementType::Unreachable:
ret += fmt::format("{} unreachable;\n", indent);
break;
case StatementType::SetVariable:
ret += fmt::format("{} goto_L{} = {};\n", indent, stmt->id, DumpExpr(stmt->op));
break;
case StatementType::Function:
case StatementType::Identity:
case StatementType::Not:
case StatementType::Or:
case StatementType::Variable:
throw LogicError("Statement can't be printed");
}
}
return ret;
}
void SanitizeNoBreaks(const Tree& tree) {
if (std::ranges::find(tree, StatementType::Break, &Statement::type) != tree.end()) {
throw NotImplementedException("Capturing statement with break nodes");
}
}
size_t Level(Node stmt) {
size_t level{0};
Statement* node{stmt->up};
while (node) {
++level;
node = node->up;
}
return level;
}
bool IsDirectlyRelated(Node goto_stmt, Node label_stmt) {
const size_t goto_level{Level(goto_stmt)};
const size_t label_level{Level(label_stmt)};
size_t min_level;
size_t max_level;
Node min;
Node max;
if (label_level < goto_level) {
min_level = label_level;
max_level = goto_level;
min = label_stmt;
max = goto_stmt;
} else { // goto_level < label_level
min_level = goto_level;
max_level = label_level;
min = goto_stmt;
max = label_stmt;
}
while (max_level > min_level) {
--max_level;
max = max->up;
}
return min->up == max->up;
}
bool IsIndirectlyRelated(Node goto_stmt, Node label_stmt) {
return goto_stmt->up != label_stmt->up && !IsDirectlyRelated(goto_stmt, label_stmt);
}
[[maybe_unused]] bool AreSiblings(Node goto_stmt, Node label_stmt) noexcept {
Node it{goto_stmt};
do {
if (it == label_stmt) {
return true;
}
--it;
} while (it != goto_stmt->up->children.begin());
while (it != goto_stmt->up->children.end()) {
if (it == label_stmt) {
return true;
}
++it;
}
return false;
}
Node SiblingFromNephew(Node uncle, Node nephew) noexcept {
Statement* const parent{uncle->up};
Statement* it{&*nephew};
while (it->up != parent) {
it = it->up;
}
return Tree::s_iterator_to(*it);
}
bool AreOrdered(Node left_sibling, Node right_sibling) noexcept {
const Node end{right_sibling->up->children.end()};
for (auto it = right_sibling; it != end; ++it) {
if (it == left_sibling) {
return false;
}
}
return true;
}
bool NeedsLift(Node goto_stmt, Node label_stmt) noexcept {
const Node sibling{SiblingFromNephew(goto_stmt, label_stmt)};
return AreOrdered(sibling, goto_stmt);
}
/**
* The algorithm used here is from:
* Taming Control Flow: A Structured Approach to Eliminating Goto Statements.
* Ana M. Erosa and Laurie J. Hendren
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.42.1485&rep=rep1&type=pdf
*/
class GotoPass {
public:
explicit GotoPass(CFG& cfg, ObjectPool<Statement>& stmt_pool) : pool{stmt_pool} {
std::vector gotos{BuildTree(cfg)};
const auto end{gotos.rend()};
for (auto goto_stmt = gotos.rbegin(); goto_stmt != end; ++goto_stmt) {
RemoveGoto(*goto_stmt);
}
}
Statement& RootStatement() noexcept {
return root_stmt;
}
private:
void RemoveGoto(Node goto_stmt) {
// Force goto_stmt and label_stmt to be directly related
const Node label_stmt{goto_stmt->label};
if (IsIndirectlyRelated(goto_stmt, label_stmt)) {
// Move goto_stmt out using outward-movement transformation until it becomes
// directly related to label_stmt
while (!IsDirectlyRelated(goto_stmt, label_stmt)) {
goto_stmt = MoveOutward(goto_stmt);
}
}
// Force goto_stmt and label_stmt to be siblings
if (IsDirectlyRelated(goto_stmt, label_stmt)) {
const size_t label_level{Level(label_stmt)};
size_t goto_level{Level(goto_stmt)};
if (goto_level > label_level) {
// Move goto_stmt out of its level using outward-movement transformations
while (goto_level > label_level) {
goto_stmt = MoveOutward(goto_stmt);
--goto_level;
}
} else { // Level(goto_stmt) < Level(label_stmt)
if (NeedsLift(goto_stmt, label_stmt)) {
// Lift goto_stmt to above stmt containing label_stmt using goto-lifting
// transformations
goto_stmt = Lift(goto_stmt);
}
// Move goto_stmt into label_stmt's level using inward-movement transformation
while (goto_level < label_level) {
goto_stmt = MoveInward(goto_stmt);
++goto_level;
}
}
}
// Expensive operation:
if (!AreSiblings(goto_stmt, label_stmt)) {
throw LogicError("Goto is not a sibling with the label");
}
// goto_stmt and label_stmt are guaranteed to be siblings, eliminate
if (std::next(goto_stmt) == label_stmt) {
// Simply eliminate the goto if the label is next to it
goto_stmt->up->children.erase(goto_stmt);
} else if (AreOrdered(goto_stmt, label_stmt)) {
// Eliminate goto_stmt with a conditional
EliminateAsConditional(goto_stmt, label_stmt);
} else {
// Eliminate goto_stmt with a loop
EliminateAsLoop(goto_stmt, label_stmt);
}
}
std::vector<Node> BuildTree(CFG& cfg) {
u32 label_id{0};
std::vector<Node> gotos;
BuildTree(cfg, label_id, gotos, root_stmt.children.end(), std::nullopt);
return gotos;
}
void BuildTree(CFG& cfg, u32& label_id, std::vector<Node>& gotos, Node function_insert_point,
std::optional<Node> return_label) {
Statement* const false_stmt{pool.Create(Identity{}, IR::Condition::False, &root_stmt)};
Tree& root{root_stmt.children};
std::unordered_map<Block*, Node> local_labels;
local_labels.reserve(cfg.blocks.size());
for (Block& block : cfg.blocks) {
Statement* const label{pool.Create(Label{}, label_id, &root_stmt)};
const Node label_it{root.insert(function_insert_point, *label)};
local_labels.emplace(&block, label_it);
++label_id;
}
for (Block& block : cfg.blocks) {
const Node label{local_labels.at(&block)};
// Insertion point
const Node ip{std::next(label)};
// Reset goto variables before the first block and after its respective label
const auto make_reset_variable{[&]() -> Statement& {
return *pool.Create(SetVariable{}, label->id, false_stmt, &root_stmt);
}};
root.push_front(make_reset_variable());
root.insert(ip, make_reset_variable());
root.insert(ip, *pool.Create(&block, &root_stmt));
switch (block.end_class) {
case EndClass::Branch: {
Statement* const always_cond{
pool.Create(Identity{}, IR::Condition::True, &root_stmt)};
if (block.cond == IR::Condition::True) {
const Node true_label{local_labels.at(block.branch_true)};
gotos.push_back(
root.insert(ip, *pool.Create(Goto{}, always_cond, true_label, &root_stmt)));
} else if (block.cond == IR::Condition::False) {
const Node false_label{local_labels.at(block.branch_false)};
gotos.push_back(root.insert(
ip, *pool.Create(Goto{}, always_cond, false_label, &root_stmt)));
} else {
const Node true_label{local_labels.at(block.branch_true)};
const Node false_label{local_labels.at(block.branch_false)};
Statement* const true_cond{pool.Create(Identity{}, block.cond, &root_stmt)};
gotos.push_back(
root.insert(ip, *pool.Create(Goto{}, true_cond, true_label, &root_stmt)));
gotos.push_back(root.insert(
ip, *pool.Create(Goto{}, always_cond, false_label, &root_stmt)));
}
break;
}
case EndClass::Exit:
root.insert(ip, *pool.Create(Return{}, &root_stmt));
break;
// case EndClass::Kill:
// root.insert(ip, *pool.Create(Kill{}, &root_stmt));
// break;
}
}
}
void UpdateTreeUp(Statement* tree) {
for (Statement& stmt : tree->children) {
stmt.up = tree;
}
}
void EliminateAsConditional(Node goto_stmt, Node label_stmt) {
Tree& body{goto_stmt->up->children};
Tree if_body;
if_body.splice(if_body.begin(), body, std::next(goto_stmt), label_stmt);
Statement* const cond{pool.Create(Not{}, goto_stmt->cond, &root_stmt)};
Statement* const if_stmt{pool.Create(If{}, cond, std::move(if_body), goto_stmt->up)};
UpdateTreeUp(if_stmt);
body.insert(goto_stmt, *if_stmt);
body.erase(goto_stmt);
}
void EliminateAsLoop(Node goto_stmt, Node label_stmt) {
Tree& body{goto_stmt->up->children};
Tree loop_body;
loop_body.splice(loop_body.begin(), body, label_stmt, goto_stmt);
Statement* const cond{goto_stmt->cond};
Statement* const loop{pool.Create(Loop{}, cond, std::move(loop_body), goto_stmt->up)};
UpdateTreeUp(loop);
body.insert(goto_stmt, *loop);
body.erase(goto_stmt);
}
[[nodiscard]] Node MoveOutward(Node goto_stmt) {
switch (goto_stmt->up->type) {
case StatementType::If:
return MoveOutwardIf(goto_stmt);
case StatementType::Loop:
return MoveOutwardLoop(goto_stmt);
default:
throw LogicError("Invalid outward movement");
}
}
[[nodiscard]] Node MoveInward(Node goto_stmt) {
Statement* const parent{goto_stmt->up};
Tree& body{parent->children};
const Node label{goto_stmt->label};
const Node label_nested_stmt{SiblingFromNephew(goto_stmt, label)};
const u32 label_id{label->id};
Statement* const goto_cond{goto_stmt->cond};
Statement* const set_var{pool.Create(SetVariable{}, label_id, goto_cond, parent)};
body.insert(goto_stmt, *set_var);
Tree if_body;
if_body.splice(if_body.begin(), body, std::next(goto_stmt), label_nested_stmt);
Statement* const variable{pool.Create(Variable{}, label_id, &root_stmt)};
Statement* const neg_var{pool.Create(Not{}, variable, &root_stmt)};
if (!if_body.empty()) {
Statement* const if_stmt{pool.Create(If{}, neg_var, std::move(if_body), parent)};
UpdateTreeUp(if_stmt);
body.insert(goto_stmt, *if_stmt);
}
body.erase(goto_stmt);
switch (label_nested_stmt->type) {
case StatementType::If:
// Update nested if condition
label_nested_stmt->cond =
pool.Create(Or{}, variable, label_nested_stmt->cond, &root_stmt);
break;
case StatementType::Loop:
break;
default:
throw LogicError("Invalid inward movement");
}
Tree& nested_tree{label_nested_stmt->children};
Statement* const new_goto{pool.Create(Goto{}, variable, label, &*label_nested_stmt)};
return nested_tree.insert(nested_tree.begin(), *new_goto);
}
[[nodiscard]] Node Lift(Node goto_stmt) {
Statement* const parent{goto_stmt->up};
Tree& body{parent->children};
const Node label{goto_stmt->label};
const u32 label_id{label->id};
const Node label_nested_stmt{SiblingFromNephew(goto_stmt, label)};
Tree loop_body;
loop_body.splice(loop_body.begin(), body, label_nested_stmt, goto_stmt);
SanitizeNoBreaks(loop_body);
Statement* const variable{pool.Create(Variable{}, label_id, &root_stmt)};
Statement* const loop_stmt{pool.Create(Loop{}, variable, std::move(loop_body), parent)};
UpdateTreeUp(loop_stmt);
body.insert(goto_stmt, *loop_stmt);
Statement* const new_goto{pool.Create(Goto{}, variable, label, loop_stmt)};
loop_stmt->children.push_front(*new_goto);
const Node new_goto_node{loop_stmt->children.begin()};
Statement* const set_var{pool.Create(SetVariable{}, label_id, goto_stmt->cond, loop_stmt)};
loop_stmt->children.push_back(*set_var);
body.erase(goto_stmt);
return new_goto_node;
}
Node MoveOutwardIf(Node goto_stmt) {
const Node parent{Tree::s_iterator_to(*goto_stmt->up)};
Tree& body{parent->children};
const u32 label_id{goto_stmt->label->id};
Statement* const goto_cond{goto_stmt->cond};
Statement* const set_goto_var{pool.Create(SetVariable{}, label_id, goto_cond, &*parent)};
body.insert(goto_stmt, *set_goto_var);
Tree if_body;
if_body.splice(if_body.begin(), body, std::next(goto_stmt), body.end());
if_body.pop_front();
Statement* const cond{pool.Create(Variable{}, label_id, &root_stmt)};
Statement* const neg_cond{pool.Create(Not{}, cond, &root_stmt)};
Statement* const if_stmt{pool.Create(If{}, neg_cond, std::move(if_body), &*parent)};
UpdateTreeUp(if_stmt);
body.insert(goto_stmt, *if_stmt);
body.erase(goto_stmt);
Statement* const new_cond{pool.Create(Variable{}, label_id, &root_stmt)};
Statement* const new_goto{pool.Create(Goto{}, new_cond, goto_stmt->label, parent->up)};
Tree& parent_tree{parent->up->children};
return parent_tree.insert(std::next(parent), *new_goto);
}
Node MoveOutwardLoop(Node goto_stmt) {
Statement* const parent{goto_stmt->up};
Tree& body{parent->children};
const u32 label_id{goto_stmt->label->id};
Statement* const goto_cond{goto_stmt->cond};
Statement* const set_goto_var{pool.Create(SetVariable{}, label_id, goto_cond, parent)};
Statement* const cond{pool.Create(Variable{}, label_id, &root_stmt)};
Statement* const break_stmt{pool.Create(Break{}, cond, parent)};
body.insert(goto_stmt, *set_goto_var);
body.insert(goto_stmt, *break_stmt);
body.erase(goto_stmt);
const Node loop{Tree::s_iterator_to(*goto_stmt->up)};
Statement* const new_goto_cond{pool.Create(Variable{}, label_id, &root_stmt)};
Statement* const new_goto{pool.Create(Goto{}, new_goto_cond, goto_stmt->label, loop->up)};
Tree& parent_tree{loop->up->children};
return parent_tree.insert(std::next(loop), *new_goto);
}
ObjectPool<Statement>& pool;
Statement root_stmt{FunctionTag{}};
};
[[nodiscard]] Statement* TryFindForwardBlock(Statement& stmt) {
Tree& tree{stmt.up->children};
const Node end{tree.end()};
Node forward_node{std::next(Tree::s_iterator_to(stmt))};
while (forward_node != end && !HasChildren(forward_node->type)) {
if (forward_node->type == StatementType::Code) {
return &*forward_node;
}
++forward_node;
}
return nullptr;
}
[[nodiscard]] IR::U1 VisitExpr(IR::IREmitter& ir, const Statement& stmt) {
switch (stmt.type) {
case StatementType::Identity:
return ir.Condition(stmt.guest_cond);
case StatementType::Not:
return ir.LogicalNot(IR::U1{VisitExpr(ir, *stmt.op)});
case StatementType::Or:
return ir.LogicalOr(VisitExpr(ir, *stmt.op_a), VisitExpr(ir, *stmt.op_b));
case StatementType::Variable:
return ir.GetGotoVariable(stmt.id);
default:
throw NotImplementedException("Statement type {}", u32(stmt.type));
}
}
class TranslatePass {
public:
TranslatePass(ObjectPool<IR::Inst>& inst_pool_, ObjectPool<IR::Block>& block_pool_,
ObjectPool<Statement>& stmt_pool_, Statement& root_stmt,
IR::AbstractSyntaxList& syntax_list_, std::span<const GcnInst> inst_list_,
Stage stage_)
: stmt_pool{stmt_pool_}, inst_pool{inst_pool_}, block_pool{block_pool_},
syntax_list{syntax_list_}, inst_list{inst_list_}, stage{stage_} {
Visit(root_stmt, nullptr, nullptr);
IR::Block& first_block{*syntax_list.front().data.block};
IR::IREmitter ir(first_block, first_block.begin());
ir.Prologue();
}
private:
void Visit(Statement& parent, IR::Block* break_block, IR::Block* fallthrough_block) {
IR::Block* current_block{};
const auto ensure_block{[&] {
if (current_block) {
return;
}
current_block = block_pool.Create(inst_pool);
auto& node{syntax_list.emplace_back()};
node.type = IR::AbstractSyntaxNode::Type::Block;
node.data.block = current_block;
}};
Tree& tree{parent.children};
for (auto it = tree.begin(); it != tree.end(); ++it) {
Statement& stmt{*it};
switch (stmt.type) {
case StatementType::Label:
// Labels can be ignored
break;
case StatementType::Code: {
ensure_block();
const u32 start = stmt.block->begin_index;
const u32 size = stmt.block->end_index - start + 1;
Translate(current_block, stage, inst_list.subspan(start, size));
fmt::print("{}\n", IR::DumpBlock(*current_block));
break;
}
case StatementType::SetVariable: {
ensure_block();
IR::IREmitter ir{*current_block};
ir.SetGotoVariable(stmt.id, VisitExpr(ir, *stmt.op));
break;
}
case StatementType::If: {
ensure_block();
IR::Block* const merge_block{MergeBlock(parent, stmt)};
// Implement if header block
IR::IREmitter ir{*current_block};
const IR::U1 cond{ir.ConditionRef(VisitExpr(ir, *stmt.cond))};
const size_t if_node_index{syntax_list.size()};
syntax_list.emplace_back();
// Visit children
const size_t then_block_index{syntax_list.size()};
Visit(stmt, break_block, merge_block);
IR::Block* const then_block{syntax_list.at(then_block_index).data.block};
current_block->AddBranch(then_block);
current_block->AddBranch(merge_block);
current_block = merge_block;
auto& if_node{syntax_list[if_node_index]};
if_node.type = IR::AbstractSyntaxNode::Type::If;
if_node.data.if_node.cond = cond;
if_node.data.if_node.body = then_block;
if_node.data.if_node.merge = merge_block;
auto& endif_node{syntax_list.emplace_back()};
endif_node.type = IR::AbstractSyntaxNode::Type::EndIf;
endif_node.data.end_if.merge = merge_block;
auto& merge{syntax_list.emplace_back()};
merge.type = IR::AbstractSyntaxNode::Type::Block;
merge.data.block = merge_block;
break;
}
case StatementType::Loop: {
IR::Block* const loop_header_block{block_pool.Create(inst_pool)};
if (current_block) {
current_block->AddBranch(loop_header_block);
}
auto& header_node{syntax_list.emplace_back()};
header_node.type = IR::AbstractSyntaxNode::Type::Block;
header_node.data.block = loop_header_block;
IR::Block* const continue_block{block_pool.Create(inst_pool)};
IR::Block* const merge_block{MergeBlock(parent, stmt)};
const size_t loop_node_index{syntax_list.size()};
syntax_list.emplace_back();
// Visit children
const size_t body_block_index{syntax_list.size()};
Visit(stmt, merge_block, continue_block);
// The continue block is located at the end of the loop
IR::IREmitter ir{*continue_block};
const IR::U1 cond{ir.ConditionRef(VisitExpr(ir, *stmt.cond))};
IR::Block* const body_block{syntax_list.at(body_block_index).data.block};
loop_header_block->AddBranch(body_block);
continue_block->AddBranch(loop_header_block);
continue_block->AddBranch(merge_block);
current_block = merge_block;
auto& loop{syntax_list[loop_node_index]};
loop.type = IR::AbstractSyntaxNode::Type::Loop;
loop.data.loop.body = body_block;
loop.data.loop.continue_block = continue_block;
loop.data.loop.merge = merge_block;
auto& continue_block_node{syntax_list.emplace_back()};
continue_block_node.type = IR::AbstractSyntaxNode::Type::Block;
continue_block_node.data.block = continue_block;
auto& repeat{syntax_list.emplace_back()};
repeat.type = IR::AbstractSyntaxNode::Type::Repeat;
repeat.data.repeat.cond = cond;
repeat.data.repeat.loop_header = loop_header_block;
repeat.data.repeat.merge = merge_block;
auto& merge{syntax_list.emplace_back()};
merge.type = IR::AbstractSyntaxNode::Type::Block;
merge.data.block = merge_block;
break;
}
case StatementType::Break: {
ensure_block();
IR::Block* const skip_block{MergeBlock(parent, stmt)};
IR::IREmitter ir{*current_block};
const IR::U1 cond{ir.ConditionRef(VisitExpr(ir, *stmt.cond))};
current_block->AddBranch(break_block);
current_block->AddBranch(skip_block);
current_block = skip_block;
auto& break_node{syntax_list.emplace_back()};
break_node.type = IR::AbstractSyntaxNode::Type::Break;
break_node.data.break_node.cond = cond;
break_node.data.break_node.merge = break_block;
break_node.data.break_node.skip = skip_block;
auto& merge{syntax_list.emplace_back()};
merge.type = IR::AbstractSyntaxNode::Type::Block;
merge.data.block = skip_block;
break;
}
case StatementType::Return: {
ensure_block();
IR::Block* return_block{block_pool.Create(inst_pool)};
IR::IREmitter{*return_block}.Epilogue();
current_block->AddBranch(return_block);
auto& merge{syntax_list.emplace_back()};
merge.type = IR::AbstractSyntaxNode::Type::Block;
merge.data.block = return_block;
current_block = nullptr;
syntax_list.emplace_back().type = IR::AbstractSyntaxNode::Type::Return;
break;
}
case StatementType::Kill: {
ensure_block();
IR::Block* demote_block{MergeBlock(parent, stmt)};
// IR::IREmitter{*current_block}.DemoteToHelperInvocation();
current_block->AddBranch(demote_block);
current_block = demote_block;
auto& merge{syntax_list.emplace_back()};
merge.type = IR::AbstractSyntaxNode::Type::Block;
merge.data.block = demote_block;
break;
}
case StatementType::Unreachable: {
ensure_block();
current_block = nullptr;
syntax_list.emplace_back().type = IR::AbstractSyntaxNode::Type::Unreachable;
break;
}
default:
throw NotImplementedException("Statement type {}", u32(stmt.type));
}
}
if (current_block) {
if (fallthrough_block) {
current_block->AddBranch(fallthrough_block);
} else {
syntax_list.emplace_back().type = IR::AbstractSyntaxNode::Type::Unreachable;
}
}
}
IR::Block* MergeBlock(Statement& parent, Statement& stmt) {
Statement* merge_stmt{TryFindForwardBlock(stmt)};
if (!merge_stmt) {
// Create a merge block we can visit later
merge_stmt = stmt_pool.Create(&dummy_flow_block, &parent);
parent.children.insert(std::next(Tree::s_iterator_to(stmt)), *merge_stmt);
}
return block_pool.Create(inst_pool);
}
ObjectPool<Statement>& stmt_pool;
ObjectPool<IR::Inst>& inst_pool;
ObjectPool<IR::Block>& block_pool;
IR::AbstractSyntaxList& syntax_list;
const Block dummy_flow_block{};
std::span<const GcnInst> inst_list;
Stage stage;
};
} // Anonymous namespace
IR::AbstractSyntaxList BuildASL(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Block>& block_pool,
CFG& cfg, Stage stage) {
ObjectPool<Statement> stmt_pool{64};
GotoPass goto_pass{cfg, stmt_pool};
Statement& root{goto_pass.RootStatement()};
IR::AbstractSyntaxList syntax_list;
TranslatePass{inst_pool, block_pool, stmt_pool, root, syntax_list, cfg.inst_list, stage};
return syntax_list;
}
} // namespace Shader::Gcn


@ -0,0 +1,22 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "shader_recompiler/frontend/control_flow_graph.h"
#include "shader_recompiler/ir/abstract_syntax_list.h"
#include "shader_recompiler/ir/basic_block.h"
#include "shader_recompiler/ir/value.h"
#include "shader_recompiler/object_pool.h"
namespace Shader {
enum class Stage : u32;
}
namespace Shader::Gcn {
[[nodiscard]] IR::AbstractSyntaxList BuildASL(ObjectPool<IR::Inst>& inst_pool,
ObjectPool<IR::Block>& block_pool, CFG& cfg,
Stage stage);
} // namespace Shader::Gcn


@ -0,0 +1,44 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/frontend/translate/translate.h"
namespace Shader::Gcn {
void Translator::DS_READ(int bit_size, bool is_signed, bool is_pair, const GcnInst& inst) {
const IR::U32 addr{ir.GetVectorReg(IR::VectorReg(inst.src[0].code))};
const IR::VectorReg dst_reg{inst.dst[0].code};
if (is_pair) {
const IR::U32 addr0 = ir.IAdd(addr, ir.Imm32(u32(inst.control.ds.offset0)));
ir.SetVectorReg(dst_reg, ir.ReadShared(32, is_signed, addr0));
const IR::U32 addr1 = ir.IAdd(addr, ir.Imm32(u32(inst.control.ds.offset1)));
ir.SetVectorReg(dst_reg + 1, ir.ReadShared(32, is_signed, addr1));
} else if (bit_size == 64) {
const IR::Value data = ir.UnpackUint2x32(ir.ReadShared(bit_size, is_signed, addr));
ir.SetVectorReg(dst_reg, IR::U32{ir.CompositeExtract(data, 0)});
ir.SetVectorReg(dst_reg + 1, IR::U32{ir.CompositeExtract(data, 1)});
} else {
const IR::U32 data = ir.ReadShared(bit_size, is_signed, addr);
ir.SetVectorReg(dst_reg, data);
}
}
void Translator::DS_WRITE(int bit_size, bool is_signed, bool is_pair, const GcnInst& inst) {
const IR::U32 addr{ir.GetVectorReg(IR::VectorReg(inst.src[0].code))};
const IR::VectorReg data0{inst.src[1].code};
const IR::VectorReg data1{inst.src[2].code};
if (is_pair) {
const IR::U32 addr0 = ir.IAdd(addr, ir.Imm32(u32(inst.control.ds.offset0)));
ir.WriteShared(32, ir.GetVectorReg(data0), addr0);
const IR::U32 addr1 = ir.IAdd(addr, ir.Imm32(u32(inst.control.ds.offset1)));
ir.WriteShared(32, ir.GetVectorReg(data1), addr1);
} else if (bit_size == 64) {
const IR::U64 data = ir.PackUint2x32(
ir.CompositeConstruct(ir.GetVectorReg(data0), ir.GetVectorReg(data0 + 1)));
ir.WriteShared(bit_size, data, addr);
} else {
ir.WriteShared(bit_size, ir.GetVectorReg(data0), addr);
}
}
} // namespace Shader::Gcn


@ -0,0 +1,49 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/frontend/translate/translate.h"
namespace Shader::Gcn {
void Translator::EXP(const GcnInst& inst) {
const auto& exp = inst.control.exp;
const IR::Attribute attrib{exp.target};
const std::array vsrc = {
IR::VectorReg(inst.src[0].code),
IR::VectorReg(inst.src[1].code),
IR::VectorReg(inst.src[2].code),
IR::VectorReg(inst.src[3].code),
};
const auto unpack = [&](u32 idx) {
const IR::Value value = ir.UnpackHalf2x16(ir.GetVectorReg(vsrc[idx]));
const IR::F32 r = IR::F32{ir.CompositeExtract(value, 0)};
const IR::F32 g = IR::F32{ir.CompositeExtract(value, 1)};
ir.SetAttribute(attrib, r, idx * 2);
ir.SetAttribute(attrib, g, idx * 2 + 1);
};
// Components are float16 packed into a VGPR
if (exp.compr) {
// Export R, G
if (exp.en & 1) {
unpack(0);
}
// Export B, A
if ((exp.en >> 2) & 1) {
unpack(1);
}
} else {
// Components are float32 stored in separate VGPRs
u32 mask = exp.en;
for (u32 i = 0; i < 4; i++, mask >>= 1) {
if ((mask & 1) == 0) {
continue;
}
const IR::F32 comp = ir.GetVectorReg<IR::F32>(vsrc[i]);
ir.SetAttribute(attrib, comp, i);
}
}
}
} // namespace Shader::Gcn


@ -0,0 +1,38 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/frontend/translate/translate.h"
namespace Shader::Gcn {
void Translator::S_MOV(const GcnInst& inst) {
SetDst(inst.dst[0], GetSrc(inst.src[0]));
}
void Translator::S_MUL_I32(const GcnInst& inst) {
SetDst(inst.dst[0], ir.IMul(GetSrc(inst.src[0]), GetSrc(inst.src[1])));
}
void Translator::S_CMP(ConditionOp cond, bool is_signed, const GcnInst& inst) {
const IR::U32 lhs = GetSrc(inst.src[0]);
const IR::U32 rhs = GetSrc(inst.src[1]);
const IR::U1 result = [&] {
switch (cond) {
case ConditionOp::EQ:
return ir.IEqual(lhs, rhs);
case ConditionOp::LG:
return ir.INotEqual(lhs, rhs);
case ConditionOp::GT:
return ir.IGreaterThan(lhs, rhs, is_signed);
case ConditionOp::GE:
return ir.IGreaterThanEqual(lhs, rhs, is_signed);
case ConditionOp::LT:
return ir.ILessThan(lhs, rhs, is_signed);
case ConditionOp::LE:
return ir.ILessThanEqual(lhs, rhs, is_signed);
}
}();
// ir.SetScc(result);
}
} // namespace Shader::Gcn


@ -0,0 +1,45 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/frontend/translate/translate.h"
namespace Shader::Gcn {
void Load(IR::IREmitter& ir, int num_dwords, const IR::Value& handle, IR::ScalarReg dst_reg,
const IR::U32U64& address) {
for (u32 i = 0; i < num_dwords; i++) {
const IR::U32 value = handle.IsEmpty() ? ir.ReadConst(address, ir.Imm32(i))
: ir.ReadConstBuffer(handle, address, ir.Imm32(i));
ir.SetScalarReg(dst_reg++, value);
}
}
void Translator::S_LOAD_DWORD(int num_dwords, const GcnInst& inst) {
const auto& smrd = inst.control.smrd;
const IR::ScalarReg sbase = IR::ScalarReg(inst.src[0].code * 2);
const IR::U32 offset =
smrd.imm ? ir.Imm32(smrd.offset * 4)
: IR::U32{ir.ShiftLeftLogical(ir.GetScalarReg(IR::ScalarReg(smrd.offset)),
ir.Imm32(2))};
const IR::U64 base =
ir.PackUint2x32(ir.CompositeConstruct(ir.GetScalarReg(sbase), ir.GetScalarReg(sbase + 1)));
const IR::U64 address = ir.IAdd(base, offset);
const IR::ScalarReg dst_reg{inst.dst[0].code};
Load(ir, num_dwords, {}, dst_reg, address);
}
void Translator::S_BUFFER_LOAD_DWORD(int num_dwords, const GcnInst& inst) {
const auto& smrd = inst.control.smrd;
const IR::ScalarReg sbase = IR::ScalarReg(inst.src[0].code * 2);
const IR::U32 offset =
smrd.imm ? ir.Imm32(smrd.offset * 4)
: IR::U32{ir.ShiftLeftLogical(ir.GetScalarReg(IR::ScalarReg(smrd.offset)),
ir.Imm32(2))};
const IR::Value vsharp =
ir.CompositeConstruct(ir.GetScalarReg(sbase), ir.GetScalarReg(sbase + 1),
ir.GetScalarReg(sbase + 2), ir.GetScalarReg(sbase + 3));
const IR::ScalarReg dst_reg{inst.dst[0].code};
Load(ir, num_dwords, vsharp, dst_reg, offset);
}
} // namespace Shader::Gcn


@ -0,0 +1,152 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/exception.h"
#include "shader_recompiler/frontend/translate/translate.h"
#include "shader_recompiler/runtime_info.h"
namespace Shader::Gcn {
Translator::Translator(IR::Block* block_, Stage stage) : block{block_}, ir{*block} {
IR::VectorReg dst_vreg = IR::VectorReg::V0;
switch (stage) {
case Stage::Vertex:
// https://github.com/chaotic-cx/mesa-mirror/blob/72326e15/src/amd/vulkan/radv_shader_args.c#L146C1-L146C23
ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::VertexId));
ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::InstanceId));
ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::PrimitiveId));
break;
case Stage::Fragment:
// https://github.com/chaotic-cx/mesa-mirror/blob/72326e15/src/amd/vulkan/radv_shader_args.c#L258
// The first two VGPRs are used for i/j barycentric coordinates. In the vast majority of
// cases it will be only those two, but if the shader uses both e.g. linear and perspective
// inputs it can be more. For now assume that this isn't the case (see the layout sketch
// after this constructor).
dst_vreg = IR::VectorReg::V2;
for (u32 i = 0; i < 4; i++) {
ir.SetVectorReg(dst_vreg++, ir.GetAttribute(IR::Attribute::FragCoord, i));
}
ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::IsFrontFace));
break;
default:
throw NotImplementedException("Unknown shader stage");
}
// Initialize user data.
IR::ScalarReg dst_sreg = IR::ScalarReg::S0;
for (u32 i = 0; i < 16; i++) {
ir.SetScalarReg(dst_sreg++, ir.Imm32(0U));
}
}
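// Sketch of the VGPR state this constructor assumes for the fragment stage (under the
// "only two barycentrics" assumption noted above); a reading aid, not a contract:
//     v0, v1  - i/j barycentric coordinates (left untouched here)
//     v2..v5  - FragCoord.xyzw
//     v6      - IsFrontFace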
IR::U32F32 Translator::GetSrc(const InstOperand& operand, bool force_flt) {
switch (operand.field) {
case OperandField::ScalarGPR:
if (operand.type == ScalarType::Float32 || force_flt) {
return ir.GetScalarReg<IR::F32>(IR::ScalarReg(operand.code));
} else {
return ir.GetScalarReg<IR::U32>(IR::ScalarReg(operand.code));
}
case OperandField::VectorGPR:
if (operand.type == ScalarType::Float32 || force_flt) {
return ir.GetVectorReg<IR::F32>(IR::VectorReg(operand.code));
} else {
return ir.GetVectorReg<IR::U32>(IR::VectorReg(operand.code));
}
case OperandField::ConstZero:
if (force_flt) {
return ir.Imm32(0.f);
} else {
return ir.Imm32(0U);
}
case OperandField::SignedConstIntPos:
ASSERT(!force_flt);
return ir.Imm32(operand.code - SignedConstIntPosMin + 1);
case OperandField::SignedConstIntNeg:
ASSERT(!force_flt);
return ir.Imm32(-s32(operand.code) + SignedConstIntNegMin - 1);
case OperandField::LiteralConst:
ASSERT(!force_flt);
return ir.Imm32(operand.code);
case OperandField::ConstFloatPos_1_0:
return ir.Imm32(1.f);
case OperandField::ConstFloatPos_0_5:
return ir.Imm32(0.5f);
case OperandField::ConstFloatNeg_0_5:
return ir.Imm32(-0.5f);
default:
UNREACHABLE();
}
}
void Translator::SetDst(const InstOperand& operand, const IR::U32F32& value) {
switch (operand.field) {
case OperandField::ScalarGPR:
return ir.SetScalarReg(IR::ScalarReg(operand.code), value);
case OperandField::VectorGPR:
return ir.SetVectorReg(IR::VectorReg(operand.code), value);
case OperandField::VccHi:
case OperandField::M0:
break; // Ignore for now
default:
UNREACHABLE();
}
}
void Translate(IR::Block* block, Stage stage, std::span<const GcnInst> inst_list) {
if (inst_list.empty()) {
return;
}
Translator translator{block, stage};
for (const auto& inst : inst_list) {
switch (inst.opcode) {
case Opcode::S_MOV_B32:
translator.S_MOV(inst);
break;
case Opcode::S_MUL_I32:
translator.S_MUL_I32(inst);
break;
case Opcode::V_MOV_B32:
translator.V_MOV(inst);
break;
case Opcode::V_MAC_F32:
translator.V_MAC_F32(inst);
break;
case Opcode::V_MUL_F32:
translator.V_MUL_F32(inst);
break;
case Opcode::S_SWAPPC_B64:
case Opcode::S_WAITCNT:
break; // Ignore for now.
case Opcode::S_BUFFER_LOAD_DWORDX16:
translator.S_BUFFER_LOAD_DWORD(16, inst);
break;
case Opcode::EXP:
translator.EXP(inst);
break;
case Opcode::V_INTERP_P2_F32:
translator.V_INTERP_P2_F32(inst);
break;
case Opcode::V_CVT_PKRTZ_F16_F32:
translator.V_CVT_PKRTZ_F16_F32(inst);
break;
case Opcode::IMAGE_SAMPLE:
translator.IMAGE_SAMPLE(inst);
break;
case Opcode::V_CMP_EQ_U32:
translator.V_CMP_EQ_U32(inst);
break;
case Opcode::V_CNDMASK_B32:
translator.V_CNDMASK_B32(inst);
break;
case Opcode::S_MOV_B64:
case Opcode::S_WQM_B64:
case Opcode::V_INTERP_P1_F32:
case Opcode::S_ENDPGM:
break;
default:
UNREACHABLE_MSG("Unknown opcode {}", u32(inst.opcode));
}
}
}
} // namespace Shader::Gcn


@ -0,0 +1,73 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <span>
#include "shader_recompiler/frontend/instruction.h"
#include "shader_recompiler/ir/basic_block.h"
#include "shader_recompiler/ir/ir_emitter.h"
namespace Shader {
enum class Stage : u32;
}
namespace Shader::Gcn {
enum class ConditionOp : u32 {
EQ,
LG,
GT,
GE,
LT,
LE,
};
class Translator {
public:
explicit Translator(IR::Block* block_, Stage stage);
// Scalar ALU
void S_MOV(const GcnInst& inst);
void S_MUL_I32(const GcnInst& inst);
void S_CMP(ConditionOp cond, bool is_signed, const GcnInst& inst);
// Scalar Memory
void S_LOAD_DWORD(int num_dwords, const GcnInst& inst);
void S_BUFFER_LOAD_DWORD(int num_dwords, const GcnInst& inst);
// Vector ALU
void V_MOV(const GcnInst& inst);
void V_SAD(const GcnInst& inst);
void V_MAC_F32(const GcnInst& inst);
void V_CVT_PKRTZ_F16_F32(const GcnInst& inst);
void V_MUL_F32(const GcnInst& inst);
void V_CMP_EQ_U32(const GcnInst& inst);
void V_CNDMASK_B32(const GcnInst& inst);
// Vector interpolation
void V_INTERP_P2_F32(const GcnInst& inst);
// Data share
void DS_READ(int bit_size, bool is_signed, bool is_pair, const GcnInst& inst);
void DS_WRITE(int bit_size, bool is_signed, bool is_pair, const GcnInst& inst);
// MIMG
void IMAGE_GET_RESINFO(const GcnInst& inst);
void IMAGE_SAMPLE(const GcnInst& inst);
// Export
void EXP(const GcnInst& inst);
private:
IR::U32F32 GetSrc(const InstOperand& operand, bool flt_zero = false);
void SetDst(const InstOperand& operand, const IR::U32F32& value);
private:
IR::Block* block;
IR::IREmitter ir;
};
void Translate(IR::Block* block, Stage stage, std::span<const GcnInst> inst_list);
} // namespace Shader::Gcn


@ -0,0 +1,65 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma clang optimize off
#include "shader_recompiler/frontend/translate/translate.h"
namespace Shader::Gcn {
void Translator::V_MOV(const GcnInst& inst) {
SetDst(inst.dst[0], GetSrc(inst.src[0]));
}
void Translator::V_SAD(const GcnInst& inst) {
const IR::U32 abs_diff = ir.IAbs(ir.ISub(GetSrc(inst.src[0]), GetSrc(inst.src[1])));
SetDst(inst.dst[0], ir.IAdd(abs_diff, GetSrc(inst.src[2])));
}
void Translator::V_MAC_F32(const GcnInst& inst) {
SetDst(inst.dst[0], ir.FPFma(GetSrc(inst.src[0]), GetSrc(inst.src[1]), GetSrc(inst.dst[0])));
}
void Translator::V_CVT_PKRTZ_F16_F32(const GcnInst& inst) {
const IR::VectorReg dst_reg{inst.dst[0].code};
const IR::Value vec_f32 = ir.CompositeConstruct(ir.FPConvert(16, GetSrc(inst.src[0])),
ir.FPConvert(16, GetSrc(inst.src[1])));
ir.SetVectorReg(dst_reg, ir.PackFloat2x16(vec_f32));
}
void Translator::V_MUL_F32(const GcnInst& inst) {
const IR::VectorReg dst_reg{inst.dst[0].code};
ir.SetVectorReg(dst_reg, ir.FPMul(GetSrc(inst.src[0]), GetSrc(inst.src[1])));
}
void Translator::V_CMP_EQ_U32(const GcnInst& inst) {
const IR::U1 result = ir.IEqual(GetSrc(inst.src[0]), GetSrc(inst.src[1]));
if (inst.dst[1].field == OperandField::VccLo) {
return ir.SetVcc(result);
} else if (inst.dst[1].field == OperandField::ScalarGPR) {
const IR::ScalarReg dst_reg{inst.dst[1].code};
return ir.SetScalarReg(dst_reg, IR::U32{ir.Select(result, ir.Imm32(1U), ir.Imm32(0U))});
}
UNREACHABLE();
}
void Translator::V_CNDMASK_B32(const GcnInst& inst) {
const IR::VectorReg dst_reg{inst.dst[0].code};
const IR::ScalarReg flag_reg{inst.src[2].code};
const IR::U1 flag = inst.src[2].field == OperandField::ScalarGPR
? ir.INotEqual(ir.GetScalarReg(flag_reg), ir.Imm32(0U))
: ir.GetVcc();
// We can treat the instruction as integer most of the time, but when a source is
// a floating point constant we will force the other operand to float for better readability.
// The other operand is also highly likely to be float (see the example after this function).
const auto is_float_const = [](OperandField field) {
return field >= OperandField::ConstFloatPos_0_5 && field <= OperandField::ConstFloatNeg_4_0;
};
const bool has_flt_source =
is_float_const(inst.src[0].field) || is_float_const(inst.src[1].field);
const IR::U32F32 src0 = GetSrc(inst.src[0], has_flt_source);
const IR::U32F32 src1 = GetSrc(inst.src[1], has_flt_source);
const IR::Value result = ir.Select(flag, src1, src0);
ir.SetVectorReg(dst_reg, IR::U32F32{result});
}
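// Worked example of the float-forcing rule above (illustrative): if src0 is the inline
// constant 0.5 (OperandField::ConstFloatPos_0_5), has_flt_source is true and both sources
// are fetched as F32; with two integer/GPR sources the selection stays in U32.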
} // namespace Shader::Gcn


@ -0,0 +1,14 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/frontend/translate/translate.h"
namespace Shader::Gcn {
void Translator::V_INTERP_P2_F32(const GcnInst& inst) {
const IR::VectorReg dst_reg{inst.dst[0].code};
const IR::Attribute attrib{IR::Attribute::Param0 + inst.control.vintrp.attr};
ir.SetVectorReg(dst_reg, ir.GetAttribute(attrib, inst.control.vintrp.chan));
}
} // namespace Shader::Gcn


@ -0,0 +1,103 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/frontend/translate/translate.h"
namespace Shader::Gcn {
void Translator::IMAGE_GET_RESINFO(const GcnInst& inst) {
IR::VectorReg dst_reg{inst.src[1].code};
const IR::ScalarReg tsharp_reg{inst.src[2].code};
const auto flags = ImageResFlags(inst.control.mimg.dmask);
const IR::U32 lod = ir.GetVectorReg(IR::VectorReg(inst.src[0].code));
const IR::Value tsharp =
ir.CompositeConstruct(ir.GetScalarReg(tsharp_reg), ir.GetScalarReg(tsharp_reg + 1),
ir.GetScalarReg(tsharp_reg + 2), ir.GetScalarReg(tsharp_reg + 3));
const IR::Value size = ir.ImageQueryDimension(tsharp, lod, ir.Imm1(false));
if (flags.test(ImageResComponent::Width)) {
ir.SetVectorReg(dst_reg++, IR::U32{ir.CompositeExtract(size, 0)});
}
if (flags.test(ImageResComponent::Height)) {
ir.SetVectorReg(dst_reg++, IR::U32{ir.CompositeExtract(size, 1)});
}
if (flags.test(ImageResComponent::Depth)) {
ir.SetVectorReg(dst_reg++, IR::U32{ir.CompositeExtract(size, 2)});
}
if (flags.test(ImageResComponent::MipCount)) {
ir.SetVectorReg(dst_reg++, IR::U32{ir.CompositeExtract(size, 3)});
}
}
void Translator::IMAGE_SAMPLE(const GcnInst& inst) {
const auto& mimg = inst.control.mimg;
ASSERT(!mimg.da);
IR::VectorReg addr_reg{inst.src[0].code};
IR::VectorReg dest_reg{inst.dst[0].code};
const IR::ScalarReg tsharp_reg{inst.src[2].code * 4};
const IR::ScalarReg sampler_reg{inst.src[3].code * 4};
const auto flags = MimgModifierFlags(mimg.mod);
// Load the first dword of T# and S#. We will use them as the handle that will guide the
// resource tracking pass to where the sharps are read from. This will later also get patched
// to the SPIR-V texture binding index.
const IR::Value handle =
ir.CompositeConstruct(ir.GetScalarReg(tsharp_reg), ir.GetScalarReg(sampler_reg));
// Load the first address components as denoted in section 8.2.4 (VGPR Usage) of the
// Sea Islands Series Instruction Set Architecture.
const IR::Value offset =
flags.test(MimgModifier::Offset) ? ir.GetVectorReg(addr_reg++) : IR::Value{};
const IR::F32 bias =
flags.test(MimgModifier::LodBias) ? ir.GetVectorReg<IR::F32>(addr_reg++) : IR::F32{};
const IR::F32 dref =
flags.test(MimgModifier::Pcf) ? ir.GetVectorReg<IR::F32>(addr_reg++) : IR::F32{};
// Derivatives are tricky because their number depends on the texture type which is located in
// T#. We don't have access to T# though until resource tracking pass. For now assume no
// derivatives are present, otherwise we don't know where coordinates are placed in the address
// stream.
ASSERT_MSG(!flags.test(MimgModifier::Derivative), "Derivative image instruction");
// Now we can load the body components as noted in Table 8.9 Image Opcodes with Sampler.
// Since these are at most 4 dwords, we load them into a single uvec4 and place them
// in the coords field of the instruction. Then the resource tracking pass will patch the
// IR instruction to fill in the lod_clamp field. The vector can also be used
// as coords directly as SPIR-V will ignore any extra parameters (the full operand order
// is sketched after this function).
const IR::Value body =
ir.CompositeConstruct(ir.GetVectorReg(addr_reg++), ir.GetVectorReg(addr_reg++),
ir.GetVectorReg(addr_reg++), ir.GetVectorReg(addr_reg++));
// Issue IR instruction, leaving unknown fields blank to patch later.
const IR::Value texel = [&]() -> IR::Value {
const IR::F32 lod = flags.test(MimgModifier::Level0) ? ir.Imm32(0.f) : IR::F32{};
const bool explicit_lod = flags.any(MimgModifier::Level0, MimgModifier::Lod);
if (!flags.test(MimgModifier::Pcf)) {
if (explicit_lod) {
return ir.ImageSampleExplicitLod(handle, body, lod, offset, {});
} else {
return ir.ImageSampleImplicitLod(handle, body, bias, offset, {}, {});
}
}
if (explicit_lod) {
return ir.ImageSampleDrefExplicitLod(handle, body, dref, lod, offset, {});
}
return ir.ImageSampleDrefImplicitLod(handle, body, dref, bias, offset, {}, {});
}();
for (u32 i = 0; i < 4; i++) {
if (((mimg.dmask >> i) & 1) == 0) {
continue;
}
IR::F32 value;
if (flags.test(MimgModifier::Pcf)) {
value = i < 3 ? IR::F32{texel} : ir.Imm32(1.0f);
} else {
value = IR::F32{ir.CompositeExtract(texel, i)};
}
ir.SetVectorReg(dest_reg++, value);
}
}
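// Sketch of the address-register order consumed above (derivatives assumed absent, as asserted):
//     [ offset ] [ lod bias ] [ dref ] body0 body1 body2 body3
// Bracketed slots are only present when the corresponding MIMG modifier flag is set, and the
// four body dwords are packed into a single vector handed to the image sample IR instruction.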
} // namespace Shader::Gcn


@ -0,0 +1,56 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <vector>
#include "shader_recompiler/ir/value.h"
namespace Shader::IR {
class Block;
struct AbstractSyntaxNode {
enum class Type {
Block,
If,
EndIf,
Loop,
Repeat,
Break,
Return,
Unreachable,
};
union Data {
Block* block;
struct {
U1 cond;
Block* body;
Block* merge;
} if_node;
struct {
Block* merge;
} end_if;
struct {
Block* body;
Block* continue_block;
Block* merge;
} loop;
struct {
U1 cond;
Block* loop_header;
Block* merge;
} repeat;
struct {
U1 cond;
Block* merge;
Block* skip;
} break_node;
};
Data data{};
Type type{};
};
using AbstractSyntaxList = std::vector<AbstractSyntaxNode>;
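// Informative example of the node sequence the structured control-flow translation emits for a
// simple structured `if`:
//     Block(header) -> If{cond, body, merge} -> Block(body) ... -> EndIf{merge} -> Block(merge)
// Loops emit Block(header) -> Loop -> Block(body) ... -> Block(continue) -> Repeat -> Block(merge).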
} // namespace Shader::IR


@ -0,0 +1,115 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <fmt/format.h>
#include "shader_recompiler/ir/attribute.h"
namespace Shader::IR {
bool IsParam(Attribute attribute) noexcept {
return attribute >= Attribute::Param0 && attribute <= Attribute::Param31;
}
std::string NameOf(Attribute attribute) {
switch (attribute) {
case Attribute::RenderTarget0:
return "RenderTarget0";
case Attribute::RenderTarget1:
return "RenderTarget1";
case Attribute::RenderTarget2:
return "RenderTarget2";
case Attribute::RenderTarget3:
return "RenderTarget3";
case Attribute::RenderTarget4:
return "RenderTarget4";
case Attribute::RenderTarget5:
return "RenderTarget5";
case Attribute::RenderTarget6:
return "RenderTarget6";
case Attribute::RenderTarget7:
return "RenderTarget7";
case Attribute::Depth:
return "Depth";
case Attribute::Null:
return "Null";
case Attribute::Position0:
return "Position0";
case Attribute::Position1:
return "Position1";
case Attribute::Position2:
return "Position2";
case Attribute::Position3:
return "Position3";
case Attribute::Param0:
return "Param0";
case Attribute::Param1:
return "Param1";
case Attribute::Param2:
return "Param2";
case Attribute::Param3:
return "Param3";
case Attribute::Param4:
return "Param4";
case Attribute::Param5:
return "Param5";
case Attribute::Param6:
return "Param6";
case Attribute::Param7:
return "Param7";
case Attribute::Param8:
return "Param8";
case Attribute::Param9:
return "Param9";
case Attribute::Param10:
return "Param10";
case Attribute::Param11:
return "Param11";
case Attribute::Param12:
return "Param12";
case Attribute::Param13:
return "Param13";
case Attribute::Param14:
return "Param14";
case Attribute::Param15:
return "Param15";
case Attribute::Param16:
return "Param16";
case Attribute::Param17:
return "Param17";
case Attribute::Param18:
return "Param18";
case Attribute::Param19:
return "Param19";
case Attribute::Param20:
return "Param20";
case Attribute::Param21:
return "Param21";
case Attribute::Param22:
return "Param22";
case Attribute::Param23:
return "Param23";
case Attribute::Param24:
return "Param24";
case Attribute::Param25:
return "Param25";
case Attribute::Param26:
return "Param26";
case Attribute::Param27:
return "Param27";
case Attribute::Param28:
return "Param28";
case Attribute::Param29:
return "Param29";
case Attribute::Param30:
return "Param30";
case Attribute::Param31:
return "Param31";
case Attribute::VertexId:
return "VertexId";
default:
break;
}
return fmt::format("<reserved attribute {}>", static_cast<int>(attribute));
}
} // namespace Shader::IR


@ -0,0 +1,105 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <fmt/format.h>
#include "common/types.h"
#include "shader_recompiler/exception.h"
namespace Shader::IR {
enum class Attribute : u64 {
// Export targets
RenderTarget0 = 0,
RenderTarget1 = 1,
RenderTarget2 = 2,
RenderTarget3 = 3,
RenderTarget4 = 4,
RenderTarget5 = 5,
RenderTarget6 = 6,
RenderTarget7 = 7,
Depth = 8,
Null = 9,
Position0 = 12,
Position1 = 13,
Position2 = 14,
Position3 = 15,
Param0 = 32,
Param1 = 33,
Param2 = 34,
Param3 = 35,
Param4 = 36,
Param5 = 37,
Param6 = 38,
Param7 = 39,
Param8 = 40,
Param9 = 41,
Param10 = 42,
Param11 = 43,
Param12 = 44,
Param13 = 45,
Param14 = 46,
Param15 = 47,
Param16 = 48,
Param17 = 49,
Param18 = 50,
Param19 = 51,
Param20 = 52,
Param21 = 53,
Param22 = 54,
Param23 = 55,
Param24 = 56,
Param25 = 57,
Param26 = 58,
Param27 = 59,
Param28 = 60,
Param29 = 61,
Param30 = 62,
Param31 = 63,
// System values
ClipDistance = 64,
CullDistance = 65,
RenderTargetId = 66,
ViewportId = 67,
VertexId = 68,
PrimitiveId = 69,
InstanceId = 70,
IsFrontFace = 71,
SampleIndex = 72,
GlobalInvocationId = 73,
WorkgroupId = 74,
LocalInvocationId = 75,
LocalInvocationIndex = 76,
FragCoord = 77,
};
constexpr size_t EXP_NUM_POS = 4;
constexpr size_t EXP_NUM_PARAM = 32;
[[nodiscard]] bool IsParam(Attribute attribute) noexcept;
[[nodiscard]] std::string NameOf(Attribute attribute);
[[nodiscard]] constexpr Attribute operator+(Attribute attr, int num) {
const int result{static_cast<int>(attr) + num};
if (result > static_cast<int>(Attribute::Param31)) {
throw LogicError("Overflow on register arithmetic");
}
if (result < static_cast<int>(Attribute::Param0)) {
throw LogicError("Underflow on register arithmetic");
}
return static_cast<Attribute>(result);
}
} // namespace Shader::IR
template <>
struct fmt::formatter<Shader::IR::Attribute> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
auto format(const Shader::IR::Attribute& attribute, format_context& ctx) const {
return fmt::format_to(ctx.out(), "{}", Shader::IR::NameOf(attribute));
}
};


@ -0,0 +1,149 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <initializer_list>
#include <map>
#include "shader_recompiler/ir/basic_block.h"
#include "shader_recompiler/ir/value.h"
namespace Shader::IR {
Block::Block(ObjectPool<Inst>& inst_pool_) : inst_pool{&inst_pool_} {}
Block::~Block() = default;
void Block::AppendNewInst(Opcode op, std::initializer_list<Value> args) {
PrependNewInst(end(), op, args);
}
Block::iterator Block::PrependNewInst(iterator insertion_point, const Inst& base_inst) {
Inst* const inst{inst_pool->Create(base_inst)};
return instructions.insert(insertion_point, *inst);
}
Block::iterator Block::PrependNewInst(iterator insertion_point, Opcode op,
std::initializer_list<Value> args, u32 flags) {
Inst* const inst{inst_pool->Create(op, flags)};
const auto result_it{instructions.insert(insertion_point, *inst)};
if (inst->NumArgs() != args.size()) {
throw InvalidArgument("Invalid number of arguments {} in {}", args.size(), op);
}
std::ranges::for_each(args, [inst, index = size_t{0}](const Value& arg) mutable {
inst->SetArg(index, arg);
++index;
});
return result_it;
}
void Block::AddBranch(Block* block) {
if (std::ranges::find(imm_successors, block) != imm_successors.end()) {
throw LogicError("Successor already inserted");
}
if (std::ranges::find(block->imm_predecessors, this) != block->imm_predecessors.end()) {
throw LogicError("Predecessor already inserted");
}
imm_successors.push_back(block);
block->imm_predecessors.push_back(this);
}
static std::string BlockToIndex(const std::map<const Block*, size_t>& block_to_index,
Block* block) {
if (const auto it{block_to_index.find(block)}; it != block_to_index.end()) {
return fmt::format("{{Block ${}}}", it->second);
}
return fmt::format("$<unknown block {:016x}>", reinterpret_cast<u64>(block));
}
static size_t InstIndex(std::map<const Inst*, size_t>& inst_to_index, size_t& inst_index,
const Inst* inst) {
const auto [it, is_inserted]{inst_to_index.emplace(inst, inst_index + 1)};
if (is_inserted) {
++inst_index;
}
return it->second;
}
static std::string ArgToIndex(std::map<const Inst*, size_t>& inst_to_index, size_t& inst_index,
const Value& arg) {
if (arg.IsEmpty()) {
return "<null>";
}
if (!arg.IsImmediate() || arg.IsIdentity()) {
return fmt::format("%{}", InstIndex(inst_to_index, inst_index, arg.Inst()));
}
switch (arg.Type()) {
case Type::U1:
return fmt::format("#{}", arg.U1() ? "true" : "false");
case Type::U8:
return fmt::format("#{}", arg.U8());
case Type::U16:
return fmt::format("#{}", arg.U16());
case Type::U32:
return fmt::format("#{}", arg.U32());
case Type::U64:
return fmt::format("#{}", arg.U64());
case Type::F32:
return fmt::format("#{}", arg.F32());
case Type::ScalarReg:
return fmt::format("{}", arg.ScalarReg());
case Type::VectorReg:
return fmt::format("{}", arg.VectorReg());
case Type::Attribute:
return fmt::format("{}", arg.Attribute());
default:
return "<unknown immediate type>";
}
}
std::string DumpBlock(const Block& block) {
size_t inst_index{0};
std::map<const Inst*, size_t> inst_to_index;
return DumpBlock(block, {}, inst_to_index, inst_index);
}
std::string DumpBlock(const Block& block, const std::map<const Block*, size_t>& block_to_index,
std::map<const Inst*, size_t>& inst_to_index, size_t& inst_index) {
std::string ret{"Block"};
if (const auto it{block_to_index.find(&block)}; it != block_to_index.end()) {
ret += fmt::format(" ${}", it->second);
}
ret += '\n';
for (const Inst& inst : block) {
const Opcode op{inst.GetOpcode()};
ret += fmt::format("[{:016x}] ", reinterpret_cast<u64>(&inst));
if (TypeOf(op) != Type::Void) {
ret += fmt::format("%{:<5} = {}", InstIndex(inst_to_index, inst_index, &inst), op);
} else {
ret += fmt::format(" {}", op); // '%00000 = ' -> 1 + 5 + 3 = 9 spaces
}
const size_t arg_count{inst.NumArgs()};
for (size_t arg_index = 0; arg_index < arg_count; ++arg_index) {
const Value arg{inst.Arg(arg_index)};
const std::string arg_str{ArgToIndex(inst_to_index, inst_index, arg)};
ret += arg_index != 0 ? ", " : " ";
if (op == Opcode::Phi) {
ret += fmt::format("[ {}, {} ]", arg_str,
BlockToIndex(block_to_index, inst.PhiBlock(arg_index)));
} else {
ret += arg_str;
}
if (op != Opcode::Phi) {
const Type actual_type{arg.Type()};
const Type expected_type{ArgTypeOf(op, arg_index)};
if (!AreTypesCompatible(actual_type, expected_type)) {
ret += fmt::format("<type error: {} != {}>", actual_type, expected_type);
}
}
}
if (TypeOf(op) != Type::Void) {
ret += fmt::format(" (uses: {})\n", inst.UseCount());
} else {
ret += '\n';
}
}
return ret;
}
} // namespace Shader::IR

View File

@ -0,0 +1,180 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <initializer_list>
#include <map>
#include <span>
#include <vector>
#include <boost/intrusive/list.hpp>
#include "common/types.h"
#include "shader_recompiler/ir/reg.h"
#include "shader_recompiler/ir/value.h"
#include "shader_recompiler/object_pool.h"
namespace Shader::IR {
class Block {
public:
using InstructionList = boost::intrusive::list<Inst>;
using size_type = InstructionList::size_type;
using iterator = InstructionList::iterator;
using const_iterator = InstructionList::const_iterator;
using reverse_iterator = InstructionList::reverse_iterator;
using const_reverse_iterator = InstructionList::const_reverse_iterator;
explicit Block(ObjectPool<Inst>& inst_pool_);
~Block();
Block(const Block&) = delete;
Block& operator=(const Block&) = delete;
Block(Block&&) = default;
Block& operator=(Block&&) = default;
/// Appends a new instruction to the end of this basic block.
void AppendNewInst(Opcode op, std::initializer_list<Value> args);
/// Prepends a copy of an instruction to this basic block before the insertion point.
iterator PrependNewInst(iterator insertion_point, const Inst& base_inst);
/// Prepends a new instruction to this basic block before the insertion point.
iterator PrependNewInst(iterator insertion_point, Opcode op,
std::initializer_list<Value> args = {}, u32 flags = 0);
/// Adds a new branch to this basic block.
void AddBranch(Block* block);
/// Gets a mutable reference to the instruction list for this basic block.
[[nodiscard]] InstructionList& Instructions() noexcept {
return instructions;
}
/// Gets an immutable reference to the instruction list for this basic block.
[[nodiscard]] const InstructionList& Instructions() const noexcept {
return instructions;
}
/// Gets an immutable span to the immediate predecessors.
[[nodiscard]] std::span<Block* const> ImmPredecessors() const noexcept {
return imm_predecessors;
}
/// Gets an immutable span to the immediate successors.
[[nodiscard]] std::span<Block* const> ImmSuccessors() const noexcept {
return imm_successors;
}
/// Intrusively store the host definition of this instruction.
template <typename T>
void SetDefinition(T def) {
definition = std::bit_cast<u32>(def);
}
/// Return the intrusively stored host definition of this instruction.
template <typename T>
[[nodiscard]] T Definition() const noexcept {
return std::bit_cast<T>(definition);
}
void SsaSeal() noexcept {
is_ssa_sealed = true;
}
[[nodiscard]] bool IsSsaSealed() const noexcept {
return is_ssa_sealed;
}
[[nodiscard]] bool empty() const {
return instructions.empty();
}
[[nodiscard]] size_type size() const {
return instructions.size();
}
[[nodiscard]] Inst& front() {
return instructions.front();
}
[[nodiscard]] const Inst& front() const {
return instructions.front();
}
[[nodiscard]] Inst& back() {
return instructions.back();
}
[[nodiscard]] const Inst& back() const {
return instructions.back();
}
[[nodiscard]] iterator begin() {
return instructions.begin();
}
[[nodiscard]] const_iterator begin() const {
return instructions.begin();
}
[[nodiscard]] iterator end() {
return instructions.end();
}
[[nodiscard]] const_iterator end() const {
return instructions.end();
}
[[nodiscard]] reverse_iterator rbegin() {
return instructions.rbegin();
}
[[nodiscard]] const_reverse_iterator rbegin() const {
return instructions.rbegin();
}
[[nodiscard]] reverse_iterator rend() {
return instructions.rend();
}
[[nodiscard]] const_reverse_iterator rend() const {
return instructions.rend();
}
[[nodiscard]] const_iterator cbegin() const {
return instructions.cbegin();
}
[[nodiscard]] const_iterator cend() const {
return instructions.cend();
}
[[nodiscard]] const_reverse_iterator crbegin() const {
return instructions.crbegin();
}
[[nodiscard]] const_reverse_iterator crend() const {
return instructions.crend();
}
/// Intrusively store the value of a register in the block.
std::array<Value, NumScalarRegs> ssa_sreg_values;
std::array<Value, NumVectorRegs> ssa_vreg_values;
private:
/// Memory pool for instruction list
ObjectPool<Inst>* inst_pool;
/// List of instructions in this block
InstructionList instructions;
/// Block immediate predecessors
std::vector<Block*> imm_predecessors;
/// Block immediate successors
std::vector<Block*> imm_successors;
/// Intrusively store if the block is sealed in the SSA pass.
bool is_ssa_sealed{false};
/// Intrusively stored host definition of this block.
u32 definition{};
};
using BlockList = std::vector<Block*>;
[[nodiscard]] std::string DumpBlock(const Block& block);
[[nodiscard]] std::string DumpBlock(const Block& block,
const std::map<const Block*, size_t>& block_to_index,
std::map<const Inst*, size_t>& inst_to_index,
size_t& inst_index);
} // namespace Shader::IR


@ -0,0 +1,50 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <fmt/format.h>
#include "common/types.h"
namespace Shader::IR {
enum class Condition : u32 {
False,
True,
Scc0,
Scc1,
Vccz,
Vccnz,
Execz,
Execnz,
};
constexpr std::string_view NameOf(Condition condition) {
switch (condition) {
case Condition::False:
return "False";
case Condition::True:
return "True";
case Condition::Scc0:
return "Scc0";
case Condition::Scc1:
return "Scc1";
case Condition::Vccz:
return "Vccz";
case Condition::Vccnz:
return "Vccnz";
case Condition::Execz:
return "Execz";
case Condition::Execnz:
return "Execnz";
}
}
} // namespace Shader::IR
template <>
struct fmt::formatter<Shader::IR::Condition> : formatter<std::string_view> {
auto format(const Shader::IR::Condition& cond, format_context& ctx) const {
return formatter<string_view>::format(NameOf(cond), ctx);
}
};

File diff suppressed because it is too large


@ -0,0 +1,250 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <cstring>
#include <type_traits>
#include "shader_recompiler/ir/attribute.h"
#include "shader_recompiler/ir/basic_block.h"
#include "shader_recompiler/ir/condition.h"
#include "shader_recompiler/ir/value.h"
namespace Shader::IR {
class IREmitter {
public:
explicit IREmitter(Block& block_) : block{&block_}, insertion_point{block->end()} {}
explicit IREmitter(Block& block_, Block::iterator insertion_point_)
: block{&block_}, insertion_point{insertion_point_} {}
Block* block;
[[nodiscard]] U1 Imm1(bool value) const;
[[nodiscard]] U8 Imm8(u8 value) const;
[[nodiscard]] U16 Imm16(u16 value) const;
[[nodiscard]] U32 Imm32(u32 value) const;
[[nodiscard]] U32 Imm32(s32 value) const;
[[nodiscard]] F32 Imm32(f32 value) const;
[[nodiscard]] U64 Imm64(u64 value) const;
[[nodiscard]] U64 Imm64(s64 value) const;
[[nodiscard]] F64 Imm64(f64 value) const;
template <typename Dest, typename Source>
[[nodiscard]] Dest BitCast(const Source& value);
U1 ConditionRef(const U1& value);
void Reference(const Value& value);
void PhiMove(IR::Inst& phi, const Value& value);
void Prologue();
void Epilogue();
template <typename T = U32>
[[nodiscard]] T GetScalarReg(IR::ScalarReg reg);
template <typename T = U32>
[[nodiscard]] T GetVectorReg(IR::VectorReg reg);
void SetScalarReg(IR::ScalarReg reg, const U32F32& value);
void SetVectorReg(IR::VectorReg reg, const U32F32& value);
[[nodiscard]] U1 GetGotoVariable(u32 id);
void SetGotoVariable(u32 id, const U1& value);
[[nodiscard]] U1 GetVcc();
void SetVcc(const U1& value);
[[nodiscard]] U1 Condition(IR::Condition cond);
[[nodiscard]] F32 GetAttribute(IR::Attribute attribute);
[[nodiscard]] F32 GetAttribute(IR::Attribute attribute, u32 comp);
[[nodiscard]] U32 GetAttributeU32(IR::Attribute attribute);
[[nodiscard]] U32 GetAttributeU32(IR::Attribute attribute, u32 comp);
void SetAttribute(IR::Attribute attribute, const F32& value, u32 comp);
[[nodiscard]] U32U64 ReadShared(int bit_size, bool is_signed, const U32& offset);
void WriteShared(int bit_size, const Value& value, const U32& offset);
[[nodiscard]] U32 ReadConst(const U64& address, const U32& offset);
template <typename T = U32>
[[nodiscard]] T ReadConstBuffer(const Value& handle, const U32& index, const U32& offset);
[[nodiscard]] U1 GetZeroFromOp(const Value& op);
[[nodiscard]] U1 GetSignFromOp(const Value& op);
[[nodiscard]] U1 GetCarryFromOp(const Value& op);
[[nodiscard]] U1 GetOverflowFromOp(const Value& op);
[[nodiscard]] U1 GetSparseFromOp(const Value& op);
[[nodiscard]] U1 GetInBoundsFromOp(const Value& op);
[[nodiscard]] Value CompositeConstruct(const Value& e1, const Value& e2);
[[nodiscard]] Value CompositeConstruct(const Value& e1, const Value& e2, const Value& e3);
[[nodiscard]] Value CompositeConstruct(const Value& e1, const Value& e2, const Value& e3,
const Value& e4);
[[nodiscard]] Value CompositeExtract(const Value& vector, size_t element);
[[nodiscard]] Value CompositeInsert(const Value& vector, const Value& object, size_t element);
[[nodiscard]] Value Select(const U1& condition, const Value& true_value,
const Value& false_value);
[[nodiscard]] U64 PackUint2x32(const Value& vector);
[[nodiscard]] Value UnpackUint2x32(const U64& value);
[[nodiscard]] U32 PackFloat2x16(const Value& vector);
[[nodiscard]] Value UnpackFloat2x16(const U32& value);
[[nodiscard]] U32 PackHalf2x16(const Value& vector);
[[nodiscard]] Value UnpackHalf2x16(const U32& value);
[[nodiscard]] F32F64 FPAdd(const F32F64& a, const F32F64& b);
[[nodiscard]] F32F64 FPMul(const F32F64& a, const F32F64& b);
[[nodiscard]] F32F64 FPFma(const F32F64& a, const F32F64& b, const F32F64& c);
[[nodiscard]] F32F64 FPAbs(const F32F64& value);
[[nodiscard]] F32F64 FPNeg(const F32F64& value);
[[nodiscard]] F32F64 FPAbsNeg(const F32F64& value, bool abs, bool neg);
[[nodiscard]] F32 FPCos(const F32& value);
[[nodiscard]] F32 FPSin(const F32& value);
[[nodiscard]] F32 FPExp2(const F32& value);
[[nodiscard]] F32 FPLog2(const F32& value);
[[nodiscard]] F32F64 FPRecip(const F32F64& value);
[[nodiscard]] F32F64 FPRecipSqrt(const F32F64& value);
[[nodiscard]] F32 FPSqrt(const F32& value);
[[nodiscard]] F32F64 FPSaturate(const F32F64& value);
[[nodiscard]] F32F64 FPClamp(const F32F64& value, const F32F64& min_value,
const F32F64& max_value);
[[nodiscard]] F32F64 FPRoundEven(const F32F64& value);
[[nodiscard]] F32F64 FPFloor(const F32F64& value);
[[nodiscard]] F32F64 FPCeil(const F32F64& value);
[[nodiscard]] F32F64 FPTrunc(const F32F64& value);
[[nodiscard]] U1 FPEqual(const F32F64& lhs, const F32F64& rhs, bool ordered = true);
[[nodiscard]] U1 FPNotEqual(const F32F64& lhs, const F32F64& rhs, bool ordered = true);
[[nodiscard]] U1 FPLessThanEqual(const F32F64& lhs, const F32F64& rhs, bool ordered = true);
[[nodiscard]] U1 FPGreaterThanEqual(const F32F64& lhs, const F32F64& rhs, bool ordered = true);
[[nodiscard]] U1 FPLessThan(const F32F64& lhs, const F32F64& rhs, bool ordered = true);
[[nodiscard]] U1 FPGreaterThan(const F32F64& lhs, const F32F64& rhs, bool ordered = true);
[[nodiscard]] U1 FPIsNan(const F32F64& value);
[[nodiscard]] U1 FPOrdered(const F32F64& lhs, const F32F64& rhs);
[[nodiscard]] U1 FPUnordered(const F32F64& lhs, const F32F64& rhs);
[[nodiscard]] F32F64 FPMax(const F32F64& lhs, const F32F64& rhs);
[[nodiscard]] F32F64 FPMin(const F32F64& lhs, const F32F64& rhs);
[[nodiscard]] U32U64 IAdd(const U32U64& a, const U32U64& b);
[[nodiscard]] U32U64 ISub(const U32U64& a, const U32U64& b);
[[nodiscard]] U32 IMul(const U32& a, const U32& b);
[[nodiscard]] U32 IDiv(const U32& a, const U32& b, bool is_signed = false);
[[nodiscard]] U32U64 INeg(const U32U64& value);
[[nodiscard]] U32 IAbs(const U32& value);
[[nodiscard]] U32U64 ShiftLeftLogical(const U32U64& base, const U32& shift);
[[nodiscard]] U32U64 ShiftRightLogical(const U32U64& base, const U32& shift);
[[nodiscard]] U32U64 ShiftRightArithmetic(const U32U64& base, const U32& shift);
[[nodiscard]] U32 BitwiseAnd(const U32& a, const U32& b);
[[nodiscard]] U32 BitwiseOr(const U32& a, const U32& b);
[[nodiscard]] U32 BitwiseXor(const U32& a, const U32& b);
[[nodiscard]] U32 BitFieldInsert(const U32& base, const U32& insert, const U32& offset,
const U32& count);
[[nodiscard]] U32 BitFieldExtract(const U32& base, const U32& offset, const U32& count,
bool is_signed = false);
[[nodiscard]] U32 BitReverse(const U32& value);
[[nodiscard]] U32 BitCount(const U32& value);
[[nodiscard]] U32 BitwiseNot(const U32& value);
[[nodiscard]] U32 FindSMsb(const U32& value);
[[nodiscard]] U32 FindUMsb(const U32& value);
[[nodiscard]] U32 SMin(const U32& a, const U32& b);
[[nodiscard]] U32 UMin(const U32& a, const U32& b);
[[nodiscard]] U32 IMin(const U32& a, const U32& b, bool is_signed);
[[nodiscard]] U32 SMax(const U32& a, const U32& b);
[[nodiscard]] U32 UMax(const U32& a, const U32& b);
[[nodiscard]] U32 IMax(const U32& a, const U32& b, bool is_signed);
[[nodiscard]] U32 SClamp(const U32& value, const U32& min, const U32& max);
[[nodiscard]] U32 UClamp(const U32& value, const U32& min, const U32& max);
[[nodiscard]] U1 ILessThan(const U32& lhs, const U32& rhs, bool is_signed);
[[nodiscard]] U1 IEqual(const U32U64& lhs, const U32U64& rhs);
[[nodiscard]] U1 ILessThanEqual(const U32& lhs, const U32& rhs, bool is_signed);
[[nodiscard]] U1 IGreaterThan(const U32& lhs, const U32& rhs, bool is_signed);
[[nodiscard]] U1 INotEqual(const U32& lhs, const U32& rhs);
[[nodiscard]] U1 IGreaterThanEqual(const U32& lhs, const U32& rhs, bool is_signed);
[[nodiscard]] U1 LogicalOr(const U1& a, const U1& b);
[[nodiscard]] U1 LogicalAnd(const U1& a, const U1& b);
[[nodiscard]] U1 LogicalXor(const U1& a, const U1& b);
[[nodiscard]] U1 LogicalNot(const U1& value);
[[nodiscard]] U32U64 ConvertFToS(size_t bitsize, const F32F64& value);
[[nodiscard]] U32U64 ConvertFToU(size_t bitsize, const F32F64& value);
[[nodiscard]] U32U64 ConvertFToI(size_t bitsize, bool is_signed, const F32F64& value);
[[nodiscard]] F32F64 ConvertSToF(size_t dest_bitsize, size_t src_bitsize, const Value& value);
[[nodiscard]] F32F64 ConvertUToF(size_t dest_bitsize, size_t src_bitsize, const Value& value);
[[nodiscard]] F32F64 ConvertIToF(size_t dest_bitsize, size_t src_bitsize, bool is_signed,
const Value& value);
[[nodiscard]] U32U64 UConvert(size_t result_bitsize, const U32U64& value);
[[nodiscard]] F16F32F64 FPConvert(size_t result_bitsize, const F16F32F64& value);
[[nodiscard]] Value ImageSampleImplicitLod(const Value& handle, const Value& coords,
const F32& bias, const Value& offset,
const F32& lod_clamp, TextureInstInfo info);
[[nodiscard]] Value ImageSampleExplicitLod(const Value& handle, const Value& coords,
const F32& lod, const Value& offset,
TextureInstInfo info);
[[nodiscard]] F32 ImageSampleDrefImplicitLod(const Value& handle, const Value& coords,
const F32& dref, const F32& bias,
const Value& offset, const F32& lod_clamp,
TextureInstInfo info);
[[nodiscard]] F32 ImageSampleDrefExplicitLod(const Value& handle, const Value& coords,
const F32& dref, const F32& lod,
const Value& offset, TextureInstInfo info);
[[nodiscard]] Value ImageQueryDimension(const Value& handle, const IR::U32& lod,
const IR::U1& skip_mips);
[[nodiscard]] Value ImageQueryDimension(const Value& handle, const IR::U32& lod,
const IR::U1& skip_mips, TextureInstInfo info);
[[nodiscard]] Value ImageQueryLod(const Value& handle, const Value& coords,
TextureInstInfo info);
[[nodiscard]] Value ImageGather(const Value& handle, const Value& coords, const Value& offset,
const Value& offset2, TextureInstInfo info);
[[nodiscard]] Value ImageGatherDref(const Value& handle, const Value& coords,
const Value& offset, const Value& offset2, const F32& dref,
TextureInstInfo info);
[[nodiscard]] Value ImageFetch(const Value& handle, const Value& coords, const Value& offset,
const U32& lod, const U32& multisampling, TextureInstInfo info);
[[nodiscard]] Value ImageGradient(const Value& handle, const Value& coords,
const Value& derivatives, const Value& offset,
const F32& lod_clamp, TextureInstInfo info);
[[nodiscard]] Value ImageRead(const Value& handle, const Value& coords, TextureInstInfo info);
void ImageWrite(const Value& handle, const Value& coords, const Value& color,
TextureInstInfo info);
private:
IR::Block::iterator insertion_point;
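// Prepends a new instruction with the given opcode and arguments at the insertion point and
// returns it wrapped in the requested typed value.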
template <typename T = Value, typename... Args>
T Inst(Opcode op, Args... args) {
auto it{block->PrependNewInst(insertion_point, op, {Value{args}...})};
return T{Value{&*it}};
}
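// Trivially-copyable wrapper that carries per-instruction flags alongside the opcode arguments.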
template <typename T>
requires(sizeof(T) <= sizeof(u32) && std::is_trivially_copyable_v<T>)
struct Flags {
Flags() = default;
Flags(T proxy_) : proxy{proxy_} {}
T proxy;
};
template <typename T = Value, typename FlagType, typename... Args>
T Inst(Opcode op, Flags<FlagType> flags, Args... args) {
u32 raw_flags{};
std::memcpy(&raw_flags, &flags.proxy, sizeof(flags.proxy));
auto it{block->PrependNewInst(insertion_point, op, {Value{args}...}, raw_flags)};
return T{Value{&*it}};
}
};
} // namespace Shader::IR

View File

@ -0,0 +1,167 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <memory>
#include "shader_recompiler/exception.h"
#include "shader_recompiler/ir/basic_block.h"
#include "shader_recompiler/ir/type.h"
#include "shader_recompiler/ir/value.h"
namespace Shader::IR {
Inst::Inst(IR::Opcode op_, u32 flags_) noexcept : op{op_}, flags{flags_} {
if (op == Opcode::Phi) {
std::construct_at(&phi_args);
} else {
std::construct_at(&args);
}
}
Inst::Inst(const Inst& base) : op{base.op}, flags{base.flags} {
if (base.op == Opcode::Phi) {
throw NotImplementedException("Copying phi node");
}
std::construct_at(&args);
const size_t num_args{base.NumArgs()};
for (size_t index = 0; index < num_args; ++index) {
SetArg(index, base.Arg(index));
}
}
Inst::~Inst() {
if (op == Opcode::Phi) {
std::destroy_at(&phi_args);
} else {
std::destroy_at(&args);
}
}
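// Instructions with side effects must be preserved by dead code elimination even when their
// results are unused.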
bool Inst::MayHaveSideEffects() const noexcept {
switch (op) {
case Opcode::ConditionRef:
case Opcode::Reference:
case Opcode::PhiMove:
case Opcode::Prologue:
case Opcode::Epilogue:
// case Opcode::Join:
// case Opcode::Barrier:
// case Opcode::WorkgroupMemoryBarrier:
// case Opcode::DeviceMemoryBarrier:
// case Opcode::EmitVertex:
// case Opcode::EndPrimitive:
case Opcode::SetAttribute:
// case Opcode::SetFragColor:
// case Opcode::SetFragDepth:
return true;
default:
return false;
}
}
bool Inst::AreAllArgsImmediates() const {
if (op == Opcode::Phi) {
throw LogicError("Testing for all arguments are immediates on phi instruction");
}
return std::all_of(args.begin(), args.begin() + NumArgs(),
[](const IR::Value& value) { return value.IsImmediate(); });
}
IR::Type Inst::Type() const {
return TypeOf(op);
}
void Inst::SetArg(size_t index, Value value) {
if (index >= NumArgs()) {
throw InvalidArgument("Out of bounds argument index {} in opcode {}", index, op);
}
const IR::Value arg{Arg(index)};
if (!arg.IsImmediate()) {
UndoUse(arg);
}
if (!value.IsImmediate()) {
Use(value);
}
if (op == Opcode::Phi) {
phi_args[index].second = value;
} else {
args[index] = value;
}
}
Block* Inst::PhiBlock(size_t index) const {
if (op != Opcode::Phi) {
throw LogicError("{} is not a Phi instruction", op);
}
if (index >= phi_args.size()) {
throw InvalidArgument("Out of bounds argument index {} in phi instruction", index);
}
return phi_args[index].first;
}
void Inst::AddPhiOperand(Block* predecessor, const Value& value) {
if (!value.IsImmediate()) {
Use(value);
}
phi_args.emplace_back(predecessor, value);
}
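// Releases all argument uses and turns the instruction into a Void so it can be safely removed.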
void Inst::Invalidate() {
ClearArgs();
ReplaceOpcode(Opcode::Void);
}
void Inst::ClearArgs() {
if (op == Opcode::Phi) {
for (auto& pair : phi_args) {
IR::Value& value{pair.second};
if (!value.IsImmediate()) {
UndoUse(value);
}
}
phi_args.clear();
} else {
for (auto& value : args) {
if (!value.IsImmediate()) {
UndoUse(value);
}
}
// Reset arguments to null
// std::memset was measured to be faster on MSVC than std::ranges::fill
std::memset(reinterpret_cast<char*>(&args), 0, sizeof(args));
}
}
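// Turns this instruction into an Identity of the replacement value so existing users transparently
// resolve to it.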
void Inst::ReplaceUsesWith(Value replacement) {
Invalidate();
ReplaceOpcode(Opcode::Identity);
if (!replacement.IsImmediate()) {
Use(replacement);
}
args[0] = replacement;
}
void Inst::ReplaceOpcode(IR::Opcode opcode) {
if (opcode == IR::Opcode::Phi) {
throw LogicError("Cannot transition into Phi");
}
if (op == Opcode::Phi) {
// Transition out of phi arguments into non-phi
std::destroy_at(&phi_args);
std::construct_at(&args);
}
op = opcode;
}
void Inst::Use(const Value& value) {
Inst* const inst{value.Inst()};
++inst->use_count;
}
void Inst::UndoUse(const Value& value) {
Inst* const inst{value.Inst()};
--inst->use_count;
}
} // namespace Shader::IR

View File

@ -0,0 +1,12 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/ir/opcodes.h"
namespace Shader::IR {
std::string_view NameOf(Opcode op) {
return Detail::META_TABLE[static_cast<size_t>(op)].name;
}
} // namespace Shader::IR

View File

@ -0,0 +1,107 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <algorithm>
#include <array>
#include <fmt/format.h>
#include "common/types.h"
#include "shader_recompiler/ir/type.h"
namespace Shader::IR {
enum class Opcode {
#define OPCODE(name, ...) name,
#include "opcodes.inc"
#undef OPCODE
};
namespace Detail {
struct OpcodeMeta {
std::string_view name;
Type type;
std::array<Type, 5> arg_types;
};
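// Aliases standing in for `using enum Type`, letting the opcode table below name types without the
// Type:: prefix.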
// using enum Type;
constexpr Type Void{Type::Void};
constexpr Type Opaque{Type::Opaque};
constexpr Type ScalarReg{Type::ScalarReg};
constexpr Type VectorReg{Type::VectorReg};
constexpr Type Attribute{Type::Attribute};
constexpr Type SystemValue{Type::SystemValue};
constexpr Type U1{Type::U1};
constexpr Type U8{Type::U8};
constexpr Type U16{Type::U16};
constexpr Type U32{Type::U32};
constexpr Type U64{Type::U64};
constexpr Type F16{Type::F16};
constexpr Type F32{Type::F32};
constexpr Type F64{Type::F64};
constexpr Type U32x2{Type::U32x2};
constexpr Type U32x3{Type::U32x3};
constexpr Type U32x4{Type::U32x4};
constexpr Type F16x2{Type::F16x2};
constexpr Type F16x3{Type::F16x3};
constexpr Type F16x4{Type::F16x4};
constexpr Type F32x2{Type::F32x2};
constexpr Type F32x3{Type::F32x3};
constexpr Type F32x4{Type::F32x4};
constexpr Type F64x2{Type::F64x2};
constexpr Type F64x3{Type::F64x3};
constexpr Type F64x4{Type::F64x4};
constexpr OpcodeMeta META_TABLE[]{
#define OPCODE(name_token, type_token, ...) \
{ \
.name{#name_token}, \
.type = type_token, \
.arg_types{__VA_ARGS__}, \
},
#include "opcodes.inc"
#undef OPCODE
};
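// The argument count of an opcode is the index of its first Void argument type.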
constexpr size_t CalculateNumArgsOf(Opcode op) {
const auto& arg_types{META_TABLE[static_cast<size_t>(op)].arg_types};
return static_cast<size_t>(
std::distance(arg_types.begin(), std::ranges::find(arg_types, Type::Void)));
}
constexpr u8 NUM_ARGS[]{
#define OPCODE(name_token, type_token, ...) static_cast<u8>(CalculateNumArgsOf(Opcode::name_token)),
#include "opcodes.inc"
#undef OPCODE
};
} // namespace Detail
/// Get return type of an opcode
[[nodiscard]] inline Type TypeOf(Opcode op) noexcept {
return Detail::META_TABLE[static_cast<size_t>(op)].type;
}
/// Get the number of arguments an opcode accepts
[[nodiscard]] inline size_t NumArgsOf(Opcode op) noexcept {
return static_cast<size_t>(Detail::NUM_ARGS[static_cast<size_t>(op)]);
}
/// Get the required type of an argument of an opcode
[[nodiscard]] inline Type ArgTypeOf(Opcode op, size_t arg_index) noexcept {
return Detail::META_TABLE[static_cast<size_t>(op)].arg_types[arg_index];
}
/// Get the name of an opcode
[[nodiscard]] std::string_view NameOf(Opcode op);
} // namespace Shader::IR
template <>
struct fmt::formatter<Shader::IR::Opcode> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
template <typename FormatContext>
auto format(const Shader::IR::Opcode& op, FormatContext& ctx) const {
return fmt::format_to(ctx.out(), "{}", Shader::IR::NameOf(op));
}
};

View File

@ -0,0 +1,247 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
// opcode name, return type, arg1 type, arg2 type, arg3 type, arg4 type, arg5 type, ...
OPCODE(Phi, Opaque, )
OPCODE(Identity, Opaque, Opaque, )
OPCODE(Void, Void, )
OPCODE(ConditionRef, U1, U1, )
OPCODE(Reference, Void, Opaque, )
OPCODE(PhiMove, Void, Opaque, Opaque, )
// Special operations
OPCODE(Prologue, Void, )
OPCODE(Epilogue, Void, )
// Constant memory operations
OPCODE(ReadConst, U32, U64, U32, )
OPCODE(ReadConstBuffer, U32, Opaque, U32, U32 )
OPCODE(ReadConstBufferF32, F32, Opaque, U32, U32 )
// Context getters/setters
OPCODE(GetScalarRegister, U32, ScalarReg, )
OPCODE(SetScalarRegister, Void, ScalarReg, U32, )
OPCODE(GetVectorRegister, U32, VectorReg, )
OPCODE(SetVectorRegister, Void, VectorReg, U32, )
OPCODE(GetGotoVariable, U1, U32, )
OPCODE(SetGotoVariable, Void, U32, U1, )
OPCODE(GetAttribute, F32, Attribute, U32, )
OPCODE(GetAttributeU32, U32, Attribute, U32, )
OPCODE(SetAttribute, Void, Attribute, F32, U32, )
// Flags
//OPCODE(GetScc, U1, Void, )
OPCODE(GetVcc, U1, Void, )
//OPCODE(SetScc, Void, U1, )
OPCODE(SetVcc, Void, U1, )
// Undefined
OPCODE(UndefU1, U1, )
OPCODE(UndefU8, U8, )
OPCODE(UndefU16, U16, )
OPCODE(UndefU32, U32, )
OPCODE(UndefU64, U64, )
// Vector utility
OPCODE(CompositeConstructU32x2, U32x2, U32, U32, )
OPCODE(CompositeConstructU32x3, U32x3, U32, U32, U32, )
OPCODE(CompositeConstructU32x4, U32x4, U32, U32, U32, U32, )
OPCODE(CompositeExtractU32x2, U32, U32x2, U32, )
OPCODE(CompositeExtractU32x3, U32, U32x3, U32, )
OPCODE(CompositeExtractU32x4, U32, U32x4, U32, )
OPCODE(CompositeInsertU32x2, U32x2, U32x2, U32, U32, )
OPCODE(CompositeInsertU32x3, U32x3, U32x3, U32, U32, )
OPCODE(CompositeInsertU32x4, U32x4, U32x4, U32, U32, )
OPCODE(CompositeConstructF16x2, F16x2, F16, F16, )
OPCODE(CompositeConstructF16x3, F16x3, F16, F16, F16, )
OPCODE(CompositeConstructF16x4, F16x4, F16, F16, F16, F16, )
OPCODE(CompositeExtractF16x2, F16, F16x2, U32, )
OPCODE(CompositeExtractF16x3, F16, F16x3, U32, )
OPCODE(CompositeExtractF16x4, F16, F16x4, U32, )
OPCODE(CompositeInsertF16x2, F16x2, F16x2, F16, U32, )
OPCODE(CompositeInsertF16x3, F16x3, F16x3, F16, U32, )
OPCODE(CompositeInsertF16x4, F16x4, F16x4, F16, U32, )
OPCODE(CompositeConstructF32x2, F32x2, F32, F32, )
OPCODE(CompositeConstructF32x3, F32x3, F32, F32, F32, )
OPCODE(CompositeConstructF32x4, F32x4, F32, F32, F32, F32, )
OPCODE(CompositeExtractF32x2, F32, F32x2, U32, )
OPCODE(CompositeExtractF32x3, F32, F32x3, U32, )
OPCODE(CompositeExtractF32x4, F32, F32x4, U32, )
OPCODE(CompositeInsertF32x2, F32x2, F32x2, F32, U32, )
OPCODE(CompositeInsertF32x3, F32x3, F32x3, F32, U32, )
OPCODE(CompositeInsertF32x4, F32x4, F32x4, F32, U32, )
OPCODE(CompositeConstructF64x2, F64x2, F64, F64, )
OPCODE(CompositeConstructF64x3, F64x3, F64, F64, F64, )
OPCODE(CompositeConstructF64x4, F64x4, F64, F64, F64, F64, )
OPCODE(CompositeExtractF64x2, F64, F64x2, U32, )
OPCODE(CompositeExtractF64x3, F64, F64x3, U32, )
OPCODE(CompositeExtractF64x4, F64, F64x4, U32, )
OPCODE(CompositeInsertF64x2, F64x2, F64x2, F64, U32, )
OPCODE(CompositeInsertF64x3, F64x3, F64x3, F64, U32, )
OPCODE(CompositeInsertF64x4, F64x4, F64x4, F64, U32, )
// Select operations
OPCODE(SelectU1, U1, U1, U1, U1, )
OPCODE(SelectU8, U8, U1, U8, U8, )
OPCODE(SelectU16, U16, U1, U16, U16, )
OPCODE(SelectU32, U32, U1, U32, U32, )
OPCODE(SelectU64, U64, U1, U64, U64, )
OPCODE(SelectF32, F32, U1, F32, F32, )
OPCODE(SelectF64, F64, U1, F64, F64, )
// Bitwise conversions
OPCODE(BitCastU16F16, U16, F16, )
OPCODE(BitCastU32F32, U32, F32, )
OPCODE(BitCastU64F64, U64, F64, )
OPCODE(BitCastF16U16, F16, U16, )
OPCODE(BitCastF32U32, F32, U32, )
OPCODE(BitCastF64U64, F64, U64, )
OPCODE(PackUint2x32, U64, U32x2, )
OPCODE(UnpackUint2x32, U32x2, U64, )
OPCODE(PackFloat2x16, U32, F16x2, )
OPCODE(UnpackFloat2x16, F16x2, U32, )
OPCODE(PackHalf2x16, U32, F32x2, )
OPCODE(UnpackHalf2x16, F32x2, U32, )
// Floating-point operations
OPCODE(FPAbs32, F32, F32, )
OPCODE(FPAbs64, F64, F64, )
OPCODE(FPAdd32, F32, F32, F32, )
OPCODE(FPAdd64, F64, F64, F64, )
OPCODE(FPFma32, F32, F32, F32, F32, )
OPCODE(FPFma64, F64, F64, F64, F64, )
OPCODE(FPMax32, F32, F32, F32, )
OPCODE(FPMax64, F64, F64, F64, )
OPCODE(FPMin32, F32, F32, F32, )
OPCODE(FPMin64, F64, F64, F64, )
OPCODE(FPMul32, F32, F32, F32, )
OPCODE(FPMul64, F64, F64, F64, )
OPCODE(FPNeg32, F32, F32, )
OPCODE(FPNeg64, F64, F64, )
OPCODE(FPRecip32, F32, F32, )
OPCODE(FPRecip64, F64, F64, )
OPCODE(FPRecipSqrt32, F32, F32, )
OPCODE(FPRecipSqrt64, F64, F64, )
OPCODE(FPSqrt, F32, F32, )
OPCODE(FPSin, F32, F32, )
OPCODE(FPExp2, F32, F32, )
OPCODE(FPCos, F32, F32, )
OPCODE(FPLog2, F32, F32, )
OPCODE(FPSaturate32, F32, F32, )
OPCODE(FPSaturate64, F64, F64, )
OPCODE(FPClamp32, F32, F32, F32, F32, )
OPCODE(FPClamp64, F64, F64, F64, F64, )
OPCODE(FPRoundEven32, F32, F32, )
OPCODE(FPRoundEven64, F64, F64, )
OPCODE(FPFloor32, F32, F32, )
OPCODE(FPFloor64, F64, F64, )
OPCODE(FPCeil32, F32, F32, )
OPCODE(FPCeil64, F64, F64, )
OPCODE(FPTrunc32, F32, F32, )
OPCODE(FPTrunc64, F64, F64, )
OPCODE(FPOrdEqual32, U1, F32, F32, )
OPCODE(FPOrdEqual64, U1, F64, F64, )
OPCODE(FPUnordEqual32, U1, F32, F32, )
OPCODE(FPUnordEqual64, U1, F64, F64, )
OPCODE(FPOrdNotEqual32, U1, F32, F32, )
OPCODE(FPOrdNotEqual64, U1, F64, F64, )
OPCODE(FPUnordNotEqual32, U1, F32, F32, )
OPCODE(FPUnordNotEqual64, U1, F64, F64, )
OPCODE(FPOrdLessThan32, U1, F32, F32, )
OPCODE(FPOrdLessThan64, U1, F64, F64, )
OPCODE(FPUnordLessThan32, U1, F32, F32, )
OPCODE(FPUnordLessThan64, U1, F64, F64, )
OPCODE(FPOrdGreaterThan32, U1, F32, F32, )
OPCODE(FPOrdGreaterThan64, U1, F64, F64, )
OPCODE(FPUnordGreaterThan32, U1, F32, F32, )
OPCODE(FPUnordGreaterThan64, U1, F64, F64, )
OPCODE(FPOrdLessThanEqual32, U1, F32, F32, )
OPCODE(FPOrdLessThanEqual64, U1, F64, F64, )
OPCODE(FPUnordLessThanEqual32, U1, F32, F32, )
OPCODE(FPUnordLessThanEqual64, U1, F64, F64, )
OPCODE(FPOrdGreaterThanEqual32, U1, F32, F32, )
OPCODE(FPOrdGreaterThanEqual64, U1, F64, F64, )
OPCODE(FPUnordGreaterThanEqual32, U1, F32, F32, )
OPCODE(FPUnordGreaterThanEqual64, U1, F64, F64, )
OPCODE(FPIsNan32, U1, F32, )
OPCODE(FPIsNan64, U1, F64, )
// Integer operations
OPCODE(IAdd32, U32, U32, U32, )
OPCODE(IAdd64, U64, U64, U64, )
OPCODE(ISub32, U32, U32, U32, )
OPCODE(ISub64, U64, U64, U64, )
OPCODE(IMul32, U32, U32, U32, )
OPCODE(SDiv32, U32, U32, U32, )
OPCODE(UDiv32, U32, U32, U32, )
OPCODE(INeg32, U32, U32, )
OPCODE(INeg64, U64, U64, )
OPCODE(IAbs32, U32, U32, )
OPCODE(ShiftLeftLogical32, U32, U32, U32, )
OPCODE(ShiftLeftLogical64, U64, U64, U32, )
OPCODE(ShiftRightLogical32, U32, U32, U32, )
OPCODE(ShiftRightLogical64, U64, U64, U32, )
OPCODE(ShiftRightArithmetic32, U32, U32, U32, )
OPCODE(ShiftRightArithmetic64, U64, U64, U32, )
OPCODE(BitwiseAnd32, U32, U32, U32, )
OPCODE(BitwiseOr32, U32, U32, U32, )
OPCODE(BitwiseXor32, U32, U32, U32, )
OPCODE(BitFieldInsert, U32, U32, U32, U32, U32, )
OPCODE(BitFieldSExtract, U32, U32, U32, U32, )
OPCODE(BitFieldUExtract, U32, U32, U32, U32, )
OPCODE(BitReverse32, U32, U32, )
OPCODE(BitCount32, U32, U32, )
OPCODE(BitwiseNot32, U32, U32, )
OPCODE(FindSMsb32, U32, U32, )
OPCODE(FindUMsb32, U32, U32, )
OPCODE(SMin32, U32, U32, U32, )
OPCODE(UMin32, U32, U32, U32, )
OPCODE(SMax32, U32, U32, U32, )
OPCODE(UMax32, U32, U32, U32, )
OPCODE(SClamp32, U32, U32, U32, U32, )
OPCODE(UClamp32, U32, U32, U32, U32, )
OPCODE(SLessThan, U1, U32, U32, )
OPCODE(ULessThan, U1, U32, U32, )
OPCODE(IEqual, U1, U32, U32, )
OPCODE(SLessThanEqual, U1, U32, U32, )
OPCODE(ULessThanEqual, U1, U32, U32, )
OPCODE(SGreaterThan, U1, U32, U32, )
OPCODE(UGreaterThan, U1, U32, U32, )
OPCODE(INotEqual, U1, U32, U32, )
OPCODE(SGreaterThanEqual, U1, U32, U32, )
OPCODE(UGreaterThanEqual, U1, U32, U32, )
// Logical operations
OPCODE(LogicalOr, U1, U1, U1, )
OPCODE(LogicalAnd, U1, U1, U1, )
OPCODE(LogicalXor, U1, U1, U1, )
OPCODE(LogicalNot, U1, U1, )
// Conversion operations
OPCODE(ConvertS32F32, U32, F32, )
OPCODE(ConvertS32F64, U32, F64, )
OPCODE(ConvertU32F32, U32, F32, )
OPCODE(ConvertF16F32, F16, F32, )
OPCODE(ConvertF32F16, F32, F16, )
OPCODE(ConvertF32F64, F32, F64, )
OPCODE(ConvertF64F32, F64, F32, )
OPCODE(ConvertF32S32, F32, U32, )
OPCODE(ConvertF32U32, F32, U32, )
OPCODE(ConvertF64S32, F64, U32, )
OPCODE(ConvertF64U32, F64, U32, )
// Image operations
OPCODE(ImageSampleImplicitLod, F32x4, Opaque, Opaque, Opaque, Opaque, )
OPCODE(ImageSampleExplicitLod, F32x4, Opaque, Opaque, Opaque, Opaque, )
OPCODE(ImageSampleDrefImplicitLod, F32, Opaque, Opaque, F32, Opaque, Opaque, )
OPCODE(ImageSampleDrefExplicitLod, F32, Opaque, Opaque, F32, Opaque, Opaque, )
OPCODE(ImageGather, F32x4, Opaque, Opaque, Opaque, Opaque, )
OPCODE(ImageGatherDref, F32x4, Opaque, Opaque, Opaque, Opaque, F32, )
OPCODE(ImageFetch, F32x4, Opaque, Opaque, Opaque, U32, Opaque, )
OPCODE(ImageQueryDimensions, U32x4, Opaque, U32, U1, )
OPCODE(ImageQueryLod, F32x4, Opaque, Opaque, )
OPCODE(ImageGradient, F32x4, Opaque, Opaque, Opaque, Opaque, Opaque, )
OPCODE(ImageRead, U32x4, Opaque, Opaque, )
OPCODE(ImageWrite, Void, Opaque, Opaque, U32x4, )

View File

@ -0,0 +1,403 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <bit>
#include <optional>
#include <type_traits>
#include "common/func_traits.h"
#include "shader_recompiler/ir/basic_block.h"
namespace Shader::Optimization {
template <typename T>
[[nodiscard]] T Arg(const IR::Value& value) {
if constexpr (std::is_same_v<T, bool>) {
return value.U1();
} else if constexpr (std::is_same_v<T, u32>) {
return value.U32();
} else if constexpr (std::is_same_v<T, s32>) {
return static_cast<s32>(value.U32());
} else if constexpr (std::is_same_v<T, f32>) {
return value.F32();
} else if constexpr (std::is_same_v<T, u64>) {
return value.U64();
}
}
template <typename Func, size_t... I>
IR::Value EvalImmediates(const IR::Inst& inst, Func&& func, std::index_sequence<I...>) {
using Traits = Common::LambdaTraits<decltype(func)>;
return IR::Value{func(Arg<typename Traits::template ArgType<I>>(inst.Arg(I))...)};
}
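// Folds a commutative binary operation: evaluates it when both operands are immediates, otherwise
// normalizes the immediate operand to the right-hand side so chained constants can combine,
// e.g. IAdd32(#3, IAdd32(%x, #2)) becomes IAdd32(%x, #5). Returns false when the instruction has
// been fully replaced by an immediate.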
template <typename T, typename ImmFn>
bool FoldCommutative(IR::Inst& inst, ImmFn&& imm_fn) {
const IR::Value lhs{inst.Arg(0)};
const IR::Value rhs{inst.Arg(1)};
const bool is_lhs_immediate{lhs.IsImmediate()};
const bool is_rhs_immediate{rhs.IsImmediate()};
if (is_lhs_immediate && is_rhs_immediate) {
const auto result{imm_fn(Arg<T>(lhs), Arg<T>(rhs))};
inst.ReplaceUsesWith(IR::Value{result});
return false;
}
if (is_lhs_immediate && !is_rhs_immediate) {
IR::Inst* const rhs_inst{rhs.InstRecursive()};
if (rhs_inst->GetOpcode() == inst.GetOpcode() && rhs_inst->Arg(1).IsImmediate()) {
const auto combined{imm_fn(Arg<T>(lhs), Arg<T>(rhs_inst->Arg(1)))};
inst.SetArg(0, rhs_inst->Arg(0));
inst.SetArg(1, IR::Value{combined});
} else {
// Normalize
inst.SetArg(0, rhs);
inst.SetArg(1, lhs);
}
}
if (!is_lhs_immediate && is_rhs_immediate) {
const IR::Inst* const lhs_inst{lhs.InstRecursive()};
if (lhs_inst->GetOpcode() == inst.GetOpcode() && lhs_inst->Arg(1).IsImmediate()) {
const auto combined{imm_fn(Arg<T>(rhs), Arg<T>(lhs_inst->Arg(1)))};
inst.SetArg(0, lhs_inst->Arg(0));
inst.SetArg(1, IR::Value{combined});
}
}
return true;
}
template <typename Func>
bool FoldWhenAllImmediates(IR::Inst& inst, Func&& func) {
if (!inst.AreAllArgsImmediates() /*|| inst.HasAssociatedPseudoOperation()*/) {
return false;
}
using Indices = std::make_index_sequence<Common::LambdaTraits<decltype(func)>::NUM_ARGS>;
inst.ReplaceUsesWith(EvalImmediates(inst, func, Indices{}));
return true;
}
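// Folds a bitcast: evaluates immediate operands, cancels a cast of the reverse cast, and rewrites
// a bitcast of a constant buffer read into the typed ReadConstBufferF32 opcode.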
template <IR::Opcode op, typename Dest, typename Source>
void FoldBitCast(IR::Inst& inst, IR::Opcode reverse) {
const IR::Value value{inst.Arg(0)};
if (value.IsImmediate()) {
inst.ReplaceUsesWith(IR::Value{std::bit_cast<Dest>(Arg<Source>(value))});
return;
}
IR::Inst* const arg_inst{value.InstRecursive()};
if (arg_inst->GetOpcode() == reverse) {
inst.ReplaceUsesWith(arg_inst->Arg(0));
return;
}
if constexpr (op == IR::Opcode::BitCastF32U32) {
if (arg_inst->GetOpcode() == IR::Opcode::ReadConstBuffer) {
// Replace the bitcast with a typed constant buffer read
inst.ReplaceOpcode(IR::Opcode::ReadConstBufferF32);
inst.SetArg(0, arg_inst->Arg(0));
inst.SetArg(1, arg_inst->Arg(1));
return;
}
}
}
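// Walks a chain of CompositeInsert instructions looking for the value written at first_index,
// falling back to the corresponding CompositeConstruct argument when no insert matches.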
std::optional<IR::Value> FoldCompositeExtractImpl(IR::Value inst_value, IR::Opcode insert,
IR::Opcode construct, u32 first_index) {
IR::Inst* const inst{inst_value.InstRecursive()};
if (inst->GetOpcode() == construct) {
return inst->Arg(first_index);
}
if (inst->GetOpcode() != insert) {
return std::nullopt;
}
IR::Value value_index{inst->Arg(2)};
if (!value_index.IsImmediate()) {
return std::nullopt;
}
const u32 second_index{value_index.U32()};
if (first_index != second_index) {
IR::Value value_composite{inst->Arg(0)};
if (value_composite.IsImmediate()) {
return std::nullopt;
}
return FoldCompositeExtractImpl(value_composite, insert, construct, first_index);
}
return inst->Arg(1);
}
void FoldCompositeExtract(IR::Inst& inst, IR::Opcode construct, IR::Opcode insert) {
const IR::Value value_1{inst.Arg(0)};
const IR::Value value_2{inst.Arg(1)};
if (value_1.IsImmediate()) {
return;
}
if (!value_2.IsImmediate()) {
return;
}
const u32 first_index{value_2.U32()};
const std::optional result{FoldCompositeExtractImpl(value_1, insert, construct, first_index)};
if (!result) {
return;
}
inst.ReplaceUsesWith(*result);
}
void FoldConvert(IR::Inst& inst, IR::Opcode opposite) {
const IR::Value value{inst.Arg(0)};
if (value.IsImmediate()) {
return;
}
IR::Inst* const producer{value.InstRecursive()};
if (producer->GetOpcode() == opposite) {
inst.ReplaceUsesWith(producer->Arg(0));
}
}
void FoldLogicalAnd(IR::Inst& inst) {
if (!FoldCommutative<bool>(inst, [](bool a, bool b) { return a && b; })) {
return;
}
const IR::Value rhs{inst.Arg(1)};
if (rhs.IsImmediate()) {
if (rhs.U1()) {
inst.ReplaceUsesWith(inst.Arg(0));
} else {
inst.ReplaceUsesWith(IR::Value{false});
}
}
}
void FoldSelect(IR::Inst& inst) {
const IR::Value cond{inst.Arg(0)};
if (cond.IsImmediate()) {
inst.ReplaceUsesWith(cond.U1() ? inst.Arg(1) : inst.Arg(2));
}
}
void FoldLogicalOr(IR::Inst& inst) {
if (!FoldCommutative<bool>(inst, [](bool a, bool b) { return a || b; })) {
return;
}
const IR::Value rhs{inst.Arg(1)};
if (rhs.IsImmediate()) {
if (rhs.U1()) {
inst.ReplaceUsesWith(IR::Value{true});
} else {
inst.ReplaceUsesWith(inst.Arg(0));
}
}
}
void FoldLogicalNot(IR::Inst& inst) {
const IR::U1 value{inst.Arg(0)};
if (value.IsImmediate()) {
inst.ReplaceUsesWith(IR::Value{!value.U1()});
return;
}
IR::Inst* const arg{value.InstRecursive()};
if (arg->GetOpcode() == IR::Opcode::LogicalNot) {
inst.ReplaceUsesWith(arg->Arg(0));
}
}
void FoldInverseFunc(IR::Inst& inst, IR::Opcode reverse) {
const IR::Value value{inst.Arg(0)};
if (value.IsImmediate()) {
return;
}
IR::Inst* const arg_inst{value.InstRecursive()};
if (arg_inst->GetOpcode() == reverse) {
inst.ReplaceUsesWith(arg_inst->Arg(0));
return;
}
}
template <typename T>
void FoldAdd(IR::Block& block, IR::Inst& inst) {
if (!FoldCommutative<T>(inst, [](T a, T b) { return a + b; })) {
return;
}
const IR::Value rhs{inst.Arg(1)};
if (rhs.IsImmediate() && Arg<T>(rhs) == 0) {
inst.ReplaceUsesWith(inst.Arg(0));
return;
}
}
template <u32 idx>
bool IsArgImm(const IR::Inst& inst, u32 imm) {
const IR::Value& arg = inst.Arg(idx);
return arg.IsImmediate() && arg.U32() == imm;
}
void FoldBooleanConvert(IR::Inst& inst) {
// Eliminate pattern
// %4 = <some bool>
// %5 = SelectU32 %4, #1, #0 (uses: 2)
// %8 = INotEqual %5, #0 (uses: 1)
if (!IsArgImm<1>(inst, 0)) {
return;
}
IR::Inst* prod = inst.Arg(0).TryInstRecursive();
if (!prod || prod->GetOpcode() != IR::Opcode::SelectU32) {
return;
}
if (IsArgImm<1>(*prod, 1) && IsArgImm<2>(*prod, 0)) {
inst.ReplaceUsesWith(prod->Arg(0));
}
}
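// Applies the opcode-specific folding rules above to a single instruction.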
void ConstantPropagation(IR::Block& block, IR::Inst& inst) {
switch (inst.GetOpcode()) {
case IR::Opcode::IAdd32:
return FoldAdd<u32>(block, inst);
case IR::Opcode::IMul32:
FoldWhenAllImmediates(inst, [](u32 a, u32 b) { return a * b; });
return;
case IR::Opcode::ShiftRightArithmetic32:
FoldWhenAllImmediates(inst, [](s32 a, s32 b) { return static_cast<u32>(a >> b); });
return;
case IR::Opcode::BitCastF32U32:
return FoldBitCast<IR::Opcode::BitCastF32U32, f32, u32>(inst, IR::Opcode::BitCastU32F32);
case IR::Opcode::BitCastU32F32:
return FoldBitCast<IR::Opcode::BitCastU32F32, u32, f32>(inst, IR::Opcode::BitCastF32U32);
case IR::Opcode::PackHalf2x16:
return FoldInverseFunc(inst, IR::Opcode::UnpackHalf2x16);
case IR::Opcode::UnpackHalf2x16:
return FoldInverseFunc(inst, IR::Opcode::PackHalf2x16);
case IR::Opcode::PackFloat2x16:
return FoldInverseFunc(inst, IR::Opcode::UnpackFloat2x16);
case IR::Opcode::UnpackFloat2x16:
return FoldInverseFunc(inst, IR::Opcode::PackFloat2x16);
case IR::Opcode::SelectU1:
case IR::Opcode::SelectU8:
case IR::Opcode::SelectU16:
case IR::Opcode::SelectU32:
case IR::Opcode::SelectU64:
case IR::Opcode::SelectF32:
case IR::Opcode::SelectF64:
return FoldSelect(inst);
case IR::Opcode::FPNeg32:
FoldWhenAllImmediates(inst, [](f32 a) { return -a; });
return;
case IR::Opcode::LogicalAnd:
return FoldLogicalAnd(inst);
case IR::Opcode::LogicalOr:
return FoldLogicalOr(inst);
case IR::Opcode::LogicalNot:
return FoldLogicalNot(inst);
case IR::Opcode::SLessThan:
FoldWhenAllImmediates(inst, [](s32 a, s32 b) { return a < b; });
return;
case IR::Opcode::ULessThan:
FoldWhenAllImmediates(inst, [](u32 a, u32 b) { return a < b; });
return;
case IR::Opcode::SLessThanEqual:
FoldWhenAllImmediates(inst, [](s32 a, s32 b) { return a <= b; });
return;
case IR::Opcode::ULessThanEqual:
FoldWhenAllImmediates(inst, [](u32 a, u32 b) { return a <= b; });
return;
case IR::Opcode::SGreaterThan:
FoldWhenAllImmediates(inst, [](s32 a, s32 b) { return a > b; });
return;
case IR::Opcode::UGreaterThan:
FoldWhenAllImmediates(inst, [](u32 a, u32 b) { return a > b; });
return;
case IR::Opcode::SGreaterThanEqual:
FoldWhenAllImmediates(inst, [](s32 a, s32 b) { return a >= b; });
return;
case IR::Opcode::UGreaterThanEqual:
FoldWhenAllImmediates(inst, [](u32 a, u32 b) { return a >= b; });
return;
case IR::Opcode::IEqual:
FoldWhenAllImmediates(inst, [](u32 a, u32 b) { return a == b; });
return;
case IR::Opcode::INotEqual:
FoldWhenAllImmediates(inst, [](u32 a, u32 b) { return a != b; });
FoldBooleanConvert(inst);
return;
case IR::Opcode::BitwiseAnd32:
FoldWhenAllImmediates(inst, [](u32 a, u32 b) { return a & b; });
return;
case IR::Opcode::BitwiseOr32:
FoldWhenAllImmediates(inst, [](u32 a, u32 b) { return a | b; });
return;
case IR::Opcode::BitwiseXor32:
FoldWhenAllImmediates(inst, [](u32 a, u32 b) { return a ^ b; });
return;
case IR::Opcode::BitFieldUExtract:
FoldWhenAllImmediates(inst, [](u32 base, u32 shift, u32 count) {
if (static_cast<size_t>(shift) + static_cast<size_t>(count) > 32) {
throw LogicError("Undefined result in {}({}, {}, {})", IR::Opcode::BitFieldUExtract,
base, shift, count);
}
return (base >> shift) & ((1U << count) - 1);
});
return;
case IR::Opcode::BitFieldSExtract:
FoldWhenAllImmediates(inst, [](s32 base, u32 shift, u32 count) {
const size_t back_shift{static_cast<size_t>(shift) + static_cast<size_t>(count)};
const size_t left_shift{32 - back_shift};
const size_t right_shift{static_cast<size_t>(32 - count)};
if (back_shift > 32 || left_shift >= 32 || right_shift >= 32) {
throw LogicError("Undefined result in {}({}, {}, {})", IR::Opcode::BitFieldSExtract,
base, shift, count);
}
return static_cast<u32>((base << left_shift) >> right_shift);
});
return;
case IR::Opcode::BitFieldInsert:
FoldWhenAllImmediates(inst, [](u32 base, u32 insert, u32 offset, u32 bits) {
if (bits >= 32 || offset >= 32) {
throw LogicError("Undefined result in {}({}, {}, {}, {})",
IR::Opcode::BitFieldInsert, base, insert, offset, bits);
}
return (base & ~(~(~0u << bits) << offset)) | (insert << offset);
});
return;
case IR::Opcode::CompositeExtractU32x2:
return FoldCompositeExtract(inst, IR::Opcode::CompositeConstructU32x2,
IR::Opcode::CompositeInsertU32x2);
case IR::Opcode::CompositeExtractU32x3:
return FoldCompositeExtract(inst, IR::Opcode::CompositeConstructU32x3,
IR::Opcode::CompositeInsertU32x3);
case IR::Opcode::CompositeExtractU32x4:
return FoldCompositeExtract(inst, IR::Opcode::CompositeConstructU32x4,
IR::Opcode::CompositeInsertU32x4);
case IR::Opcode::CompositeExtractF32x2:
return FoldCompositeExtract(inst, IR::Opcode::CompositeConstructF32x2,
IR::Opcode::CompositeInsertF32x2);
case IR::Opcode::CompositeExtractF32x3:
return FoldCompositeExtract(inst, IR::Opcode::CompositeConstructF32x3,
IR::Opcode::CompositeInsertF32x3);
case IR::Opcode::CompositeExtractF32x4:
return FoldCompositeExtract(inst, IR::Opcode::CompositeConstructF32x4,
IR::Opcode::CompositeInsertF32x4);
case IR::Opcode::CompositeExtractF16x2:
return FoldCompositeExtract(inst, IR::Opcode::CompositeConstructF16x2,
IR::Opcode::CompositeInsertF16x2);
case IR::Opcode::CompositeExtractF16x3:
return FoldCompositeExtract(inst, IR::Opcode::CompositeConstructF16x3,
IR::Opcode::CompositeInsertF16x3);
case IR::Opcode::CompositeExtractF16x4:
return FoldCompositeExtract(inst, IR::Opcode::CompositeConstructF16x4,
IR::Opcode::CompositeInsertF16x4);
case IR::Opcode::ConvertF32F16:
return FoldConvert(inst, IR::Opcode::ConvertF16F32);
case IR::Opcode::ConvertF16F32:
return FoldConvert(inst, IR::Opcode::ConvertF32F16);
default:
break;
}
}
void ConstantPropagationPass(IR::BlockList& program) {
const auto end{program.rend()};
for (auto it = program.rbegin(); it != end; ++it) {
IR::Block* const block{*it};
for (IR::Inst& inst : block->Instructions()) {
ConstantPropagation(*block, inst);
}
}
}
} // namespace Shader::Optimization

View File

@ -0,0 +1,16 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "shader_recompiler/ir/basic_block.h"
namespace Shader::Optimization {
void SsaRewritePass(IR::BlockList& program);
void IdentityRemovalPass(IR::BlockList& program);
void DeadCodeEliminationPass(IR::BlockList& program);
void ConstantPropagationPass(IR::BlockList& program);
void ResourceTrackingPass(IR::BlockList& program);
} // namespace Shader::Optimization

View File

@ -0,0 +1,131 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <bit>
#include <optional>
#include <boost/container/small_vector.hpp>
#include "shader_recompiler/ir/basic_block.h"
#include "shader_recompiler/ir/ir_emitter.h"
#include "shader_recompiler/ir/program.h"
#include "shader_recompiler/runtime_info.h"
namespace Shader::Optimization {
namespace {
struct SharpLocation {
IR::ScalarReg eud_ptr;
u32 index_dwords;
auto operator<=>(const SharpLocation&) const = default;
};
bool IsResourceInstruction(const IR::Inst& inst) {
switch (inst.GetOpcode()) {
case IR::Opcode::ReadConstBuffer:
case IR::Opcode::ReadConstBufferF32:
case IR::Opcode::ImageSampleExplicitLod:
case IR::Opcode::ImageSampleImplicitLod:
case IR::Opcode::ImageSampleDrefExplicitLod:
case IR::Opcode::ImageSampleDrefImplicitLod:
case IR::Opcode::ImageFetch:
case IR::Opcode::ImageGather:
case IR::Opcode::ImageGatherDref:
case IR::Opcode::ImageQueryDimensions:
case IR::Opcode::ImageQueryLod:
case IR::Opcode::ImageGradient:
case IR::Opcode::ImageRead:
case IR::Opcode::ImageWrite:
return true;
default:
return false;
}
}
/*class Descriptors {
public:
explicit Descriptors(TextureDescriptors& texture_descriptors_)
: texture_descriptors{texture_descriptors_} {}
u32 Add(const TextureDescriptor& desc) {
const u32 index{Add(texture_descriptors, desc, [&desc](const auto& existing) {
return desc.type == existing.type && desc.is_depth == existing.is_depth &&
desc.has_secondary == existing.has_secondary &&
desc.cbuf_index == existing.cbuf_index &&
desc.cbuf_offset == existing.cbuf_offset &&
desc.shift_left == existing.shift_left &&
desc.secondary_cbuf_index == existing.secondary_cbuf_index &&
desc.secondary_cbuf_offset == existing.secondary_cbuf_offset &&
desc.secondary_shift_left == existing.secondary_shift_left &&
desc.count == existing.count && desc.size_shift == existing.size_shift;
})};
// TODO: Read this from TIC
texture_descriptors[index].is_multisample |= desc.is_multisample;
return index;
}
private:
template <typename Descriptors, typename Descriptor, typename Func>
static u32 Add(Descriptors& descriptors, const Descriptor& desc, Func&& pred) {
// TODO: Handle arrays
const auto it{std::ranges::find_if(descriptors, pred)};
if (it != descriptors.end()) {
return static_cast<u32>(std::distance(descriptors.begin(), it));
}
descriptors.push_back(desc);
return static_cast<u32>(descriptors.size()) - 1;
}
TextureDescriptors& texture_descriptors;
};*/
} // Anonymous namespace
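// Walks the IR backwards from a resource handle to the scalar register and dword offset the sharp
// descriptor was loaded from.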
SharpLocation TrackSharp(const IR::Value& handle) {
IR::Inst* inst = handle.InstRecursive();
if (inst->GetOpcode() == IR::Opcode::GetScalarRegister) {
return SharpLocation{
.eud_ptr = IR::ScalarReg::Max,
.index_dwords = inst->Arg(0).U32(),
};
}
ASSERT_MSG(inst->GetOpcode() == IR::Opcode::ReadConst, "Sharp load not from constant memory");
// Retrieve offset from base.
IR::Inst* addr = inst->Arg(0).InstRecursive();
u32 dword_offset = addr->Arg(1).U32();
addr = addr->Arg(0).InstRecursive();
ASSERT_MSG(addr->Arg(1).IsImmediate(), "Bindless not supported");
dword_offset += addr->Arg(1).U32() >> 2;
// Retrieve SGPR that holds sbase
inst = addr->Arg(0).InstRecursive()->Arg(0).InstRecursive();
ASSERT_MSG(inst->GetOpcode() == IR::Opcode::GetScalarRegister,
"Nested resource loads not supported");
const IR::ScalarReg base = inst->Arg(0).ScalarReg();
// Return retrieved location.
return SharpLocation{
.eud_ptr = base,
.index_dwords = dword_offset,
};
}
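// For now this pass only locates and logs the sharp location of every resource instruction.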
void ResourceTrackingPass(IR::BlockList& program) {
for (IR::Block* const block : program) {
for (IR::Inst& inst : block->Instructions()) {
if (!IsResourceInstruction(inst)) {
continue;
}
printf("ff\n");
IR::Inst* producer = inst.Arg(0).InstRecursive();
const auto loc = TrackSharp(producer->Arg(0));
fmt::print("Found resource s[{}:{}] is_eud = {}\n", loc.index_dwords,
loc.index_dwords + 4, loc.eud_ptr != IR::ScalarReg::Max);
}
}
}
} // namespace Shader::Optimization

View File

@ -0,0 +1,408 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
// This file implements the SSA rewriting algorithm proposed in
//
// Simple and Efficient Construction of Static Single Assignment Form.
// Braun M., Buchwald S., Hack S., Leißa R., Mallon C., Zwinkau A. (2013)
// In: Jhala R., De Bosschere K. (eds)
// Compiler Construction. CC 2013.
// Lecture Notes in Computer Science, vol 7791.
// Springer, Berlin, Heidelberg
//
// https://link.springer.com/chapter/10.1007/978-3-642-37051-9_6
//
#include <map>
#include <span>
#include <unordered_map>
#include <variant>
#include <vector>
#include "shader_recompiler/ir/basic_block.h"
#include "shader_recompiler/ir/ir_emitter.h"
#include "shader_recompiler/ir/opcodes.h"
#include "shader_recompiler/ir/reg.h"
#include "shader_recompiler/ir/value.h"
namespace Shader::Optimization {
namespace {
struct FlagTag {
auto operator<=>(const FlagTag&) const noexcept = default;
};
struct ZeroFlagTag : FlagTag {};
struct SignFlagTag : FlagTag {};
struct CarryFlagTag : FlagTag {};
struct OverflowFlagTag : FlagTag {};
struct VccFlagTag : FlagTag {};
struct GotoVariable : FlagTag {
GotoVariable() = default;
explicit GotoVariable(u32 index_) : index{index_} {}
auto operator<=>(const GotoVariable&) const noexcept = default;
u32 index;
};
using Variant = std::variant<IR::ScalarReg, IR::VectorReg, ZeroFlagTag, SignFlagTag, CarryFlagTag,
OverflowFlagTag, GotoVariable, VccFlagTag>;
using ValueMap = std::unordered_map<IR::Block*, IR::Value>;
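// Tracks the current SSA definition of each variable in each basic block.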
struct DefTable {
const IR::Value& Def(IR::Block* block, IR::ScalarReg variable) {
return block->ssa_sreg_values[RegIndex(variable)];
}
void SetDef(IR::Block* block, IR::ScalarReg variable, const IR::Value& value) {
block->ssa_sreg_values[RegIndex(variable)] = value;
}
const IR::Value& Def(IR::Block* block, IR::VectorReg variable) {
return block->ssa_vreg_values[RegIndex(variable)];
}
void SetDef(IR::Block* block, IR::VectorReg variable, const IR::Value& value) {
block->ssa_vreg_values[RegIndex(variable)] = value;
}
const IR::Value& Def(IR::Block* block, GotoVariable variable) {
return goto_vars[variable.index][block];
}
void SetDef(IR::Block* block, GotoVariable variable, const IR::Value& value) {
goto_vars[variable.index].insert_or_assign(block, value);
}
const IR::Value& Def(IR::Block* block, ZeroFlagTag) {
return zero_flag[block];
}
void SetDef(IR::Block* block, ZeroFlagTag, const IR::Value& value) {
zero_flag.insert_or_assign(block, value);
}
const IR::Value& Def(IR::Block* block, SignFlagTag) {
return sign_flag[block];
}
void SetDef(IR::Block* block, SignFlagTag, const IR::Value& value) {
sign_flag.insert_or_assign(block, value);
}
const IR::Value& Def(IR::Block* block, CarryFlagTag) {
return carry_flag[block];
}
void SetDef(IR::Block* block, CarryFlagTag, const IR::Value& value) {
carry_flag.insert_or_assign(block, value);
}
const IR::Value& Def(IR::Block* block, OverflowFlagTag) {
return overflow_flag[block];
}
void SetDef(IR::Block* block, OverflowFlagTag, const IR::Value& value) {
overflow_flag.insert_or_assign(block, value);
}
const IR::Value& Def(IR::Block* block, VccFlagTag) {
return vcc_flag[block];
}
void SetDef(IR::Block* block, VccFlagTag, const IR::Value& value) {
vcc_flag.insert_or_assign(block, value);
}
std::unordered_map<u32, ValueMap> goto_vars;
ValueMap indirect_branch_var;
ValueMap zero_flag;
ValueMap sign_flag;
ValueMap carry_flag;
ValueMap overflow_flag;
ValueMap vcc_flag;
};
IR::Opcode UndefOpcode(IR::ScalarReg) noexcept {
return IR::Opcode::UndefU32;
}
IR::Opcode UndefOpcode(IR::VectorReg) noexcept {
return IR::Opcode::UndefU32;
}
IR::Opcode UndefOpcode(const FlagTag&) noexcept {
return IR::Opcode::UndefU1;
}
enum class Status {
Start,
SetValue,
PreparePhiArgument,
PushPhiArgument,
};
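// Per-block state of the iterative ReadVariable traversal below.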
template <typename Type>
struct ReadState {
ReadState(IR::Block* block_) : block{block_} {}
ReadState() = default;
IR::Block* block{};
IR::Value result{};
IR::Inst* phi{};
IR::Block* const* pred_it{};
IR::Block* const* pred_end{};
Status pc{Status::Start};
};
class Pass {
public:
template <typename Type>
void WriteVariable(Type variable, IR::Block* block, const IR::Value& value) {
current_def.SetDef(block, variable, value);
}
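// Iterative implementation of readVariable from Braun et al., using an explicit stack instead of
// recursion to cope with large control flow graphs.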
template <typename Type>
IR::Value ReadVariable(Type variable, IR::Block* root_block) {
boost::container::small_vector<ReadState<Type>, 64> stack{
ReadState<Type>(nullptr),
ReadState<Type>(root_block),
};
const auto prepare_phi_operand = [&] {
if (stack.back().pred_it == stack.back().pred_end) {
IR::Inst* const phi{stack.back().phi};
IR::Block* const block{stack.back().block};
const IR::Value result{TryRemoveTrivialPhi(*phi, block, UndefOpcode(variable))};
stack.pop_back();
stack.back().result = result;
WriteVariable(variable, block, result);
} else {
IR::Block* const imm_pred{*stack.back().pred_it};
stack.back().pc = Status::PushPhiArgument;
stack.emplace_back(imm_pred);
}
};
do {
IR::Block* const block{stack.back().block};
switch (stack.back().pc) {
case Status::Start: {
if (const IR::Value& def = current_def.Def(block, variable); !def.IsEmpty()) {
stack.back().result = def;
} else if (!block->IsSsaSealed()) {
// Incomplete CFG
IR::Inst* phi{&*block->PrependNewInst(block->begin(), IR::Opcode::Phi)};
phi->SetFlags(IR::TypeOf(UndefOpcode(variable)));
incomplete_phis[block].insert_or_assign(variable, phi);
stack.back().result = IR::Value{&*phi};
} else if (const std::span imm_preds = block->ImmPredecessors();
imm_preds.size() == 1) {
// Optimize the common case of one predecessor: no phi needed
stack.back().pc = Status::SetValue;
stack.emplace_back(imm_preds.front());
break;
} else {
// Break potential cycles with operandless phi
IR::Inst* const phi{&*block->PrependNewInst(block->begin(), IR::Opcode::Phi)};
phi->SetFlags(IR::TypeOf(UndefOpcode(variable)));
WriteVariable(variable, block, IR::Value{phi});
stack.back().phi = phi;
stack.back().pred_it = imm_preds.data();
stack.back().pred_end = imm_preds.data() + imm_preds.size();
prepare_phi_operand();
break;
}
}
[[fallthrough]];
case Status::SetValue: {
const IR::Value result{stack.back().result};
WriteVariable(variable, block, result);
stack.pop_back();
stack.back().result = result;
break;
}
case Status::PushPhiArgument: {
IR::Inst* const phi{stack.back().phi};
phi->AddPhiOperand(*stack.back().pred_it, stack.back().result);
++stack.back().pred_it;
}
[[fallthrough]];
case Status::PreparePhiArgument:
prepare_phi_operand();
break;
}
} while (stack.size() > 1);
return stack.back().result;
}
void SealBlock(IR::Block* block) {
const auto it{incomplete_phis.find(block)};
if (it != incomplete_phis.end()) {
for (auto& pair : it->second) {
auto& variant{pair.first};
auto& phi{pair.second};
std::visit([&](auto& variable) { AddPhiOperands(variable, *phi, block); }, variant);
}
}
block->SsaSeal();
}
private:
template <typename Type>
IR::Value AddPhiOperands(Type variable, IR::Inst& phi, IR::Block* block) {
for (IR::Block* const imm_pred : block->ImmPredecessors()) {
phi.AddPhiOperand(imm_pred, ReadVariable(variable, imm_pred));
}
return TryRemoveTrivialPhi(phi, block, UndefOpcode(variable));
}
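// A phi that merges a single unique value (besides references to itself) is trivial and is
// replaced by that value; for example, a phi whose arguments are all %5 resolves to %5.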
IR::Value TryRemoveTrivialPhi(IR::Inst& phi, IR::Block* block, IR::Opcode undef_opcode) {
IR::Value same;
const size_t num_args{phi.NumArgs()};
for (size_t arg_index = 0; arg_index < num_args; ++arg_index) {
const IR::Value& op{phi.Arg(arg_index)};
if (op.Resolve() == same.Resolve() || op == IR::Value{&phi}) {
// Unique value or self-reference
continue;
}
if (!same.IsEmpty()) {
// The phi merges at least two values: not trivial
return IR::Value{&phi};
}
same = op;
}
// Remove the phi node from the block; it will be reinserted below
IR::Block::InstructionList& list{block->Instructions()};
list.erase(IR::Block::InstructionList::s_iterator_to(phi));
// Find the first non-phi instruction and use it as an insertion point
IR::Block::iterator reinsert_point{std::ranges::find_if_not(list, IR::IsPhi)};
if (same.IsEmpty()) {
// The phi is unreachable or in the start block
// Insert an undefined instruction and make it the phi node replacement
// The "phi" node reinsertion point is specified after this instruction
reinsert_point = block->PrependNewInst(reinsert_point, undef_opcode);
same = IR::Value{&*reinsert_point};
++reinsert_point;
}
// Reinsert the phi node and reroute all its uses to the "same" value
list.insert(reinsert_point, phi);
phi.ReplaceUsesWith(same);
// TODO: Try to recursively remove all phi users, which might have become trivial
return same;
}
std::unordered_map<IR::Block*, std::map<Variant, IR::Inst*>> incomplete_phis;
DefTable current_def;
};
void VisitInst(Pass& pass, IR::Block* block, IR::Inst& inst) {
const IR::Opcode opcode{inst.GetOpcode()};
switch (opcode) {
case IR::Opcode::SetScalarRegister: {
const IR::ScalarReg reg{inst.Arg(0).ScalarReg()};
pass.WriteVariable(reg, block, inst.Arg(1));
break;
}
case IR::Opcode::SetVectorRegister: {
const IR::VectorReg reg{inst.Arg(0).VectorReg()};
pass.WriteVariable(reg, block, inst.Arg(1));
break;
}
case IR::Opcode::SetGotoVariable:
pass.WriteVariable(GotoVariable{inst.Arg(0).U32()}, block, inst.Arg(1));
break;
case IR::Opcode::SetVcc:
pass.WriteVariable(VccFlagTag{}, block, inst.Arg(0));
break;
// case IR::Opcode::SetSFlag:
// pass.WriteVariable(SignFlagTag{}, block, inst.Arg(0));
// break;
// case IR::Opcode::SetCFlag:
// pass.WriteVariable(CarryFlagTag{}, block, inst.Arg(0));
// break;
// case IR::Opcode::SetOFlag:
// pass.WriteVariable(OverflowFlagTag{}, block, inst.Arg(0));
// break;
case IR::Opcode::GetScalarRegister: {
const IR::ScalarReg reg{inst.Arg(0).ScalarReg()};
inst.ReplaceUsesWith(pass.ReadVariable(reg, block));
break;
}
case IR::Opcode::GetVectorRegister: {
const IR::VectorReg reg{inst.Arg(0).VectorReg()};
inst.ReplaceUsesWith(pass.ReadVariable(reg, block));
break;
}
case IR::Opcode::GetGotoVariable:
inst.ReplaceUsesWith(pass.ReadVariable(GotoVariable{inst.Arg(0).U32()}, block));
break;
case IR::Opcode::GetVcc:
inst.ReplaceUsesWith(pass.ReadVariable(VccFlagTag{}, block));
break;
// case IR::Opcode::GetSFlag:
// inst.ReplaceUsesWith(pass.ReadVariable(SignFlagTag{}, block));
// break;
// case IR::Opcode::GetCFlag:
// inst.ReplaceUsesWith(pass.ReadVariable(CarryFlagTag{}, block));
// break;
// case IR::Opcode::GetOFlag:
// inst.ReplaceUsesWith(pass.ReadVariable(OverflowFlagTag{}, block));
// break;
default:
break;
}
}
void VisitBlock(Pass& pass, IR::Block* block) {
for (IR::Inst& inst : block->Instructions()) {
VisitInst(pass, block, inst);
}
pass.SealBlock(block);
}
} // Anonymous namespace
void SsaRewritePass(IR::BlockList& program) {
Pass pass;
const auto end{program.rend()};
for (auto block = program.rbegin(); block != end; ++block) {
VisitBlock(pass, *block);
}
}
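// Resolves chains of Identity instructions so arguments point directly at their defining
// instruction, then erases the dead Identity and Void instructions.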
void IdentityRemovalPass(IR::BlockList& program) {
std::vector<IR::Inst*> to_invalidate;
for (IR::Block* const block : program) {
for (auto inst = block->begin(); inst != block->end();) {
const size_t num_args{inst->NumArgs()};
for (size_t i = 0; i < num_args; ++i) {
IR::Value arg;
while ((arg = inst->Arg(i)).IsIdentity()) {
inst->SetArg(i, arg.Inst()->Arg(0));
}
}
if (inst->GetOpcode() == IR::Opcode::Identity ||
inst->GetOpcode() == IR::Opcode::Void) {
to_invalidate.push_back(&*inst);
inst = block->Instructions().erase(inst);
} else {
++inst;
}
}
}
for (IR::Inst* const inst : to_invalidate) {
inst->Invalidate();
}
}
void DeadCodeEliminationPass(IR::BlockList& program) {
// We iterate over the instructions in reverse order.
// This is because removing an instruction reduces the number of uses for earlier instructions.
for (IR::Block* const block : program) {
auto it{block->end()};
while (it != block->begin()) {
--it;
if (!it->HasUses() && !it->MayHaveSideEffects()) {
it->Invalidate();
it = block->Instructions().erase(it);
}
}
}
}
} // namespace Shader::Optimization

View File

@ -0,0 +1,42 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <boost/container/flat_set.hpp>
#include <boost/container/small_vector.hpp>
#include "shader_recompiler/ir/post_order.h"
namespace Shader::IR {
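// Builds the post-order block list with an iterative depth-first traversal of the CFG.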
BlockList PostOrder(const AbstractSyntaxNode& root) {
boost::container::small_vector<Block*, 16> block_stack;
boost::container::flat_set<Block*> visited;
BlockList post_order_blocks;
if (root.type != AbstractSyntaxNode::Type::Block) {
throw LogicError("First node in abstract syntax list root is not a block");
}
Block* const first_block{root.data.block};
visited.insert(first_block);
block_stack.push_back(first_block);
while (!block_stack.empty()) {
Block* const block = block_stack.back();
const auto visit = [&](Block* branch) {
if (!visited.insert(branch).second) {
return false;
}
// Calling push_back twice is faster than insert on MSVC
block_stack.push_back(block);
block_stack.push_back(branch);
return true;
};
block_stack.pop_back();
if (std::ranges::none_of(block->ImmSuccessors(), visit)) {
post_order_blocks.push_back(block);
}
}
return post_order_blocks;
}
} // namespace Shader::IR

View File

@ -0,0 +1,13 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "shader_recompiler/ir/abstract_syntax_list.h"
#include "shader_recompiler/ir/basic_block.h"
namespace Shader::IR {
BlockList PostOrder(const AbstractSyntaxNode& root);
} // namespace Shader::IR

View File

@ -0,0 +1,31 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <map>
#include <string>
#include <fmt/format.h>
#include "shader_recompiler/ir/basic_block.h"
#include "shader_recompiler/ir/program.h"
#include "shader_recompiler/ir/value.h"
namespace Shader::IR {
std::string DumpProgram(const Program& program) {
size_t index{0};
std::map<const IR::Inst*, size_t> inst_to_index;
std::map<const IR::Block*, size_t> block_to_index;
for (const IR::Block* const block : program.blocks) {
block_to_index.emplace(block, index);
++index;
}
std::string ret;
for (const auto& block : program.blocks) {
ret += IR::DumpBlock(*block, block_to_index, inst_to_index, index) + '\n';
}
return ret;
}
} // namespace Shader::IR

View File

@ -0,0 +1,28 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <string>
#include "shader_recompiler/frontend/instruction.h"
#include "shader_recompiler/ir/abstract_syntax_list.h"
#include "shader_recompiler/ir/basic_block.h"
namespace Shader {
enum class Stage : u32;
}
namespace Shader::IR {
struct Program {
AbstractSyntaxList syntax_list;
BlockList blocks;
BlockList post_order_blocks;
std::vector<Gcn::GcnInst> ins_list;
Stage stage;
};
[[nodiscard]] std::string DumpProgram(const Program& program);
} // namespace Shader::IR

View File

@ -0,0 +1,471 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/bit_field.h"
#include "common/types.h"
#include "shader_recompiler/exception.h"
namespace Shader::IR {
enum class FpRoundMode : u32 {
NearestEven = 0,
PlusInf = 1,
MinInf = 2,
ToZero = 3,
};
enum class FpDenormMode : u32 {
InOutFlush = 0,
InAllowOutFlush = 1,
InFlushOutAllow = 2,
InOutAllow = 3,
};
union Mode {
BitField<0, 4, FpRoundMode> fp_round;
BitField<4, 2, FpDenormMode> fp_denorm_single;
BitField<6, 2, FpDenormMode> fp_denorm_double;
BitField<8, 1, u32> dx10_clamp;
};
union TextureInstInfo {
u32 raw;
BitField<0, 16, u32> descriptor_index;
BitField<19, 1, u32> is_depth;
BitField<20, 1, u32> has_bias;
BitField<21, 1, u32> has_lod_clamp;
BitField<22, 1, u32> relaxed_precision;
BitField<23, 2, u32> gather_component;
BitField<25, 2, u32> num_derivatives;
};
enum class ScalarReg : u32 {
S0,
S1,
S2,
S3,
S4,
S5,
S6,
S7,
S8,
S9,
S10,
S11,
S12,
S13,
S14,
S15,
S16,
S17,
S18,
S19,
S20,
S21,
S22,
S23,
S24,
S25,
S26,
S27,
S28,
S29,
S30,
S31,
S32,
S33,
S34,
S35,
S36,
S37,
S38,
S39,
S40,
S41,
S42,
S43,
S44,
S45,
S46,
S47,
S48,
S49,
S50,
S51,
S52,
S53,
S54,
S55,
S56,
S57,
S58,
S59,
S60,
S61,
S62,
S63,
S64,
S65,
S66,
S67,
S68,
S69,
S70,
S71,
S72,
S73,
S74,
S75,
S76,
S77,
S78,
S79,
S80,
S81,
S82,
S83,
S84,
S85,
S86,
S87,
S88,
S89,
S90,
S91,
S92,
S93,
S94,
S95,
S96,
S97,
S98,
S99,
S100,
S101,
S102,
S103,
Max,
};
static constexpr size_t NumScalarRegs = static_cast<size_t>(ScalarReg::Max);
enum class VectorReg : u32 {
V0,
V1,
V2,
V3,
V4,
V5,
V6,
V7,
V8,
V9,
V10,
V11,
V12,
V13,
V14,
V15,
V16,
V17,
V18,
V19,
V20,
V21,
V22,
V23,
V24,
V25,
V26,
V27,
V28,
V29,
V30,
V31,
V32,
V33,
V34,
V35,
V36,
V37,
V38,
V39,
V40,
V41,
V42,
V43,
V44,
V45,
V46,
V47,
V48,
V49,
V50,
V51,
V52,
V53,
V54,
V55,
V56,
V57,
V58,
V59,
V60,
V61,
V62,
V63,
V64,
V65,
V66,
V67,
V68,
V69,
V70,
V71,
V72,
V73,
V74,
V75,
V76,
V77,
V78,
V79,
V80,
V81,
V82,
V83,
V84,
V85,
V86,
V87,
V88,
V89,
V90,
V91,
V92,
V93,
V94,
V95,
V96,
V97,
V98,
V99,
V100,
V101,
V102,
V103,
V104,
V105,
V106,
V107,
V108,
V109,
V110,
V111,
V112,
V113,
V114,
V115,
V116,
V117,
V118,
V119,
V120,
V121,
V122,
V123,
V124,
V125,
V126,
V127,
V128,
V129,
V130,
V131,
V132,
V133,
V134,
V135,
V136,
V137,
V138,
V139,
V140,
V141,
V142,
V143,
V144,
V145,
V146,
V147,
V148,
V149,
V150,
V151,
V152,
V153,
V154,
V155,
V156,
V157,
V158,
V159,
V160,
V161,
V162,
V163,
V164,
V165,
V166,
V167,
V168,
V169,
V170,
V171,
V172,
V173,
V174,
V175,
V176,
V177,
V178,
V179,
V180,
V181,
V182,
V183,
V184,
V185,
V186,
V187,
V188,
V189,
V190,
V191,
V192,
V193,
V194,
V195,
V196,
V197,
V198,
V199,
V200,
V201,
V202,
V203,
V204,
V205,
V206,
V207,
V208,
V209,
V210,
V211,
V212,
V213,
V214,
V215,
V216,
V217,
V218,
V219,
V220,
V221,
V222,
V223,
V224,
V225,
V226,
V227,
V228,
V229,
V230,
V231,
V232,
V233,
V234,
V235,
V236,
V237,
V238,
V239,
V240,
V241,
V242,
V243,
V244,
V245,
V246,
V247,
V248,
V249,
V250,
V251,
V252,
V253,
V254,
V255,
Max,
};
static constexpr size_t NumVectorRegs = static_cast<size_t>(VectorReg::Max);
template <class T>
concept RegT = std::is_same_v<T, ScalarReg> || std::is_same_v<T, VectorReg>;
template <RegT Reg>
[[nodiscard]] constexpr Reg operator+(Reg reg, int num) {
const int result{static_cast<int>(reg) + num};
if (result >= static_cast<int>(Reg::Max)) {
throw LogicError("Overflow on register arithmetic");
}
if (result < 0) {
throw LogicError("Underflow on register arithmetic");
}
return static_cast<Reg>(result);
}
template <RegT Reg>
[[nodiscard]] constexpr Reg operator-(Reg reg, int num) {
return reg + (-num);
}
template <RegT Reg>
constexpr Reg operator++(Reg& reg) {
reg = reg + 1;
return reg;
}
template <RegT Reg>
constexpr Reg operator++(Reg& reg, int) {
const Reg copy{reg};
reg = reg + 1;
return copy;
}
template <RegT Reg>
[[nodiscard]] constexpr size_t RegIndex(Reg reg) noexcept {
return static_cast<size_t>(reg);
}
} // namespace Shader::IR
template <>
struct fmt::formatter<Shader::IR::ScalarReg> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
auto format(Shader::IR::ScalarReg reg, format_context& ctx) const {
return fmt::format_to(ctx.out(), "SGPR{}", static_cast<u32>(reg));
}
};
template <>
struct fmt::formatter<Shader::IR::VectorReg> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
auto format(Shader::IR::VectorReg reg, format_context& ctx) const {
return fmt::format_to(ctx.out(), "VGPR{}", static_cast<u32>(reg));
}
};
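A minimal usage sketch of the register helpers declared above, assuming reg.h and fmt are available; operator+ is bounds-checked and throws LogicError past Reg::Max:
#include <string>
#include <fmt/format.h>
#include "shader_recompiler/ir/reg.h"

std::string RegExample() {
    using namespace Shader::IR;
    const VectorReg base = VectorReg::V4;
    const VectorReg next = base + 2; // VectorReg::V6
    // VectorReg::V255 + 1 would throw LogicError("Overflow on register arithmetic").
    // The fmt formatters above print the hardware-style register names.
    return fmt::format("{} -> {}", base, next); // "VGPR4 -> VGPR6"
}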

View File

@ -0,0 +1,36 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <array>
#include <string>
#include "shader_recompiler/ir/type.h"
namespace Shader::IR {
std::string NameOf(Type type) {
static constexpr std::array names{
"Opaque", "Label", "Reg", "Pred", "Attribute", "U1", "U8", "U16", "U32",
"U64", "F16", "F32", "F64", "U32x2", "U32x3", "U32x4", "F16x2", "F16x3",
"F16x4", "F32x2", "F32x3", "F32x4", "F64x2", "F64x3", "F64x4",
};
const size_t bits{static_cast<size_t>(type)};
if (bits == 0) {
return "Void";
}
std::string result;
for (size_t i = 0; i < names.size(); i++) {
if ((bits & (size_t{1} << i)) != 0) {
if (!result.empty()) {
result += '|';
}
result += names[i];
}
}
return result;
}
bool AreTypesCompatible(Type lhs, Type rhs) noexcept {
return lhs == rhs || lhs == Type::Opaque || rhs == Type::Opaque;
}
} // namespace Shader::IR
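A brief sketch of how NameOf renders flag unions, assuming the name table matches the Type bit layout declared in type.h:
#include <cassert>
#include "shader_recompiler/ir/type.h"

void TypeNameExample() {
    using Shader::IR::Type;
    assert(Shader::IR::NameOf(Type::Void) == "Void");
    assert(Shader::IR::NameOf(Type::U32 | Type::F32) == "U32|F32");
    // Opaque is compatible with everything; concrete types only with themselves.
    assert(Shader::IR::AreTypesCompatible(Type::Opaque, Type::F64));
    assert(!Shader::IR::AreTypesCompatible(Type::U32, Type::F32));
}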

View File

@ -0,0 +1,56 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <string>
#include <fmt/format.h>
#include "common/enum.h"
namespace Shader::IR {
enum class Type {
Void = 0,
Opaque = 1 << 0,
ScalarReg = 1 << 1,
VectorReg = 1 << 2,
Attribute = 1 << 3,
SystemValue = 1 << 4,
U1 = 1 << 5,
U8 = 1 << 6,
U16 = 1 << 7,
U32 = 1 << 8,
U64 = 1 << 9,
F16 = 1 << 10,
F32 = 1 << 11,
F64 = 1 << 12,
U32x2 = 1 << 13,
U32x3 = 1 << 14,
U32x4 = 1 << 15,
F16x2 = 1 << 16,
F16x3 = 1 << 17,
F16x4 = 1 << 18,
F32x2 = 1 << 19,
F32x3 = 1 << 20,
F32x4 = 1 << 21,
F64x2 = 1 << 22,
F64x3 = 1 << 23,
F64x4 = 1 << 24,
};
DECLARE_ENUM_FLAG_OPERATORS(Type)
[[nodiscard]] std::string NameOf(Type type);
[[nodiscard]] bool AreTypesCompatible(Type lhs, Type rhs) noexcept;
} // namespace Shader::IR
template <>
struct fmt::formatter<Shader::IR::Type> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
auto format(Shader::IR::Type type, format_context& ctx) const {
return fmt::format_to(ctx.out(), "{}", Shader::IR::NameOf(type));
}
};

View File

@ -0,0 +1,93 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/ir/value.h"
namespace Shader::IR {
Value::Value(IR::Inst* value) noexcept : type{Type::Opaque}, inst{value} {}
Value::Value(IR::ScalarReg reg) noexcept : type{Type::ScalarReg}, sreg{reg} {}
Value::Value(IR::VectorReg reg) noexcept : type{Type::VectorReg}, vreg{reg} {}
Value::Value(IR::Attribute value) noexcept : type{Type::Attribute}, attribute{value} {}
Value::Value(bool value) noexcept : type{Type::U1}, imm_u1{value} {}
Value::Value(u8 value) noexcept : type{Type::U8}, imm_u8{value} {}
Value::Value(u16 value) noexcept : type{Type::U16}, imm_u16{value} {}
Value::Value(u32 value) noexcept : type{Type::U32}, imm_u32{value} {}
Value::Value(f32 value) noexcept : type{Type::F32}, imm_f32{value} {}
Value::Value(u64 value) noexcept : type{Type::U64}, imm_u64{value} {}
Value::Value(f64 value) noexcept : type{Type::F64}, imm_f64{value} {}
IR::Type Value::Type() const noexcept {
if (IsPhi()) {
// The type of a phi node is stored in its flags
return inst->Flags<IR::Type>();
}
if (IsIdentity()) {
return inst->Arg(0).Type();
}
if (type == Type::Opaque) {
return inst->Type();
}
return type;
}
bool Value::operator==(const Value& other) const {
if (type != other.type) {
return false;
}
switch (type) {
case Type::Void:
return true;
case Type::Opaque:
return inst == other.inst;
case Type::ScalarReg:
return sreg == other.sreg;
case Type::VectorReg:
return vreg == other.vreg;
case Type::Attribute:
return attribute == other.attribute;
case Type::U1:
return imm_u1 == other.imm_u1;
case Type::U8:
return imm_u8 == other.imm_u8;
case Type::U16:
case Type::F16:
return imm_u16 == other.imm_u16;
case Type::U32:
case Type::F32:
return imm_u32 == other.imm_u32;
case Type::U64:
case Type::F64:
return imm_u64 == other.imm_u64;
case Type::U32x2:
case Type::U32x3:
case Type::U32x4:
case Type::F16x2:
case Type::F16x3:
case Type::F16x4:
case Type::F32x2:
case Type::F32x3:
case Type::F32x4:
case Type::F64x2:
case Type::F64x3:
case Type::F64x4:
break;
}
throw LogicError("Invalid type {}", type);
}
bool Value::operator!=(const Value& other) const {
return !operator==(other);
}
} // namespace Shader::IR

View File

@ -0,0 +1,353 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <bit>
#include <cstring>
#include <type_traits>
#include <utility>
#include <boost/container/small_vector.hpp>
#include <boost/intrusive/list.hpp>
#include "common/assert.h"
#include "shader_recompiler/exception.h"
#include "shader_recompiler/ir/attribute.h"
#include "shader_recompiler/ir/opcodes.h"
#include "shader_recompiler/ir/reg.h"
#include "shader_recompiler/ir/type.h"
namespace Shader::IR {
class Block;
class Inst;
struct AssociatedInsts;
class Value {
public:
Value() noexcept = default;
explicit Value(IR::Inst* value) noexcept;
explicit Value(IR::ScalarReg reg) noexcept;
explicit Value(IR::VectorReg reg) noexcept;
explicit Value(IR::Attribute value) noexcept;
explicit Value(bool value) noexcept;
explicit Value(u8 value) noexcept;
explicit Value(u16 value) noexcept;
explicit Value(u32 value) noexcept;
explicit Value(f32 value) noexcept;
explicit Value(u64 value) noexcept;
explicit Value(f64 value) noexcept;
[[nodiscard]] bool IsIdentity() const noexcept;
[[nodiscard]] bool IsPhi() const noexcept;
[[nodiscard]] bool IsEmpty() const noexcept;
[[nodiscard]] bool IsImmediate() const noexcept;
[[nodiscard]] IR::Type Type() const noexcept;
[[nodiscard]] IR::Inst* Inst() const;
[[nodiscard]] IR::Inst* InstRecursive() const;
[[nodiscard]] IR::Inst* TryInstRecursive() const;
[[nodiscard]] IR::Value Resolve() const;
[[nodiscard]] IR::ScalarReg ScalarReg() const;
[[nodiscard]] IR::VectorReg VectorReg() const;
[[nodiscard]] IR::Attribute Attribute() const;
[[nodiscard]] bool U1() const;
[[nodiscard]] u8 U8() const;
[[nodiscard]] u16 U16() const;
[[nodiscard]] u32 U32() const;
[[nodiscard]] f32 F32() const;
[[nodiscard]] u64 U64() const;
[[nodiscard]] f64 F64() const;
[[nodiscard]] bool operator==(const Value& other) const;
[[nodiscard]] bool operator!=(const Value& other) const;
private:
IR::Type type{};
union {
IR::Inst* inst{};
IR::ScalarReg sreg;
IR::VectorReg vreg;
IR::Attribute attribute;
bool imm_u1;
u8 imm_u8;
u16 imm_u16;
u32 imm_u32;
f32 imm_f32;
u64 imm_u64;
f64 imm_f64;
};
};
static_assert(static_cast<u32>(IR::Type::Void) == 0, "memset relies on IR::Type being zero");
static_assert(std::is_trivially_copyable_v<Value>);
template <IR::Type type_>
class TypedValue : public Value {
public:
TypedValue() = default;
template <IR::Type other_type>
requires((other_type & type_) != IR::Type::Void)
explicit(false) TypedValue(const TypedValue<other_type>& value) : Value(value) {}
explicit TypedValue(const Value& value) : Value(value) {
if ((value.Type() & type_) == IR::Type::Void) {
throw InvalidArgument("Incompatible types {} and {}", type_, value.Type());
}
}
explicit TypedValue(IR::Inst* inst_) : TypedValue(Value(inst_)) {}
};
class Inst : public boost::intrusive::list_base_hook<> {
public:
explicit Inst(IR::Opcode op_, u32 flags_) noexcept;
explicit Inst(const Inst& base);
~Inst();
Inst& operator=(const Inst&) = delete;
Inst& operator=(Inst&&) = delete;
Inst(Inst&&) = delete;
/// Get the number of uses this instruction has.
[[nodiscard]] int UseCount() const noexcept {
return use_count;
}
/// Determines whether this instruction has uses or not.
[[nodiscard]] bool HasUses() const noexcept {
return use_count > 0;
}
/// Get the opcode this microinstruction represents.
[[nodiscard]] IR::Opcode GetOpcode() const noexcept {
return op;
}
/// Determines whether or not this instruction may have side effects.
[[nodiscard]] bool MayHaveSideEffects() const noexcept;
/// Determines if all arguments of this instruction are immediates.
[[nodiscard]] bool AreAllArgsImmediates() const;
/// Get the type this instruction returns.
[[nodiscard]] IR::Type Type() const;
/// Get the number of arguments this instruction has.
[[nodiscard]] size_t NumArgs() const {
return op == IR::Opcode::Phi ? phi_args.size() : NumArgsOf(op);
}
/// Get the value of a given argument index.
[[nodiscard]] Value Arg(size_t index) const noexcept {
if (op == IR::Opcode::Phi) {
return phi_args[index].second;
} else {
return args[index];
}
}
/// Set the value of a given argument index.
void SetArg(size_t index, Value value);
/// Get a pointer to the block of a phi argument.
[[nodiscard]] Block* PhiBlock(size_t index) const;
/// Add phi operand to a phi instruction.
void AddPhiOperand(Block* predecessor, const Value& value);
void Invalidate();
void ClearArgs();
void ReplaceUsesWith(Value replacement);
void ReplaceOpcode(IR::Opcode opcode);
template <typename FlagsType>
requires(sizeof(FlagsType) <= sizeof(u32) && std::is_trivially_copyable_v<FlagsType>)
[[nodiscard]] FlagsType Flags() const noexcept {
FlagsType ret;
std::memcpy(reinterpret_cast<char*>(&ret), &flags, sizeof(ret));
return ret;
}
template <typename FlagsType>
requires(sizeof(FlagsType) <= sizeof(u32) && std::is_trivially_copyable_v<FlagsType>)
void SetFlags(FlagsType value) noexcept {
std::memcpy(&flags, &value, sizeof(value));
}
/// Intrusively store the host definition of this instruction.
template <typename DefinitionType>
void SetDefinition(DefinitionType def) {
definition = std::bit_cast<u32>(def);
}
/// Return the intrusively stored host definition of this instruction.
template <typename DefinitionType>
[[nodiscard]] DefinitionType Definition() const noexcept {
return std::bit_cast<DefinitionType>(definition);
}
private:
struct NonTriviallyDummy {
NonTriviallyDummy() noexcept {}
};
void Use(const Value& value);
void UndoUse(const Value& value);
IR::Opcode op{};
int use_count{};
u32 flags{};
u32 definition{};
union {
NonTriviallyDummy dummy{};
boost::container::small_vector<std::pair<Block*, Value>, 2> phi_args;
std::array<Value, 5> args;
};
};
static_assert(sizeof(Inst) <= 128, "Inst size unintentionally increased");
using U1 = TypedValue<Type::U1>;
using U8 = TypedValue<Type::U8>;
using U16 = TypedValue<Type::U16>;
using U32 = TypedValue<Type::U32>;
using U64 = TypedValue<Type::U64>;
using F16 = TypedValue<Type::F16>;
using F32 = TypedValue<Type::F32>;
using F64 = TypedValue<Type::F64>;
using U32F32 = TypedValue<Type::U32 | Type::F32>;
using U32U64 = TypedValue<Type::U32 | Type::U64>;
using F32F64 = TypedValue<Type::F32 | Type::F64>;
using F16F32F64 = TypedValue<Type::F16 | Type::F32 | Type::F64>;
using UAny = TypedValue<Type::U8 | Type::U16 | Type::U32 | Type::U64>;
inline bool Value::IsIdentity() const noexcept {
return type == Type::Opaque && inst->GetOpcode() == Opcode::Identity;
}
inline bool Value::IsPhi() const noexcept {
return type == Type::Opaque && inst->GetOpcode() == Opcode::Phi;
}
inline bool Value::IsEmpty() const noexcept {
return type == Type::Void;
}
inline bool Value::IsImmediate() const noexcept {
IR::Type current_type{type};
const IR::Inst* current_inst{inst};
while (current_type == Type::Opaque && current_inst->GetOpcode() == Opcode::Identity) {
const Value& arg{current_inst->Arg(0)};
current_type = arg.type;
current_inst = arg.inst;
}
return current_type != Type::Opaque;
}
inline IR::Inst* Value::Inst() const {
DEBUG_ASSERT(type == Type::Opaque);
return inst;
}
inline IR::Inst* Value::InstRecursive() const {
DEBUG_ASSERT(type == Type::Opaque);
if (IsIdentity()) {
return inst->Arg(0).InstRecursive();
}
return inst;
}
inline IR::Inst* Value::TryInstRecursive() const {
if (IsIdentity()) {
return inst->Arg(0).TryInstRecursive();
}
return type == Type::Opaque ? inst : nullptr;
}
inline IR::Value Value::Resolve() const {
if (IsIdentity()) {
return inst->Arg(0).Resolve();
}
return *this;
}
inline IR::ScalarReg Value::ScalarReg() const {
DEBUG_ASSERT(type == Type::ScalarReg);
return sreg;
}
inline IR::VectorReg Value::VectorReg() const {
DEBUG_ASSERT(type == Type::VectorReg);
return vreg;
}
inline IR::Attribute Value::Attribute() const {
DEBUG_ASSERT(type == Type::Attribute);
return attribute;
}
inline bool Value::U1() const {
if (IsIdentity()) {
return inst->Arg(0).U1();
}
DEBUG_ASSERT(type == Type::U1);
return imm_u1;
}
inline u8 Value::U8() const {
if (IsIdentity()) {
return inst->Arg(0).U8();
}
DEBUG_ASSERT(type == Type::U8);
return imm_u8;
}
inline u16 Value::U16() const {
if (IsIdentity()) {
return inst->Arg(0).U16();
}
DEBUG_ASSERT(type == Type::U16);
return imm_u16;
}
inline u32 Value::U32() const {
if (IsIdentity()) {
return inst->Arg(0).U32();
}
DEBUG_ASSERT(type == Type::U32);
return imm_u32;
}
inline f32 Value::F32() const {
if (IsIdentity()) {
return inst->Arg(0).F32();
}
DEBUG_ASSERT(type == Type::F32);
return imm_f32;
}
inline u64 Value::U64() const {
if (IsIdentity()) {
return inst->Arg(0).U64();
}
DEBUG_ASSERT(type == Type::U64);
return imm_u64;
}
inline f64 Value::F64() const {
if (IsIdentity()) {
return inst->Arg(0).F64();
}
DEBUG_ASSERT(type == Type::F64);
return imm_f64;
}
[[nodiscard]] inline bool IsPhi(const Inst& inst) {
return inst.GetOpcode() == Opcode::Phi;
}
} // namespace Shader::IR
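A short sketch of immediate Value construction and the typed accessors above, assuming value.h compiles stand-alone:
#include "shader_recompiler/ir/value.h"

void ValueExample() {
    using namespace Shader::IR;
    const Value imm{u32{42}};
    // Immediates report their concrete type; no Inst indirection is involved.
    const bool is_imm = imm.IsImmediate(); // true
    const u32 raw = imm.U32();             // 42
    const U32 typed{imm};                  // TypedValue checks type compatibility
    const Value empty{};
    const bool is_void = empty.IsEmpty();  // true, default type is Type::Void
}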

View File

@ -0,0 +1,107 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
namespace Shader {
template <typename T>
requires std::is_destructible_v<T>
class ObjectPool {
public:
explicit ObjectPool(size_t chunk_size = 8192) : new_chunk_size{chunk_size} {
node = &chunks.emplace_back(new_chunk_size);
}
template <typename... Args>
requires std::is_constructible_v<T, Args...>
[[nodiscard]] T* Create(Args&&... args) {
return std::construct_at(Memory(), std::forward<Args>(args)...);
}
void ReleaseContents() {
if (chunks.empty()) {
return;
}
Chunk& root{chunks.front()};
if (root.used_objects == root.num_objects) {
// Root chunk has been filled, squash allocations into it
const size_t total_objects{root.num_objects + new_chunk_size * (chunks.size() - 1)};
chunks.clear();
chunks.emplace_back(total_objects);
} else {
root.Release();
chunks.resize(1);
}
chunks.shrink_to_fit();
node = &chunks.front();
}
private:
struct NonTrivialDummy {
NonTrivialDummy() noexcept {}
};
union Storage {
Storage() noexcept {}
~Storage() noexcept {}
NonTrivialDummy dummy{};
T object;
};
struct Chunk {
explicit Chunk() = default;
explicit Chunk(size_t size)
: num_objects{size}, storage{std::make_unique<Storage[]>(size)} {}
Chunk& operator=(Chunk&& rhs) noexcept {
Release();
used_objects = std::exchange(rhs.used_objects, 0);
num_objects = std::exchange(rhs.num_objects, 0);
storage = std::move(rhs.storage);
return *this;
}
Chunk(Chunk&& rhs) noexcept
: used_objects{std::exchange(rhs.used_objects, 0)},
num_objects{std::exchange(rhs.num_objects, 0)}, storage{std::move(rhs.storage)} {}
~Chunk() {
Release();
}
void Release() {
std::destroy_n(storage.get(), used_objects);
used_objects = 0;
}
size_t used_objects{};
size_t num_objects{};
std::unique_ptr<Storage[]> storage;
};
[[nodiscard]] T* Memory() {
Chunk* const chunk{FreeChunk()};
return &chunk->storage[chunk->used_objects++].object;
}
[[nodiscard]] Chunk* FreeChunk() {
if (node->used_objects != node->num_objects) {
return node;
}
node = &chunks.emplace_back(new_chunk_size);
return node;
}
Chunk* node{};
std::vector<Chunk> chunks;
size_t new_chunk_size{};
};
} // namespace Shader
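A minimal sketch of the ObjectPool above; Create placement-constructs into pooled chunks and ReleaseContents recycles them in bulk (the chunk size is an arbitrary choice here):
#include "shader_recompiler/object_pool.h"

struct Node {
    explicit Node(int v) : value{v} {}
    int value;
};

void PoolExample() {
    Shader::ObjectPool<Node> pool{64}; // 64 objects per chunk
    Node* a = pool.Create(1);
    Node* b = pool.Create(2);
    // a and b stay valid until ReleaseContents() destroys every pooled object.
    pool.ReleaseContents();
}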

View File

@ -0,0 +1,69 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/types.h"
namespace Shader {
struct Profile {
u32 supported_spirv{0x00010000};
bool unified_descriptor_binding{};
bool support_descriptor_aliasing{};
bool support_int8{};
bool support_int16{};
bool support_int64{};
bool support_vertex_instance_id{};
bool support_float_controls{};
bool support_separate_denorm_behavior{};
bool support_separate_rounding_mode{};
bool support_fp16_denorm_preserve{};
bool support_fp32_denorm_preserve{};
bool support_fp16_denorm_flush{};
bool support_fp32_denorm_flush{};
bool support_fp16_signed_zero_nan_preserve{};
bool support_fp32_signed_zero_nan_preserve{};
bool support_fp64_signed_zero_nan_preserve{};
bool support_explicit_workgroup_layout{};
bool support_vote{};
bool support_viewport_mask{};
bool support_typeless_image_loads{};
bool support_derivative_control{};
bool support_geometry_shader_passthrough{};
bool support_native_ndc{};
bool support_scaled_attributes{};
bool support_multi_viewport{};
bool support_geometry_streams{};
bool warp_size_potentially_larger_than_guest{};
bool lower_left_origin_mode{};
/// Fragment outputs have to be declared even if they are not written, to avoid undefined values.
/// See Ori and the Blind Forest's main menu for reference.
bool need_declared_frag_colors{};
/// Prevents fast math optimizations that may cause inaccuracies
bool need_fastmath_off{};
/// OpFClamp is broken and OpFMax + OpFMin should be used instead
bool has_broken_spirv_clamp{};
/// The Position builtin needs to be wrapped in a struct when used as an input
bool has_broken_spirv_position_input{};
/// Offset image operands with an unsigned type do not work
bool has_broken_unsigned_image_offsets{};
/// Signed instructions with unsigned data types are misinterpreted
bool has_broken_signed_operations{};
/// Float controls break when fp16 is enabled
bool has_broken_fp16_float_controls{};
/// Ignores SPIR-V ordered vs unordered using GLSL semantics
bool ignore_nan_fp_comparisons{};
/// Maxwell and earlier nVidia architectures have broken robust support
bool has_broken_robust{};
u64 min_ssbo_alignment{};
u32 max_user_clip_distances{};
};
} // namespace Shader

View File

@ -0,0 +1,72 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <fstream>
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/frontend/control_flow_graph.h"
#include "shader_recompiler/frontend/decode.h"
#include "shader_recompiler/frontend/structured_control_flow.h"
#include "shader_recompiler/ir/passes/passes.h"
#include "shader_recompiler/ir/post_order.h"
#include "shader_recompiler/recompiler.h"
namespace Shader {
IR::BlockList GenerateBlocks(const IR::AbstractSyntaxList& syntax_list) {
size_t num_syntax_blocks{};
for (const auto& node : syntax_list) {
if (node.type == IR::AbstractSyntaxNode::Type::Block) {
++num_syntax_blocks;
}
}
IR::BlockList blocks;
blocks.reserve(num_syntax_blocks);
for (const auto& node : syntax_list) {
if (node.type == IR::AbstractSyntaxNode::Type::Block) {
blocks.push_back(node.data.block);
}
}
return blocks;
}
std::vector<u32> TranslateProgram(ObjectPool<IR::Inst>& inst_pool,
ObjectPool<IR::Block>& block_pool, Stage stage,
std::span<const u32> token) {
// Ensure first instruction is expected.
constexpr u32 token_mov_vcchi = 0xBEEB03FF;
ASSERT_MSG(token[0] == token_mov_vcchi, "First instruction is not s_mov_b32 vcc_hi, #imm");
Gcn::GcnCodeSlice slice(token.data(), token.data() + token.size());
Gcn::GcnDecodeContext decoder;
// Decode and save instructions
IR::Program program;
program.ins_list.reserve(token.size());
while (!slice.atEnd()) {
program.ins_list.emplace_back(decoder.decodeInstruction(slice));
}
// Create control flow graph
ObjectPool<Gcn::Block> gcn_block_pool{64};
Gcn::CFG cfg{gcn_block_pool, program.ins_list};
// Structurize control flow graph and create program.
program.syntax_list = Shader::Gcn::BuildASL(inst_pool, block_pool, cfg, stage);
program.blocks = GenerateBlocks(program.syntax_list);
program.post_order_blocks = Shader::IR::PostOrder(program.syntax_list.front());
program.stage = stage;
// Run optimization passes
Shader::Optimization::SsaRewritePass(program.post_order_blocks);
Shader::Optimization::ConstantPropagationPass(program.post_order_blocks);
Shader::Optimization::IdentityRemovalPass(program.blocks);
// Shader::Optimization::ResourceTrackingPass(program.post_order_blocks);
Shader::Optimization::DeadCodeEliminationPass(program.blocks);
// TODO: Pass profile from vulkan backend
const auto code = Backend::SPIRV::EmitSPIRV(Profile{}, program);
return code;
}
} // namespace Shader

View File

@ -0,0 +1,33 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "shader_recompiler/ir/program.h"
namespace Shader {
struct BinaryInfo {
u8 signature[7];
u8 version;
u32 pssl_or_cg : 1;
u32 cached : 1;
u32 type : 4;
u32 source_type : 2;
u32 length : 24;
u8 chunk_usage_base_offset_in_dw;
u8 num_input_usage_slots;
u8 is_srt : 1;
u8 is_srt_used_info_valid : 1;
u8 is_extended_usage_info : 1;
u8 reserved2 : 5;
u8 reserved3;
u64 shader_hash;
u32 crc32;
};
[[nodiscard]] std::vector<u32> TranslateProgram(ObjectPool<IR::Inst>& inst_pool,
ObjectPool<IR::Block>& block_pool, Stage stage,
std::span<const u32> code);
} // namespace Shader

View File

@ -0,0 +1,139 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <boost/container/small_vector.hpp>
#include "shader_recompiler/ir/type.h"
namespace Shader {
enum class AttributeType : u8 {
Float,
SignedInt,
UnsignedInt,
SignedScaled,
UnsignedScaled,
Disabled,
};
enum class InputTopology {
Points,
Lines,
LinesAdjacency,
Triangles,
TrianglesAdjacency,
};
enum class CompareFunction {
Never,
Less,
Equal,
LessThanEqual,
Greater,
NotEqual,
GreaterThanEqual,
Always,
};
enum class Stage : u32 {
Vertex,
TessellationControl,
TessellationEval,
Geometry,
Fragment,
Compute,
};
constexpr u32 MaxStageTypes = 6;
[[nodiscard]] constexpr Stage StageFromIndex(size_t index) noexcept {
return static_cast<Stage>(static_cast<size_t>(Stage::Vertex) + index);
}
enum class TextureType : u32 {
Color1D,
ColorArray1D,
Color2D,
ColorArray2D,
Color3D,
ColorCube,
Buffer,
};
constexpr u32 NUM_TEXTURE_TYPES = 7;
enum class Interpolation {
Smooth,
Flat,
NoPerspective,
};
struct ConstantBufferDescriptor {
u32 index;
u32 count;
auto operator<=>(const ConstantBufferDescriptor&) const = default;
};
struct TextureDescriptor {
TextureType type;
bool is_eud;
bool is_depth;
bool is_multisample;
bool is_storage;
u32 count;
u32 eud_offset_dwords;
u32 ud_index_dwords;
auto operator<=>(const TextureDescriptor&) const = default;
};
using TextureDescriptors = boost::container::small_vector<TextureDescriptor, 12>;
struct Info {
bool uses_workgroup_id{};
bool uses_local_invocation_id{};
bool uses_invocation_id{};
bool uses_invocation_info{};
bool uses_sample_id{};
std::array<Interpolation, 32> interpolation{};
// VaryingState loads;
// VaryingState stores;
// VaryingState passthrough;
std::array<bool, 8> stores_frag_color{};
bool stores_sample_mask{};
bool stores_frag_depth{};
bool uses_fp16{};
bool uses_fp64{};
bool uses_fp16_denorms_flush{};
bool uses_fp16_denorms_preserve{};
bool uses_fp32_denorms_flush{};
bool uses_fp32_denorms_preserve{};
bool uses_int8{};
bool uses_int16{};
bool uses_int64{};
bool uses_image_1d{};
bool uses_sampled_1d{};
bool uses_subgroup_vote{};
bool uses_subgroup_mask{};
bool uses_derivatives{};
IR::Type used_constant_buffer_types{};
IR::Type used_storage_buffer_types{};
IR::Type used_indirect_cbuf_types{};
// std::array<u32, MAX_CBUFS> constant_buffer_used_sizes{};
u32 used_clip_distances{};
// boost::container::static_vector<ConstantBufferDescriptor, MAX_CBUFS> constant_buffer_descriptors;
// boost::container::static_vector<StorageBufferDescriptor, MAX_SSBOS> storage_buffers_descriptors;
// TextureBufferDescriptors texture_buffer_descriptors;
// ImageBufferDescriptors image_buffer_descriptors;
// TextureDescriptors texture_descriptors;
// ImageDescriptors image_descriptors;
};
} // namespace Shader

View File

@ -6,6 +6,7 @@
#include "common/thread.h"
#include "video_core/amdgpu/liverpool.h"
#include "video_core/amdgpu/pm4_cmds.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
namespace AmdGpu {
@ -107,7 +108,7 @@ void Liverpool::ProcessCmdList(const u32* cmdbuf, u32 size_in_bytes) {
regs.index_base_address.base_addr_hi.Assign(draw_index->index_base_hi);
regs.num_indices = draw_index->index_count;
regs.draw_initiator = draw_index->draw_initiator;
// rasterizer->DrawIndex();
rasterizer->DrawIndex();
break;
}
case PM4ItOpcode::DrawIndexAuto: {

View File

@ -15,6 +15,10 @@
#include <thread>
#include <queue>
namespace Vulkan {
class Rasterizer;
}
namespace AmdGpu {
#define GFX6_3D_REG_INDEX(field_name) (offsetof(AmdGpu::Liverpool::Regs, field_name) / sizeof(u32))
@ -46,9 +50,10 @@ struct Liverpool {
} settings;
UserData user_data;
const u8* Address() const {
template <typename T = u8>
const T* Address() const {
const uintptr_t addr = uintptr_t(address_hi) << 40 | uintptr_t(address_lo) << 8;
return reinterpret_cast<const u8*>(addr);
return reinterpret_cast<const T*>(addr);
}
};
@ -631,10 +636,15 @@ public:
void WaitGpuIdle();
void BindRasterizer(Vulkan::Rasterizer* rasterizer_) {
rasterizer = rasterizer_;
}
private:
void ProcessCmdList(const u32* cmdbuf, u32 size_in_bytes);
void Process(std::stop_token stoken);
Vulkan::Rasterizer* rasterizer;
std::jthread process_thread{};
std::queue<std::span<const u32>> gfx_ring{};
std::condition_variable_any cv_submit{};
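A hedged sketch of the templated Address() accessor introduced above, assuming the ShaderProgram register block that the pipeline cache uses later in this diff:
#include "video_core/amdgpu/liverpool.h"

const u32* ShaderTokens(const AmdGpu::Liverpool::ShaderProgram& pgm) {
    // The template parameter replaces manual reinterpret_casts from const u8*.
    return pgm.Address<u32>();
}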

View File

@ -0,0 +1,113 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "video_core/renderer_vulkan/liverpool_to_vk.h"
namespace Vulkan::LiverpoolToVK {
vk::StencilOp StencilOp(Liverpool::StencilFunc op) {
switch (op) {
case Liverpool::StencilFunc::Keep:
return vk::StencilOp::eKeep;
case Liverpool::StencilFunc::Zero:
return vk::StencilOp::eZero;
case Liverpool::StencilFunc::AddClamp:
return vk::StencilOp::eIncrementAndClamp;
case Liverpool::StencilFunc::SubClamp:
return vk::StencilOp::eDecrementAndClamp;
case Liverpool::StencilFunc::Invert:
return vk::StencilOp::eInvert;
case Liverpool::StencilFunc::AddWrap:
return vk::StencilOp::eIncrementAndWrap;
case Liverpool::StencilFunc::SubWrap:
return vk::StencilOp::eDecrementAndWrap;
default:
UNREACHABLE();
return vk::StencilOp::eKeep;
}
}
vk::CompareOp CompareOp(Liverpool::CompareFunc func) {
switch (func) {
case Liverpool::CompareFunc::Always:
return vk::CompareOp::eAlways;
case Liverpool::CompareFunc::Equal:
return vk::CompareOp::eEqual;
case Liverpool::CompareFunc::GreaterEqual:
return vk::CompareOp::eGreaterOrEqual;
case Liverpool::CompareFunc::Greater:
return vk::CompareOp::eGreater;
case Liverpool::CompareFunc::LessEqual:
return vk::CompareOp::eLessOrEqual;
case Liverpool::CompareFunc::Less:
return vk::CompareOp::eLess;
case Liverpool::CompareFunc::NotEqual:
return vk::CompareOp::eNotEqual;
case Liverpool::CompareFunc::Never:
return vk::CompareOp::eNever;
default:
UNREACHABLE();
return vk::CompareOp::eAlways;
}
}
vk::PrimitiveTopology PrimitiveType(Liverpool::PrimitiveType type) {
switch (type) {
case Liverpool::PrimitiveType::PointList:
return vk::PrimitiveTopology::ePointList;
case Liverpool::PrimitiveType::LineList:
return vk::PrimitiveTopology::eLineList;
case Liverpool::PrimitiveType::LineStrip:
return vk::PrimitiveTopology::eLineStrip;
case Liverpool::PrimitiveType::TriangleList:
return vk::PrimitiveTopology::eTriangleList;
case Liverpool::PrimitiveType::TriangleFan:
return vk::PrimitiveTopology::eTriangleFan;
case Liverpool::PrimitiveType::TriangleStrip:
return vk::PrimitiveTopology::eTriangleStrip;
case Liverpool::PrimitiveType::AdjLineList:
return vk::PrimitiveTopology::eLineListWithAdjacency;
case Liverpool::PrimitiveType::AdjLineStrip:
return vk::PrimitiveTopology::eLineStripWithAdjacency;
case Liverpool::PrimitiveType::AdjTriangleList:
return vk::PrimitiveTopology::eTriangleListWithAdjacency;
case Liverpool::PrimitiveType::AdjTriangleStrip:
return vk::PrimitiveTopology::eTriangleStripWithAdjacency;
default:
UNREACHABLE();
return vk::PrimitiveTopology::eTriangleList;
}
}
vk::PolygonMode PolygonMode(Liverpool::PolygonMode mode) {
switch (mode) {
case Liverpool::PolygonMode::Point:
return vk::PolygonMode::ePoint;
case Liverpool::PolygonMode::Line:
return vk::PolygonMode::eLine;
case Liverpool::PolygonMode::Fill:
return vk::PolygonMode::eFill;
default:
UNREACHABLE();
return vk::PolygonMode::eFill;
}
}
vk::CullModeFlags CullMode(Liverpool::CullMode mode) {
switch (mode) {
case Liverpool::CullMode::None:
return vk::CullModeFlagBits::eNone;
case Liverpool::CullMode::Front:
return vk::CullModeFlagBits::eFront;
case Liverpool::CullMode::Back:
return vk::CullModeFlagBits::eBack;
case Liverpool::CullMode::FrontAndBack:
return vk::CullModeFlagBits::eFrontAndBack;
default:
UNREACHABLE();
return vk::CullModeFlagBits::eNone;
}
}
} // namespace Vulkan::LiverpoolToVK

View File

@ -0,0 +1,23 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "video_core/amdgpu/liverpool.h"
#include "video_core/renderer_vulkan/vk_common.h"
namespace Vulkan::LiverpoolToVK {
using Liverpool = AmdGpu::Liverpool;
vk::StencilOp StencilOp(Liverpool::StencilFunc op);
vk::CompareOp CompareOp(Liverpool::CompareFunc func);
vk::PrimitiveTopology PrimitiveType(Liverpool::PrimitiveType type);
vk::PolygonMode PolygonMode(Liverpool::PolygonMode mode);
vk::CullModeFlags CullMode(Liverpool::CullMode mode);
} // namespace Vulkan::LiverpoolToVK

View File

@ -7,6 +7,7 @@
#include "core/libraries/system/systemservice.h"
#include "sdl_window.h"
#include "video_core/renderer_vulkan/renderer_vulkan.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
#include <vk_mem_alloc.h>
@ -60,9 +61,10 @@ bool CanBlitToSwapchain(const vk::PhysicalDevice physical_device, vk::Format for
};
}
RendererVulkan::RendererVulkan(Frontend::WindowSDL& window_)
RendererVulkan::RendererVulkan(Frontend::WindowSDL& window_, AmdGpu::Liverpool* liverpool)
: window{window_}, instance{window, Config::getGpuId()}, scheduler{instance},
swapchain{instance, window}, texture_cache{instance, scheduler} {
rasterizer = std::make_unique<Rasterizer>(instance, scheduler, texture_cache, liverpool);
const u32 num_images = swapchain.GetImageCount();
const vk::Device device = instance.GetDevice();

View File

@ -13,6 +13,10 @@ namespace Frontend {
class WindowSDL;
}
namespace AmdGpu {
struct Liverpool;
}
namespace Vulkan {
struct Frame {
@ -26,9 +30,11 @@ struct Frame {
vk::CommandBuffer cmdbuf;
};
class Rasterizer;
class RendererVulkan {
public:
explicit RendererVulkan(Frontend::WindowSDL& window);
explicit RendererVulkan(Frontend::WindowSDL& window, AmdGpu::Liverpool* liverpool);
~RendererVulkan();
Frame* PrepareFrame(const Libraries::VideoOut::BufferAttributeGroup& attribute,
@ -47,6 +53,7 @@ private:
Instance instance;
Scheduler scheduler;
Swapchain swapchain;
std::unique_ptr<Rasterizer> rasterizer;
VideoCore::TextureCache texture_cache;
vk::UniqueCommandPool command_pool;
std::vector<Frame> present_frames;

View File

@ -0,0 +1,162 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <boost/container/static_vector.hpp>
#include "common/assert.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_instance.h"
namespace Vulkan {
GraphicsPipeline::GraphicsPipeline(const Instance& instance_, const PipelineKey& key_,
vk::PipelineCache pipeline_cache_, vk::PipelineLayout layout_,
std::array<vk::ShaderModule, MaxShaderStages> modules)
: instance{instance_}, pipeline_layout{layout_}, pipeline_cache{pipeline_cache_}, key{key_} {
const vk::Device device = instance.GetDevice();
const vk::PipelineVertexInputStateCreateInfo vertex_input_info = {
.vertexBindingDescriptionCount = 0U,
.pVertexBindingDescriptions = nullptr,
.vertexAttributeDescriptionCount = 0U,
.pVertexAttributeDescriptions = nullptr,
};
const vk::PipelineInputAssemblyStateCreateInfo input_assembly = {
.topology = LiverpoolToVK::PrimitiveType(key.prim_type),
.primitiveRestartEnable = false,
};
const vk::PipelineRasterizationStateCreateInfo raster_state = {
.depthClampEnable = false,
.rasterizerDiscardEnable = false,
.polygonMode = LiverpoolToVK::PolygonMode(key.polygon_mode),
.cullMode = LiverpoolToVK::CullMode(key.cull_mode),
.frontFace = vk::FrontFace::eClockwise,
.depthBiasEnable = false,
.lineWidth = 1.0f,
};
const vk::PipelineMultisampleStateCreateInfo multisampling = {
.rasterizationSamples = vk::SampleCountFlagBits::e1,
.sampleShadingEnable = false,
};
const vk::PipelineColorBlendAttachmentState colorblend_attachment = {
.blendEnable = false,
.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG |
vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA,
};
const vk::PipelineColorBlendStateCreateInfo color_blending = {
.logicOpEnable = false,
.logicOp = vk::LogicOp::eCopy,
.attachmentCount = 1,
.pAttachments = &colorblend_attachment,
.blendConstants = std::array{1.0f, 1.0f, 1.0f, 1.0f},
};
const vk::Viewport viewport = {
.x = 0.0f,
.y = 0.0f,
.width = 1.0f,
.height = 1.0f,
.minDepth = 0.0f,
.maxDepth = 1.0f,
};
const vk::Rect2D scissor = {
.offset = {0, 0},
.extent = {1, 1},
};
const vk::PipelineViewportStateCreateInfo viewport_info = {
.viewportCount = 1,
.pViewports = &viewport,
.scissorCount = 1,
.pScissors = &scissor,
};
boost::container::static_vector<vk::DynamicState, 14> dynamic_states = {
vk::DynamicState::eViewport,
vk::DynamicState::eScissor,
};
const vk::PipelineDynamicStateCreateInfo dynamic_info = {
.dynamicStateCount = static_cast<u32>(dynamic_states.size()),
.pDynamicStates = dynamic_states.data(),
};
const vk::PipelineDepthStencilStateCreateInfo depth_info = {
.depthTestEnable = key.depth.depth_enable,
.depthWriteEnable = key.depth.depth_write_enable,
.depthCompareOp = LiverpoolToVK::CompareOp(key.depth.depth_func),
.depthBoundsTestEnable = key.depth.depth_bounds_enable,
.stencilTestEnable = key.depth.stencil_enable,
.front{
.failOp = LiverpoolToVK::StencilOp(key.stencil.stencil_fail_front),
.passOp = LiverpoolToVK::StencilOp(key.stencil.stencil_zpass_front),
.depthFailOp = LiverpoolToVK::StencilOp(key.stencil.stencil_zfail_front),
.compareOp = LiverpoolToVK::CompareOp(key.depth.stencil_ref_func),
.compareMask = key.stencil_ref_front.stencil_mask,
.writeMask = key.stencil_ref_front.stencil_write_mask,
.reference = key.stencil_ref_front.stencil_test_val,
},
.back{
.failOp = LiverpoolToVK::StencilOp(key.stencil.stencil_fail_back),
.passOp = LiverpoolToVK::StencilOp(key.stencil.stencil_zpass_back),
.depthFailOp = LiverpoolToVK::StencilOp(key.stencil.stencil_zfail_back),
.compareOp = LiverpoolToVK::CompareOp(key.depth.stencil_bf_func),
.compareMask = key.stencil_ref_back.stencil_mask,
.writeMask = key.stencil_ref_back.stencil_write_mask,
.reference = key.stencil_ref_back.stencil_test_val,
},
};
u32 shader_count = 2;
std::array<vk::PipelineShaderStageCreateInfo, MaxShaderStages> shader_stages;
shader_stages[0] = vk::PipelineShaderStageCreateInfo{
.stage = vk::ShaderStageFlagBits::eVertex,
.module = modules[0],
.pName = "main",
};
shader_stages[1] = vk::PipelineShaderStageCreateInfo{
.stage = vk::ShaderStageFlagBits::eFragment,
.module = modules[4],
.pName = "main",
};
const vk::Format color_format = vk::Format::eB8G8R8A8Srgb;
const vk::PipelineRenderingCreateInfoKHR pipeline_rendering_ci = {
.colorAttachmentCount = 1,
.pColorAttachmentFormats = &color_format,
.depthAttachmentFormat = vk::Format::eUndefined,
.stencilAttachmentFormat = vk::Format::eUndefined,
};
const vk::GraphicsPipelineCreateInfo pipeline_info = {
.pNext = &pipeline_rendering_ci,
.stageCount = shader_count,
.pStages = shader_stages.data(),
.pVertexInputState = &vertex_input_info,
.pInputAssemblyState = &input_assembly,
.pViewportState = &viewport_info,
.pRasterizationState = &raster_state,
.pMultisampleState = &multisampling,
.pDepthStencilState = &depth_info,
.pColorBlendState = &color_blending,
.pDynamicState = &dynamic_info,
.layout = pipeline_layout,
};
auto result = device.createGraphicsPipelineUnique(pipeline_cache, pipeline_info);
if (result.result == vk::Result::eSuccess) {
pipeline = std::move(result.value);
} else {
UNREACHABLE_MSG("Graphics pipeline creation failed!");
}
}
GraphicsPipeline::~GraphicsPipeline() = default;
} // namespace Vulkan

View File

@ -0,0 +1,46 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/types.h"
#include "video_core/renderer_vulkan/liverpool_to_vk.h"
#include "video_core/renderer_vulkan/vk_common.h"
namespace Vulkan {
static constexpr u32 MaxShaderStages = 5;
class Instance;
using Liverpool = AmdGpu::Liverpool;
struct PipelineKey {
Liverpool::DepthControl depth;
Liverpool::StencilControl stencil;
Liverpool::StencilRefMask stencil_ref_front;
Liverpool::StencilRefMask stencil_ref_back;
Liverpool::PrimitiveType prim_type;
Liverpool::PolygonMode polygon_mode;
Liverpool::CullMode cull_mode;
};
static_assert(std::has_unique_object_representations_v<PipelineKey>);
class GraphicsPipeline {
public:
explicit GraphicsPipeline(const Instance& instance, const PipelineKey& key,
vk::PipelineCache pipeline_cache, vk::PipelineLayout layout,
std::array<vk::ShaderModule, MaxShaderStages> modules);
~GraphicsPipeline();
[[nodiscard]] vk::Pipeline Handle() const noexcept {
return *pipeline;
}
private:
const Instance& instance;
vk::UniquePipeline pipeline;
vk::PipelineLayout pipeline_layout;
vk::PipelineCache pipeline_cache;
PipelineKey key;
};
} // namespace Vulkan

View File

@ -196,9 +196,15 @@ bool Instance::CreateDevice() {
.shaderClipDistance = features.shaderClipDistance,
},
},
vk::PhysicalDeviceVulkan11Features{
.shaderDrawParameters = true,
},
vk::PhysicalDeviceVulkan12Features{
.timelineSemaphore = true,
},
vk::PhysicalDeviceVulkan13Features{
.dynamicRendering = true,
},
vk::PhysicalDeviceCustomBorderColorFeaturesEXT{
.customBorderColors = true,
.customBorderColorWithoutFormat = true,

View File

@ -0,0 +1,70 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/scope_exit.h"
#include "shader_recompiler/recompiler.h"
#include "shader_recompiler/runtime_info.h"
#include "video_core/renderer_vulkan/vk_instance.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_shader_util.h"
namespace Vulkan {
PipelineCache::PipelineCache(const Instance& instance_, Scheduler& scheduler_,
AmdGpu::Liverpool* liverpool_)
: instance{instance_}, scheduler{scheduler_}, liverpool{liverpool_}, inst_pool{4096},
block_pool{512} {
const vk::PipelineLayoutCreateInfo layout_info = {
.setLayoutCount = 0U,
.pSetLayouts = nullptr,
.pushConstantRangeCount = 0,
.pPushConstantRanges = nullptr,
};
pipeline_layout = instance.GetDevice().createPipelineLayoutUnique(layout_info);
pipeline_cache = instance.GetDevice().createPipelineCacheUnique({});
}
void PipelineCache::BindPipeline() {
SCOPE_EXIT {
const auto cmdbuf = scheduler.CommandBuffer();
cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline->Handle());
};
if (pipeline) {
return;
}
const auto get_program = [&](const AmdGpu::Liverpool::ShaderProgram& pgm, Shader::Stage stage) {
const u32* token = pgm.Address<u32>();
// Retrieve shader header.
Shader::BinaryInfo bininfo;
std::memcpy(&bininfo, token + (token[1] + 1) * 2, sizeof(bininfo));
// Lookup if the shader already exists.
const auto it = module_map.find(bininfo.shader_hash);
if (it != module_map.end()) {
return *it->second;
}
// Compile and cache shader.
const auto data = std::span{token, bininfo.length / sizeof(u32)};
const auto program = Shader::TranslateProgram(inst_pool, block_pool, stage, data);
return CompileSPV(program, instance.GetDevice());
};
// Retrieve shader stage modules.
// TODO: Only do this when program address is changed.
stages[0] = get_program(liverpool->regs.vs_program, Shader::Stage::Vertex);
stages[4] = get_program(liverpool->regs.ps_program, Shader::Stage::Fragment);
// Bind pipeline.
// TODO: Read entire key based on reg state.
graphics_key.prim_type = liverpool->regs.primitive_type;
graphics_key.polygon_mode = liverpool->regs.polygon_control.PolyMode();
pipeline = std::make_unique<GraphicsPipeline>(instance, graphics_key, *pipeline_cache,
*pipeline_layout, stages);
}
} // namespace Vulkan

View File

@ -0,0 +1,40 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <tsl/robin_map.h>
#include "shader_recompiler/ir/basic_block.h"
#include "shader_recompiler/object_pool.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
namespace Vulkan {
class Instance;
class Scheduler;
class PipelineCache {
static constexpr size_t MaxShaderStages = 5;
public:
explicit PipelineCache(const Instance& instance, Scheduler& scheduler,
AmdGpu::Liverpool* liverpool);
~PipelineCache() = default;
void BindPipeline();
private:
const Instance& instance;
Scheduler& scheduler;
AmdGpu::Liverpool* liverpool;
vk::UniquePipelineCache pipeline_cache;
vk::UniquePipelineLayout pipeline_layout;
tsl::robin_map<size_t, vk::UniqueShaderModule> module_map;
std::array<vk::ShaderModule, MaxShaderStages> stages{};
std::unique_ptr<GraphicsPipeline> pipeline;
PipelineKey graphics_key{};
Shader::ObjectPool<Shader::IR::Inst> inst_pool;
Shader::ObjectPool<Shader::IR::Block> block_pool;
};
} // namespace Vulkan

View File

@ -0,0 +1,99 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "video_core/amdgpu/liverpool.h"
#include "video_core/renderer_vulkan/vk_instance.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/texture_cache/image_view.h"
#include "video_core/texture_cache/texture_cache.h"
namespace Vulkan {
static constexpr vk::BufferUsageFlags VertexIndexFlags = vk::BufferUsageFlagBits::eVertexBuffer |
vk::BufferUsageFlagBits::eIndexBuffer |
vk::BufferUsageFlagBits::eTransferDst;
Rasterizer::Rasterizer(const Instance& instance_, Scheduler& scheduler_,
VideoCore::TextureCache& texture_cache_, AmdGpu::Liverpool* liverpool_)
: instance{instance_}, scheduler{scheduler_}, texture_cache{texture_cache_},
liverpool{liverpool_}, pipeline_cache{instance, scheduler, liverpool},
vertex_index_buffer{instance, scheduler, VertexIndexFlags, 64_MB} {
liverpool->BindRasterizer(this);
}
Rasterizer::~Rasterizer() = default;
void Rasterizer::DrawIndex() {
const auto cmdbuf = scheduler.CommandBuffer();
auto& regs = liverpool->regs;
static bool first_time = true;
if (first_time) {
first_time = false;
return;
}
UpdateDynamicState();
pipeline_cache.BindPipeline();
const u32 pitch = regs.color_buffers[0].Pitch();
const u32 height = regs.color_buffers[0].Height();
const u32 tile_max = regs.color_buffers[0].slice.tile_max;
auto& image_view = texture_cache.RenderTarget(regs.color_buffers[0].Address(), pitch);
const vk::RenderingAttachmentInfo color_info = {
.imageView = *image_view.image_view,
.imageLayout = vk::ImageLayout::eGeneral,
.loadOp = vk::AttachmentLoadOp::eLoad,
.storeOp = vk::AttachmentStoreOp::eStore,
};
// TODO: Don't restart renderpass every draw
const vk::RenderingInfo rendering_info = {
.renderArea = {.offset = {0, 0}, .extent = {1920, 1080}},
.layerCount = 1,
.colorAttachmentCount = 1,
.pColorAttachments = &color_info,
};
cmdbuf.beginRendering(rendering_info);
cmdbuf.bindIndexBuffer(vertex_index_buffer.Handle(), 0, vk::IndexType::eUint32);
cmdbuf.bindVertexBuffers(0, vertex_index_buffer.Handle(), vk::DeviceSize(0));
cmdbuf.draw(regs.num_indices, regs.num_instances.NumInstances(), 0, 0);
cmdbuf.endRendering();
}
void Rasterizer::UpdateDynamicState() {
UpdateViewportScissorState();
}
void Rasterizer::UpdateViewportScissorState() {
auto& regs = liverpool->regs;
const auto cmdbuf = scheduler.CommandBuffer();
const vk::Viewport viewport{
.x = regs.viewports[0].xoffset - regs.viewports[0].xscale,
.y = regs.viewports[0].yoffset - regs.viewports[0].yscale,
.width = regs.viewports[0].xscale * 2.0f,
.height = regs.viewports[0].yscale * 2.0f,
.minDepth = regs.viewports[0].zoffset - regs.viewports[0].zscale,
.maxDepth = regs.viewports[0].zscale + regs.viewports[0].zoffset,
};
const vk::Rect2D scissor{
.offset = {regs.screen_scissor.top_left_x, regs.screen_scissor.top_left_y},
.extent = {regs.screen_scissor.GetWidth(), regs.screen_scissor.GetHeight()},
};
cmdbuf.setViewport(0, viewport);
cmdbuf.setScissor(0, scissor);
}
void Rasterizer::UpdateDepthStencilState() {
auto& depth = liverpool->regs.depth_control;
const auto cmdbuf = scheduler.CommandBuffer();
cmdbuf.setDepthBoundsTestEnable(depth.depth_bounds_enable);
}
} // namespace Vulkan

View File

@ -0,0 +1,51 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_stream_buffer.h"
namespace AmdGpu {
struct Liverpool;
}
namespace VideoCore {
class TextureCache;
}
namespace Vulkan {
class Scheduler;
class GraphicsPipeline;
class Rasterizer {
public:
explicit Rasterizer(const Instance& instance, Scheduler& scheduler,
VideoCore::TextureCache& texture_cache, AmdGpu::Liverpool* liverpool);
~Rasterizer();
/// Performs a draw call with an index buffer.
void DrawIndex();
/// Updates graphics state that is not part of the bound pipeline.
void UpdateDynamicState();
private:
/// Updates viewport and scissor from liverpool registers.
void UpdateViewportScissorState();
/// Updates depth and stencil pipeline state from liverpool registers.
void UpdateDepthStencilState();
private:
const Instance& instance;
Scheduler& scheduler;
VideoCore::TextureCache& texture_cache;
AmdGpu::Liverpool* liverpool;
PipelineCache pipeline_cache;
StreamBuffer vertex_index_buffer;
};
} // namespace Vulkan

View File

@ -38,7 +38,7 @@ using Libraries::VideoOut::TilingMode;
if (false /*&& IsDepthStencilFormat(format)*/) {
usage |= vk::ImageUsageFlagBits::eDepthStencilAttachment;
} else {
// usage |= vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eStorage;
usage |= vk::ImageUsageFlagBits::eColorAttachment;
}
return usage;
}

View File

@ -7,6 +7,7 @@
#include "common/types.h"
#include "core/libraries/videoout/buffer.h"
#include "video_core/renderer_vulkan/vk_common.h"
#include "video_core/texture_cache/image_view.h"
#include "video_core/texture_cache/types.h"
namespace Vulkan {
@ -41,23 +42,6 @@ struct ImageInfo {
u32 guest_size_bytes = 0;
};
struct Handle {
VmaAllocation allocation;
VkImage image;
Handle() = default;
Handle(Handle&& other)
: image{std::exchange(other.image, VK_NULL_HANDLE)},
allocation{std::exchange(other.allocation, VK_NULL_HANDLE)} {}
Handle& operator=(Handle&& other) {
image = std::exchange(other.image, VK_NULL_HANDLE);
allocation = std::exchange(other.allocation, VK_NULL_HANDLE);
return *this;
}
};
struct UniqueImage {
explicit UniqueImage(vk::Device device, VmaAllocator allocator);
~UniqueImage();
@ -100,6 +84,14 @@ struct Image {
return cpu_addr < overlap_end && overlap_cpu_addr < cpu_addr_end;
}
ImageViewId FindView(const ImageViewInfo& info) const {
const auto it = std::ranges::find(image_view_infos, info);
if (it == image_view_infos.end()) {
return {};
}
return image_view_ids[std::distance(image_view_infos.begin(), it)];
}
void Transit(vk::ImageLayout dst_layout, vk::Flags<vk::AccessFlagBits> dst_mask);
const Vulkan::Instance* instance;
@ -110,6 +102,8 @@ struct Image {
ImageFlagBits flags = ImageFlagBits::CpuModified;
VAddr cpu_addr = 0;
VAddr cpu_addr_end = 0;
std::vector<ImageViewInfo> image_view_infos;
std::vector<ImageViewId> image_view_ids;
// Resource state tracking
vk::Flags<vk::PipelineStageFlagBits> pl_stage = vk::PipelineStageFlagBits::eAllCommands;

View File

@ -13,20 +13,9 @@ class Scheduler;
namespace VideoCore {
enum class ImageViewType : u32 {
e1D,
e2D,
Cube,
e3D,
e1DArray,
e2DArray,
CubeArray,
Buffer,
};
struct ImageViewInfo {
vk::ImageViewType type{};
vk::Format format{};
vk::ImageViewType type = vk::ImageViewType::e2D;
vk::Format format = vk::Format::eR8G8B8A8Unorm;
SubresourceRange range;
vk::ComponentMapping mapping{};

View File

@ -31,10 +31,6 @@ class SlotVector {
constexpr static std::size_t InitialCapacity = 1024;
public:
SlotVector() {
Reserve(InitialCapacity);
}
~SlotVector() noexcept {
std::size_t index = 0;
for (u64 bits : stored_bitset) {

View File

@ -83,6 +83,15 @@ TextureCache::TextureCache(const Vulkan::Instance& instance_, Vulkan::Scheduler&
ASSERT_MSG(veh_handle, "Failed to register an exception handler");
#endif
g_texture_cache = this;
ImageInfo info;
info.pixel_format = vk::Format::eR8G8B8A8Unorm;
info.type = vk::ImageType::e2D;
const ImageId null_id = slot_images.insert(instance, scheduler, info, 0);
ASSERT(null_id.index == 0);
ImageViewInfo view_info;
void(slot_image_views.insert(instance, scheduler, view_info, slot_images[null_id].image));
}
TextureCache::~TextureCache() {
@ -128,6 +137,29 @@ Image& TextureCache::FindImage(const ImageInfo& info, VAddr cpu_address) {
return image;
}
ImageView& TextureCache::RenderTarget(VAddr cpu_address, u32 pitch) {
boost::container::small_vector<ImageId, 2> image_ids;
ForEachImageInRegion(cpu_address, pitch * 4, [&](ImageId image_id, Image& image) {
if (image.cpu_addr == cpu_address) {
image_ids.push_back(image_id);
}
});
ASSERT_MSG(image_ids.size() <= 1, "Overlapping framebuffers not allowed!");
auto* image = &slot_images[image_ids.empty() ? ImageId{0} : image_ids.back()];
ImageViewInfo info;
info.format = vk::Format::eB8G8R8A8Srgb;
if (const ImageViewId view_id = image->FindView(info); view_id) {
return slot_image_views[view_id];
}
const ImageViewId view_id = slot_image_views.insert(instance, scheduler, info, image->image);
image->image_view_infos.emplace_back(info);
image->image_view_ids.emplace_back(view_id);
return slot_image_views[view_id];
}
void TextureCache::RefreshImage(Image& image) {
// Mark image as validated.
image.flags &= ~ImageFlagBits::CpuModified;

View File

@ -9,6 +9,7 @@
#include "video_core/renderer_vulkan/vk_stream_buffer.h"
#include "video_core/texture_cache/image.h"
#include "video_core/texture_cache/image_view.h"
#include "video_core/texture_cache/slot_vector.h"
namespace Core::Libraries::VideoOut {
@ -35,6 +36,9 @@ public:
/// Retrieves the image handle of the image with the provided attributes and address.
Image& FindImage(const ImageInfo& info, VAddr cpu_address);
/// Retrieves the render target with specified properties
ImageView& RenderTarget(VAddr cpu_address, u32 pitch);
/// Reuploads image contents.
void RefreshImage(Image& image);
@ -116,6 +120,7 @@ private:
Vulkan::Scheduler& scheduler;
Vulkan::StreamBuffer staging;
SlotVector<Image> slot_images;
SlotVector<ImageView> slot_image_views;
tsl::robin_pg_map<u64, std::vector<ImageId>> page_table;
boost::icl::interval_map<VAddr, s32> cached_pages;
#ifdef _WIN64

Some files were not shown because too many files have changed in this diff.