shader_recompiler: Better branch detection + more opcodes
parent: f624f7749c
commit: 02a50265f8
@@ -9,6 +9,7 @@
 #include "shader_recompiler/backend/spirv/emit_spirv.h"
 #include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
 #include "shader_recompiler/backend/spirv/spirv_emit_context.h"
+#include "shader_recompiler/frontend/translate/translate.h"
 #include "shader_recompiler/ir/basic_block.h"
 #include "shader_recompiler/ir/program.h"
 
@@ -28,6 +29,8 @@ ArgType Arg(EmitContext& ctx, const IR::Value& arg) {
         return arg;
     } else if constexpr (std::is_same_v<ArgType, u32>) {
         return arg.U32();
+    } else if constexpr (std::is_same_v<ArgType, u64>) {
+        return arg.U64();
     } else if constexpr (std::is_same_v<ArgType, IR::Attribute>) {
         return arg.Attribute();
     } else if constexpr (std::is_same_v<ArgType, IR::ScalarReg>) {
@@ -279,6 +282,10 @@ void EmitGetVccLo(EmitContext& ctx) {
     throw LogicError("Unreachable instruction");
 }
 
+void EmitGetVccHi(EmitContext& ctx) {
+    throw LogicError("Unreachable instruction");
+}
+
 void EmitSetScc(EmitContext& ctx) {
     throw LogicError("Unreachable instruction");
 }
@@ -295,4 +302,8 @@ void EmitSetVccLo(EmitContext& ctx) {
     throw LogicError("Unreachable instruction");
 }
 
+void EmitSetVccHi(EmitContext& ctx) {
+    throw LogicError("Unreachable instruction");
+}
+
 } // namespace Shader::Backend::SPIRV
@@ -33,6 +33,14 @@ Id EmitGetUserData(EmitContext& ctx, IR::ScalarReg reg) {
     return ctx.ConstU32(ctx.info.user_data[static_cast<size_t>(reg)]);
 }
 
+void EmitGetThreadBitScalarReg(EmitContext& ctx) {
+    throw LogicError("Unreachable instruction");
+}
+
+void EmitSetThreadBitScalarReg(EmitContext& ctx) {
+    throw LogicError("Unreachable instruction");
+}
+
 void EmitGetScalarRegister(EmitContext&) {
     throw LogicError("Unreachable instruction");
 }
@@ -68,7 +76,7 @@ Id EmitReadConstBuffer(EmitContext& ctx, u32 handle, Id index) {
 }
 
 Id EmitReadConstBufferU32(EmitContext& ctx, u32 handle, Id index) {
-    return EmitReadConstBuffer(ctx, handle, index);
+    return ctx.OpBitcast(ctx.U32[1], EmitReadConstBuffer(ctx, handle, index));
 }
 
 Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp) {
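
Note: the EmitReadConstBufferU32 change works because constant-buffer contents are declared as 32-bit floats in the generated SPIR-V, so an integer read has to reinterpret the loaded bits rather than convert the value. A minimal standalone sketch of the same reinterpretation, with std::bit_cast standing in for OpBitcast (names here are illustrative, not from the codebase):

    #include <bit>
    #include <cstddef>
    #include <cstdint>

    // Constant-buffer storage is float-typed; a U32 read must reinterpret
    // the bits of the loaded value rather than numerically convert it.
    uint32_t read_const_buffer_u32(const float* cbuf, std::size_t index) {
        return std::bit_cast<uint32_t>(cbuf[index]); // OpBitcast equivalent
    }
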
@@ -86,8 +94,14 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp) {
             return ctx.OpLoad(param.component_type, param.id);
         }
     }
+    switch (attr) {
+    case IR::Attribute::FragCoord:
+        return ctx.OpLoad(ctx.F32[1],
+                          ctx.OpAccessChain(ctx.input_f32, ctx.frag_coord, ctx.ConstU32(comp)));
+    default:
         throw NotImplementedException("Read attribute {}", attr);
     }
+}
 
 Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, u32 comp) {
     switch (attr) {
@@ -98,6 +112,9 @@ Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, u32 comp) {
     case IR::Attribute::LocalInvocationId:
         return ctx.OpCompositeExtract(ctx.U32[1], ctx.OpLoad(ctx.U32[3], ctx.local_invocation_id),
                                       comp);
+    case IR::Attribute::IsFrontFace:
+        return ctx.OpSelect(ctx.U32[1], ctx.OpLoad(ctx.U1[1], ctx.front_facing), ctx.u32_one_value,
+                            ctx.u32_zero_value);
     default:
         throw NotImplementedException("Read U32 attribute {}", attr);
     }
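
Note: SPIR-V exposes FrontFacing as a boolean built-in, while the GCN attribute read expects a 32-bit integer; hence the OpSelect between the new u32_one_value and u32_zero_value constants. The equivalent scalar logic, as an illustrative sketch:

    #include <cstdint>

    // Boolean built-in -> integer attribute, mirroring the OpSelect above.
    uint32_t is_front_face_u32(bool front_facing) {
        return front_facing ? 1u : 0u;
    }
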
@@ -136,20 +153,14 @@ Id EmitLoadBufferF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address)
 Id EmitLoadBufferF32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
     const auto info = inst->Flags<IR::BufferInstInfo>();
     const auto& buffer = ctx.buffers[handle];
-    if (info.index_enable && info.offset_enable) {
-        UNREACHABLE();
-    } else if (info.index_enable) {
     boost::container::static_vector<Id, 4> ids;
     for (u32 i = 0; i < 4; i++) {
         const Id index{ctx.OpIAdd(ctx.U32[1], address, ctx.ConstU32(i))};
-        const Id ptr{
-            ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index)};
+        const Id ptr{ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index)};
         ids.push_back(ctx.OpLoad(buffer.data_types->Get(1), ptr));
     }
     return ctx.OpCompositeConstruct(buffer.data_types->Get(4), ids);
-    }
-    UNREACHABLE();
 }
 
 void EmitStoreBufferF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
     UNREACHABLE();
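
Note: after this change EmitLoadBufferF32x4 treats the address purely as a dword index and always loads four consecutive elements; the index/offset disambiguation moves into the resource tracking pass (see PatchBufferInstruction below). An illustrative standalone model of the loop:

    #include <array>
    #include <cstdint>

    // 'address' is a dword index; each component is one OpAccessChain +
    // OpLoad, and the four loads are packed via OpCompositeConstruct.
    std::array<float, 4> load_buffer_f32x4(const float* buffer, uint32_t address) {
        std::array<float, 4> result{};
        for (uint32_t i = 0; i < 4; ++i) {
            result[i] = buffer[address + i];
        }
        return result;
    }
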
@@ -34,14 +34,18 @@ void EmitGetScc(EmitContext& ctx);
 void EmitGetExec(EmitContext& ctx);
 void EmitGetVcc(EmitContext& ctx);
 void EmitGetVccLo(EmitContext& ctx);
+void EmitGetVccHi(EmitContext& ctx);
 void EmitSetScc(EmitContext& ctx);
 void EmitSetExec(EmitContext& ctx);
 void EmitSetVcc(EmitContext& ctx);
 void EmitSetVccLo(EmitContext& ctx);
+void EmitSetVccHi(EmitContext& ctx);
 void EmitPrologue(EmitContext& ctx);
 void EmitEpilogue(EmitContext& ctx);
 void EmitDiscard(EmitContext& ctx);
 Id EmitGetUserData(EmitContext& ctx, IR::ScalarReg reg);
+void EmitGetThreadBitScalarReg(EmitContext& ctx);
+void EmitSetThreadBitScalarReg(EmitContext& ctx);
 void EmitGetScalarRegister(EmitContext& ctx);
 void EmitSetScalarRegister(EmitContext& ctx);
 void EmitGetVectorRegister(EmitContext& ctx);
@@ -94,6 +94,7 @@ void EmitContext::DefineArithmeticTypes() {
 
     true_value = ConstantTrue(U1[1]);
     false_value = ConstantFalse(U1[1]);
+    u32_one_value = ConstU32(1U);
     u32_zero_value = ConstU32(0U);
     f32_zero_value = ConstF32(0.0f);
 
@@ -177,21 +178,24 @@ void EmitContext::DefineInputs(const Info& info) {
         }
         break;
     case Stage::Fragment:
+        frag_coord = DefineVariable(F32[4], spv::BuiltIn::FragCoord, spv::StorageClass::Input);
+        front_facing = DefineVariable(U1[1], spv::BuiltIn::FrontFacing, spv::StorageClass::Input);
         for (const auto& input : info.ps_inputs) {
+            const u32 semantic = input.param_index;
             if (input.is_default) {
-                input_params[input.semantic] = {MakeDefaultValue(*this, input.default_value),
-                                                input_f32, F32[1]};
+                input_params[semantic] = {MakeDefaultValue(*this, input.default_value), input_f32,
+                                          F32[1]};
                 continue;
             }
             const IR::Attribute param{IR::Attribute::Param0 + input.param_index};
             const u32 num_components = info.loads.NumComponents(param);
             const Id type{F32[num_components]};
-            const Id id{DefineInput(type, input.semantic)};
+            const Id id{DefineInput(type, semantic)};
             if (input.is_flat) {
                 Decorate(id, spv::Decoration::Flat);
             }
-            Name(id, fmt::format("fs_in_attr{}", input.semantic));
-            input_params[input.semantic] = {id, input_f32, F32[1], num_components};
+            Name(id, fmt::format("fs_in_attr{}", semantic));
+            input_params[semantic] = {id, input_f32, F32[1], num_components};
             interfaces.push_back(id);
         }
         break;
@@ -260,7 +264,7 @@ void EmitContext::DefineBuffers(const Info& info) {
     const Id id{AddGlobalVariable(struct_pointer_type, storage_class)};
     Decorate(id, spv::Decoration::Binding, binding);
     Decorate(id, spv::Decoration::DescriptorSet, 0U);
-    Name(id, fmt::format("{}{}", buffer.is_storage ? "ssbo" : "cbuf", i));
+    Name(id, fmt::format("{}_{}", buffer.is_storage ? "ssbo" : "cbuf", buffer.sgpr_base));
 
     binding++;
     buffers.push_back({
@@ -318,7 +322,9 @@ Id ImageType(EmitContext& ctx, const ImageResource& desc, Id sampled_type) {
     case AmdGpu::ImageType::Color2DArray:
         return ctx.TypeImage(sampled_type, spv::Dim::Dim2D, false, true, false, 1, format);
     case AmdGpu::ImageType::Color3D:
-        return ctx.TypeImage(sampled_type, spv::Dim::Dim3D, false, false, false, 2, format);
+        return ctx.TypeImage(sampled_type, spv::Dim::Dim3D, false, false, false, 1, format);
+    case AmdGpu::ImageType::Cube:
+        return ctx.TypeImage(sampled_type, spv::Dim::Cube, false, false, false, 1, format);
     case AmdGpu::ImageType::Buffer:
         throw NotImplementedException("Image buffer");
     default:
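
Note: in OpTypeImage the next-to-last operand ("Sampled") is 1 for images read through a sampler and 2 for storage images, so the Color3D change from 2 to 1 marks 3D images as sampler-backed like the other sampled cases, and the new Cube case follows the same convention:

    // OpTypeImage operand order, per the SPIR-V specification:
    //   Sampled Type, Dim, Depth, Arrayed, MS, Sampled, Image Format
    // Sampled = 1 -> used with a sampler; Sampled = 2 -> storage image.
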
@@ -140,6 +140,7 @@ public:
 
     Id true_value{};
     Id false_value{};
+    Id u32_one_value{};
     Id u32_zero_value{};
     Id f32_zero_value{};
 
@@ -154,6 +155,8 @@ public:
     Id output_position{};
     Id vertex_index{};
     Id base_vertex{};
+    Id frag_coord{};
+    Id front_facing{};
     std::array<Id, 8> frag_color{};
 
     Id workgroup_id{};
@@ -38,8 +38,145 @@ void Translator::S_CMP(ConditionOp cond, bool is_signed, const GcnInst& inst) {
 }
 
 void Translator::S_ANDN2_B64(const GcnInst& inst) {
-    // TODO: Actually implement this.
-    ir.SetScc(ir.GetVcc());
+    // TODO: What if this is used for something other than EXEC masking?
+    const auto get_src = [&](const InstOperand& operand) {
+        switch (operand.field) {
+        case OperandField::VccLo:
+            return ir.GetVcc();
+        case OperandField::ExecLo:
+            return ir.GetExec();
+        case OperandField::ScalarGPR:
+            return ir.GetThreadBitScalarReg(IR::ScalarReg(operand.code));
+        default:
+            UNREACHABLE();
+        }
+    };
+
+    const IR::U1 src0{get_src(inst.src[0])};
+    const IR::U1 src1{get_src(inst.src[1])};
+    const IR::U1 result{ir.LogicalAnd(src0, ir.LogicalNot(src1))};
+    SetDst(inst.dst[0], result);
+    ir.SetScc(result);
+}
+
+void Translator::S_AND_SAVEEXEC_B64(const GcnInst& inst) {
+    // This instruction normally operates on 64-bit data (EXEC, VCC, SGPRs)
+    // However here we flatten it to 1-bit EXEC and 1-bit VCC. For the destination
+    // SGPR we have a special IR opcode for SPGRs that act as thread masks.
+    const IR::U1 exec{ir.GetExec()};
+
+    // Mark destination SPGR as an EXEC context. This means we will use 1-bit
+    // IR instruction whenever it's loaded.
+    ASSERT(inst.dst[0].field == OperandField::ScalarGPR);
+    const u32 reg = inst.dst[0].code;
+    exec_contexts[reg] = true;
+    ir.SetThreadBitScalarReg(IR::ScalarReg(reg), exec);
+
+    // Update EXEC.
+    ASSERT(inst.src[0].field == OperandField::VccLo);
+    ir.SetExec(ir.LogicalAnd(exec, ir.GetVcc()));
+}
+
+void Translator::S_MOV_B64(const GcnInst& inst) {
+    // TODO: Using VCC as EXEC context.
+    if (inst.src[0].field == OperandField::VccLo || inst.dst[0].field == OperandField::VccLo) {
+        return;
+    }
+    const IR::U1 src0{GetSrc(inst.src[0])};
+    if (inst.dst[0].field == OperandField::ScalarGPR && inst.src[0].field == OperandField::ExecLo) {
+        // Exec context push
+        exec_contexts[inst.dst[0].code] = true;
+    } else if (inst.dst[0].field == OperandField::ExecLo &&
+               inst.src[0].field == OperandField::ScalarGPR) {
+        // Exec context pop
+        exec_contexts[inst.src[0].code] = false;
+    } else if (inst.src[0].field != OperandField::ConstZero) {
+        UNREACHABLE();
+    }
+    SetDst(inst.dst[0], src0);
+}
+
+void Translator::S_OR_B64(bool negate, const GcnInst& inst) {
+    const auto get_src = [&](const InstOperand& operand) {
+        switch (operand.field) {
+        case OperandField::VccLo:
+            return ir.GetVcc();
+        case OperandField::ScalarGPR:
+            return ir.GetThreadBitScalarReg(IR::ScalarReg(operand.code));
+        default:
+            UNREACHABLE();
+        }
+    };
+
+    const IR::U1 src0{get_src(inst.src[0])};
+    const IR::U1 src1{get_src(inst.src[1])};
+    IR::U1 result = ir.LogicalOr(src0, src1);
+    if (negate) {
+        result = ir.LogicalNot(result);
+    }
+    ASSERT(inst.dst[0].field == OperandField::VccLo);
+    ir.SetVcc(result);
+    ir.SetScc(result);
+}
+
+void Translator::S_AND_B64(const GcnInst& inst) {
+    const auto get_src = [&](const InstOperand& operand) {
+        switch (operand.field) {
+        case OperandField::VccLo:
+            return ir.GetVcc();
+        case OperandField::ExecLo:
+            return ir.GetExec();
+        case OperandField::ScalarGPR:
+            return ir.GetThreadBitScalarReg(IR::ScalarReg(operand.code));
+        default:
+            UNREACHABLE();
+        }
+    };
+    const IR::U1 src0{get_src(inst.src[0])};
+    const IR::U1 src1{get_src(inst.src[1])};
+    const IR::U1 result = ir.LogicalAnd(src0, src1);
+    ASSERT(inst.dst[0].field == OperandField::VccLo);
+    ir.SetVcc(result);
+    ir.SetScc(result);
+}
+
+void Translator::S_ADD_I32(const GcnInst& inst) {
+    const IR::U32 src0{GetSrc(inst.src[0])};
+    const IR::U32 src1{GetSrc(inst.src[1])};
+    SetDst(inst.dst[0], ir.IAdd(src0, src1));
+    // TODO: Overflow flag
+}
+
+void Translator::S_AND_B32(const GcnInst& inst) {
+    const IR::U32 src0{GetSrc(inst.src[0])};
+    const IR::U32 src1{GetSrc(inst.src[1])};
+    const IR::U32 result{ir.BitwiseAnd(src0, src1)};
+    SetDst(inst.dst[0], result);
+    ir.SetScc(ir.INotEqual(result, ir.Imm32(0)));
+}
+
+void Translator::S_LSHR_B32(const GcnInst& inst) {
+    const IR::U32 src0{GetSrc(inst.src[0])};
+    const IR::U32 src1{GetSrc(inst.src[1])};
+    const IR::U32 result{ir.ShiftRightLogical(src0, src1)};
+    SetDst(inst.dst[0], result);
+    ir.SetScc(ir.INotEqual(result, ir.Imm32(0)));
+}
+
+void Translator::S_CSELECT_B32(const GcnInst& inst) {
+    const IR::U32 src0{GetSrc(inst.src[0])};
+    const IR::U32 src1{GetSrc(inst.src[1])};
+    SetDst(inst.dst[0], IR::U32{ir.Select(ir.GetScc(), src0, src1)});
+}
+
+void Translator::S_BFE_U32(const GcnInst& inst) {
+    const IR::U32 src0{GetSrc(inst.src[0])};
+    const IR::U32 src1{GetSrc(inst.src[1])};
+    const IR::U32 offset{ir.BitwiseAnd(src1, ir.Imm32(0x1F))};
+    const IR::U32 count{ir.BitFieldExtract(src1, ir.Imm32(16), ir.Imm32(7))};
+    const IR::U32 result{ir.BitFieldExtract(src0, offset, count)};
+    SetDst(inst.dst[0], result);
+    ir.SetScc(ir.INotEqual(result, ir.Imm32(0)));
 }
 
 } // namespace Shader::Gcn
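
Note: these scalar mask instructions flatten 64-bit EXEC/VCC handling to one bit per wave, and an SGPR written by S_AND_SAVEEXEC_B64 is remembered in exec_contexts so later reads of it go through GetThreadBitScalarReg. A one-bit standalone model of the flattened semantics (names are illustrative, not from the codebase):

    // One-bit model of the flattened 64-bit scalar mask operations.
    struct MaskState {
        bool exec;
        bool vcc;
        bool scc;
    };

    // S_ANDN2_B64: dst = src0 & ~src1; SCC mirrors the result.
    bool s_andn2(MaskState& st, bool src0, bool src1) {
        const bool result = src0 && !src1;
        st.scc = result;
        return result; // written to the destination mask
    }

    // S_AND_SAVEEXEC_B64: dst = old EXEC, then EXEC = VCC & old EXEC.
    // (GCN hardware also updates SCC here; the translated version above omits it.)
    bool s_and_saveexec(MaskState& st) {
        const bool saved = st.exec;
        st.exec = st.vcc && saved;
        return saved; // stored as a thread-bit SGPR
    }
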
@@ -9,11 +9,15 @@
 
 namespace Shader::Gcn {
 
+std::array<bool, IR::NumScalarRegs> Translator::exec_contexts{};
+
 Translator::Translator(IR::Block* block_, Info& info_)
     : ir{*block_, block_->begin()}, info{info_} {}
 
 void Translator::EmitPrologue() {
+    exec_contexts.fill(false);
     ir.Prologue();
+    ir.SetExec(ir.Imm1(true));
 
     // Initialize user data.
     IR::ScalarReg dst_sreg = IR::ScalarReg::S0;
@@ -54,10 +58,16 @@ void Translator::EmitPrologue() {
     }
 }
 
-IR::U32F32 Translator::GetSrc(const InstOperand& operand, bool force_flt) {
-    IR::U32F32 value{};
+IR::U1U32F32 Translator::GetSrc(const InstOperand& operand, bool force_flt) {
+    // Input modifiers work on float values.
+    force_flt |= operand.input_modifier.abs | operand.input_modifier.neg;
+
+    IR::U1U32F32 value{};
     switch (operand.field) {
     case OperandField::ScalarGPR:
+        if (exec_contexts[operand.code]) {
+            value = ir.GetThreadBitScalarReg(IR::ScalarReg(operand.code));
+        }
         if (operand.type == ScalarType::Float32 || force_flt) {
             value = ir.GetScalarReg<IR::F32>(IR::ScalarReg(operand.code));
         } else {
@@ -114,9 +124,15 @@ IR::U32F32 Translator::GetSrc(const InstOperand& operand, bool force_flt) {
     case OperandField::ConstFloatNeg_2_0:
         value = ir.Imm32(-2.0f);
         break;
+    case OperandField::ExecLo:
+        value = ir.GetExec();
+        break;
     case OperandField::VccLo:
         value = ir.GetVccLo();
         break;
+    case OperandField::VccHi:
+        value = ir.GetVccHi();
+        break;
     default:
         UNREACHABLE();
     }
@@ -130,8 +146,8 @@ IR::U32F32 Translator::GetSrc(const InstOperand& operand, bool force_flt) {
     return value;
 }
 
-void Translator::SetDst(const InstOperand& operand, const IR::U32F32& value) {
-    IR::U32F32 result = value;
+void Translator::SetDst(const InstOperand& operand, const IR::U1U32F32& value) {
+    IR::U1U32F32 result = value;
     if (operand.output_modifier.multiplier != 0.f) {
         result = ir.FPMul(result, ir.Imm32(operand.output_modifier.multiplier));
     }
@@ -140,14 +156,20 @@ void Translator::SetDst(const InstOperand& operand, const IR::U1U32F32& value) {
     }
     switch (operand.field) {
     case OperandField::ScalarGPR:
+        if (value.Type() == IR::Type::U1) {
+            return ir.SetThreadBitScalarReg(IR::ScalarReg(operand.code), result);
+        }
         return ir.SetScalarReg(IR::ScalarReg(operand.code), result);
     case OperandField::VectorGPR:
         return ir.SetVectorReg(IR::VectorReg(operand.code), result);
+    case OperandField::ExecLo:
+        return ir.SetExec(result);
     case OperandField::VccLo:
         return ir.SetVccLo(result);
     case OperandField::VccHi:
+        return ir.SetVccHi(result);
     case OperandField::M0:
-        break; // Ignore for now
+        break;
     default:
         UNREACHABLE();
     }
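
Note: GetSrc/SetDst now carry IR::U1U32F32, so a boolean thread mask can flow through the generic operand paths; SetDst checks the held IR type and routes U1 values to SetThreadBitScalarReg instead of the 32-bit register file. A sketch of that dispatch using std::variant (illustrative only, not the project's API):

    #include <cstdint>
    #include <variant>

    using Value = std::variant<bool, uint32_t, float>; // ~ IR::U1U32F32

    void set_scalar_dst(const Value& v, bool& thread_bit, uint32_t& sgpr) {
        if (std::holds_alternative<bool>(v)) {
            thread_bit = std::get<bool>(v); // SetThreadBitScalarReg path
        } else if (std::holds_alternative<uint32_t>(v)) {
            sgpr = std::get<uint32_t>(v);   // SetScalarReg path
        }
    }
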
@@ -279,11 +301,32 @@ void Translate(IR::Block* block, std::span<const GcnInst> inst_list, Info& info)
         case Opcode::IMAGE_SAMPLE:
             translator.IMAGE_SAMPLE(inst);
             break;
-        case Opcode::V_CMP_EQ_U32:
-            translator.V_CMP_EQ_U32(inst);
+        case Opcode::V_CMP_EQ_I32:
+            translator.V_CMP_U32(ConditionOp::EQ, true, false, inst);
             break;
-        case Opcode::V_CMPX_GT_U32:
-            translator.V_CMPX_GT_U32(inst);
+        case Opcode::V_CMP_NE_U32:
+            translator.V_CMP_U32(ConditionOp::LG, false, false, inst);
+            break;
+        case Opcode::V_CMP_EQ_U32:
+            translator.V_CMP_U32(ConditionOp::EQ, false, false, inst);
+            break;
+        case Opcode::V_CMP_F_U32:
+            translator.V_CMP_U32(ConditionOp::F, false, false, inst);
+            break;
+        case Opcode::V_CMP_LT_U32:
+            translator.V_CMP_U32(ConditionOp::LT, false, false, inst);
+            break;
+        case Opcode::V_CMP_GT_U32:
+            translator.V_CMP_U32(ConditionOp::GT, false, false, inst);
+            break;
+        case Opcode::V_CMP_GE_U32:
+            translator.V_CMP_U32(ConditionOp::GE, false, false, inst);
+            break;
+        case Opcode::V_CMP_TRU_U32:
+            translator.V_CMP_U32(ConditionOp::TRU, false, false, inst);
+            break;
+        case Opcode::V_CMP_NEQ_F32:
+            translator.V_CMP_F32(ConditionOp::LG, inst);
             break;
         case Opcode::V_CMP_F_F32:
             translator.V_CMP_F32(ConditionOp::F, inst);
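
Note: the per-opcode V_CMP handlers are replaced by one V_CMP_U32 entry point parameterized by (ConditionOp, is_signed, set_exec). A scalar model of the comparison kernel it emits:

    #include <cstdint>

    enum class ConditionOp { F, EQ, LG, GT, GE, LT, LE, TRU };

    bool v_cmp_u32(ConditionOp op, bool is_signed, uint32_t a, uint32_t b) {
        const auto sa = static_cast<int32_t>(a);
        const auto sb = static_cast<int32_t>(b);
        switch (op) {
        case ConditionOp::F:   return false; // never true
        case ConditionOp::TRU: return true;  // always true
        case ConditionOp::EQ:  return a == b;
        case ConditionOp::LG:  return a != b;
        case ConditionOp::GT:  return is_signed ? sa > sb : a > b;
        case ConditionOp::GE:  return is_signed ? sa >= sb : a >= b;
        case ConditionOp::LT:  return is_signed ? sa < sb : a < b;
        case ConditionOp::LE:  return is_signed ? sa <= sb : a <= b;
        }
        return false;
    }
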
@@ -309,6 +352,9 @@ void Translate(IR::Block* block, std::span<const GcnInst> inst_list, Info& info)
         case Opcode::S_CMP_LG_U32:
            translator.S_CMP(ConditionOp::LG, false, inst);
            break;
+        case Opcode::S_CMP_EQ_I32:
+            translator.S_CMP(ConditionOp::EQ, true, inst);
+            break;
         case Opcode::V_CNDMASK_B32:
             translator.V_CNDMASK_B32(inst);
             break;
@@ -348,13 +394,125 @@ void Translate(IR::Block* block, std::span<const GcnInst> inst_list, Info& info)
         case Opcode::V_MIN3_F32:
             translator.V_MIN3_F32(inst);
             break;
-        case Opcode::S_NOP:
+        case Opcode::V_MADMK_F32:
+            translator.V_MADMK_F32(inst);
+            break;
+        case Opcode::V_CUBEMA_F32:
+            translator.V_CUBEMA_F32(inst);
+            break;
+        case Opcode::V_CUBESC_F32:
+            translator.V_CUBESC_F32(inst);
+            break;
+        case Opcode::V_CUBETC_F32:
+            translator.V_CUBETC_F32(inst);
+            break;
+        case Opcode::V_CUBEID_F32:
+            translator.V_CUBEID_F32(inst);
+            break;
+        case Opcode::V_CVT_U32_F32:
+            translator.V_CVT_U32_F32(inst);
+            break;
+        case Opcode::V_SUBREV_F32:
+            translator.V_SUBREV_F32(inst);
+            break;
+        case Opcode::S_AND_SAVEEXEC_B64:
+            translator.S_AND_SAVEEXEC_B64(inst);
+            break;
+        case Opcode::S_MOV_B64:
+            translator.S_MOV_B64(inst);
+            break;
+        case Opcode::V_SUBREV_I32:
+            translator.V_SUBREV_I32(inst);
+            break;
+        case Opcode::V_CMP_LE_U32:
+            translator.V_CMP_U32(ConditionOp::LE, false, false, inst);
+            break;
+        case Opcode::V_CMP_GT_I32:
+            translator.V_CMP_U32(ConditionOp::GT, true, false, inst);
+            break;
+        case Opcode::V_CMPX_F_U32:
+            translator.V_CMP_U32(ConditionOp::F, false, true, inst);
+            break;
+        case Opcode::V_CMPX_LT_U32:
+            translator.V_CMP_U32(ConditionOp::LT, false, true, inst);
+            break;
+        case Opcode::V_CMPX_EQ_U32:
+            translator.V_CMP_U32(ConditionOp::EQ, false, true, inst);
+            break;
+        case Opcode::V_CMPX_LE_U32:
+            translator.V_CMP_U32(ConditionOp::LE, false, true, inst);
+            break;
+        case Opcode::V_CMPX_GT_U32:
+            translator.V_CMP_U32(ConditionOp::GT, false, true, inst);
+            break;
+        case Opcode::V_CMPX_NE_U32:
+            translator.V_CMP_U32(ConditionOp::LG, false, true, inst);
+            break;
+        case Opcode::V_CMPX_GE_U32:
+            translator.V_CMP_U32(ConditionOp::GE, false, true, inst);
+            break;
+        case Opcode::V_CMPX_TRU_U32:
+            translator.V_CMP_U32(ConditionOp::TRU, false, true, inst);
+            break;
+        case Opcode::S_OR_B64:
+            translator.S_OR_B64(false, inst);
+            break;
+        case Opcode::S_NOR_B64:
+            translator.S_OR_B64(true, inst);
+            break;
         case Opcode::S_AND_B64:
+            translator.S_AND_B64(inst);
+            break;
+        case Opcode::V_LSHRREV_B32:
+            translator.V_LSHRREV_B32(inst);
+            break;
+        case Opcode::S_ADD_I32:
+            translator.S_ADD_I32(inst);
+            break;
+        case Opcode::V_MUL_LO_I32:
+            translator.V_MUL_LO_I32(inst);
+            break;
+        case Opcode::V_SAD_U32:
+            translator.V_SAD_U32(inst);
+            break;
+        case Opcode::V_BFE_U32:
+            translator.V_BFE_U32(inst);
+            break;
+        case Opcode::V_MAD_I32_I24:
+            translator.V_MAD_I32_I24(inst);
+            break;
+        case Opcode::V_MUL_I32_I24:
+            translator.V_MUL_I32_I24(inst);
+            break;
+        case Opcode::V_SUB_I32:
+            translator.V_SUB_I32(inst);
+            break;
+        case Opcode::V_LSHR_B32:
+            translator.V_LSHR_B32(inst);
+            break;
+        case Opcode::V_ASHRREV_I32:
+            translator.V_ASHRREV_I32(inst);
+            break;
+        case Opcode::V_MAD_U32_U24:
+            translator.V_MAD_U32_U24(inst);
+            break;
+        case Opcode::S_AND_B32:
+            translator.S_AND_B32(inst);
+            break;
+        case Opcode::S_LSHR_B32:
+            translator.S_LSHR_B32(inst);
+            break;
+        case Opcode::S_CSELECT_B32:
+            translator.S_CSELECT_B32(inst);
+            break;
+        case Opcode::S_BFE_U32:
+            translator.S_BFE_U32(inst);
+            break;
+        case Opcode::S_NOP:
         case Opcode::S_CBRANCH_EXECZ:
         case Opcode::S_CBRANCH_SCC0:
         case Opcode::S_CBRANCH_SCC1:
         case Opcode::S_BRANCH:
-        case Opcode::S_MOV_B64:
         case Opcode::S_WQM_B64:
         case Opcode::V_INTERP_P1_F32:
         case Opcode::S_ENDPGM:
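
Note: the V_CMPX_* cases pass set_exec = true, so the handler performs the same comparison and additionally writes the result to EXEC, which is what enables the better branch detection in the commit title. In terms of the v_cmp_u32 sketch above:

    // bool r = v_cmp_u32(op, is_signed, a, b); // identical comparison
    // vcc  = r;                                // V_CMP_* and V_CMPX_*
    // exec = r;                                // V_CMPX_* only (set_exec)
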
@@ -23,6 +23,7 @@ enum class ConditionOp : u32 {
     GE,
     LT,
     LE,
+    TRU,
 };
 
 class Translator {
|
||||||
void S_MUL_I32(const GcnInst& inst);
|
void S_MUL_I32(const GcnInst& inst);
|
||||||
void S_CMP(ConditionOp cond, bool is_signed, const GcnInst& inst);
|
void S_CMP(ConditionOp cond, bool is_signed, const GcnInst& inst);
|
||||||
void S_ANDN2_B64(const GcnInst& inst);
|
void S_ANDN2_B64(const GcnInst& inst);
|
||||||
|
void S_AND_SAVEEXEC_B64(const GcnInst& inst);
|
||||||
|
void S_MOV_B64(const GcnInst& inst);
|
||||||
|
void S_OR_B64(bool negate, const GcnInst& inst);
|
||||||
|
void S_AND_B64(const GcnInst& inst);
|
||||||
|
void S_ADD_I32(const GcnInst& inst);
|
||||||
|
void S_AND_B32(const GcnInst& inst);
|
||||||
|
void S_LSHR_B32(const GcnInst& inst);
|
||||||
|
void S_CSELECT_B32(const GcnInst& inst);
|
||||||
|
void S_BFE_U32(const GcnInst& inst);
|
||||||
|
|
||||||
// Scalar Memory
|
// Scalar Memory
|
||||||
void S_LOAD_DWORD(int num_dwords, const GcnInst& inst);
|
void S_LOAD_DWORD(int num_dwords, const GcnInst& inst);
|
||||||
|
@@ -48,7 +58,6 @@ public:
     void V_MAC_F32(const GcnInst& inst);
     void V_CVT_PKRTZ_F16_F32(const GcnInst& inst);
     void V_MUL_F32(const GcnInst& inst);
-    void V_CMP_EQ_U32(const GcnInst& inst);
     void V_CNDMASK_B32(const GcnInst& inst);
     void V_AND_B32(const GcnInst& inst);
     void V_LSHLREV_B32(const GcnInst& inst);
@@ -63,7 +72,6 @@ public:
     void V_FLOOR_F32(const GcnInst& inst);
     void V_SUB_F32(const GcnInst& inst);
     void V_RCP_F32(const GcnInst& inst);
-    void V_CMPX_GT_U32(const GcnInst& inst);
     void V_FMA_F32(const GcnInst& inst);
     void V_CMP_F32(ConditionOp op, const GcnInst& inst);
     void V_MAX_F32(const GcnInst& inst);
@@ -74,6 +82,25 @@ public:
     void V_SQRT_F32(const GcnInst& inst);
     void V_MIN_F32(const GcnInst& inst);
     void V_MIN3_F32(const GcnInst& inst);
+    void V_MADMK_F32(const GcnInst& inst);
+    void V_CUBEMA_F32(const GcnInst& inst);
+    void V_CUBESC_F32(const GcnInst& inst);
+    void V_CUBETC_F32(const GcnInst& inst);
+    void V_CUBEID_F32(const GcnInst& inst);
+    void V_CVT_U32_F32(const GcnInst& inst);
+    void V_SUBREV_F32(const GcnInst& inst);
+    void V_SUBREV_I32(const GcnInst& inst);
+    void V_CMP_U32(ConditionOp op, bool is_signed, bool set_exec, const GcnInst& inst);
+    void V_LSHRREV_B32(const GcnInst& inst);
+    void V_MUL_LO_I32(const GcnInst& inst);
+    void V_SAD_U32(const GcnInst& inst);
+    void V_BFE_U32(const GcnInst& inst);
+    void V_MAD_I32_I24(const GcnInst& inst);
+    void V_MUL_I32_I24(const GcnInst& inst);
+    void V_SUB_I32(const GcnInst& inst);
+    void V_LSHR_B32(const GcnInst& inst);
+    void V_ASHRREV_I32(const GcnInst& inst);
+    void V_MAD_U32_U24(const GcnInst& inst);
 
     // Vector Memory
     void BUFFER_LOAD_FORMAT(u32 num_dwords, bool is_typed, const GcnInst& inst);
@@ -94,12 +121,13 @@ public:
     void EXP(const GcnInst& inst);
 
 private:
-    IR::U32F32 GetSrc(const InstOperand& operand, bool flt_zero = false);
-    void SetDst(const InstOperand& operand, const IR::U32F32& value);
+    IR::U1U32F32 GetSrc(const InstOperand& operand, bool flt_zero = false);
+    void SetDst(const InstOperand& operand, const IR::U1U32F32& value);
 
 private:
     IR::IREmitter ir;
     Info& info;
+    static std::array<bool, IR::NumScalarRegs> exec_contexts;
 };
 
 void Translate(IR::Block* block, std::span<const GcnInst> inst_list, Info& info);
@@ -29,17 +29,6 @@ void Translator::V_MUL_F32(const GcnInst& inst) {
     ir.SetVectorReg(dst_reg, ir.FPMul(GetSrc(inst.src[0], true), GetSrc(inst.src[1], true)));
 }
 
-void Translator::V_CMP_EQ_U32(const GcnInst& inst) {
-    const IR::U1 result = ir.IEqual(GetSrc(inst.src[0]), GetSrc(inst.src[1]));
-    if (inst.dst[1].field == OperandField::VccLo) {
-        return ir.SetVcc(result);
-    } else if (inst.dst[1].field == OperandField::ScalarGPR) {
-        const IR::ScalarReg dst_reg{inst.dst[1].code};
-        return ir.SetScalarReg(dst_reg, IR::U32{ir.Select(result, ir.Imm32(1U), ir.Imm32(0U))});
-    }
-    UNREACHABLE();
-}
-
 void Translator::V_CNDMASK_B32(const GcnInst& inst) {
     const IR::VectorReg dst_reg{inst.dst[0].code};
     const IR::ScalarReg flag_reg{inst.src[2].code};
@@ -70,9 +59,9 @@ void Translator::V_AND_B32(const GcnInst& inst) {
 
 void Translator::V_LSHLREV_B32(const GcnInst& inst) {
     const IR::U32 src0{GetSrc(inst.src[0])};
-    const IR::U32 src1{ir.GetVectorReg(IR::VectorReg(inst.src[1].code))};
+    const IR::U32 src1{GetSrc(inst.src[1])};
     const IR::VectorReg dst_reg{inst.dst[0].code};
-    ir.SetVectorReg(dst_reg, ir.ShiftLeftLogical(src1, src0));
+    ir.SetVectorReg(dst_reg, ir.ShiftLeftLogical(src1, ir.BitwiseAnd(src0, ir.Imm32(0x1F))));
 }
 
 void Translator::V_ADD_I32(const GcnInst& inst) {
@@ -148,14 +137,6 @@ void Translator::V_RCP_F32(const GcnInst& inst) {
     SetDst(inst.dst[0], ir.FPRecip(src0));
 }
 
-void Translator::V_CMPX_GT_U32(const GcnInst& inst) {
-    const IR::U32 src0{GetSrc(inst.src[0])};
-    const IR::U32 src1{GetSrc(inst.src[1])};
-    const IR::U1 result = ir.IGreaterThan(src0, src1, false);
-    ir.SetVcc(result);
-    ir.SetExec(result);
-}
-
 void Translator::V_FMA_F32(const GcnInst& inst) {
     const IR::F32 src0{GetSrc(inst.src[0], true)};
     const IR::F32 src1{GetSrc(inst.src[1], true)};
@@ -182,6 +163,8 @@ void Translator::V_CMP_F32(ConditionOp op, const GcnInst& inst) {
         return ir.FPLessThanEqual(src0, src1);
     case ConditionOp::GE:
         return ir.FPGreaterThanEqual(src0, src1);
+    default:
+        UNREACHABLE();
     }
     }();
     ir.SetVcc(result);
|
||||||
SetDst(inst.dst[0], ir.FPMin(src0, ir.FPMin(src1, src2)));
|
SetDst(inst.dst[0], ir.FPMin(src0, ir.FPMin(src1, src2)));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void Translator::V_MADMK_F32(const GcnInst& inst) {
|
||||||
|
const IR::F32 src0{GetSrc(inst.src[0], true)};
|
||||||
|
const IR::F32 src1{GetSrc(inst.src[1], true)};
|
||||||
|
const IR::F32 k{GetSrc(inst.src[2], true)};
|
||||||
|
SetDst(inst.dst[0], ir.FPFma(src0, k, src1));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_CUBEMA_F32(const GcnInst& inst) {
|
||||||
|
SetDst(inst.dst[0], ir.Imm32(1.f));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_CUBESC_F32(const GcnInst& inst) {
|
||||||
|
SetDst(inst.dst[0], GetSrc(inst.src[0], true));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_CUBETC_F32(const GcnInst& inst) {
|
||||||
|
SetDst(inst.dst[0], GetSrc(inst.src[1], true));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_CUBEID_F32(const GcnInst& inst) {
|
||||||
|
SetDst(inst.dst[0], GetSrc(inst.src[2], true));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_CVT_U32_F32(const GcnInst& inst) {
|
||||||
|
const IR::F32 src0{GetSrc(inst.src[0], true)};
|
||||||
|
SetDst(inst.dst[0], ir.ConvertFToU(32, src0));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_SUBREV_F32(const GcnInst& inst) {
|
||||||
|
const IR::F32 src0{GetSrc(inst.src[0], true)};
|
||||||
|
const IR::F32 src1{GetSrc(inst.src[1], true)};
|
||||||
|
SetDst(inst.dst[0], ir.FPSub(src1, src0));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_SUBREV_I32(const GcnInst& inst) {
|
||||||
|
const IR::U32 src0{GetSrc(inst.src[0])};
|
||||||
|
const IR::U32 src1{GetSrc(inst.src[1])};
|
||||||
|
SetDst(inst.dst[0], ir.ISub(src1, src0));
|
||||||
|
// TODO: Carry-out
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_CMP_U32(ConditionOp op, bool is_signed, bool set_exec, const GcnInst& inst) {
|
||||||
|
const IR::U32 src0{GetSrc(inst.src[0])};
|
||||||
|
const IR::U32 src1{GetSrc(inst.src[1])};
|
||||||
|
const IR::U1 result = [&] {
|
||||||
|
switch (op) {
|
||||||
|
case ConditionOp::F:
|
||||||
|
return ir.Imm1(false);
|
||||||
|
case ConditionOp::TRU:
|
||||||
|
return ir.Imm1(true);
|
||||||
|
case ConditionOp::EQ:
|
||||||
|
return ir.IEqual(src0, src1);
|
||||||
|
case ConditionOp::LG:
|
||||||
|
return ir.INotEqual(src0, src1);
|
||||||
|
case ConditionOp::GT:
|
||||||
|
return ir.IGreaterThan(src0, src1, is_signed);
|
||||||
|
case ConditionOp::LT:
|
||||||
|
return ir.ILessThan(src0, src1, is_signed);
|
||||||
|
case ConditionOp::LE:
|
||||||
|
return ir.ILessThanEqual(src0, src1, is_signed);
|
||||||
|
case ConditionOp::GE:
|
||||||
|
return ir.IGreaterThanEqual(src0, src1, is_signed);
|
||||||
|
default:
|
||||||
|
UNREACHABLE();
|
||||||
|
}
|
||||||
|
}();
|
||||||
|
if (set_exec) {
|
||||||
|
ir.SetExec(result);
|
||||||
|
}
|
||||||
|
switch (inst.dst[1].field) {
|
||||||
|
case OperandField::VccLo:
|
||||||
|
return ir.SetVcc(result);
|
||||||
|
case OperandField::ScalarGPR:
|
||||||
|
return ir.SetThreadBitScalarReg(IR::ScalarReg(inst.dst[0].code), result);
|
||||||
|
default:
|
||||||
|
UNREACHABLE();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_LSHRREV_B32(const GcnInst& inst) {
|
||||||
|
const IR::U32 src0{GetSrc(inst.src[0])};
|
||||||
|
const IR::U32 src1{GetSrc(inst.src[1])};
|
||||||
|
SetDst(inst.dst[0], ir.ShiftRightLogical(src1, ir.BitwiseAnd(src0, ir.Imm32(0x1F))));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_MUL_LO_I32(const GcnInst& inst) {
|
||||||
|
const IR::U32 src0{GetSrc(inst.src[0])};
|
||||||
|
const IR::U32 src1{GetSrc(inst.src[1])};
|
||||||
|
SetDst(inst.dst[0], ir.IMul(src0, src1));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_SAD_U32(const GcnInst& inst) {
|
||||||
|
const IR::U32 src0{GetSrc(inst.src[0])};
|
||||||
|
const IR::U32 src1{GetSrc(inst.src[1])};
|
||||||
|
const IR::U32 src2{GetSrc(inst.src[2])};
|
||||||
|
const IR::U32 max{ir.IMax(src0, src1, false)};
|
||||||
|
const IR::U32 min{ir.IMin(src0, src1, false)};
|
||||||
|
SetDst(inst.dst[0], ir.IAdd(ir.ISub(max, min), src2));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_BFE_U32(const GcnInst& inst) {
|
||||||
|
const IR::U32 src0{GetSrc(inst.src[0])};
|
||||||
|
const IR::U32 src1{ir.BitwiseAnd(GetSrc(inst.src[1]), ir.Imm32(0x1F))};
|
||||||
|
const IR::U32 src2{ir.BitwiseAnd(GetSrc(inst.src[2]), ir.Imm32(0x1F))};
|
||||||
|
SetDst(inst.dst[0], ir.BitFieldExtract(src0, src1, src2));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_MAD_I32_I24(const GcnInst& inst) {
|
||||||
|
const IR::U32 src0{ir.BitFieldExtract(GetSrc(inst.src[0]), ir.Imm32(0), ir.Imm32(24), true)};
|
||||||
|
const IR::U32 src1{ir.BitFieldExtract(GetSrc(inst.src[1]), ir.Imm32(0), ir.Imm32(24), true)};
|
||||||
|
const IR::U32 src2{GetSrc(inst.src[2])};
|
||||||
|
SetDst(inst.dst[0], ir.IAdd(ir.IMul(src0, src1), src2));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_MUL_I32_I24(const GcnInst& inst) {
|
||||||
|
const IR::U32 src0{ir.BitFieldExtract(GetSrc(inst.src[0]), ir.Imm32(0), ir.Imm32(24), true)};
|
||||||
|
const IR::U32 src1{ir.BitFieldExtract(GetSrc(inst.src[1]), ir.Imm32(0), ir.Imm32(24), true)};
|
||||||
|
SetDst(inst.dst[0], ir.IMul(src0, src1));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_SUB_I32(const GcnInst& inst) {
|
||||||
|
const IR::U32 src0{GetSrc(inst.src[0])};
|
||||||
|
const IR::U32 src1{GetSrc(inst.src[1])};
|
||||||
|
SetDst(inst.dst[0], ir.ISub(src0, src1));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_LSHR_B32(const GcnInst& inst) {
|
||||||
|
const IR::U32 src0{GetSrc(inst.src[0])};
|
||||||
|
const IR::U32 src1{GetSrc(inst.src[1])};
|
||||||
|
SetDst(inst.dst[0], ir.ShiftRightLogical(src0, ir.BitwiseAnd(src1, ir.Imm32(0x1F))));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_ASHRREV_I32(const GcnInst& inst) {
|
||||||
|
const IR::U32 src0{GetSrc(inst.src[0])};
|
||||||
|
const IR::U32 src1{GetSrc(inst.src[1])};
|
||||||
|
SetDst(inst.dst[0], ir.ShiftRightArithmetic(src1, ir.BitwiseAnd(src0, ir.Imm32(0x1F))));
|
||||||
|
}
|
||||||
|
|
||||||
|
void Translator::V_MAD_U32_U24(const GcnInst& inst) {
|
||||||
|
// TODO:
|
||||||
|
V_MAD_I32_I24(inst);
|
||||||
|
}
|
||||||
|
|
||||||
} // namespace Shader::Gcn
|
} // namespace Shader::Gcn
|
||||||
|
|
|
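
Note: V_MAD_I32_I24 and V_MUL_I32_I24 treat their operands as signed 24-bit fields, which the translator expresses as BitFieldExtract(src, 0, 24, signed), i.e. a sign extension; V_MAD_U32_U24 currently reuses the signed path (see the TODO above). A standalone model:

    #include <cstdint>

    // Sign-extend the low 24 bits (BitFieldExtract(src, 0, 24, true)).
    int32_t sext24(uint32_t v) {
        return static_cast<int32_t>(v << 8) >> 8; // arithmetic shift sign-extends
    }

    // V_MAD_I32_I24: dst = sext24(s0) * sext24(s1) + s2.
    int32_t v_mad_i32_i24(uint32_t s0, uint32_t s1, uint32_t s2) {
        return sext24(s0) * sext24(s1) + static_cast<int32_t>(s2);
    }
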
@@ -8,7 +8,6 @@ namespace Shader::Gcn {
 void Translator::V_INTERP_P2_F32(const GcnInst& inst) {
     const IR::VectorReg dst_reg{inst.dst[0].code};
     auto& attr = info.ps_inputs.at(inst.control.vintrp.attr);
-    attr.semantic = inst.control.vintrp.attr;
     const IR::Attribute attrib{IR::Attribute::Param0 + attr.param_index};
     ir.SetVectorReg(dst_reg, ir.GetAttribute(attrib, inst.control.vintrp.chan));
 }
@@ -119,6 +119,14 @@ U32 IREmitter::GetUserData(IR::ScalarReg reg) {
     return Inst<U32>(Opcode::GetUserData, reg);
 }
 
+U1 IREmitter::GetThreadBitScalarReg(IR::ScalarReg reg) {
+    return Inst<U1>(Opcode::GetThreadBitScalarReg, reg);
+}
+
+void IREmitter::SetThreadBitScalarReg(IR::ScalarReg reg, const U1& value) {
+    Inst(Opcode::SetThreadBitScalarReg, reg, value);
+}
+
 template <>
 U32 IREmitter::GetScalarReg(IR::ScalarReg reg) {
     return Inst<U32>(Opcode::GetScalarRegister, reg);
@@ -196,6 +204,10 @@ U32 IREmitter::GetVccLo() {
     return Inst<U32>(Opcode::GetVccLo);
 }
 
+U32 IREmitter::GetVccHi() {
+    return Inst<U32>(Opcode::GetVccHi);
+}
+
 void IREmitter::SetScc(const U1& value) {
     Inst(Opcode::SetScc, value);
 }
@@ -212,6 +224,10 @@ void IREmitter::SetVccLo(const U32& value) {
     Inst(Opcode::SetVccLo, value);
 }
 
+void IREmitter::SetVccHi(const U32& value) {
+    Inst(Opcode::SetVccHi, value);
+}
+
 F32 IREmitter::GetAttribute(IR::Attribute attribute, u32 comp) {
     return Inst<F32>(Opcode::GetAttribute, attribute, Imm32(comp));
 }
@@ -43,7 +43,9 @@ public:
     void Epilogue();
     void Discard();
 
-    U32 GetUserData(IR::ScalarReg reg);
+    [[nodiscard]] U32 GetUserData(IR::ScalarReg reg);
+    [[nodiscard]] U1 GetThreadBitScalarReg(IR::ScalarReg reg);
+    void SetThreadBitScalarReg(IR::ScalarReg reg, const U1& value);
 
     template <typename T = U32>
     [[nodiscard]] T GetScalarReg(IR::ScalarReg reg);
@@ -59,10 +61,12 @@ public:
     [[nodiscard]] U1 GetExec();
     [[nodiscard]] U1 GetVcc();
     [[nodiscard]] U32 GetVccLo();
+    [[nodiscard]] U32 GetVccHi();
     void SetScc(const U1& value);
     void SetExec(const U1& value);
     void SetVcc(const U1& value);
     void SetVccLo(const U32& value);
+    void SetVccHi(const U32& value);
 
     [[nodiscard]] U1 Condition(IR::Condition cond);
 
@@ -21,6 +21,8 @@
 
 // Context getters/setters
 OPCODE(GetUserData, U32, ScalarReg, )
+OPCODE(GetThreadBitScalarReg, U1, ScalarReg, )
+OPCODE(SetThreadBitScalarReg, Void, ScalarReg, U1, )
 OPCODE(GetScalarRegister, U32, ScalarReg, )
 OPCODE(SetScalarRegister, Void, ScalarReg, U32, )
 OPCODE(GetVectorRegister, U32, VectorReg, )
@@ -36,10 +38,12 @@ OPCODE(GetScc, U1, Void, )
 OPCODE(GetExec, U1, Void, )
 OPCODE(GetVcc, U1, Void, )
 OPCODE(GetVccLo, U32, Void, )
+OPCODE(GetVccHi, U32, Void, )
 OPCODE(SetScc, Void, U1, )
 OPCODE(SetExec, Void, U1, )
 OPCODE(SetVcc, Void, U1, )
 OPCODE(SetVccLo, Void, U32, )
+OPCODE(SetVccHi, Void, U32, )
 
 // Undefined
 OPCODE(UndefU1, U1, )
@@ -206,9 +206,12 @@ void PatchBufferInstruction(IR::Block& block, IR::Inst& inst, Info& info,
     const u32 dword_offset = inst_info.inst_offset.Value() / sizeof(u32);
     IR::U32 address = ir.Imm32(dword_offset);
     if (inst_info.index_enable && inst_info.offset_enable) {
-        UNREACHABLE();
+        const IR::U32 offset{ir.CompositeExtract(inst.Arg(1), 0)};
+        const IR::U32 index{ir.CompositeExtract(inst.Arg(1), 1)};
+        address = ir.IAdd(ir.IMul(index, ir.Imm32(dword_stride)), address);
+        address = ir.IAdd(address, ir.ShiftRightLogical(offset, ir.Imm32(2)));
     } else if (inst_info.index_enable) {
-        IR::U32 index{inst.Arg(1)};
+        const IR::U32 index{inst.Arg(1)};
         address = ir.IAdd(ir.IMul(index, ir.Imm32(dword_stride)), address);
     } else if (inst_info.offset_enable) {
         const IR::U32 offset{inst.Arg(1)};
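
Note: with both index and offset enabled, the instruction's address argument is a composite of (offset, index), and the final dword address combines the immediate offset, the indexed stride term, and the byte offset scaled down to dwords (ShiftRightLogical by 2). A worked scalar example:

    #include <cstdint>

    // All terms are dword-granular; offset_bytes >> 2 converts bytes to dwords.
    uint32_t buffer_dword_address(uint32_t inst_offset_bytes, uint32_t dword_stride,
                                  uint32_t index, uint32_t offset_bytes) {
        uint32_t address = inst_offset_bytes / 4u; // immediate dword_offset
        address = index * dword_stride + address;  // index term
        address += offset_bytes >> 2;              // offset term
        return address;
    }
    // e.g. inst_offset 16 bytes, stride 4 dwords, index 3, offset 8 bytes
    //      -> 4 + 12 + 2 = dword 18
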
@@ -216,6 +219,17 @@ void PatchBufferInstruction(IR::Block& block, IR::Inst& inst, Info& info,
     inst.SetArg(1, address);
 }
 
+IR::Value PatchCubeCoord(IR::IREmitter& ir, const IR::Value& s, const IR::Value& t,
+                         const IR::Value& z) {
+    // We need to fix x and y coordinate,
+    // because the s and t coordinate will be scaled and plus 1.5 by v_madak_f32.
+    // We already force the scale value to be 1.0 when handling v_cubema_f32,
+    // here we subtract 1.5 to recover the original value.
+    const IR::Value x = ir.FPSub(IR::F32{s}, ir.Imm32(1.5f));
+    const IR::Value y = ir.FPSub(IR::F32{t}, ir.Imm32(1.5f));
+    return ir.CompositeConstruct(x, y, z);
+}
+
 void PatchImageInstruction(IR::Block& block, IR::Inst& inst, Info& info, Descriptors& descriptors) {
     IR::Inst* producer = inst.Arg(0).InstRecursive();
     ASSERT(producer->GetOpcode() == IR::Opcode::CompositeConstructU32x2);
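
Note: PatchCubeCoord undoes the fixed-function cube preparation done in the guest shader: with V_CUBEMA_F32 forced to return 1.0 (see the translator change above), the shader's v_madak_f32 leaves s = x + 1.5 and t = y + 1.5, so subtracting 1.5 recovers the face coordinates the SPIR-V cube sampler expects. A scalar model:

    struct Vec3 {
        float x, y, z;
    };

    // s,t arrive as face coordinates scaled by 1.0 and biased by 1.5.
    Vec3 patch_cube_coord(float s, float t, float z) {
        return {s - 1.5f, t - 1.5f, z};
    }
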
@@ -256,8 +270,9 @@ void PatchImageInstruction(IR::Block& block, IR::Inst& inst, Info& info, Descriptors& descriptors) {
         return {ir.CompositeConstruct(body->Arg(0), body->Arg(1)), body->Arg(2)};
     case AmdGpu::ImageType::Color2DArray:
     case AmdGpu::ImageType::Color3D:
-    case AmdGpu::ImageType::Cube:
         return {ir.CompositeConstruct(body->Arg(0), body->Arg(1), body->Arg(2)), body->Arg(3)};
+    case AmdGpu::ImageType::Cube:
+        return {PatchCubeCoord(ir, body->Arg(0), body->Arg(1), body->Arg(2)), body->Arg(3)};
     default:
         UNREACHABLE();
     }
@@ -276,6 +291,7 @@ void ResourceTrackingPass(IR::Program& program) {
     // Most of the time it is float so that is the default. This pass detects float buffer loads
     // combined with bitcasts and patches them to be integer loads.
     for (IR::Block* const block : program.post_order_blocks) {
+        break;
         for (IR::Inst& inst : block->Instructions()) {
             if (inst.GetOpcode() != IR::Opcode::BitCastU32F32) {
                 continue;
@@ -32,6 +32,7 @@ struct SccFlagTag : FlagTag {};
 struct ExecFlagTag : FlagTag {};
 struct VccFlagTag : FlagTag {};
 struct VccLoTag : FlagTag {};
+struct VccHiTag : FlagTag {};
 
 struct GotoVariable : FlagTag {
     GotoVariable() = default;
@@ -43,7 +44,7 @@ struct GotoVariable : FlagTag {
 };
 
 using Variant = std::variant<IR::ScalarReg, IR::VectorReg, GotoVariable, SccFlagTag, ExecFlagTag,
-                             VccFlagTag, VccLoTag>;
+                             VccFlagTag, VccLoTag, VccHiTag>;
 using ValueMap = std::unordered_map<IR::Block*, IR::Value>;
 
 struct DefTable {
@ -89,6 +90,13 @@ struct DefTable {
|
||||||
vcc_lo_flag.insert_or_assign(block, value);
|
vcc_lo_flag.insert_or_assign(block, value);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const IR::Value& Def(IR::Block* block, VccHiTag) {
|
||||||
|
return vcc_hi_flag[block];
|
||||||
|
}
|
||||||
|
void SetDef(IR::Block* block, VccHiTag, const IR::Value& value) {
|
||||||
|
vcc_hi_flag.insert_or_assign(block, value);
|
||||||
|
}
|
||||||
|
|
||||||
const IR::Value& Def(IR::Block* block, VccFlagTag) {
|
const IR::Value& Def(IR::Block* block, VccFlagTag) {
|
||||||
return vcc_flag[block];
|
return vcc_flag[block];
|
||||||
}
|
}
|
||||||
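
The DefTable resolves each tracked variable to a per-block map by overloading on an empty tag type, so adding VCC_HI is one more tag, one more overload pair, and one more map. A compilable sketch of the mechanism with stand-in types (std::string in place of IR::Value):

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    struct Block { int id; };
    struct VccLoTag {};
    struct VccHiTag {};

    struct DefTable {
        // One map per tracked variable; the tag argument selects the map
        // at compile time via overload resolution.
        std::string& Def(Block* b, VccLoTag) { return vcc_lo[b]; }
        std::string& Def(Block* b, VccHiTag) { return vcc_hi[b]; }

        std::unordered_map<Block*, std::string> vcc_lo;
        std::unordered_map<Block*, std::string> vcc_hi;
    };

    int main() {
        Block b{0};
        DefTable table;
        table.Def(&b, VccHiTag{}) = "def_in_b";
        std::printf("%s\n", table.Def(&b, VccHiTag{}).c_str()); // def_in_b
    }
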

@@ -101,6 +109,7 @@ struct DefTable {
     ValueMap exec_flag;
     ValueMap vcc_flag;
     ValueMap vcc_lo_flag;
+    ValueMap vcc_hi_flag;
 };

 IR::Opcode UndefOpcode(IR::ScalarReg) noexcept {

@@ -111,6 +120,14 @@ IR::Opcode UndefOpcode(IR::VectorReg) noexcept {
     return IR::Opcode::UndefU32;
 }

+IR::Opcode UndefOpcode(const VccLoTag&) noexcept {
+    return IR::Opcode::UndefU32;
+}
+
+IR::Opcode UndefOpcode(const VccHiTag&) noexcept {
+    return IR::Opcode::UndefU32;
+}
+
 IR::Opcode UndefOpcode(const FlagTag&) noexcept {
     return IR::Opcode::UndefU1;
 }
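
Note the asymmetry: VCC as a condition flag is 1-bit, but its lo/hi register halves are full 32-bit values, so their undef placeholder is UndefU32 while generic flags fall back to UndefU1. The dispatch relies on ordinary overload resolution preferring the exact tag over its FlagTag base, which a small standalone example can demonstrate:

    #include <cstdio>

    enum class Opcode { UndefU1, UndefU32 };
    struct FlagTag {};
    struct VccLoTag : FlagTag {};
    struct VccHiTag : FlagTag {};

    // The exact-match overloads win: VccLoTag/VccHiTag bind to the U32
    // overloads even though they also convert to FlagTag.
    Opcode UndefOpcode(const VccLoTag&) { return Opcode::UndefU32; }
    Opcode UndefOpcode(const VccHiTag&) { return Opcode::UndefU32; }
    Opcode UndefOpcode(const FlagTag&) { return Opcode::UndefU1; }

    int main() {
        std::printf("%d %d\n", static_cast<int>(UndefOpcode(VccHiTag{})),
                    static_cast<int>(UndefOpcode(FlagTag{}))); // 1 0
    }
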

@@ -281,6 +298,7 @@ private:
 void VisitInst(Pass& pass, IR::Block* block, IR::Inst& inst) {
     const IR::Opcode opcode{inst.GetOpcode()};
     switch (opcode) {
+    case IR::Opcode::SetThreadBitScalarReg:
     case IR::Opcode::SetScalarRegister: {
         const IR::ScalarReg reg{inst.Arg(0).ScalarReg()};
         pass.WriteVariable(reg, block, inst.Arg(1));

@@ -306,6 +324,10 @@ void VisitInst(Pass& pass, IR::Block* block, IR::Inst& inst) {
     case IR::Opcode::SetVccLo:
         pass.WriteVariable(VccLoTag{}, block, inst.Arg(0));
         break;
+    case IR::Opcode::SetVccHi:
+        pass.WriteVariable(VccHiTag{}, block, inst.Arg(0));
+        break;
+    case IR::Opcode::GetThreadBitScalarReg:
     case IR::Opcode::GetScalarRegister: {
         const IR::ScalarReg reg{inst.Arg(0).ScalarReg()};
         inst.ReplaceUsesWith(pass.ReadVariable(reg, block));

@@ -331,6 +353,9 @@ void VisitInst(Pass& pass, IR::Block* block, IR::Inst& inst) {
     case IR::Opcode::GetVccLo:
         inst.ReplaceUsesWith(pass.ReadVariable(VccLoTag{}, block));
         break;
+    case IR::Opcode::GetVccHi:
+        inst.ReplaceUsesWith(pass.ReadVariable(VccHiTag{}, block));
+        break;
     default:
         break;
     }

@@ -219,6 +219,7 @@ using U64 = TypedValue<Type::U64>;
 using F16 = TypedValue<Type::F16>;
 using F32 = TypedValue<Type::F32>;
 using F64 = TypedValue<Type::F64>;
+using U1U32F32 = TypedValue<Type::U1 | Type::U32 | Type::F32>;
 using U32F32 = TypedValue<Type::U32 | Type::F32>;
 using U32U64 = TypedValue<Type::U32 | Type::U64>;
 using F32F64 = TypedValue<Type::F32 | Type::F64>;

@@ -61,7 +61,7 @@ IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Blo
     Shader::Optimization::DeadCodeEliminationPass(program.blocks);
     Shader::Optimization::CollectShaderInfoPass(program);

-    fmt::print("{}\n", Shader::IR::DumpProgram(program));
+    fmt::print("Post passes\n\n{}\n", Shader::IR::DumpProgram(program));
     std::fflush(stdout);

     return program;

@@ -4,6 +4,7 @@
 #pragma once

 #include <span>
+#include <vector>
 #include <boost/container/static_vector.hpp>
 #include "common/assert.h"
 #include "common/types.h"

@@ -81,7 +82,6 @@ struct Info {

 struct PsInput {
     u32 param_index;
-    u32 semantic;
     bool is_default;
     bool is_flat;
     u32 default_value;

@@ -2,7 +2,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later

 #include "common/assert.h"
-#include "common/io_file.h"
 #include "common/thread.h"
 #include "video_core/amdgpu/liverpool.h"
 #include "video_core/amdgpu/pm4_cmds.h"

@@ -374,10 +374,16 @@ struct Liverpool {
         FrontAndBack = 3,
     };

+    enum class FrontFace : u32 {
+        CounterClockwise = 0,
+        Clockwise = 1,
+    };
+
     union PolygonControl {
         u32 raw;
         BitField<0, 1, u32> cull_front;
         BitField<1, 1, u32> cull_back;
+        BitField<2, 1, FrontFace> front_face;
         BitField<3, 2, u32> enable_polygon_mode;
         BitField<5, 3, PolygonMode> polygon_mode_front;
         BitField<8, 3, PolygonMode> polygon_mode_back;
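
BitField<2, 1, FrontFace> carves a one-bit enum out of the polygon control register's raw value. Assuming the usual shift-and-mask implementation of BitField, the decode is equivalent to this standalone snippet:

    #include <cstdint>
    #include <cstdio>

    enum class FrontFace : uint32_t { CounterClockwise = 0, Clockwise = 1 };

    FrontFace DecodeFrontFace(uint32_t raw) {
        return static_cast<FrontFace>((raw >> 2) & 0x1u); // position 2, width 1
    }

    int main() {
        // Bit 2 set -> Clockwise.
        std::printf("%u\n", static_cast<unsigned>(DecodeFrontFace(0b100))); // 1
    }
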

@@ -110,11 +110,29 @@ struct Image {
         BitField<59, 1, u64> atc;
         BitField<60, 4, ImageType> type;
     };
+    union {
+        BitField<0, 13, u64> depth;
+        BitField<13, 14, u64> pitch;
+        BitField<32, 13, u64> base_array;
+        BitField<45, 13, u64> last_array;
+    };

     VAddr Address() const {
         return base_address << 8;
     }

+    u32 Pitch() const {
+        return pitch;
+    }
+
+    u32 NumLayers() const {
+        return last_array - base_array + 1;
+    }
+
+    u32 NumLevels() const {
+        return last_level + 1;
+    }
+
     DataFormat GetDataFmt() const noexcept {
         return static_cast<DataFormat>(data_format.Value());
     }
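
The new accessors turn inclusive descriptor ranges into counts: base_array..last_array is inclusive, hence the +1, and last_level is a zero-based maximum. A worked example with stand-in fields:

    #include <cstdint>
    #include <cstdio>

    struct ImageDesc {
        uint32_t base_array;
        uint32_t last_array;
        uint32_t last_level;

        uint32_t NumLayers() const { return last_array - base_array + 1; }
        uint32_t NumLevels() const { return last_level + 1; }
    };

    int main() {
        // A cube map occupies six array slices: base_array..base_array+5.
        ImageDesc img{.base_array = 0, .last_array = 5, .last_level = 9};
        std::printf("layers=%u levels=%u\n", img.NumLayers(), img.NumLevels()); // 6 10
    }
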

@@ -287,7 +287,7 @@ vk::Format SurfaceFormat(AmdGpu::DataFormat data_format, AmdGpu::NumberFormat nu
     }
     if (data_format == AmdGpu::DataFormat::Format8_8_8_8 &&
         num_format == AmdGpu::NumberFormat::Srgb) {
-        return vk::Format::eR8G8B8A8Srgb;
+        return vk::Format::eB8G8R8A8Srgb;
     }
     if (data_format == AmdGpu::DataFormat::Format32_32_32 &&
         num_format == AmdGpu::NumberFormat::Float) {

@@ -304,6 +304,9 @@ vk::Format SurfaceFormat(AmdGpu::DataFormat data_format, AmdGpu::NumberFormat nu
     if (data_format == AmdGpu::DataFormat::Format8 && num_format == AmdGpu::NumberFormat::Unorm) {
         return vk::Format::eR8Unorm;
     }
+    if (data_format == AmdGpu::DataFormat::FormatBc3 && num_format == AmdGpu::NumberFormat::Srgb) {
+        return vk::Format::eBc3SrgbBlock;
+    }
     UNREACHABLE();
 }

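
BC3 is a block-compressed format: texels come in 4x4 blocks of 16 bytes, so byte sizes are computed per block rather than per pixel. For reference, the standard size arithmetic (not code from this commit):

    #include <cstdint>
    #include <cstdio>

    uint32_t Bc3SizeBytes(uint32_t width, uint32_t height) {
        const uint32_t blocks_x = (width + 3) / 4; // round up to whole blocks
        const uint32_t blocks_y = (height + 3) / 4;
        return blocks_x * blocks_y * 16; // 16 bytes per BC3 block
    }

    int main() {
        std::printf("%u\n", Bc3SizeBytes(256, 256)); // 64*64*16 = 65536
    }
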

@@ -75,8 +75,10 @@ GraphicsPipeline::GraphicsPipeline(const Instance& instance_, Scheduler& schedul
         .depthClampEnable = false,
         .rasterizerDiscardEnable = false,
         .polygonMode = LiverpoolToVK::PolygonMode(key.polygon_mode),
-        .cullMode = LiverpoolToVK::CullMode(key.cull_mode),
-        .frontFace = vk::FrontFace::eClockwise,
+        .cullMode = vk::CullModeFlagBits::eNone, /*LiverpoolToVK::CullMode(key.cull_mode),*/
+        .frontFace = key.front_face == Liverpool::FrontFace::Clockwise
+                         ? vk::FrontFace::eClockwise
+                         : vk::FrontFace::eCounterClockwise,
         .depthBiasEnable = false,
         .lineWidth = 1.0f,
     };

@@ -177,14 +179,23 @@ GraphicsPipeline::GraphicsPipeline(const Instance& instance_, Scheduler& schedul
     std::array<vk::PipelineColorBlendAttachmentState, Liverpool::NumColorBuffers> attachments;
     for (u32 i = 0; i < num_color_formats; i++) {
         const auto& control = key.blend_controls[i];
+        const auto src_color = LiverpoolToVK::BlendFactor(control.color_src_factor);
+        const auto dst_color = LiverpoolToVK::BlendFactor(control.color_dst_factor);
+        const auto color_blend = LiverpoolToVK::BlendOp(control.color_func);
         attachments[i] = vk::PipelineColorBlendAttachmentState{
             .blendEnable = key.blend_controls[i].enable,
-            .srcColorBlendFactor = LiverpoolToVK::BlendFactor(control.color_src_factor),
-            .dstColorBlendFactor = LiverpoolToVK::BlendFactor(control.color_dst_factor),
-            .colorBlendOp = LiverpoolToVK::BlendOp(control.color_func),
-            .srcAlphaBlendFactor = LiverpoolToVK::BlendFactor(control.alpha_src_factor),
-            .dstAlphaBlendFactor = LiverpoolToVK::BlendFactor(control.color_dst_factor),
-            .alphaBlendOp = LiverpoolToVK::BlendOp(control.alpha_func),
+            .srcColorBlendFactor = src_color,
+            .dstColorBlendFactor = dst_color,
+            .colorBlendOp = color_blend,
+            .srcAlphaBlendFactor = control.separate_alpha_blend
+                                       ? LiverpoolToVK::BlendFactor(control.alpha_src_factor)
+                                       : src_color,
+            .dstAlphaBlendFactor = control.separate_alpha_blend
+                                       ? LiverpoolToVK::BlendFactor(control.alpha_dst_factor)
+                                       : dst_color,
+            .alphaBlendOp = control.separate_alpha_blend
+                                ? LiverpoolToVK::BlendOp(control.alpha_func)
+                                : color_blend,
             .colorWriteMask =
                 instance.IsColorWriteEnableSupported()
                     ? vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG |
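
The hoisted src_color/dst_color/color_blend locals exist because the blend control only supplies distinct alpha factors when separate_alpha_blend is set; otherwise the alpha channel reuses the color channel's factors and op. The selection rule in isolation, with stand-in types:

    #include <cstdio>

    enum class BlendFactor { SrcAlpha, OneMinusSrcAlpha, One };

    struct BlendControl {
        bool separate_alpha_blend;
        BlendFactor color_src_factor;
        BlendFactor alpha_src_factor;
    };

    // When separate alpha blending is disabled, alpha falls back to the
    // color-channel factor; the same rule applies to dst factors and ops.
    BlendFactor SrcAlphaFactor(const BlendControl& control) {
        return control.separate_alpha_blend ? control.alpha_src_factor
                                            : control.color_src_factor;
    }

    int main() {
        const BlendControl control{false, BlendFactor::SrcAlpha, BlendFactor::One};
        std::printf("%d\n", static_cast<int>(SrcAlphaFactor(control))); // 0 = SrcAlpha
    }
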

@@ -38,6 +38,8 @@ struct GraphicsPipelineKey {
     Liverpool::PrimitiveType prim_type;
     Liverpool::PolygonMode polygon_mode;
     Liverpool::CullMode cull_mode;
+    Liverpool::FrontFace front_face;
+    u32 pad{};
     std::array<Liverpool::BlendControl, Liverpool::NumColorBuffers> blend_controls;
     std::array<vk::ColorComponentFlags, Liverpool::NumColorBuffers> write_masks;

@@ -207,6 +207,7 @@ bool Instance::CreateDevice() {
         .shaderDrawParameters = true,
     },
     vk::PhysicalDeviceVulkan12Features{
+        .scalarBlockLayout = true,
         .timelineSemaphore = true,
     },
     vk::PhysicalDeviceVulkan13Features{

@@ -94,6 +94,7 @@ void PipelineCache::RefreshGraphicsKey() {
     key.prim_type = regs.primitive_type;
     key.polygon_mode = regs.polygon_control.PolyMode();
     key.cull_mode = regs.polygon_control.CullingMode();
+    key.front_face = regs.polygon_control.front_face;

     const auto& db = regs.depth_buffer;
     key.depth_format = key.depth.depth_enable

@@ -163,10 +164,19 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() {
         programs[i] = Shader::TranslateProgram(inst_pool, block_pool, code, std::move(info));

         // Compile IR to SPIR-V
-        const auto spv_code = Shader::Backend::SPIRV::EmitSPIRV(profile, programs[i], binding);
+        auto spv_code = Shader::Backend::SPIRV::EmitSPIRV(profile, programs[i], binding);
         stages[i] = CompileSPV(spv_code, instance.GetDevice());
         infos[i] = &programs[i].info;

+        // Set module name to hash in renderdoc
+        const auto name = fmt::format("{}_{:#x}", stage, hash);
+        const vk::DebugUtilsObjectNameInfoEXT name_info = {
+            .objectType = vk::ObjectType::eShaderModule,
+            .objectHandle = std::bit_cast<u64>(stages[i]),
+            .pObjectName = name.c_str(),
+        };
+        instance.GetDevice().setDebugUtilsObjectNameEXT(name_info);
+
         if (Config::dumpShaders()) {
             DumpShader(spv_code, hash, stage, "spv");
         }
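
Naming the module after `{stage}_{hash}` means captures show something like `fs_0x…` instead of an opaque handle. A minimal sketch of the same idea against the raw C API, assuming a valid device and that VK_EXT_debug_utils was enabled at instance creation:

    #include <cstdint>
    #include <vulkan/vulkan.h>

    // Attach a human-readable name to a shader module so tools such as
    // RenderDoc display it in place of a raw handle.
    void NameShaderModule(VkDevice device, VkShaderModule module, const char* name) {
        const auto func = reinterpret_cast<PFN_vkSetDebugUtilsObjectNameEXT>(
            vkGetDeviceProcAddr(device, "vkSetDebugUtilsObjectNameEXT"));
        if (func == nullptr) {
            return; // extension missing; naming is purely diagnostic
        }
        const VkDebugUtilsObjectNameInfoEXT info{
            .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT,
            .pNext = nullptr,
            .objectType = VK_OBJECT_TYPE_SHADER_MODULE,
            .objectHandle = (uint64_t)module, // non-dispatchable handles widen to u64
            .pObjectName = name,
        };
        func(device, &info);
    }
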

@@ -85,6 +85,7 @@ void Rasterizer::Draw(bool is_indexed) {
 }

 void Rasterizer::DispatchDirect() {
+    compute_done = true;
     return;
     const auto cmdbuf = scheduler.CommandBuffer();
     const auto& cs_program = liverpool->regs.cs_program;

@@ -49,6 +49,7 @@ private:
     Core::MemoryManager* memory;
     PipelineCache pipeline_cache;
     StreamBuffer vertex_index_buffer;
+    bool compute_done{};
 };

 } // namespace Vulkan

@@ -39,8 +39,10 @@ using Libraries::VideoOut::TilingMode;
     if (false /*&& IsDepthStencilFormat(format)*/) {
         usage |= vk::ImageUsageFlagBits::eDepthStencilAttachment;
     } else {
+        if (format != vk::Format::eBc3SrgbBlock) {
         usage |= vk::ImageUsageFlagBits::eColorAttachment;
     }
+    }
     return usage;
 }


@@ -101,8 +103,10 @@ ImageInfo::ImageInfo(const AmdGpu::Image& image) noexcept {
     size.width = image.width + 1;
     size.height = image.height + 1;
     size.depth = 1;
+    pitch = image.Pitch();
+    resources.levels = image.NumLevels();
+    resources.layers = image.NumLayers();
     // TODO: Derive this properly from tiling params
-    pitch = size.width;
     guest_size_bytes = size.width * size.height * 4;
 }


@@ -183,7 +187,7 @@ void Image::Transit(vk::ImageLayout dst_layout, vk::Flags<vk::AccessFlagBits> ds
         .subresourceRange{
             .aspectMask = aspect_mask,
             .baseMipLevel = 0,
-            .levelCount = 1,
+            .levelCount = VK_REMAINING_MIP_LEVELS,
             .baseArrayLayer = 0,
             .layerCount = VK_REMAINING_ARRAY_LAYERS,
         }};

@@ -14,8 +14,9 @@ vk::ImageViewType ConvertImageViewType(AmdGpu::ImageType type) {
     case AmdGpu::ImageType::Color1DArray:
         return vk::ImageViewType::e1DArray;
     case AmdGpu::ImageType::Color2D:
-    case AmdGpu::ImageType::Cube:
         return vk::ImageViewType::e2D;
+    case AmdGpu::ImageType::Cube:
+        return vk::ImageViewType::eCube;
     case AmdGpu::ImageType::Color2DArray:
         return vk::ImageViewType::e2DArray;
     case AmdGpu::ImageType::Color3D:

@@ -47,10 +48,10 @@ vk::ComponentSwizzle ConvertComponentSwizzle(u32 dst_sel) {
 ImageViewInfo::ImageViewInfo(const AmdGpu::Image& image) noexcept {
     type = ConvertImageViewType(image.type);
     format = Vulkan::LiverpoolToVK::SurfaceFormat(image.GetDataFmt(), image.GetNumberFmt());
-    range.base.level = image.base_level;
+    range.base.level = 0;
     range.base.layer = 0;
-    range.extent.levels = 1;
-    range.extent.layers = 1;
+    range.extent.levels = image.NumLevels();
+    range.extent.layers = image.NumLayers();
     mapping.r = ConvertComponentSwizzle(image.dst_sel_x);
     mapping.g = ConvertComponentSwizzle(image.dst_sel_y);
     mapping.b = ConvertComponentSwizzle(image.dst_sel_z);

@@ -175,6 +175,8 @@ void TextureCache::RefreshImage(Image& image) {
     // Mark image as validated.
     image.flags &= ~ImageFlagBits::CpuModified;

+    {
+
     // Upload data to the staging buffer.
     const auto [data, offset, _] = staging.Map(image.info.guest_size_bytes, 4);
     const u8* image_data = reinterpret_cast<const u8*>(image.cpu_addr);

@@ -212,11 +214,55 @@ void TextureCache::RefreshImage(Image& image) {

     image.Transit(vk::ImageLayout::eTransferDstOptimal, vk::AccessFlagBits::eTransferWrite);

-    cmdbuf.copyBufferToImage(staging.Handle(), image.image, vk::ImageLayout::eTransferDstOptimal,
-                             image_copy);
+    cmdbuf.copyBufferToImage(staging.Handle(), image.image,
+                             vk::ImageLayout::eTransferDstOptimal, image_copy);

     image.Transit(vk::ImageLayout::eGeneral,
                   vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferRead);
+        return;
+    }
+
+    const u8* image_data = reinterpret_cast<const u8*>(image.cpu_addr);
+    for (u32 l = 0; l < image.info.resources.layers; l++) {
+        // Upload data to the staging buffer.
+        for (u32 m = 0; m < image.info.resources.levels; m++) {
+            const u32 width = image.info.size.width >> m;
+            const u32 height = image.info.size.height >> m;
+            const u32 map_size = width * height;
+            const auto [data, offset, _] = staging.Map(map_size, 16);
+            if (image.info.is_tiled) {
+                ConvertTileToLinear(data, image_data, width, height, Config::isNeoMode());
+            } else {
+                std::memcpy(data, image_data, map_size);
+            }
+            staging.Commit(map_size);
+            image_data += map_size;
+
+            // Copy to the image.
+            const vk::BufferImageCopy image_copy = {
+                .bufferOffset = offset,
+                .bufferRowLength = 0,
+                .bufferImageHeight = 0,
+                .imageSubresource{
+                    .aspectMask = vk::ImageAspectFlagBits::eColor,
+                    .mipLevel = m,
+                    .baseArrayLayer = l,
+                    .layerCount = 1,
+                },
+                .imageOffset = {0, 0, 0},
+                .imageExtent = {width, height, 1},
+            };
+
+            const auto cmdbuf = scheduler.CommandBuffer();
+            image.Transit(vk::ImageLayout::eTransferDstOptimal, vk::AccessFlagBits::eTransferWrite);
+
+            cmdbuf.copyBufferToImage(staging.Handle(), image.image,
+                                     vk::ImageLayout::eTransferDstOptimal, image_copy);
+
+            image.Transit(vk::ImageLayout::eGeneral,
+                          vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferRead);
+        }
+    }
 }

 vk::Sampler TextureCache::GetSampler(const AmdGpu::Sampler& sampler) {
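
One note on the per-layer mip loop above: each level's dimensions come from `width >> m` / `height >> m`, and `map_size = width * height` appears to count texels rather than bytes, so the staging offsets only line up for one-byte-per-texel data; other formats would need a bytes-per-pixel (or block-size) factor. The dimension halving itself, worked through:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t width = 256, height = 256, levels = 4;
        for (uint32_t m = 0; m < levels; m++) {
            // Mirrors `width >> m` / `height >> m` in the upload loop.
            std::printf("mip %u: %ux%u\n", m, width >> m, height >> m);
        }
        // mip 0: 256x256, mip 1: 128x128, mip 2: 64x64, mip 3: 32x32
    }
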