// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "video_core/engines/shader_bytecode.h"
#include "video_core/shader/node_helper.h"
#include "video_core/shader/shader_ir.h"

namespace VideoCommon::Shader {

using Tegra::Shader::ConditionCode;
using Tegra::Shader::Instruction;
using Tegra::Shader::OpCode;
using Tegra::Shader::Register;
using Tegra::Shader::SystemVariable;

u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
    const Instruction instr = {program_code[pc]};
    const auto opcode = OpCode::Decode(instr);

    switch (opcode->get().GetId()) {
    case OpCode::Id::NOP: {
        UNIMPLEMENTED_IF(instr.nop.cc != Tegra::Shader::ConditionCode::T);
        UNIMPLEMENTED_IF(instr.nop.trigger != 0);
        // With the previous preconditions, this instruction is a no-operation.
        break;
    }
    case OpCode::Id::EXIT: {
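        // EXIT ends execution of the current shader invocation.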
        const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
        UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "EXIT condition code used: {}",
                             static_cast<u32>(cc));

        switch (instr.flow.cond) {
        case Tegra::Shader::FlowCondition::Always:
            bb.push_back(Operation(OperationCode::Exit));
            if (instr.pred.pred_index == static_cast<u64>(Tegra::Shader::Pred::UnusedIndex)) {
                // If this is an unconditional exit then just end processing here,
                // otherwise we have to account for the possibility of the condition
                // not being met, so continue processing the next instruction.
                pc = MAX_PROGRAM_LENGTH - 1;
            }
            break;

        case Tegra::Shader::FlowCondition::Fcsm_Tr:
            // TODO(bunnei): What is this used for? If we assume this condition is not
            // satisfied, dual vertex shaders in Farming Simulator make more sense.
            UNIMPLEMENTED_MSG("Skipping unknown FlowCondition::Fcsm_Tr");
            break;

        default:
            UNIMPLEMENTED_MSG("Unhandled flow condition: {}",
                              static_cast<u32>(instr.flow.cond.Value()));
        }
        break;
    }
    case OpCode::Id::KIL: {
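        // KIL discards the fragment being processed, mapped here to a Discard operation.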
        UNIMPLEMENTED_IF(instr.flow.cond != Tegra::Shader::FlowCondition::Always);

        const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
        UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "KIL condition code used: {}",
                             static_cast<u32>(cc));

        bb.push_back(Operation(OperationCode::Discard));
        break;
    }
    case OpCode::Id::S2R: {
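        // S2R moves the value of a hardware system register (thread indices, workgroup indices,
        // Y direction, etc.) into a general purpose register.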
        const Node value = [this, instr] {
            switch (instr.sys20) {
            case SystemVariable::LaneId:
                LOG_WARNING(HW_GPU, "S2R instruction with LaneId is incomplete");
                return Immediate(0U);
            case SystemVariable::InvocationId:
                return Operation(OperationCode::InvocationId);
            case SystemVariable::Ydirection:
                return Operation(OperationCode::YNegate);
            case SystemVariable::InvocationInfo:
                LOG_WARNING(HW_GPU, "S2R instruction with InvocationInfo is incomplete");
                return Immediate(0U);
            case SystemVariable::Tid: {
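                // TID packs the three local invocation indices into one register:
                // X in bits [8:0], Y in bits [24:16] and Z in bits [30:26].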
                Node value = Immediate(0);
                value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdX), 0, 9);
                value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdY), 16, 9);
                value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdZ), 26, 5);
                return value;
            }
            case SystemVariable::TidX:
                return Operation(OperationCode::LocalInvocationIdX);
            case SystemVariable::TidY:
                return Operation(OperationCode::LocalInvocationIdY);
            case SystemVariable::TidZ:
                return Operation(OperationCode::LocalInvocationIdZ);
            case SystemVariable::CtaIdX:
                return Operation(OperationCode::WorkGroupIdX);
            case SystemVariable::CtaIdY:
                return Operation(OperationCode::WorkGroupIdY);
            case SystemVariable::CtaIdZ:
                return Operation(OperationCode::WorkGroupIdZ);
            default:
                UNIMPLEMENTED_MSG("Unhandled system move: {}",
                                  static_cast<u32>(instr.sys20.Value()));
                return Immediate(0u);
            }
        }();
        SetRegister(bb, instr.gpr0, value);

        break;
    }
    case OpCode::Id::BRA: {
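        // BRA branches either to an immediate target or, when the constant buffer bit is set, to
        // an address read from a constant buffer. The fetched value is assumed to be a byte
        // offset, hence the shift right by 3 to turn it into an instruction index.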
        Node branch;
        if (instr.bra.constant_buffer == 0) {
            const u32 target = pc + instr.bra.GetBranchTarget();
            branch = Operation(OperationCode::Branch, Immediate(target));
        } else {
            const u32 target = pc + 1;
            const Node op_a = GetConstBuffer(instr.cbuf36.index, instr.cbuf36.GetOffset());
            const Node convert = SignedOperation(OperationCode::IArithmeticShiftRight, true,
                                                 PRECISE, op_a, Immediate(3));
            const Node operand =
                Operation(OperationCode::IAdd, PRECISE, convert, Immediate(target));
            branch = Operation(OperationCode::BranchIndirect, operand);
        }

        const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
        if (cc != Tegra::Shader::ConditionCode::T) {
            bb.push_back(Conditional(GetConditionCode(cc), {branch}));
        } else {
            bb.push_back(branch);
        }
        break;
    }
    case OpCode::Id::BRX: {
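        // BRX is an indirect branch through a register, optionally offset by a value read from a
        // constant buffer.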
        Node operand;
        if (instr.brx.constant_buffer != 0) {
            const s32 target = pc + 1;
            const Node index = GetRegister(instr.gpr8);
            const Node op_a =
                GetConstBufferIndirect(instr.cbuf36.index, instr.cbuf36.GetOffset() + 0, index);
            const Node convert = SignedOperation(OperationCode::IArithmeticShiftRight, true,
                                                 PRECISE, op_a, Immediate(3));
            operand = Operation(OperationCode::IAdd, PRECISE, convert, Immediate(target));
        } else {
            const s32 target = pc + instr.brx.GetBranchExtend();
            const Node op_a = GetRegister(instr.gpr8);
            const Node convert = SignedOperation(OperationCode::IArithmeticShiftRight, true,
                                                 PRECISE, op_a, Immediate(3));
            operand = Operation(OperationCode::IAdd, PRECISE, convert, Immediate(target));
        }
        const Node branch = Operation(OperationCode::BranchIndirect, operand);

        const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
        if (cc != Tegra::Shader::ConditionCode::T) {
            bb.push_back(Conditional(GetConditionCode(cc), {branch}));
        } else {
            bb.push_back(branch);
        }
        break;
    }
    case OpCode::Id::SSY: {
        UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0,
                             "Constant buffer flow is not supported");

        if (disable_flow_stack) {
            break;
        }

        // The SSY opcode tells the GPU where to re-converge divergent execution paths with SYNC.
        const u32 target = pc + instr.bra.GetBranchTarget();
        bb.push_back(
            Operation(OperationCode::PushFlowStack, MetaStackClass::Ssy, Immediate(target)));
        break;
    }
    case OpCode::Id::PBK: {
        UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0,
                             "Constant buffer PBK is not supported");

        if (disable_flow_stack) {
            break;
        }

        // PBK pushes to a stack the address where BRK will jump to.
        const u32 target = pc + instr.bra.GetBranchTarget();
        bb.push_back(
            Operation(OperationCode::PushFlowStack, MetaStackClass::Pbk, Immediate(target)));
        break;
    }
    case OpCode::Id::SYNC: {
        const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
        UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "SYNC condition code used: {}",
                             static_cast<u32>(cc));

        if (decompiled) {
            break;
        }

        // The SYNC opcode jumps to the address previously set by the SSY opcode.
        bb.push_back(Operation(OperationCode::PopFlowStack, MetaStackClass::Ssy));
        break;
    }
    case OpCode::Id::BRK: {
        const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
        UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "BRK condition code used: {}",
                             static_cast<u32>(cc));
        if (decompiled) {
            break;
        }

        // The BRK opcode jumps to the address previously set by the PBK opcode.
        bb.push_back(Operation(OperationCode::PopFlowStack, MetaStackClass::Pbk));
        break;
    }
    case OpCode::Id::IPA: {
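        // IPA reads a fragment shader input attribute (or a physical attribute addressed through
        // gpr8) and applies the interpolation mode encoded in the instruction.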
        const bool is_physical = instr.ipa.idx && instr.gpr8.Value() != 0xff;

        const auto attribute = instr.attribute.fmt28;
        const Tegra::Shader::IpaMode input_mode{instr.ipa.interp_mode.Value(),
                                                instr.ipa.sample_mode.Value()};

        Node value = is_physical ? GetPhysicalInputAttribute(instr.gpr8)
                                 : GetInputAttribute(attribute.index, attribute.element);
        const Tegra::Shader::Attribute::Index index = attribute.index.Value();
        const bool is_generic = index >= Tegra::Shader::Attribute::Index::Attribute_0 &&
                                index <= Tegra::Shader::Attribute::Index::Attribute_31;
        if (is_generic || is_physical) {
            // TODO(Blinkhawk): There are cases where a perspective attribute uses PASS.
            // In theory, by setting them as perspective, OpenGL does the perspective correction.
            // A way must be figured out to reverse the last step of it.
            if (input_mode.interpolation_mode == Tegra::Shader::IpaInterpMode::Multiply) {
                value = Operation(OperationCode::FMul, PRECISE, value, GetRegister(instr.gpr20));
            }
        }
        value = GetSaturatedFloat(value, instr.ipa.saturate);

        SetRegister(bb, instr.gpr0, value);
        break;
    }
    case OpCode::Id::OUT_R: {
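        // OUT drives geometry shader output: the emit flag emits a vertex and the cut flag ends
        // the current output primitive.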
        UNIMPLEMENTED_IF_MSG(instr.gpr20.Value() != Register::ZeroIndex,
                             "Stream buffer is not supported");

        if (instr.out.emit) {
            // gpr0 is used to store the next address and gpr8 contains the address to emit.
            // Hardware uses pointers here, but we just ignore them.
            bb.push_back(Operation(OperationCode::EmitVertex));
            SetRegister(bb, instr.gpr0, Immediate(0));
        }
        if (instr.out.cut) {
            bb.push_back(Operation(OperationCode::EndPrimitive));
        }
        break;
    }
    case OpCode::Id::ISBERD: {
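        // ISBERD reads an entry from the internal stage buffer; it is stubbed here as a plain
        // register copy.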
        UNIMPLEMENTED_IF(instr.isberd.o != 0);
        UNIMPLEMENTED_IF(instr.isberd.skew != 0);
        UNIMPLEMENTED_IF(instr.isberd.shift != Tegra::Shader::IsberdShift::None);
        UNIMPLEMENTED_IF(instr.isberd.mode != Tegra::Shader::IsberdMode::None);
        LOG_WARNING(HW_GPU, "ISBERD instruction is incomplete");
        SetRegister(bb, instr.gpr0, GetRegister(instr.gpr8));
        break;
    }
    case OpCode::Id::MEMBAR: {
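        // MEMBAR orders memory accesses; only the GL-scoped variant is handled.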
        UNIMPLEMENTED_IF(instr.membar.type != Tegra::Shader::MembarType::GL);
        UNIMPLEMENTED_IF(instr.membar.unknown != Tegra::Shader::MembarUnknown::Default);
        bb.push_back(Operation(OperationCode::MemoryBarrierGL));
        break;
    }
    case OpCode::Id::DEPBAR: {
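        // DEPBAR waits on hardware scoreboard dependencies; those are not modelled in the IR, so
        // the instruction is only logged.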
        LOG_DEBUG(HW_GPU, "DEPBAR instruction is stubbed");
        break;
    }
    default:
        UNIMPLEMENTED_MSG("Unhandled instruction: {}", opcode->get().GetName());
    }

    return pc;
}

} // namespace VideoCommon::Shader