Diffstat (limited to 'src/shader_recompiler/ir_opt')
-rw-r--r--  src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp             |   4
-rw-r--r--  src/shader_recompiler/ir_opt/conditional_barrier_pass.cpp             |  44
-rw-r--r--  src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp |  21
-rw-r--r--  src/shader_recompiler/ir_opt/lower_fp64_to_fp32.cpp                   | 185
-rw-r--r--  src/shader_recompiler/ir_opt/passes.h                                 |   4
-rw-r--r--  src/shader_recompiler/ir_opt/texture_pass.cpp                         |  44
6 files changed, 277 insertions(+), 25 deletions(-)
diff --git a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
index 5a4195217..70292686f 100644
--- a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
+++ b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
@@ -424,6 +424,10 @@ void VisitUsages(Info& info, IR::Inst& inst) {
info.used_constant_buffer_types |= IR::Type::U32 | IR::Type::U32x2;
info.used_storage_buffer_types |= IR::Type::U32 | IR::Type::U32x2 | IR::Type::U32x4;
break;
+ case IR::Opcode::LoadLocal:
+ case IR::Opcode::WriteLocal:
+ info.uses_local_memory = true;
+ break;
default:
break;
}
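
For reference, a minimal standalone sketch of the usage-collection pattern this hunk extends; the `Info` struct and opcode enum below are simplified stand-ins for the real IR types, not the project's actual definitions:

```cpp
#include <initializer_list>

// Simplified stand-ins for the real IR::Opcode and Shader::Info types.
enum class Opcode { LoadLocal, WriteLocal, FPAdd32 };

struct Info {
    bool uses_local_memory{false};
};

// Mirrors the switch in VisitUsages: any local-memory access, read or
// write, is enough to require a local-memory allocation in the backend.
void VisitUsages(Info& info, Opcode op) {
    switch (op) {
    case Opcode::LoadLocal:
    case Opcode::WriteLocal:
        info.uses_local_memory = true;
        break;
    default:
        break;
    }
}

int main() {
    Info info;
    for (Opcode op : {Opcode::FPAdd32, Opcode::LoadLocal}) {
        VisitUsages(info, op);
    }
    return info.uses_local_memory ? 0 : 1;
}
```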
diff --git a/src/shader_recompiler/ir_opt/conditional_barrier_pass.cpp b/src/shader_recompiler/ir_opt/conditional_barrier_pass.cpp
new file mode 100644
index 000000000..c3ed27f4f
--- /dev/null
+++ b/src/shader_recompiler/ir_opt/conditional_barrier_pass.cpp
@@ -0,0 +1,44 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "shader_recompiler/frontend/ir/program.h"
+#include "shader_recompiler/ir_opt/passes.h"
+
+namespace Shader::Optimization {
+
+void ConditionalBarrierPass(IR::Program& program) {
+ s32 conditional_control_flow_count{0};
+ s32 conditional_return_count{0};
+ for (IR::AbstractSyntaxNode& node : program.syntax_list) {
+ switch (node.type) {
+ case IR::AbstractSyntaxNode::Type::If:
+ case IR::AbstractSyntaxNode::Type::Loop:
+ conditional_control_flow_count++;
+ break;
+ case IR::AbstractSyntaxNode::Type::EndIf:
+ case IR::AbstractSyntaxNode::Type::Repeat:
+ conditional_control_flow_count--;
+ break;
+ case IR::AbstractSyntaxNode::Type::Unreachable:
+ case IR::AbstractSyntaxNode::Type::Return:
+ if (conditional_control_flow_count > 0) {
+ conditional_return_count++;
+ }
+ break;
+ case IR::AbstractSyntaxNode::Type::Block:
+ for (IR::Inst& inst : node.data.block->Instructions()) {
+ if ((conditional_control_flow_count > 0 || conditional_return_count > 0) &&
+ inst.GetOpcode() == IR::Opcode::Barrier) {
+ LOG_WARNING(Shader, "Barrier within conditional control flow");
+ inst.ReplaceOpcode(IR::Opcode::Identity);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ ASSERT(conditional_control_flow_count == 0);
+}
+
+} // namespace Shader::Optimization
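
The pass depends on the syntax list being a flattened, balanced sequence of structured-control-flow markers, which is what the final `ASSERT` checks. A standalone model of the depth-counting idea, with a simplified node kind in place of `IR::AbstractSyntaxNode`:

```cpp
#include <cassert>
#include <cstdio>
#include <vector>

// Simplified stand-in for IR::AbstractSyntaxNode::Type.
enum class Kind { If, EndIf, Loop, Repeat, Barrier, Other };

int main() {
    // A barrier nested one level inside an If: the real pass demotes it
    // to Identity and logs a warning.
    const std::vector<Kind> syntax_list{Kind::If, Kind::Barrier, Kind::EndIf};
    int depth{0};
    for (const Kind kind : syntax_list) {
        switch (kind) {
        case Kind::If:
        case Kind::Loop:
            ++depth; // entering conditional control flow
            break;
        case Kind::EndIf:
        case Kind::Repeat:
            --depth; // leaving it again
            break;
        case Kind::Barrier:
            if (depth > 0) {
                std::puts("barrier inside conditional control flow");
            }
            break;
        default:
            break;
        }
    }
    assert(depth == 0); // mirrors the pass's final structural check
    return 0;
}
```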
diff --git a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
index 9101722ba..d1e59f22e 100644
--- a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
+++ b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
@@ -11,7 +11,6 @@
#include "shader_recompiler/frontend/ir/breadth_first_search.h"
#include "shader_recompiler/frontend/ir/ir_emitter.h"
#include "shader_recompiler/frontend/ir/value.h"
-#include "shader_recompiler/host_translate_info.h"
#include "shader_recompiler/ir_opt/passes.h"
namespace Shader::Optimization {
@@ -36,6 +35,7 @@ struct Bias {
u32 index;
u32 offset_begin;
u32 offset_end;
+ u32 alignment;
};
using boost::container::flat_set;
@@ -350,7 +350,8 @@ std::optional<StorageBufferAddr> Track(const IR::Value& value, const Bias* bias)
.index = index.U32(),
.offset = offset.U32(),
};
- if (!Common::IsAligned(storage_buffer.offset, 16)) {
+ const u32 alignment{bias ? bias->alignment : 8U};
+ if (!Common::IsAligned(storage_buffer.offset, alignment)) {
// The SSBO pointer has to be aligned
return std::nullopt;
}
@@ -372,6 +373,7 @@ void CollectStorageBuffers(IR::Block& block, IR::Inst& inst, StorageInfo& info)
.index = 0,
.offset_begin = 0x110,
.offset_end = 0x610,
+ .alignment = 16,
};
// Track the low address of the instruction
const std::optional<LowAddrInfo> low_addr_info{TrackLowAddress(&inst)};
@@ -387,8 +389,11 @@ void CollectStorageBuffers(IR::Block& block, IR::Inst& inst, StorageInfo& info)
storage_buffer = Track(low_addr, nullptr);
if (!storage_buffer) {
// If that also fails, use NVN fallbacks
+ LOG_WARNING(Shader, "Storage buffer failed to track, using global memory fallbacks");
return;
}
+ LOG_WARNING(Shader, "Storage buffer tracked without bias, index {} offset {}",
+ storage_buffer->index, storage_buffer->offset);
}
// Collect storage buffer and the instruction
if (IsGlobalMemoryWrite(inst)) {
@@ -403,7 +408,7 @@ void CollectStorageBuffers(IR::Block& block, IR::Inst& inst, StorageInfo& info)
}
/// Returns the offset in indices (not bytes) for an equivalent storage instruction
-IR::U32 StorageOffset(IR::Block& block, IR::Inst& inst, StorageBufferAddr buffer, u32 alignment) {
+IR::U32 StorageOffset(IR::Block& block, IR::Inst& inst, StorageBufferAddr buffer) {
IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
IR::U32 offset;
if (const std::optional<LowAddrInfo> low_addr{TrackLowAddress(&inst)}) {
@@ -416,10 +421,7 @@ IR::U32 StorageOffset(IR::Block& block, IR::Inst& inst, StorageBufferAddr buffer
}
// Subtract the least significant 32 bits from the guest offset. The result is the storage
// buffer offset in bytes.
- IR::U32 low_cbuf{ir.GetCbuf(ir.Imm32(buffer.index), ir.Imm32(buffer.offset))};
-
- // Align the offset base to match the host alignment requirements
- low_cbuf = ir.BitwiseAnd(low_cbuf, ir.Imm32(~(alignment - 1U)));
+ const IR::U32 low_cbuf{ir.GetCbuf(ir.Imm32(buffer.index), ir.Imm32(buffer.offset))};
return ir.ISub(offset, low_cbuf);
}
@@ -514,7 +516,7 @@ void Replace(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
}
} // Anonymous namespace
-void GlobalMemoryToStorageBufferPass(IR::Program& program, const HostTranslateInfo& host_info) {
+void GlobalMemoryToStorageBufferPass(IR::Program& program) {
StorageInfo info;
for (IR::Block* const block : program.post_order_blocks) {
for (IR::Inst& inst : block->Instructions()) {
@@ -538,8 +540,7 @@ void GlobalMemoryToStorageBufferPass(IR::Program& program, const HostTranslateIn
const IR::U32 index{IR::Value{static_cast<u32>(info.set.index_of(it))}};
IR::Block* const block{storage_inst.block};
IR::Inst* const inst{storage_inst.inst};
- const IR::U32 offset{
- StorageOffset(*block, *inst, storage_buffer, host_info.min_ssbo_alignment)};
+ const IR::U32 offset{StorageOffset(*block, *inst, storage_buffer)};
Replace(*block, *inst, index, offset);
}
}
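
The effect of the alignment change can be checked in isolation: biased (NVN heap) candidates still require 16-byte alignment via `bias->alignment`, while unbiased candidates now pass with 8 instead of the old hard-coded 16. A compile-time sketch, with `IsAligned` standing in for `Common::IsAligned`:

```cpp
#include <cstdint>

// Stand-in for Common::IsAligned; alignment must be a power of two.
constexpr bool IsAligned(std::uint32_t value, std::uint32_t alignment) {
    return (value & (alignment - 1)) == 0;
}

// Mirrors the rewritten check in Track: the bias carries its own
// alignment (16 for the NVN heap), and unbiased tracking defaults to 8.
constexpr bool OffsetAcceptable(std::uint32_t offset, bool has_bias) {
    const std::uint32_t alignment{has_bias ? 16U : 8U};
    return IsAligned(offset, alignment);
}

static_assert(OffsetAcceptable(0x110, true));   // NVN bias window start
static_assert(!OffsetAcceptable(0x118, true));  // only 8-byte aligned
static_assert(OffsetAcceptable(0x118, false));  // accepted without bias
```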
diff --git a/src/shader_recompiler/ir_opt/lower_fp64_to_fp32.cpp b/src/shader_recompiler/ir_opt/lower_fp64_to_fp32.cpp
new file mode 100644
index 000000000..5db7a38ad
--- /dev/null
+++ b/src/shader_recompiler/ir_opt/lower_fp64_to_fp32.cpp
@@ -0,0 +1,185 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "shader_recompiler/frontend/ir/ir_emitter.h"
+#include "shader_recompiler/frontend/ir/opcodes.h"
+#include "shader_recompiler/frontend/ir/value.h"
+#include "shader_recompiler/ir_opt/passes.h"
+
+namespace Shader::Optimization {
+namespace {
+
+constexpr s32 F64ToF32Exp = +127 - 1023;
+constexpr s32 F32ToF64Exp = +1023 - 127;
+
+IR::F32 PackedF64ToF32(IR::IREmitter& ir, const IR::Value& packed) {
+ const IR::U32 lo{ir.CompositeExtract(packed, 0)};
+ const IR::U32 hi{ir.CompositeExtract(packed, 1)};
+ const IR::U32 sign{ir.BitFieldExtract(hi, ir.Imm32(31), ir.Imm32(1))};
+ const IR::U32 exp{ir.BitFieldExtract(hi, ir.Imm32(20), ir.Imm32(11))};
+ const IR::U32 mantissa_hi{ir.BitFieldExtract(hi, ir.Imm32(0), ir.Imm32(20))};
+ const IR::U32 mantissa_lo{ir.BitFieldExtract(lo, ir.Imm32(29), ir.Imm32(3))};
+ const IR::U32 mantissa{
+ ir.BitwiseOr(ir.ShiftLeftLogical(mantissa_hi, ir.Imm32(3)), mantissa_lo)};
+ const IR::U32 exp_if_subnorm{
+ ir.Select(ir.IEqual(exp, ir.Imm32(0)), ir.Imm32(0), ir.IAdd(exp, ir.Imm32(F64ToF32Exp)))};
+ const IR::U32 exp_if_infnan{
+ ir.Select(ir.IEqual(exp, ir.Imm32(0x7ff)), ir.Imm32(0xff), exp_if_subnorm)};
+ const IR::U32 result{
+ ir.BitwiseOr(ir.ShiftLeftLogical(sign, ir.Imm32(31)),
+ ir.BitwiseOr(ir.ShiftLeftLogical(exp_if_infnan, ir.Imm32(23)), mantissa))};
+ return ir.BitCast<IR::F32>(result);
+}
+
+IR::Value F32ToPackedF64(IR::IREmitter& ir, const IR::Value& raw) {
+ const IR::U32 value{ir.BitCast<IR::U32>(IR::F32(raw))};
+ const IR::U32 sign{ir.BitFieldExtract(value, ir.Imm32(31), ir.Imm32(1))};
+ const IR::U32 exp{ir.BitFieldExtract(value, ir.Imm32(23), ir.Imm32(8))};
+ const IR::U32 mantissa{ir.BitFieldExtract(value, ir.Imm32(0), ir.Imm32(23))};
+ const IR::U32 mantissa_hi{ir.BitFieldExtract(mantissa, ir.Imm32(3), ir.Imm32(20))};
+ const IR::U32 mantissa_lo{ir.BitFieldExtract(mantissa, ir.Imm32(0), ir.Imm32(3))};
+ const IR::U32 exp_if_subnorm{
+ ir.Select(ir.IEqual(exp, ir.Imm32(0)), ir.Imm32(0), ir.IAdd(exp, ir.Imm32(F32ToF64Exp)))};
+ const IR::U32 exp_if_infnan{
+ ir.Select(ir.IEqual(exp, ir.Imm32(0xff)), ir.Imm32(0x7ff), exp_if_subnorm)};
+ const IR::U32 lo{ir.ShiftLeftLogical(mantissa_lo, ir.Imm32(29))};
+ const IR::U32 hi{
+ ir.BitwiseOr(ir.ShiftLeftLogical(sign, ir.Imm32(31)),
+ ir.BitwiseOr(ir.ShiftLeftLogical(exp_if_infnan, ir.Imm32(20)), mantissa_hi))};
+ return ir.CompositeConstruct(lo, hi);
+}
+
+IR::Opcode Replace(IR::Opcode op) {
+ switch (op) {
+ case IR::Opcode::FPAbs64:
+ return IR::Opcode::FPAbs32;
+ case IR::Opcode::FPAdd64:
+ return IR::Opcode::FPAdd32;
+ case IR::Opcode::FPCeil64:
+ return IR::Opcode::FPCeil32;
+ case IR::Opcode::FPFloor64:
+ return IR::Opcode::FPFloor32;
+ case IR::Opcode::FPFma64:
+ return IR::Opcode::FPFma32;
+ case IR::Opcode::FPMul64:
+ return IR::Opcode::FPMul32;
+ case IR::Opcode::FPNeg64:
+ return IR::Opcode::FPNeg32;
+ case IR::Opcode::FPRoundEven64:
+ return IR::Opcode::FPRoundEven32;
+ case IR::Opcode::FPSaturate64:
+ return IR::Opcode::FPSaturate32;
+ case IR::Opcode::FPClamp64:
+ return IR::Opcode::FPClamp32;
+ case IR::Opcode::FPTrunc64:
+ return IR::Opcode::FPTrunc32;
+ case IR::Opcode::CompositeConstructF64x2:
+ return IR::Opcode::CompositeConstructF32x2;
+ case IR::Opcode::CompositeConstructF64x3:
+ return IR::Opcode::CompositeConstructF32x3;
+ case IR::Opcode::CompositeConstructF64x4:
+ return IR::Opcode::CompositeConstructF32x4;
+ case IR::Opcode::CompositeExtractF64x2:
+ return IR::Opcode::CompositeExtractF32x2;
+ case IR::Opcode::CompositeExtractF64x3:
+ return IR::Opcode::CompositeExtractF32x3;
+ case IR::Opcode::CompositeExtractF64x4:
+ return IR::Opcode::CompositeExtractF32x4;
+ case IR::Opcode::CompositeInsertF64x2:
+ return IR::Opcode::CompositeInsertF32x2;
+ case IR::Opcode::CompositeInsertF64x3:
+ return IR::Opcode::CompositeInsertF32x3;
+ case IR::Opcode::CompositeInsertF64x4:
+ return IR::Opcode::CompositeInsertF32x4;
+ case IR::Opcode::FPOrdEqual64:
+ return IR::Opcode::FPOrdEqual32;
+ case IR::Opcode::FPUnordEqual64:
+ return IR::Opcode::FPUnordEqual32;
+ case IR::Opcode::FPOrdNotEqual64:
+ return IR::Opcode::FPOrdNotEqual32;
+ case IR::Opcode::FPUnordNotEqual64:
+ return IR::Opcode::FPUnordNotEqual32;
+ case IR::Opcode::FPOrdLessThan64:
+ return IR::Opcode::FPOrdLessThan32;
+ case IR::Opcode::FPUnordLessThan64:
+ return IR::Opcode::FPUnordLessThan32;
+ case IR::Opcode::FPOrdGreaterThan64:
+ return IR::Opcode::FPOrdGreaterThan32;
+ case IR::Opcode::FPUnordGreaterThan64:
+ return IR::Opcode::FPUnordGreaterThan32;
+ case IR::Opcode::FPOrdLessThanEqual64:
+ return IR::Opcode::FPOrdLessThanEqual32;
+ case IR::Opcode::FPUnordLessThanEqual64:
+ return IR::Opcode::FPUnordLessThanEqual32;
+ case IR::Opcode::FPOrdGreaterThanEqual64:
+ return IR::Opcode::FPOrdGreaterThanEqual32;
+ case IR::Opcode::FPUnordGreaterThanEqual64:
+ return IR::Opcode::FPUnordGreaterThanEqual32;
+ case IR::Opcode::FPIsNan64:
+ return IR::Opcode::FPIsNan32;
+ case IR::Opcode::ConvertS16F64:
+ return IR::Opcode::ConvertS16F32;
+ case IR::Opcode::ConvertS32F64:
+ return IR::Opcode::ConvertS32F32;
+ case IR::Opcode::ConvertS64F64:
+ return IR::Opcode::ConvertS64F32;
+ case IR::Opcode::ConvertU16F64:
+ return IR::Opcode::ConvertU16F32;
+ case IR::Opcode::ConvertU32F64:
+ return IR::Opcode::ConvertU32F32;
+ case IR::Opcode::ConvertU64F64:
+ return IR::Opcode::ConvertU64F32;
+ case IR::Opcode::ConvertF32F64:
+ return IR::Opcode::Identity;
+ case IR::Opcode::ConvertF64F32:
+ return IR::Opcode::Identity;
+ case IR::Opcode::ConvertF64S8:
+ return IR::Opcode::ConvertF32S8;
+ case IR::Opcode::ConvertF64S16:
+ return IR::Opcode::ConvertF32S16;
+ case IR::Opcode::ConvertF64S32:
+ return IR::Opcode::ConvertF32S32;
+ case IR::Opcode::ConvertF64S64:
+ return IR::Opcode::ConvertF32S64;
+ case IR::Opcode::ConvertF64U8:
+ return IR::Opcode::ConvertF32U8;
+ case IR::Opcode::ConvertF64U16:
+ return IR::Opcode::ConvertF32U16;
+ case IR::Opcode::ConvertF64U32:
+ return IR::Opcode::ConvertF32U32;
+ case IR::Opcode::ConvertF64U64:
+ return IR::Opcode::ConvertF32U64;
+ default:
+ return op;
+ }
+}
+
+void Lower(IR::Block& block, IR::Inst& inst) {
+ switch (inst.GetOpcode()) {
+ case IR::Opcode::PackDouble2x32: {
+ IR::IREmitter ir(block, IR::Block::InstructionList::s_iterator_to(inst));
+ inst.ReplaceUsesWith(PackedF64ToF32(ir, inst.Arg(0)));
+ break;
+ }
+ case IR::Opcode::UnpackDouble2x32: {
+ IR::IREmitter ir(block, IR::Block::InstructionList::s_iterator_to(inst));
+ inst.ReplaceUsesWith(F32ToPackedF64(ir, inst.Arg(0)));
+ break;
+ }
+ default:
+ inst.ReplaceOpcode(Replace(inst.GetOpcode()));
+ break;
+ }
+}
+
+} // Anonymous namespace
+
+void LowerFp64ToFp32(IR::Program& program) {
+ for (IR::Block* const block : program.blocks) {
+ for (IR::Inst& inst : block->Instructions()) {
+ Lower(*block, inst);
+ }
+ }
+}
+
+} // namespace Shader::Optimization
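
The packing helpers are straight IEEE-754 bit surgery: converting f64 to f32 rebiases the exponent by 127 - 1023 and keeps the top 23 mantissa bits (the low 29 are dropped, so results truncate rather than round, and exponents outside f32's range are not clamped). A host-side model of PackedF64ToF32 that can be checked directly:

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>

// Host-side model of PackedF64ToF32: operate on the two 32-bit halves of
// an IEEE-754 double exactly as the IR emitter does above. Exponents that
// do not fit f32 are not handled, matching the pass.
std::uint32_t PackedF64ToF32(std::uint32_t lo, std::uint32_t hi) {
    const std::uint32_t sign = (hi >> 31) & 1;
    const std::uint32_t exp = (hi >> 20) & 0x7ff;
    const std::uint32_t mantissa =
        ((hi & 0xfffff) << 3) | ((lo >> 29) & 0x7); // top 23 mantissa bits
    std::uint32_t out_exp = exp == 0 ? 0 : exp + (127 - 1023); // rebias
    if (exp == 0x7ff) {
        out_exp = 0xff; // inf/NaN stay inf/NaN
    }
    return (sign << 31) | (out_exp << 23) | mantissa;
}

int main() {
    const auto bits = std::bit_cast<std::uint64_t>(1.5);
    const std::uint32_t f32_bits =
        PackedF64ToF32(static_cast<std::uint32_t>(bits),
                       static_cast<std::uint32_t>(bits >> 32));
    std::printf("%f\n", std::bit_cast<float>(f32_bits)); // prints 1.500000
}
```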
diff --git a/src/shader_recompiler/ir_opt/passes.h b/src/shader_recompiler/ir_opt/passes.h
index 4ffad1172..629d18fa1 100644
--- a/src/shader_recompiler/ir_opt/passes.h
+++ b/src/shader_recompiler/ir_opt/passes.h
@@ -13,10 +13,12 @@ struct HostTranslateInfo;
namespace Shader::Optimization {
void CollectShaderInfoPass(Environment& env, IR::Program& program);
+void ConditionalBarrierPass(IR::Program& program);
void ConstantPropagationPass(Environment& env, IR::Program& program);
void DeadCodeEliminationPass(IR::Program& program);
-void GlobalMemoryToStorageBufferPass(IR::Program& program, const HostTranslateInfo& host_info);
+void GlobalMemoryToStorageBufferPass(IR::Program& program);
void IdentityRemovalPass(IR::Program& program);
+void LowerFp64ToFp32(IR::Program& program);
void LowerFp16ToFp32(IR::Program& program);
void LowerInt64ToInt32(IR::Program& program);
void RescalingPass(IR::Program& program);
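
A hypothetical call-site sketch showing how a translator might sequence the new passes; the `RunOptimizationPasses` driver and the `support_float64` capability flag are illustrative assumptions, not the project's actual pipeline code:

```cpp
#include "shader_recompiler/environment.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/host_translate_info.h"
#include "shader_recompiler/ir_opt/passes.h"

// Hypothetical driver, for illustration only; the real invocation order
// lives in the frontend's translate pipeline.
void RunOptimizationPasses(Shader::Environment& env, Shader::IR::Program& program,
                           const Shader::HostTranslateInfo& host_info) {
    Shader::Optimization::ConditionalBarrierPass(program);
    if (!host_info.support_float64) { // illustrative capability flag
        Shader::Optimization::LowerFp64ToFp32(program);
    }
    Shader::Optimization::GlobalMemoryToStorageBufferPass(program);
    Shader::Optimization::ConstantPropagationPass(env, program);
    Shader::Optimization::DeadCodeEliminationPass(program);
    Shader::Optimization::IdentityRemovalPass(program);
}
```

Note the ordering constraint implied by the diffs: both new passes demote instructions to `IR::Opcode::Identity`, so `IdentityRemovalPass` has to run after them.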
diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp
index f5c86fcb1..d374c976a 100644
--- a/src/shader_recompiler/ir_opt/texture_pass.cpp
+++ b/src/shader_recompiler/ir_opt/texture_pass.cpp
@@ -355,21 +355,21 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
};
}
-TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) {
+u32 GetTextureHandle(Environment& env, const ConstBufferAddr& cbuf) {
const u32 secondary_index{cbuf.has_secondary ? cbuf.secondary_index : cbuf.index};
const u32 secondary_offset{cbuf.has_secondary ? cbuf.secondary_offset : cbuf.offset};
const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset) << cbuf.shift_left};
const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset)
<< cbuf.secondary_shift_left};
- return env.ReadTextureType(lhs_raw | rhs_raw);
+ return lhs_raw | rhs_raw;
+}
+
+TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) {
+ return env.ReadTextureType(GetTextureHandle(env, cbuf));
}
TexturePixelFormat ReadTexturePixelFormat(Environment& env, const ConstBufferAddr& cbuf) {
- const u32 secondary_index{cbuf.has_secondary ? cbuf.secondary_index : cbuf.index};
- const u32 secondary_offset{cbuf.has_secondary ? cbuf.secondary_offset : cbuf.offset};
- const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset)};
- const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset)};
- return env.ReadTexturePixelFormat(lhs_raw | rhs_raw);
+ return env.ReadTexturePixelFormat(GetTextureHandle(env, cbuf));
}
class Descriptors {
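
The `GetTextureHandle` refactor makes the previously duplicated logic explicit: a bindless handle may be split across two constant-buffer words, each pre-shifted before being OR'd together. A host-side sketch of the combination, with plain integers standing in for the `env.ReadCbufValue` results:

```cpp
#include <cstdint>

// The shift amounts come from the tracked IR, e.g. when the guest
// assembles the handle with SHL+OR before sampling.
constexpr std::uint32_t CombineHandle(std::uint32_t lhs_raw, std::uint32_t lhs_shift,
                                      std::uint32_t rhs_raw, std::uint32_t rhs_shift) {
    return (lhs_raw << lhs_shift) | (rhs_raw << rhs_shift);
}

// Example: texture id in one word, sampler id in another, merged the same
// way GetTextureHandle does before the environment decodes the TIC entry.
static_assert(CombineHandle(0x0012, 0, 0x0034, 20) == ((0x34u << 20) | 0x12u));
```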
@@ -386,8 +386,10 @@ public:
return Add(texture_buffer_descriptors, desc, [&desc](const auto& existing) {
return desc.cbuf_index == existing.cbuf_index &&
desc.cbuf_offset == existing.cbuf_offset &&
+ desc.shift_left == existing.shift_left &&
desc.secondary_cbuf_index == existing.secondary_cbuf_index &&
desc.secondary_cbuf_offset == existing.secondary_cbuf_offset &&
+ desc.secondary_shift_left == existing.secondary_shift_left &&
desc.count == existing.count && desc.size_shift == existing.size_shift &&
desc.has_secondary == existing.has_secondary;
});
@@ -405,15 +407,20 @@ public:
}
u32 Add(const TextureDescriptor& desc) {
- return Add(texture_descriptors, desc, [&desc](const auto& existing) {
+ const u32 index{Add(texture_descriptors, desc, [&desc](const auto& existing) {
return desc.type == existing.type && desc.is_depth == existing.is_depth &&
desc.has_secondary == existing.has_secondary &&
desc.cbuf_index == existing.cbuf_index &&
desc.cbuf_offset == existing.cbuf_offset &&
+ desc.shift_left == existing.shift_left &&
desc.secondary_cbuf_index == existing.secondary_cbuf_index &&
desc.secondary_cbuf_offset == existing.secondary_cbuf_offset &&
+ desc.secondary_shift_left == existing.secondary_shift_left &&
desc.count == existing.count && desc.size_shift == existing.size_shift;
- });
+ })};
+ // TODO: Read this from TIC
+ texture_descriptors[index].is_multisample |= desc.is_multisample;
+ return index;
}
u32 Add(const ImageDescriptor& desc) {
@@ -452,7 +459,8 @@ void PatchImageSampleImplicitLod(IR::Block& block, IR::Inst& inst) {
const IR::Value coord(inst.Arg(1));
const IR::Value handle(ir.Imm32(0));
const IR::U32 lod{ir.Imm32(0)};
- const IR::Value texture_size = ir.ImageQueryDimension(handle, lod, info);
+ const IR::U1 skip_mips{ir.Imm1(true)};
+ const IR::Value texture_size = ir.ImageQueryDimension(handle, lod, skip_mips, info);
inst.SetArg(
1, ir.CompositeConstruct(
ir.FPMul(IR::F32(ir.CompositeExtract(coord, 0)),
@@ -486,10 +494,10 @@ void PatchTexelFetch(IR::Block& block, IR::Inst& inst, TexturePixelFormat pixel_
const IR::F32 w(ir.CompositeExtract(new_inst, 3));
const IR::F16F32F64 max_value(ir.Imm32(get_max_value()));
const IR::Value converted =
- ir.CompositeConstruct(ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::S32>(x)), max_value),
- ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::S32>(y)), max_value),
- ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::S32>(z)), max_value),
- ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::S32>(w)), max_value));
+ ir.CompositeConstruct(ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::U32>(x)), max_value),
+ ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::U32>(y)), max_value),
+ ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::U32>(z)), max_value),
+ ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::U32>(w)), max_value));
inst.ReplaceUsesWith(converted);
}
} // Anonymous namespace
@@ -524,6 +532,7 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
const auto& cbuf{texture_inst.cbuf};
auto flags{inst->Flags<IR::TextureInstInfo>()};
+ bool is_multisample{false};
switch (inst->GetOpcode()) {
case IR::Opcode::ImageQueryDimensions:
flags.type.Assign(ReadTextureType(env, cbuf));
@@ -538,6 +547,12 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
}
break;
case IR::Opcode::ImageFetch:
+ if (flags.type == TextureType::Color2D || flags.type == TextureType::Color2DRect ||
+ flags.type == TextureType::ColorArray2D) {
+ is_multisample = !inst->Arg(4).IsEmpty();
+ } else {
+ inst->SetArg(4, IR::U32{});
+ }
if (flags.type != TextureType::Color1D) {
break;
}
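
The `ImageFetch` handling reads as: for 2D color targets, a present sample-index operand (argument 4) marks the descriptor as multisampled; for every other texture type the operand is meaningless and is cleared. A reduced sketch of that decision, with a stand-in enum:

```cpp
// Simplified stand-in for the relevant Shader::TextureType values.
enum class TextureType { Color1D, Color2D, Color2DRect, ColorArray2D, Color3D };

// Mirrors the ImageFetch case: only 2D color textures can carry a sample
// index; anywhere else the operand is dropped.
constexpr bool IsMultisampleCandidate(TextureType type) {
    return type == TextureType::Color2D || type == TextureType::Color2DRect ||
           type == TextureType::ColorArray2D;
}

constexpr bool DetectMultisample(TextureType type, bool has_sample_operand) {
    return IsMultisampleCandidate(type) && has_sample_operand;
}

static_assert(DetectMultisample(TextureType::Color2D, true));
static_assert(!DetectMultisample(TextureType::Color3D, true)); // operand cleared
```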
@@ -613,6 +628,7 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
index = descriptors.Add(TextureDescriptor{
.type = flags.type,
.is_depth = flags.is_depth != 0,
+ .is_multisample = is_multisample,
.has_secondary = cbuf.has_secondary,
.cbuf_index = cbuf.index,
.cbuf_offset = cbuf.offset,