Diffstat (limited to 'src')
-rw-r--r-- | src/video_core/renderer_vulkan/shaders/blit.frag         |  24
-rw-r--r-- | src/video_core/renderer_vulkan/shaders/blit.vert         |  28
-rw-r--r-- | src/video_core/renderer_vulkan/shaders/quad_array.comp   |  37
-rw-r--r-- | src/video_core/renderer_vulkan/shaders/uint8.comp        |  33
-rw-r--r-- | src/video_core/renderer_vulkan/vk_device.cpp             |  22
-rw-r--r-- | src/video_core/renderer_vulkan/vk_device.h               |   9
-rw-r--r-- | src/video_core/renderer_vulkan/vk_resource_manager.cpp   |  36
-rw-r--r-- | src/video_core/renderer_vulkan/vk_resource_manager.h     |  16
-rw-r--r-- | src/video_core/renderer_vulkan/vk_shader_decompiler.cpp  | 107
-rw-r--r-- | src/video_core/renderer_vulkan/vk_shader_decompiler.h    |   4
-rw-r--r-- | src/video_core/shader/decode/memory.cpp                  |  38
-rw-r--r-- | src/video_core/texture_cache/surface_params.cpp          |  38
-rw-r--r-- | src/video_core/texture_cache/surface_params.h            |  14
-rw-r--r-- | src/video_core/texture_cache/texture_cache.h             |  89
-rw-r--r-- | src/video_core/textures/decoders.h                       |   4
15 files changed, 430 insertions, 69 deletions
diff --git a/src/video_core/renderer_vulkan/shaders/blit.frag b/src/video_core/renderer_vulkan/shaders/blit.frag
new file mode 100644
index 000000000..a06ecd24a
--- /dev/null
+++ b/src/video_core/renderer_vulkan/shaders/blit.frag
@@ -0,0 +1,24 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+/*
+ * Build instructions:
+ * $ glslangValidator -V $THIS_FILE -o output.spv
+ * $ spirv-opt -O --strip-debug output.spv -o optimized.spv
+ * $ xxd -i optimized.spv
+ *
+ * Then copy that bytecode to the C++ file
+ */
+
+#version 460 core
+
+layout (location = 0) in vec2 frag_tex_coord;
+
+layout (location = 0) out vec4 color;
+
+layout (binding = 1) uniform sampler2D color_texture;
+
+void main() {
+    color = texture(color_texture, frag_tex_coord);
+}
diff --git a/src/video_core/renderer_vulkan/shaders/blit.vert b/src/video_core/renderer_vulkan/shaders/blit.vert
new file mode 100644
index 000000000..c64d9235a
--- /dev/null
+++ b/src/video_core/renderer_vulkan/shaders/blit.vert
@@ -0,0 +1,28 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+/*
+ * Build instructions:
+ * $ glslangValidator -V $THIS_FILE -o output.spv
+ * $ spirv-opt -O --strip-debug output.spv -o optimized.spv
+ * $ xxd -i optimized.spv
+ *
+ * Then copy that bytecode to the C++ file
+ */
+
+#version 460 core
+
+layout (location = 0) in vec2 vert_position;
+layout (location = 1) in vec2 vert_tex_coord;
+
+layout (location = 0) out vec2 frag_tex_coord;
+
+layout (set = 0, binding = 0) uniform MatrixBlock {
+    mat4 modelview_matrix;
+};
+
+void main() {
+    gl_Position = modelview_matrix * vec4(vert_position, 0.0, 1.0);
+    frag_tex_coord = vert_tex_coord;
+}
diff --git a/src/video_core/renderer_vulkan/shaders/quad_array.comp b/src/video_core/renderer_vulkan/shaders/quad_array.comp
new file mode 100644
index 000000000..5a5703308
--- /dev/null
+++ b/src/video_core/renderer_vulkan/shaders/quad_array.comp
@@ -0,0 +1,37 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+/*
+ * Build instructions:
+ * $ glslangValidator -V $THIS_FILE -o output.spv
+ * $ spirv-opt -O --strip-debug output.spv -o optimized.spv
+ * $ xxd -i optimized.spv
+ *
+ * Then copy that bytecode to the C++ file
+ */
+
+#version 460 core
+
+layout (local_size_x = 1024) in;
+
+layout (std430, set = 0, binding = 0) buffer OutputBuffer {
+    uint output_indexes[];
+};
+
+layout (push_constant) uniform PushConstants {
+    uint first;
+};
+
+void main() {
+    uint primitive = gl_GlobalInvocationID.x;
+    if (primitive * 6 >= output_indexes.length()) {
+        return;
+    }
+
+    const uint quad_map[6] = uint[](0, 1, 2, 0, 2, 3);
+    for (uint vertex = 0; vertex < 6; ++vertex) {
+        uint index = first + primitive * 4 + quad_map[vertex];
+        output_indexes[primitive * 6 + vertex] = index;
+    }
+}
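Since Vulkan has no native quad primitive, quad_array.comp synthesizes the index buffer for quad draws on the GPU. As a point of reference only (this helper is not part of the commit), the expansion it performs is equivalent to the following CPU loop:

#include <cstdint>
#include <vector>

// CPU reference of quad_array.comp: every quad of 4 vertices becomes two triangles
// (6 indices) using the same 0,1,2 / 0,2,3 mapping as the shader's quad_map table.
std::vector<std::uint32_t> ExpandQuads(std::uint32_t first, std::uint32_t num_quads) {
    static constexpr std::uint32_t quad_map[6] = {0, 1, 2, 0, 2, 3};
    std::vector<std::uint32_t> indices(num_quads * 6);
    for (std::uint32_t primitive = 0; primitive < num_quads; ++primitive) {
        for (std::uint32_t vertex = 0; vertex < 6; ++vertex) {
            indices[primitive * 6 + vertex] = first + primitive * 4 + quad_map[vertex];
        }
    }
    return indices;
}

For example, with first = 0 the first quad {0, 1, 2, 3} comes out as the triangle index stream {0, 1, 2, 0, 2, 3}, which an indexed triangle-list draw can consume.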
diff --git a/src/video_core/renderer_vulkan/shaders/uint8.comp b/src/video_core/renderer_vulkan/shaders/uint8.comp
new file mode 100644
index 000000000..a320f3ae0
--- /dev/null
+++ b/src/video_core/renderer_vulkan/shaders/uint8.comp
@@ -0,0 +1,33 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+/*
+ * Build instructions:
+ * $ glslangValidator -V $THIS_FILE -o output.spv
+ * $ spirv-opt -O --strip-debug output.spv -o optimized.spv
+ * $ xxd -i optimized.spv
+ *
+ * Then copy that bytecode to the C++ file
+ */
+
+#version 460 core
+#extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_shader_8bit_storage : require
+
+layout (local_size_x = 1024) in;
+
+layout (std430, set = 0, binding = 0) readonly buffer InputBuffer {
+    uint8_t input_indexes[];
+};
+
+layout (std430, set = 0, binding = 1) writeonly buffer OutputBuffer {
+    uint16_t output_indexes[];
+};
+
+void main() {
+    uint id = gl_GlobalInvocationID.x;
+    if (id < input_indexes.length()) {
+        output_indexes[id] = uint16_t(input_indexes[id]);
+    }
+}
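uint8.comp widens 8-bit indices, which core Vulkan cannot consume without VK_EXT_index_type_uint8, into 16-bit indices that every implementation accepts. A hypothetical host-side sketch of the equivalent conversion and of the workgroup count implied by local_size_x = 1024 (the names below are illustrative, not code from the commit):

#include <cstdint>
#include <vector>

// CPU equivalent of uint8.comp: promote every u8 index to u16.
std::vector<std::uint16_t> WidenIndices(const std::vector<std::uint8_t>& input) {
    return std::vector<std::uint16_t>(input.begin(), input.end());
}

// Number of workgroups to dispatch in X for `count` indices with local_size_x = 1024;
// the shader's bounds check discards the threads past the end of the buffer.
std::uint32_t GroupCountX(std::uint32_t count) {
    return (count + 1023) / 1024;
}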
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 92854a4b3..939eebe83 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -3,12 +3,15 @@
 // Refer to the license.txt file included.
 
 #include <bitset>
+#include <chrono>
 #include <cstdlib>
 #include <optional>
 #include <set>
 #include <string_view>
+#include <thread>
 #include <vector>
 
 #include "common/assert.h"
+#include "core/settings.h"
 #include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_device.h"
@@ -201,6 +204,22 @@ vk::Format VKDevice::GetSupportedFormat(vk::Format wanted_format,
     return wanted_format;
 }
 
+void VKDevice::ReportLoss() const {
+    LOG_CRITICAL(Render_Vulkan, "Device loss occurred!");
+
+    // Wait some time to let the log flush
+    std::this_thread::sleep_for(std::chrono::seconds{1});
+
+    if (!nv_device_diagnostic_checkpoints) {
+        return;
+    }
+
+    [[maybe_unused]] const std::vector data = graphics_queue.getCheckpointDataNV(dld);
+    // Catch here in debug builds (or with optimizations disabled) the last graphics pipeline to be
+    // executed. It can be done from a debugger by evaluating the expression:
+    // *(VKGraphicsPipeline*)data[0]
+}
+
 bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features,
                                       const vk::DispatchLoaderDynamic& dldi) const {
     // Disable for now to avoid converting ASTC twice.
@@ -381,6 +400,8 @@ std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynami
                  VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME, true);
             Test(extension, ext_subgroup_size_control, VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME,
                  false);
+            Test(extension, nv_device_diagnostic_checkpoints,
+                 VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME, true);
         }
 
         if (khr_shader_float16_int8) {
@@ -464,6 +485,7 @@ std::vector<vk::DeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() con
 std::unordered_map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperties(
     const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical) {
     static constexpr std::array formats{vk::Format::eA8B8G8R8UnormPack32,
+                                        vk::Format::eA8B8G8R8UintPack32,
                                         vk::Format::eA8B8G8R8SnormPack32,
                                         vk::Format::eA8B8G8R8SrgbPack32,
                                         vk::Format::eB5G6R5UnormPack16,
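ReportLoss() above only reads checkpoint data back after a loss; for that data to be useful, markers have to be recorded into command buffers while they are built. A minimal sketch of how VK_NV_device_diagnostic_checkpoints is commonly used with Vulkan-Hpp follows — where the marker is recorded and what it points to are assumptions for illustration, not necessarily what yuzu does:

// Assumes the renderer's Vulkan-Hpp declarations and the common logging macros are available.

// Record a marker; the same pointer comes back in vk::CheckpointDataNV::pCheckpointMarker.
void RecordCheckpoint(vk::CommandBuffer cmdbuf, const void* marker,
                      const vk::DispatchLoaderDynamic& dld) {
    cmdbuf.setCheckpointNV(marker, dld);
}

// After vk::Result::eErrorDeviceLost, query which checkpoints the queue reached.
void DumpCheckpoints(vk::Queue queue, const vk::DispatchLoaderDynamic& dld) {
    for (const vk::CheckpointDataNV& data : queue.getCheckpointDataNV(dld)) {
        LOG_CRITICAL(Render_Vulkan, "Checkpoint reached at stage {}", vk::to_string(data.stage));
    }
}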
diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h
index a844c52df..72603f9f6 100644
--- a/src/video_core/renderer_vulkan/vk_device.h
+++ b/src/video_core/renderer_vulkan/vk_device.h
@@ -39,6 +39,9 @@ public:
     vk::Format GetSupportedFormat(vk::Format wanted_format, vk::FormatFeatureFlags wanted_usage,
                                   FormatType format_type) const;
 
+    /// Reports a device loss.
+    void ReportLoss() const;
+
     /// Returns the dispatch loader with direct function pointers of the device.
     const vk::DispatchLoaderDynamic& GetDispatchLoader() const {
         return dld;
     }
@@ -159,6 +162,11 @@ public:
         return ext_shader_viewport_index_layer;
     }
 
+    /// Returns true if the device supports VK_NV_device_diagnostic_checkpoints.
+    bool IsNvDeviceDiagnosticCheckpoints() const {
+        return nv_device_diagnostic_checkpoints;
+    }
+
     /// Returns the vendor name reported from Vulkan.
     std::string_view GetVendorName() const {
         return vendor_name;
@@ -218,6 +226,7 @@ private:
    bool ext_index_type_uint8{};             ///< Support for VK_EXT_index_type_uint8.
    bool ext_depth_range_unrestricted{};     ///< Support for VK_EXT_depth_range_unrestricted.
    bool ext_shader_viewport_index_layer{};  ///< Support for VK_EXT_shader_viewport_index_layer.
+   bool nv_device_diagnostic_checkpoints{}; ///< Support for VK_NV_device_diagnostic_checkpoints.
 
    // Telemetry parameters
    std::string vendor_name;                 ///< Device's driver name.
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
index 13c46e5b8..525b4bb46 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
@@ -72,12 +72,22 @@ VKFence::VKFence(const VKDevice& device, UniqueFence handle)
 VKFence::~VKFence() = default;
 
 void VKFence::Wait() {
+    static constexpr u64 timeout = std::numeric_limits<u64>::max();
     const auto dev = device.GetLogical();
     const auto& dld = device.GetDispatchLoader();
-    dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld);
+    switch (const auto result = dev.waitForFences(1, &*handle, true, timeout, dld)) {
+    case vk::Result::eSuccess:
+        return;
+    case vk::Result::eErrorDeviceLost:
+        device.ReportLoss();
+        [[fallthrough]];
+    default:
+        vk::throwResultException(result, "vk::waitForFences");
+    }
 }
 
 void VKFence::Release() {
+    ASSERT(is_owned);
     is_owned = false;
 }
 
@@ -133,8 +143,32 @@ void VKFence::Unprotect(VKResource* resource) {
     protected_resources.erase(it);
 }
 
+void VKFence::RedirectProtection(VKResource* old_resource, VKResource* new_resource) noexcept {
+    std::replace(std::begin(protected_resources), std::end(protected_resources), old_resource,
+                 new_resource);
+}
+
 VKFenceWatch::VKFenceWatch() = default;
 
+VKFenceWatch::VKFenceWatch(VKFence& initial_fence) {
+    Watch(initial_fence);
+}
+
+VKFenceWatch::VKFenceWatch(VKFenceWatch&& rhs) noexcept {
+    fence = std::exchange(rhs.fence, nullptr);
+    if (fence) {
+        fence->RedirectProtection(&rhs, this);
+    }
+}
+
+VKFenceWatch& VKFenceWatch::operator=(VKFenceWatch&& rhs) noexcept {
+    fence = std::exchange(rhs.fence, nullptr);
+    if (fence) {
+        fence->RedirectProtection(&rhs, this);
+    }
+    return *this;
+}
+
 VKFenceWatch::~VKFenceWatch() {
     if (fence) {
         fence->Unprotect(this);
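The new move constructor and move assignment exist because a VKFence keeps raw pointers to the resources it protects; if a watched VKFenceWatch is relocated (for example when a container of watches grows), the fence must be told about the new address or it would later signal a dangling pointer. An illustrative sketch of the situation RedirectProtection handles (assumes the headers from this change are included):

#include <vector>

void Example(VKFence& fence) {
    std::vector<VKFenceWatch> watches;
    watches.emplace_back(fence); // the fence records &watches[0] as a protected resource
    watches.emplace_back(fence); // growth may move the first watch to a new address
    // The move constructor calls fence->RedirectProtection(&old, &new), so the fence's
    // protected_resources list follows the object instead of dangling.
}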
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.h b/src/video_core/renderer_vulkan/vk_resource_manager.h
index 08ee86fa6..d4cbc95a5 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.h
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.h
@@ -65,6 +65,9 @@ public:
     /// Removes protection for a resource.
     void Unprotect(VKResource* resource);
 
+    /// Redirects one protected resource to a new address.
+    void RedirectProtection(VKResource* old_resource, VKResource* new_resource) noexcept;
+
     /// Retreives the fence.
     operator vk::Fence() const {
         return *handle;
@@ -97,8 +100,13 @@ private:
 class VKFenceWatch final : public VKResource {
 public:
     explicit VKFenceWatch();
+    VKFenceWatch(VKFence& initial_fence);
+    VKFenceWatch(VKFenceWatch&&) noexcept;
+    VKFenceWatch(const VKFenceWatch&) = delete;
     ~VKFenceWatch() override;
 
+    VKFenceWatch& operator=(VKFenceWatch&&) noexcept;
+
     /// Waits for the fence to be released.
     void Wait();
 
@@ -116,6 +124,14 @@ public:
 
     void OnFenceRemoval(VKFence* signaling_fence) override;
 
+    /**
+     * Do not use it paired with Watch. Use TryWatch instead.
+     * Returns true when a fence is still watching this resource, false when the watch is free.
+     */
+    bool IsUsed() const {
+        return fence != nullptr;
+    }
+
 private:
     VKFence* fence{}; ///< Fence watching this resource. nullptr when the watch is free.
 };
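A rough sketch of how the additions above are meant to be consumed; Wait() and IsUsed() are declared in this diff, and the behaviour in the comments follows from fence == nullptr marking a free watch (illustrative only, assuming vk_resource_manager.h from this change is included):

void RecycleWatch(VKFenceWatch& watch) {
    if (watch.IsUsed()) {
        // A fence is still watching this resource; block until it releases it.
        watch.Wait();
    }
    // The watch is now free and can be handed to a new fence.
}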
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index 6227bc70b..a8baf91de 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -543,7 +543,7 @@ private:
         }
 
         for (u32 rt = 0; rt < static_cast<u32>(frag_colors.size()); ++rt) {
-            if (!IsRenderTargetUsed(rt)) {
+            if (!specialization.enabled_rendertargets[rt]) {
                 continue;
             }
@@ -1555,40 +1555,48 @@ private:
     Expression Texture(Operation operation) {
         const auto& meta = std::get<MetaTexture>(operation.GetMeta());
-        UNIMPLEMENTED_IF(!meta.aoffi.empty());
 
         const bool can_implicit = stage == ShaderType::Fragment;
         const Id sampler = GetTextureSampler(operation);
         const Id coords = GetCoordinates(operation, Type::Float);
 
+        std::vector<Id> operands;
+        spv::ImageOperandsMask mask{};
+        if (meta.bias) {
+            mask = mask | spv::ImageOperandsMask::Bias;
+            operands.push_back(AsFloat(Visit(meta.bias)));
+        }
+
+        if (!can_implicit) {
+            mask = mask | spv::ImageOperandsMask::Lod;
+            operands.push_back(v_float_zero);
+        }
+
+        if (!meta.aoffi.empty()) {
+            mask = mask | spv::ImageOperandsMask::Offset;
+            operands.push_back(GetOffsetCoordinates(operation));
+        }
+
         if (meta.depth_compare) {
             // Depth sampling
             UNIMPLEMENTED_IF(meta.bias);
             const Id dref = AsFloat(Visit(meta.depth_compare));
             if (can_implicit) {
-                return {OpImageSampleDrefImplicitLod(t_float, sampler, coords, dref, {}),
-                        Type::Float};
+                return {
+                    OpImageSampleDrefImplicitLod(t_float, sampler, coords, dref, mask, operands),
+                    Type::Float};
             } else {
-                return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref,
-                                                     spv::ImageOperandsMask::Lod, v_float_zero),
-                        Type::Float};
+                return {
+                    OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands),
+                    Type::Float};
             }
         }
 
-        std::vector<Id> operands;
-        spv::ImageOperandsMask mask{};
-        if (meta.bias) {
-            mask = mask | spv::ImageOperandsMask::Bias;
-            operands.push_back(AsFloat(Visit(meta.bias)));
-        }
-
         Id texture;
         if (can_implicit) {
             texture = OpImageSampleImplicitLod(t_float4, sampler, coords, mask, operands);
         } else {
-            texture = OpImageSampleExplicitLod(t_float4, sampler, coords,
-                                               mask | spv::ImageOperandsMask::Lod, v_float_zero,
-                                               operands);
+            texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, operands);
         }
         return GetTextureElement(operation, texture, Type::Float);
     }
@@ -1601,7 +1609,8 @@ private:
         const Id lod = AsFloat(Visit(meta.lod));
 
         spv::ImageOperandsMask mask = spv::ImageOperandsMask::Lod;
-        std::vector<Id> operands;
+        std::vector<Id> operands{lod};
+
         if (!meta.aoffi.empty()) {
             mask = mask | spv::ImageOperandsMask::Offset;
            operands.push_back(GetOffsetCoordinates(operation));
         }
 
         if (meta.sampler.IsShadow()) {
             const Id dref = AsFloat(Visit(meta.depth_compare));
-            return {
-                OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, lod, operands),
-                Type::Float};
+            return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands),
+                    Type::Float};
         }
-        const Id texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, lod, operands);
+        const Id texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, operands);
         return GetTextureElement(operation, texture, Type::Float);
     }
@@ -1722,7 +1730,7 @@ private:
         const std::vector grad = {dx, dy};
 
         static constexpr auto mask = spv::ImageOperandsMask::Grad;
-        const Id texture = OpImageSampleImplicitLod(t_float4, sampler, coords, mask, grad);
+        const Id texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, grad);
         return GetTextureElement(operation, texture, Type::Float);
     }
@@ -1833,7 +1841,7 @@ private:
     }
 
     void PreExit() {
-        if (stage == ShaderType::Vertex) {
+        if (stage == ShaderType::Vertex && specialization.ndc_minus_one_to_one) {
             const u32 position_index = out_indices.position.value();
             const Id z_pointer = AccessElement(t_out_float, out_vertex, position_index, 2U);
             const Id w_pointer = AccessElement(t_out_float, out_vertex, position_index, 3U);
@@ -1860,12 +1868,18 @@ private:
         // rendertargets/components are skipped in the register assignment.
         u32 current_reg = 0;
         for (u32 rt = 0; rt < Maxwell::NumRenderTargets; ++rt) {
+            if (!specialization.enabled_rendertargets[rt]) {
+                // Skip rendertargets that are not enabled
+                continue;
+            }
             // TODO(Subv): Figure out how dual-source blending is configured in the Switch.
             for (u32 component = 0; component < 4; ++component) {
+                const Id pointer = AccessElement(t_out_float, frag_colors.at(rt), component);
                 if (header.ps.IsColorComponentOutputEnabled(rt, component)) {
-                    OpStore(AccessElement(t_out_float, frag_colors.at(rt), component),
-                            SafeGetRegister(current_reg));
+                    OpStore(pointer, SafeGetRegister(current_reg));
                     ++current_reg;
+                } else {
+                    OpStore(pointer, component == 3 ? v_float_one : v_float_zero);
                 }
             }
         }
@@ -1995,15 +2009,6 @@ private:
         return DeclareBuiltIn(builtin, spv::StorageClass::Input, type, std::move(name));
     }
 
-    bool IsRenderTargetUsed(u32 rt) const {
-        for (u32 component = 0; component < 4; ++component) {
-            if (header.ps.IsColorComponentOutputEnabled(rt, component)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
     template <typename... Args>
     Id AccessElement(Id pointer_type, Id composite, Args... elements_) {
         std::vector<Id> members;
@@ -2552,29 +2557,7 @@ public:
     }
 
     Id operator()(const ExprCondCode& expr) {
-        const Node cc = decomp.ir.GetConditionCode(expr.cc);
-        Id target;
-
-        if (const auto pred = std::get_if<PredicateNode>(&*cc)) {
-            const auto index = pred->GetIndex();
-            switch (index) {
-            case Tegra::Shader::Pred::NeverExecute:
-                target = decomp.v_false;
-                break;
-            case Tegra::Shader::Pred::UnusedIndex:
-                target = decomp.v_true;
-                break;
-            default:
-                target = decomp.predicates.at(index);
-                break;
-            }
-        } else if (const auto flag = std::get_if<InternalFlagNode>(&*cc)) {
-            target = decomp.internal_flags.at(static_cast<u32>(flag->GetFlag()));
-        } else {
-            UNREACHABLE();
-        }
-
-        return decomp.OpLoad(decomp.t_bool, target);
+        return decomp.AsBool(decomp.Visit(decomp.ir.GetConditionCode(expr.cc)));
     }
 
     Id operator()(const ExprVar& expr) {
@@ -2589,7 +2572,7 @@ public:
         const Id target = decomp.Constant(decomp.t_uint, expr.value);
         Id gpr = decomp.OpLoad(decomp.t_float, decomp.registers.at(expr.gpr));
         gpr = decomp.OpBitcast(decomp.t_uint, gpr);
-        return decomp.OpLogicalEqual(decomp.t_uint, gpr, target);
+        return decomp.OpIEqual(decomp.t_bool, gpr, target);
     }
 
     Id Visit(const Expr& node) {
@@ -2659,11 +2642,11 @@ public:
         const Id loop_label = decomp.OpLabel();
         const Id endloop_label = decomp.OpLabel();
         const Id loop_start_block = decomp.OpLabel();
-        const Id loop_end_block = decomp.OpLabel();
+        const Id loop_continue_block = decomp.OpLabel();
         current_loop_exit = endloop_label;
         decomp.OpBranch(loop_label);
         decomp.AddLabel(loop_label);
-        decomp.OpLoopMerge(endloop_label, loop_end_block, spv::LoopControlMask::MaskNone);
+        decomp.OpLoopMerge(endloop_label, loop_continue_block, spv::LoopControlMask::MaskNone);
         decomp.OpBranch(loop_start_block);
         decomp.AddLabel(loop_start_block);
         ASTNode current = ast.nodes.GetFirst();
@@ -2671,6 +2654,8 @@ public:
             Visit(current);
             current = current->GetNext();
         }
+        decomp.OpBranch(loop_continue_block);
+        decomp.AddLabel(loop_continue_block);
         ExprDecompiler expr_parser{decomp};
         const Id condition = expr_parser.Visit(ast.condition);
         decomp.OpBranchConditional(condition, loop_label, endloop_label);
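The reworked Texture() builds one ImageOperands mask plus a matching operand list because SPIR-V requires the optional image operands that follow OpImageSample* to appear in the order of their mask bits: Bias (0x1) before Lod (0x2) before Offset (0x10). A condensed sketch of that pattern, mirroring the calls used in this file; has_bias, lod and the other inputs stand in for the decompiler's MetaTexture fields and are not real identifiers from the commit:

// Illustrative fragment inside the decompiler; not a complete function.
std::vector<Id> operands;
spv::ImageOperandsMask mask{};
if (has_bias) {            // bit 0x1: Bias is pushed first...
    mask = mask | spv::ImageOperandsMask::Bias;
    operands.push_back(bias);
}
if (needs_explicit_lod) {  // bit 0x2: ...then Lod...
    mask = mask | spv::ImageOperandsMask::Lod;
    operands.push_back(lod);
}
if (has_offset) {          // bit 0x10: ...finally Offset.
    mask = mask | spv::ImageOperandsMask::Offset;
    operands.push_back(offset);
}
const Id sampled = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, operands);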
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
index 2b01321b6..10794be1c 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.h
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
@@ -94,6 +94,7 @@ struct Specialization final {
     Maxwell::PrimitiveTopology primitive_topology{};
     std::optional<float> point_size{};
     std::array<Maxwell::VertexAttribute::Type, Maxwell::NumVertexAttributes> attribute_types{};
+    bool ndc_minus_one_to_one{};
 
     // Tessellation specific
     struct {
@@ -101,6 +102,9 @@ struct Specialization final {
         Maxwell::TessellationSpacing spacing{};
         bool clockwise{};
     } tessellation;
+
+    // Fragment specific
+    std::bitset<8> enabled_rendertargets;
 };
 // Old gcc versions don't consider this trivially copyable.
 // static_assert(std::is_trivially_copyable_v<Specialization>);
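The new ndc_minus_one_to_one flag drives the PreExit() change shown earlier: OpenGL-style clip space keeps depth in [-1, 1] while Vulkan expects [0, 1], so when the guest uses the former, the vertex epilogue rewrites z as (z + w) / 2 in clip space. After the perspective divide that is simply the affine remap below (a reference calculation, not code from the commit):

// OpenGL-convention NDC depth in [-1, 1] -> Vulkan-convention depth in [0, 1].
// In clip space: z' = (z + w) / 2, because z'/w = (z/w + 1) / 2.
float ToVulkanDepth(float depth_ndc) {
    return (depth_ndc + 1.0f) / 2.0f;
}
// ToVulkanDepth(-1.0f) == 0.0f, ToVulkanDepth(0.0f) == 0.5f, ToVulkanDepth(1.0f) == 1.0f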
diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp
index 78e92f52e..c934d0719 100644
--- a/src/video_core/shader/decode/memory.cpp
+++ b/src/video_core/shader/decode/memory.cpp
@@ -22,7 +22,23 @@ using Tegra::Shader::Register;
 
 namespace {
 
-u32 GetUniformTypeElementsCount(Tegra::Shader::UniformType uniform_type) {
+u32 GetLdgMemorySize(Tegra::Shader::UniformType uniform_type) {
+    switch (uniform_type) {
+    case Tegra::Shader::UniformType::UnsignedByte:
+    case Tegra::Shader::UniformType::Single:
+        return 1;
+    case Tegra::Shader::UniformType::Double:
+        return 2;
+    case Tegra::Shader::UniformType::Quad:
+    case Tegra::Shader::UniformType::UnsignedQuad:
+        return 4;
+    default:
+        UNIMPLEMENTED_MSG("Unimplemented size={}!", static_cast<u32>(uniform_type));
+        return 1;
+    }
+}
+
+u32 GetStgMemorySize(Tegra::Shader::UniformType uniform_type) {
     switch (uniform_type) {
     case Tegra::Shader::UniformType::Single:
         return 1;
@@ -170,7 +186,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
         const auto [real_address_base, base_address, descriptor] =
             TrackGlobalMemory(bb, instr, false);
 
-        const u32 count = GetUniformTypeElementsCount(type);
+        const u32 count = GetLdgMemorySize(type);
         if (!real_address_base || !base_address) {
             // Tracking failed, load zeroes.
             for (u32 i = 0; i < count; ++i) {
@@ -181,12 +197,22 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
 
         for (u32 i = 0; i < count; ++i) {
             const Node it_offset = Immediate(i * 4);
-            const Node real_address =
-                Operation(OperationCode::UAdd, NO_PRECISE, real_address_base, it_offset);
-            const Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor);
+            const Node real_address = Operation(OperationCode::UAdd, real_address_base, it_offset);
+            Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor);
+
+            if (type == Tegra::Shader::UniformType::UnsignedByte) {
+                // To handle unaligned loads get the byte used to dereference global memory
+                // and extract that byte from the loaded uint32.
+                Node byte = Operation(OperationCode::UBitwiseAnd, real_address, Immediate(3));
+                byte = Operation(OperationCode::ULogicalShiftLeft, std::move(byte), Immediate(3));
+
+                gmem = Operation(OperationCode::UBitfieldExtract, std::move(gmem), std::move(byte),
+                                 Immediate(8));
+            }
             SetTemporary(bb, i, gmem);
         }
+
         for (u32 i = 0; i < count; ++i) {
             SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i));
         }
@@ -276,7 +302,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
             break;
         }
 
-        const u32 count = GetUniformTypeElementsCount(type);
+        const u32 count = GetStgMemorySize(type);
         for (u32 i = 0; i < count; ++i) {
             const Node it_offset = Immediate(i * 4);
             const Node real_address = Operation(OperationCode::UAdd, real_address_base, it_offset);
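The LDG path for UnsignedByte above emulates a byte load on top of an aligned 32-bit global memory read: the low two address bits pick the byte inside the word and a bitfield extract pulls it out. The same arithmetic in plain C++, mirroring the UBitwiseAnd / ULogicalShiftLeft / UBitfieldExtract nodes (reference only, not part of the commit):

#include <cstdint>

// Extract the byte addressed by `address` out of the 32-bit word loaded from address & ~3u.
std::uint32_t LoadByteFromWord(std::uint32_t word, std::uint32_t address) {
    const std::uint32_t bit_offset = (address & 3u) << 3; // byte index * 8
    return (word >> bit_offset) & 0xffu;                  // UBitfieldExtract(word, bit_offset, 8)
}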
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
index a4f1edd9a..38b3a4ba8 100644
--- a/src/video_core/texture_cache/surface_params.cpp
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -392,4 +392,42 @@ std::string SurfaceParams::TargetName() const {
     }
 }
 
+u32 SurfaceParams::GetBlockSize() const {
+    const u32 x = 64U << block_width;
+    const u32 y = 8U << block_height;
+    const u32 z = 1U << block_depth;
+    return x * y * z;
+}
+
+std::pair<u32, u32> SurfaceParams::GetBlockXY() const {
+    const u32 x_pixels = 64U / GetBytesPerPixel();
+    const u32 x = x_pixels << block_width;
+    const u32 y = 8U << block_height;
+    return {x, y};
+}
+
+std::tuple<u32, u32, u32> SurfaceParams::GetBlockOffsetXYZ(u32 offset) const {
+    const auto div_ceil = [](const u32 x, const u32 y) { return ((x + y - 1) / y); };
+    const u32 block_size = GetBlockSize();
+    const u32 block_index = offset / block_size;
+    const u32 gob_offset = offset % block_size;
+    const u32 gob_index = gob_offset / static_cast<u32>(Tegra::Texture::GetGOBSize());
+    const u32 x_gob_pixels = 64U / GetBytesPerPixel();
+    const u32 x_block_pixels = x_gob_pixels << block_width;
+    const u32 y_block_pixels = 8U << block_height;
+    const u32 z_block_pixels = 1U << block_depth;
+    const u32 x_blocks = div_ceil(width, x_block_pixels);
+    const u32 y_blocks = div_ceil(height, y_block_pixels);
+    const u32 z_blocks = div_ceil(depth, z_block_pixels);
+    const u32 base_x = block_index % x_blocks;
+    const u32 base_y = (block_index / x_blocks) % y_blocks;
+    const u32 base_z = (block_index / (x_blocks * y_blocks)) % z_blocks;
+    u32 x = base_x * x_block_pixels;
+    u32 y = base_y * y_block_pixels;
+    u32 z = base_z * z_block_pixels;
+    z += gob_index >> block_height;
+    y += (gob_index * 8U) % y_block_pixels;
+    return {x, y, z};
+}
+
 } // namespace VideoCommon
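GetBlockOffsetXYZ() is what lets the texture cache later map a 2D slice's byte offset inside a block-linear 3D texture to texel coordinates: the offset first selects a whole block, and the remaining GOBs inside that block advance y (8 rows per GOB) and z. A worked example with assumed parameters — 4 bytes per pixel, block_width = block_depth = 0, block_height = 4, a 256x256x1 layout — purely for illustration:

// Assumed: bytes_per_pixel = 4, block_width = 0, block_height = 4, block_depth = 0,
//          width = 256, height = 256, depth = 1, GOB = 512 bytes (64 bytes x 8 rows).
// block_size     = (64 << 0) * (8 << 4) * (1 << 0) = 64 * 128 = 8192 bytes
// x_block_pixels = (64 / 4) << 0 = 16      y_block_pixels = 8 << 4 = 128
// x_blocks       = ceil(256 / 16) = 16     y_blocks = ceil(256 / 128) = 2
//
// offset = 50000:
//   block_index = 50000 / 8192 = 6         gob_offset = 50000 % 8192 = 848
//   gob_index   = 848 / 512 = 1
//   base_x = 6 % 16 = 6        -> x = 6 * 16 = 96
//   base_y = (6 / 16) % 2 = 0  -> y = 0, then y += (1 * 8) % 128 = 8
//   base_z = 0,                   z += 1 >> 4 = 0
// => (x, y, z) = (96, 8, 0)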
diff --git a/src/video_core/texture_cache/surface_params.h b/src/video_core/texture_cache/surface_params.h
index 129817ad3..992b5c022 100644
--- a/src/video_core/texture_cache/surface_params.h
+++ b/src/video_core/texture_cache/surface_params.h
@@ -4,6 +4,8 @@
 
 #pragma once
 
+#include <utility>
+
 #include "common/alignment.h"
 #include "common/bit_util.h"
 #include "common/cityhash.h"
@@ -136,6 +138,15 @@ public:
 
     std::size_t GetConvertedMipmapSize(u32 level) const;
 
+    /// Gets this texture's Tegra block size in the guest memory layout.
+    u32 GetBlockSize() const;
+
+    /// Gets the X and Y dimensions, in pixels, of a single block.
+    std::pair<u32, u32> GetBlockXY() const;
+
+    /// Gets the x, y, z coordinates corresponding to a guest memory offset.
+    std::tuple<u32, u32, u32> GetBlockOffsetXYZ(u32 offset) const;
+
     /// Returns the size of a layer in bytes in guest memory.
     std::size_t GetGuestLayerSize() const {
         return GetLayerSize(false, false);
@@ -269,7 +280,8 @@ private:
 
     /// Returns the size of all mipmap levels and aligns as needed.
     std::size_t GetInnerMemorySize(bool as_host_size, bool layer_only, bool uncompressed) const {
-        return GetLayerSize(as_host_size, uncompressed) * (layer_only ? 1U : depth);
+        return GetLayerSize(as_host_size, uncompressed) *
+               (layer_only ? 1U : (is_layered ? depth : 1U));
     }
 
     /// Returns the size of a layer
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 02d2e9136..f4c015635 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -616,6 +616,86 @@ private:
     }
 
     /**
+     * Takes care of managing 3D textures and their slices. Uses HLE methods to reconstruct the
+     * 3D texture within the GPU when possible. Falls back to LLE when it isn't possible to use
+     * any of the HLE methods.
+     *
+     * @param overlaps The overlapping surfaces registered in the cache.
+     * @param params The parameters of the new surface.
+     * @param gpu_addr The starting address of the new surface.
+     * @param cache_addr The starting address of the new surface on physical memory.
+     * @param preserve_contents Indicates whether the new surface should be loaded from memory or
+     *                          left blank.
+     */
+    std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps,
+                                                               const SurfaceParams& params,
+                                                               const GPUVAddr gpu_addr,
+                                                               const CacheAddr cache_addr,
+                                                               bool preserve_contents) {
+        if (params.target == SurfaceTarget::Texture3D) {
+            bool failed = false;
+            if (params.num_levels > 1) {
+                // We can't handle mipmaps in 3D textures yet, better fallback to LLE approach
+                return std::nullopt;
+            }
+            TSurface new_surface = GetUncachedSurface(gpu_addr, params);
+            bool modified = false;
+            for (auto& surface : overlaps) {
+                const SurfaceParams& src_params = surface->GetSurfaceParams();
+                if (src_params.target != SurfaceTarget::Texture2D) {
+                    failed = true;
+                    break;
+                }
+                if (src_params.height != params.height) {
+                    failed = true;
+                    break;
+                }
+                if (src_params.block_depth != params.block_depth ||
+                    src_params.block_height != params.block_height) {
+                    failed = true;
+                    break;
+                }
+                const u32 offset = static_cast<u32>(surface->GetCacheAddr() - cache_addr);
+                const auto [x, y, z] = params.GetBlockOffsetXYZ(offset);
+                modified |= surface->IsModified();
+                const CopyParams copy_params(0, 0, 0, 0, 0, z, 0, 0, params.width, params.height,
+                                             1);
+                ImageCopy(surface, new_surface, copy_params);
+            }
+            if (failed) {
+                return std::nullopt;
+            }
+            for (const auto& surface : overlaps) {
+                Unregister(surface);
+            }
+            new_surface->MarkAsModified(modified, Tick());
+            Register(new_surface);
+            auto view = new_surface->GetMainView();
+            return {{std::move(new_surface), view}};
+        } else {
+            for (const auto& surface : overlaps) {
+                if (!surface->MatchTarget(params.target)) {
+                    if (overlaps.size() == 1 && surface->GetCacheAddr() == cache_addr) {
+                        if (Settings::values.use_accurate_gpu_emulation) {
+                            return std::nullopt;
+                        }
+                        Unregister(surface);
+                        return InitializeSurface(gpu_addr, params, preserve_contents);
+                    }
+                    return std::nullopt;
+                }
+                if (surface->GetCacheAddr() != cache_addr) {
+                    continue;
+                }
+                if (surface->MatchesStructure(params) == MatchStructureResult::FullMatch) {
+                    return {{surface, surface->GetMainView()}};
+                }
+            }
+            return InitializeSurface(gpu_addr, params, preserve_contents);
+        }
+    }
+
+    /**
      * Gets the starting address and parameters of a candidate surface and tries
     * to find a matching surface within the cache. This is done in 3 big steps:
      *
@@ -687,6 +767,15 @@ private:
             }
         }
 
+        // Check if it's a 3D texture
+        if (params.block_depth > 0) {
+            auto surface =
+                Manage3DSurfaces(overlaps, params, gpu_addr, cache_addr, preserve_contents);
+            if (surface) {
+                return *surface;
+            }
+        }
+
         // Split cases between 1 overlap or many.
         if (overlaps.size() == 1) {
             TSurface current_surface = overlaps[0];
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index f1e3952bc..e5eac3f3b 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -12,6 +12,10 @@ namespace Tegra::Texture {
 
 // GOBSize constant. Calculated by 64 bytes in x multiplied by 8 y coords, represents
 // an small rect of (64/bytes_per_pixel)X8.
+inline std::size_t GetGOBSize() {
+    return 512;
+}
+
 inline std::size_t GetGOBSizeShift() {
     return 9;
 }
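GetGOBSize() exposes as a byte count the same constant that GetGOBSizeShift() already encodes: a GOB covers 64 bytes in x by 8 rows in y, i.e. 512 bytes, which is 1 << 9. A trivial consistency check (illustrative only, assumes decoders.h is included):

#include <cassert>
#include <cstddef>

// 64 bytes per row * 8 rows = 512 bytes per GOB = 1 << GetGOBSizeShift().
static_assert(64 * 8 == 512);

void CheckGobSize() {
    assert(Tegra::Texture::GetGOBSize() ==
           (std::size_t{1} << Tegra::Texture::GetGOBSizeShift()));
}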