21 files changed, 1236 insertions, 120 deletions
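The headline Vulkan addition below, fixed_pipeline_state.h/.cpp, declares Hash(), operator== and a std::hash<FixedPipelineState> specialization so that a snapshot of the Maxwell fixed-function registers can key a pipeline cache. A minimal sketch of that intended use, relying only on the header added by this change; CachedPipelineStub and LookupOrInsert are illustrative names, not code from this commit:

#include <string>
#include <unordered_map>

#include "video_core/renderer_vulkan/fixed_pipeline_state.h" // added by this change

// Hypothetical payload; a real renderer would store a compiled Vulkan pipeline here.
struct CachedPipelineStub {
    std::string debug_name;
};

// Equal register snapshots hash equally via std::hash<Vulkan::FixedPipelineState>,
// so try_emplace deduplicates pipelines that share the same fixed-function state.
CachedPipelineStub& LookupOrInsert(
    std::unordered_map<Vulkan::FixedPipelineState, CachedPipelineStub>& cache,
    const Vulkan::Maxwell& regs) {
    return cache.try_emplace(Vulkan::GetFixedPipelineState(regs)).first->second;
}
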
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 3b20c7d34..2594cd0bd 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -151,12 +151,16 @@ add_library(video_core STATIC if (ENABLE_VULKAN) target_sources(video_core PRIVATE renderer_vulkan/declarations.h + renderer_vulkan/fixed_pipeline_state.cpp + renderer_vulkan/fixed_pipeline_state.h renderer_vulkan/maxwell_to_vk.cpp renderer_vulkan/maxwell_to_vk.h renderer_vulkan/vk_buffer_cache.cpp renderer_vulkan/vk_buffer_cache.h renderer_vulkan/vk_device.cpp renderer_vulkan/vk_device.h + renderer_vulkan/vk_image.cpp + renderer_vulkan/vk_image.h renderer_vulkan/vk_memory_manager.cpp renderer_vulkan/vk_memory_manager.h renderer_vulkan/vk_resource_manager.cpp @@ -167,6 +171,8 @@ if (ENABLE_VULKAN) renderer_vulkan/vk_scheduler.h renderer_vulkan/vk_shader_decompiler.cpp renderer_vulkan/vk_shader_decompiler.h + renderer_vulkan/vk_staging_buffer_pool.cpp + renderer_vulkan/vk_staging_buffer_pool.h renderer_vulkan/vk_stream_buffer.cpp renderer_vulkan/vk_stream_buffer.h renderer_vulkan/vk_swapchain.cpp diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 15a7a9d6a..e1cb8b0b0 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -88,11 +88,11 @@ void Maxwell3D::InitializeRegisterDefaults() { color_mask.A.Assign(1); } - // Commercial games seem to assume this value is enabled and nouveau sets this value manually. + // NVN games expect these values to be enabled at boot + regs.rasterize_enable = 1; regs.rt_separate_frag_data = 1; - - // Some games (like Super Mario Odyssey) assume that SRGB is enabled. regs.framebuffer_srgb = 1; + mme_inline[MAXWELL3D_REG_INDEX(draw.vertex_end_gl)] = true; mme_inline[MAXWELL3D_REG_INDEX(draw.vertex_begin_gl)] = true; mme_inline[MAXWELL3D_REG_INDEX(vertex_buffer.count)] = true; diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index dbb4e597f..a35e7a195 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -657,7 +657,11 @@ public: std::array<f32, 4> tess_level_outer; std::array<f32, 2> tess_level_inner; - INSERT_UNION_PADDING_WORDS(0x102); + INSERT_UNION_PADDING_WORDS(0x10); + + u32 rasterize_enable; + + INSERT_UNION_PADDING_WORDS(0xF1); u32 tfb_enabled; @@ -707,13 +711,15 @@ public: u32 color_mask_common; - INSERT_UNION_PADDING_WORDS(0x6); - - u32 rt_separate_frag_data; + INSERT_UNION_PADDING_WORDS(0x2); f32 depth_bounds[2]; - INSERT_UNION_PADDING_WORDS(0xA); + INSERT_UNION_PADDING_WORDS(0x2); + + u32 rt_separate_frag_data; + + INSERT_UNION_PADDING_WORDS(0xC); struct { u32 address_high; @@ -1030,7 +1036,12 @@ public: BitField<4, 1, u32> depth_clamp_far; } view_volume_clip_control; - INSERT_UNION_PADDING_WORDS(0x21); + INSERT_UNION_PADDING_WORDS(0x1F); + + u32 depth_bounds_enable; + + INSERT_UNION_PADDING_WORDS(1); + struct { u32 enable; LogicOperation operation; @@ -1420,6 +1431,7 @@ ASSERT_REG_POSITION(sync_info, 0xB2); ASSERT_REG_POSITION(tess_mode, 0xC8); ASSERT_REG_POSITION(tess_level_outer, 0xC9); ASSERT_REG_POSITION(tess_level_inner, 0xCD); +ASSERT_REG_POSITION(rasterize_enable, 0xDF); ASSERT_REG_POSITION(tfb_enabled, 0x1D1); ASSERT_REG_POSITION(rt, 0x200); ASSERT_REG_POSITION(viewport_transform, 0x280); @@ -1439,7 +1451,7 @@ ASSERT_REG_POSITION(stencil_back_func_mask, 0x3D6); ASSERT_REG_POSITION(stencil_back_mask, 0x3D7); ASSERT_REG_POSITION(color_mask_common, 0x3E4); 
ASSERT_REG_POSITION(rt_separate_frag_data, 0x3EB); -ASSERT_REG_POSITION(depth_bounds, 0x3EC); +ASSERT_REG_POSITION(depth_bounds, 0x3E7); ASSERT_REG_POSITION(zeta, 0x3F8); ASSERT_REG_POSITION(clear_flags, 0x43E); ASSERT_REG_POSITION(vertex_attrib_format, 0x458); @@ -1495,6 +1507,7 @@ ASSERT_REG_POSITION(cull, 0x646); ASSERT_REG_POSITION(pixel_center_integer, 0x649); ASSERT_REG_POSITION(viewport_transform_enabled, 0x64B); ASSERT_REG_POSITION(view_volume_clip_control, 0x64F); +ASSERT_REG_POSITION(depth_bounds_enable, 0x66F); ASSERT_REG_POSITION(logic_op, 0x671); ASSERT_REG_POSITION(clear_buffers, 0x674); ASSERT_REG_POSITION(color_mask, 0x680); diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h index dfb12cd2d..57b57c647 100644 --- a/src/video_core/engines/shader_bytecode.h +++ b/src/video_core/engines/shader_bytecode.h @@ -1051,7 +1051,7 @@ union Instruction { BitField<40, 1, R2pMode> mode; BitField<41, 2, u64> byte; BitField<20, 7, u64> immediate_mask; - } r2p; + } p2r_r2p; union { BitField<39, 3, u64> pred39; @@ -1239,7 +1239,7 @@ union Instruction { BitField<35, 1, u64> ndv_flag; BitField<49, 1, u64> nodep_flag; BitField<50, 1, u64> dc_flag; - BitField<54, 2, u64> info; + BitField<54, 2, u64> offset_mode; BitField<56, 2, u64> component; bool UsesMiscMode(TextureMiscMode mode) const { @@ -1251,9 +1251,9 @@ union Instruction { case TextureMiscMode::DC: return dc_flag != 0; case TextureMiscMode::AOFFI: - return info == 1; + return offset_mode == 1; case TextureMiscMode::PTP: - return info == 2; + return offset_mode == 2; default: break; } @@ -1265,7 +1265,7 @@ union Instruction { BitField<35, 1, u64> ndv_flag; BitField<49, 1, u64> nodep_flag; BitField<50, 1, u64> dc_flag; - BitField<33, 2, u64> info; + BitField<33, 2, u64> offset_mode; BitField<37, 2, u64> component; bool UsesMiscMode(TextureMiscMode mode) const { @@ -1277,9 +1277,9 @@ union Instruction { case TextureMiscMode::DC: return dc_flag != 0; case TextureMiscMode::AOFFI: - return info == 1; + return offset_mode == 1; case TextureMiscMode::PTP: - return info == 2; + return offset_mode == 2; default: break; } @@ -1801,6 +1801,7 @@ public: PSET, CSETP, R2P_IMM, + P2R_IMM, XMAD_IMM, XMAD_CR, XMAD_RC, @@ -2106,6 +2107,7 @@ private: INST("0101000010010---", Id::PSETP, Type::PredicateSetPredicate, "PSETP"), INST("010100001010----", Id::CSETP, Type::PredicateSetPredicate, "CSETP"), INST("0011100-11110---", Id::R2P_IMM, Type::RegisterSetPredicate, "R2P_IMM"), + INST("0011100-11101---", Id::P2R_IMM, Type::RegisterSetPredicate, "P2R_IMM"), INST("0011011-00------", Id::XMAD_IMM, Type::Xmad, "XMAD_IMM"), INST("0100111---------", Id::XMAD_CR, Type::Xmad, "XMAD_CR"), INST("010100010-------", Id::XMAD_RC, Type::Xmad, "XMAD_RC"), diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index f20967d85..672051102 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -271,6 +271,9 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { case Maxwell::ShaderProgram::Geometry: shader_program_manager->UseTrivialGeometryShader(); break; + case Maxwell::ShaderProgram::Fragment: + shader_program_manager->UseTrivialFragmentShader(); + break; default: break; } @@ -514,6 +517,7 @@ void RasterizerOpenGL::Clear() { ConfigureClearFramebuffer(clear_state, use_color, use_depth, use_stencil); SyncViewport(clear_state); + SyncRasterizeEnable(clear_state); if (regs.clear_flags.scissor) { 
SyncScissorTest(clear_state); } @@ -541,6 +545,7 @@ void RasterizerOpenGL::Clear() { void RasterizerOpenGL::DrawPrelude() { auto& gpu = system.GPU().Maxwell3D(); + SyncRasterizeEnable(state); SyncColorMask(); SyncFragmentColorClampState(); SyncMultiSampleState(); @@ -1133,6 +1138,11 @@ void RasterizerOpenGL::SyncStencilTestState() { } } +void RasterizerOpenGL::SyncRasterizeEnable(OpenGLState& current_state) { + const auto& regs = system.GPU().Maxwell3D().regs; + current_state.rasterizer_discard = regs.rasterize_enable == 0; +} + void RasterizerOpenGL::SyncColorMask() { auto& maxwell3d = system.GPU().Maxwell3D(); if (!maxwell3d.dirty.color_mask) { diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 04c1ca551..6a27cf497 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -168,6 +168,9 @@ private: /// Syncs the point state to match the guest state void SyncPointState(); + /// Syncs the rasterizer enable state to match the guest state + void SyncRasterizeEnable(OpenGLState& current_state); + /// Syncs Color Mask void SyncColorMask(); diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index 270a9dc2b..de742d11c 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -112,25 +112,25 @@ constexpr GLenum GetGLShaderType(ShaderType shader_type) { } /// Describes primitive behavior on geometry shaders -constexpr std::tuple<const char*, const char*, u32> GetPrimitiveDescription(GLenum primitive_mode) { +constexpr std::pair<const char*, u32> GetPrimitiveDescription(GLenum primitive_mode) { switch (primitive_mode) { case GL_POINTS: - return {"points", "Points", 1}; + return {"points", 1}; case GL_LINES: case GL_LINE_STRIP: - return {"lines", "Lines", 2}; + return {"lines", 2}; case GL_LINES_ADJACENCY: case GL_LINE_STRIP_ADJACENCY: - return {"lines_adjacency", "LinesAdj", 4}; + return {"lines_adjacency", 4}; case GL_TRIANGLES: case GL_TRIANGLE_STRIP: case GL_TRIANGLE_FAN: - return {"triangles", "Triangles", 3}; + return {"triangles", 3}; case GL_TRIANGLES_ADJACENCY: case GL_TRIANGLE_STRIP_ADJACENCY: - return {"triangles_adjacency", "TrianglesAdj", 6}; + return {"triangles_adjacency", 6}; default: - return {"points", "Invalid", 1}; + return {"points", 1}; } } @@ -264,30 +264,25 @@ CachedProgram BuildShader(const Device& device, u64 unique_identifier, ShaderTyp "#extension GL_NV_shader_thread_group : require\n" "#extension GL_NV_shader_thread_shuffle : require\n"; } - source += '\n'; if (shader_type == ShaderType::Geometry) { - const auto [glsl_topology, debug_name, max_vertices] = - GetPrimitiveDescription(variant.primitive_mode); - - source += fmt::format("layout ({}) in;\n\n", glsl_topology); + const auto [glsl_topology, max_vertices] = GetPrimitiveDescription(variant.primitive_mode); source += fmt::format("#define MAX_VERTEX_INPUT {}\n", max_vertices); + source += fmt::format("layout ({}) in;\n", glsl_topology); } if (shader_type == ShaderType::Compute) { + if (variant.local_memory_size > 0) { + source += fmt::format("#define LOCAL_MEMORY_SIZE {}\n", + Common::AlignUp(variant.local_memory_size, 4) / 4); + } source += fmt::format("layout (local_size_x = {}, local_size_y = {}, local_size_z = {}) in;\n", variant.block_x, variant.block_y, variant.block_z); if (variant.shared_memory_size > 0) { - // TODO(Rodrigo): We should divide by four here, but 
having a larger shared memory pool - // avoids out of bound stores. Find out why shared memory size is being invalid. + // shared_memory_size is described in number of words source += fmt::format("shared uint smem[{}];\n", variant.shared_memory_size); } - - if (variant.local_memory_size > 0) { - source += fmt::format("#define LOCAL_MEMORY_SIZE {}\n", - Common::AlignUp(variant.local_memory_size, 4) / 4); - } } source += '\n'; diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index 0389c2143..a311dbcfe 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp @@ -48,10 +48,10 @@ class ExprDecompiler; enum class Type { Void, Bool, Bool2, Float, Int, Uint, HalfFloat }; -struct TextureAoffi {}; +struct TextureOffset {}; struct TextureDerivates {}; using TextureArgument = std::pair<Type, Node>; -using TextureIR = std::variant<TextureAoffi, TextureDerivates, TextureArgument>; +using TextureIR = std::variant<TextureOffset, TextureDerivates, TextureArgument>; constexpr u32 MAX_CONSTBUFFER_ELEMENTS = static_cast<u32>(Maxwell::MaxConstBufferSize) / (4 * sizeof(float)); @@ -1077,7 +1077,7 @@ private: } std::string GenerateTexture(Operation operation, const std::string& function_suffix, - const std::vector<TextureIR>& extras, bool sepparate_dc = false) { + const std::vector<TextureIR>& extras, bool separate_dc = false) { constexpr std::array coord_constructors = {"float", "vec2", "vec3", "vec4"}; const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); @@ -1090,10 +1090,12 @@ private: std::string expr = "texture" + function_suffix; if (!meta->aoffi.empty()) { expr += "Offset"; + } else if (!meta->ptp.empty()) { + expr += "Offsets"; } expr += '(' + GetSampler(meta->sampler) + ", "; expr += coord_constructors.at(count + (has_array ? 1 : 0) + - (has_shadow && !sepparate_dc ? 1 : 0) - 1); + (has_shadow && !separate_dc ? 1 : 0) - 1); expr += '('; for (std::size_t i = 0; i < count; ++i) { expr += Visit(operation[i]).AsFloat(); @@ -1106,7 +1108,7 @@ private: expr += ", float(" + Visit(meta->array).AsInt() + ')'; } if (has_shadow) { - if (sepparate_dc) { + if (separate_dc) { expr += "), " + Visit(meta->depth_compare).AsFloat(); } else { expr += ", " + Visit(meta->depth_compare).AsFloat() + ')'; @@ -1118,8 +1120,12 @@ private: for (const auto& variant : extras) { if (const auto argument = std::get_if<TextureArgument>(&variant)) { expr += GenerateTextureArgument(*argument); - } else if (std::holds_alternative<TextureAoffi>(variant)) { - expr += GenerateTextureAoffi(meta->aoffi); + } else if (std::holds_alternative<TextureOffset>(variant)) { + if (!meta->aoffi.empty()) { + expr += GenerateTextureAoffi(meta->aoffi); + } else if (!meta->ptp.empty()) { + expr += GenerateTexturePtp(meta->ptp); + } } else if (std::holds_alternative<TextureDerivates>(variant)) { expr += GenerateTextureDerivates(meta->derivates); } else { @@ -1160,6 +1166,20 @@ private: return expr; } + std::string ReadTextureOffset(const Node& value) { + if (const auto immediate = std::get_if<ImmediateNode>(&*value)) { + // Inline the string as an immediate integer in GLSL (AOFFI arguments are required + // to be constant by the standard). + return std::to_string(static_cast<s32>(immediate->GetValue())); + } else if (device.HasVariableAoffi()) { + // Avoid using variable AOFFI on unsupported devices. + return Visit(value).AsInt(); + } else { + // Insert 0 on devices not supporting variable AOFFI. 
+ return "0"; + } + } + std::string GenerateTextureAoffi(const std::vector<Node>& aoffi) { if (aoffi.empty()) { return {}; @@ -1170,18 +1190,7 @@ private: expr += '('; for (std::size_t index = 0; index < aoffi.size(); ++index) { - const auto operand{aoffi.at(index)}; - if (const auto immediate = std::get_if<ImmediateNode>(&*operand)) { - // Inline the string as an immediate integer in GLSL (AOFFI arguments are required - // to be constant by the standard). - expr += std::to_string(static_cast<s32>(immediate->GetValue())); - } else if (device.HasVariableAoffi()) { - // Avoid using variable AOFFI on unsupported devices. - expr += Visit(operand).AsInt(); - } else { - // Insert 0 on devices not supporting variable AOFFI. - expr += '0'; - } + expr += ReadTextureOffset(aoffi.at(index)); if (index + 1 < aoffi.size()) { expr += ", "; } @@ -1191,6 +1200,20 @@ private: return expr; } + std::string GenerateTexturePtp(const std::vector<Node>& ptp) { + static constexpr std::size_t num_vectors = 4; + ASSERT(ptp.size() == num_vectors * 2); + + std::string expr = ", ivec2[]("; + for (std::size_t vector = 0; vector < num_vectors; ++vector) { + const bool has_next = vector + 1 < num_vectors; + expr += fmt::format("ivec2({}, {}){}", ReadTextureOffset(ptp.at(vector * 2)), + ReadTextureOffset(ptp.at(vector * 2 + 1)), has_next ? ", " : ""); + } + expr += ')'; + return expr; + } + std::string GenerateTextureDerivates(const std::vector<Node>& derivates) { if (derivates.empty()) { return {}; @@ -1689,7 +1712,7 @@ private: ASSERT(meta); std::string expr = GenerateTexture( - operation, "", {TextureAoffi{}, TextureArgument{Type::Float, meta->bias}}); + operation, "", {TextureOffset{}, TextureArgument{Type::Float, meta->bias}}); if (meta->sampler.IsShadow()) { expr = "vec4(" + expr + ')'; } @@ -1701,7 +1724,7 @@ private: ASSERT(meta); std::string expr = GenerateTexture( - operation, "Lod", {TextureArgument{Type::Float, meta->lod}, TextureAoffi{}}); + operation, "Lod", {TextureArgument{Type::Float, meta->lod}, TextureOffset{}}); if (meta->sampler.IsShadow()) { expr = "vec4(" + expr + ')'; } @@ -1709,21 +1732,19 @@ private: } Expression TextureGather(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - ASSERT(meta); + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); - const auto type = meta->sampler.IsShadow() ? Type::Float : Type::Int; - if (meta->sampler.IsShadow()) { - return {GenerateTexture(operation, "Gather", {TextureAoffi{}}, true) + - GetSwizzle(meta->element), - Type::Float}; + const auto type = meta.sampler.IsShadow() ? 
Type::Float : Type::Int; + const bool separate_dc = meta.sampler.IsShadow(); + + std::vector<TextureIR> ir; + if (meta.sampler.IsShadow()) { + ir = {TextureOffset{}}; } else { - return {GenerateTexture(operation, "Gather", - {TextureAoffi{}, TextureArgument{type, meta->component}}, - false) + - GetSwizzle(meta->element), - Type::Float}; + ir = {TextureOffset{}, TextureArgument{type, meta.component}}; } + return {GenerateTexture(operation, "Gather", ir, separate_dc) + GetSwizzle(meta.element), + Type::Float}; } Expression TextureQueryDimensions(Operation operation) { @@ -1794,7 +1815,8 @@ private: const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); ASSERT(meta); - std::string expr = GenerateTexture(operation, "Grad", {TextureDerivates{}, TextureAoffi{}}); + std::string expr = + GenerateTexture(operation, "Grad", {TextureDerivates{}, TextureOffset{}}); return {std::move(expr) + GetSwizzle(meta->element), Type::Float}; } diff --git a/src/video_core/renderer_opengl/gl_shader_manager.h b/src/video_core/renderer_opengl/gl_shader_manager.h index 3703e7018..478c165ce 100644 --- a/src/video_core/renderer_opengl/gl_shader_manager.h +++ b/src/video_core/renderer_opengl/gl_shader_manager.h @@ -50,6 +50,10 @@ public: current_state.geometry_shader = 0; } + void UseTrivialFragmentShader() { + current_state.fragment_shader = 0; + } + private: struct PipelineState { bool operator==(const PipelineState& rhs) const { diff --git a/src/video_core/renderer_opengl/gl_state.cpp b/src/video_core/renderer_opengl/gl_state.cpp index ccc1e050a..df2e2395a 100644 --- a/src/video_core/renderer_opengl/gl_state.cpp +++ b/src/video_core/renderer_opengl/gl_state.cpp @@ -182,6 +182,10 @@ void OpenGLState::ApplyCulling() { } } +void OpenGLState::ApplyRasterizerDiscard() { + Enable(GL_RASTERIZER_DISCARD, cur_state.rasterizer_discard, rasterizer_discard); +} + void OpenGLState::ApplyColorMask() { if (!dirty.color_mask) { return; @@ -455,6 +459,7 @@ void OpenGLState::Apply() { ApplyPointSize(); ApplyFragmentColorClamp(); ApplyMultisample(); + ApplyRasterizerDiscard(); ApplyColorMask(); ApplyDepthClamp(); ApplyViewport(); diff --git a/src/video_core/renderer_opengl/gl_state.h b/src/video_core/renderer_opengl/gl_state.h index 0b5895084..fb180f302 100644 --- a/src/video_core/renderer_opengl/gl_state.h +++ b/src/video_core/renderer_opengl/gl_state.h @@ -48,6 +48,8 @@ public: GLuint index = 0; } primitive_restart; // GL_PRIMITIVE_RESTART + bool rasterizer_discard = false; // GL_RASTERIZER_DISCARD + struct ColorMask { GLboolean red_enabled = GL_TRUE; GLboolean green_enabled = GL_TRUE; @@ -56,6 +58,7 @@ public: }; std::array<ColorMask, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> color_mask; // GL_COLOR_WRITEMASK + struct { bool test_enabled = false; // GL_STENCIL_TEST struct { @@ -174,6 +177,7 @@ public: void ApplyMultisample(); void ApplySRgb(); void ApplyCulling(); + void ApplyRasterizerDiscard(); void ApplyColorMask(); void ApplyDepth(); void ApplyPrimitiveRestart(); diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp new file mode 100644 index 000000000..5a490f6ef --- /dev/null +++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp @@ -0,0 +1,296 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include <tuple> + +#include <boost/functional/hash.hpp> + +#include "common/common_types.h" +#include "video_core/renderer_vulkan/fixed_pipeline_state.h" + +namespace Vulkan { + +namespace { + +constexpr FixedPipelineState::DepthStencil GetDepthStencilState(const Maxwell& regs) { + const FixedPipelineState::StencilFace front_stencil( + regs.stencil_front_op_fail, regs.stencil_front_op_zfail, regs.stencil_front_op_zpass, + regs.stencil_front_func_func); + const FixedPipelineState::StencilFace back_stencil = + regs.stencil_two_side_enable + ? FixedPipelineState::StencilFace(regs.stencil_back_op_fail, regs.stencil_back_op_zfail, + regs.stencil_back_op_zpass, + regs.stencil_back_func_func) + : front_stencil; + return FixedPipelineState::DepthStencil( + regs.depth_test_enable == 1, regs.depth_write_enabled == 1, regs.depth_bounds_enable == 1, + regs.stencil_enable == 1, regs.depth_test_func, front_stencil, back_stencil); +} + +constexpr FixedPipelineState::InputAssembly GetInputAssemblyState(const Maxwell& regs) { + return FixedPipelineState::InputAssembly( + regs.draw.topology, regs.primitive_restart.enabled, + regs.draw.topology == Maxwell::PrimitiveTopology::Points ? regs.point_size : 0.0f); +} + +constexpr FixedPipelineState::BlendingAttachment GetBlendingAttachmentState( + const Maxwell& regs, std::size_t render_target) { + const auto& mask = regs.color_mask[regs.color_mask_common ? 0 : render_target]; + const std::array components = {mask.R != 0, mask.G != 0, mask.B != 0, mask.A != 0}; + + const FixedPipelineState::BlendingAttachment default_blending( + false, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One, + Maxwell::Blend::Factor::Zero, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One, + Maxwell::Blend::Factor::Zero, components); + if (render_target >= regs.rt_control.count) { + return default_blending; + } + + if (!regs.independent_blend_enable) { + const auto& src = regs.blend; + if (!src.enable[render_target]) { + return default_blending; + } + return FixedPipelineState::BlendingAttachment( + true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a, + src.factor_source_a, src.factor_dest_a, components); + } + + if (!regs.blend.enable[render_target]) { + return default_blending; + } + const auto& src = regs.independent_blend[render_target]; + return FixedPipelineState::BlendingAttachment( + true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a, + src.factor_source_a, src.factor_dest_a, components); +} + +constexpr FixedPipelineState::ColorBlending GetColorBlendingState(const Maxwell& regs) { + return FixedPipelineState::ColorBlending( + {regs.blend_color.r, regs.blend_color.g, regs.blend_color.b, regs.blend_color.a}, + regs.rt_control.count, + {GetBlendingAttachmentState(regs, 0), GetBlendingAttachmentState(regs, 1), + GetBlendingAttachmentState(regs, 2), GetBlendingAttachmentState(regs, 3), + GetBlendingAttachmentState(regs, 4), GetBlendingAttachmentState(regs, 5), + GetBlendingAttachmentState(regs, 6), GetBlendingAttachmentState(regs, 7)}); +} + +constexpr FixedPipelineState::Tessellation GetTessellationState(const Maxwell& regs) { + return FixedPipelineState::Tessellation(regs.patch_vertices, regs.tess_mode.prim, + regs.tess_mode.spacing, regs.tess_mode.cw != 0); +} + +constexpr std::size_t Point = 0; +constexpr std::size_t Line = 1; +constexpr std::size_t Polygon = 2; +constexpr std::array PolygonOffsetEnableLUT = { + Point, // Points + Line, // Lines + Line, // LineLoop + Line, // LineStrip + 
Polygon, // Triangles + Polygon, // TriangleStrip + Polygon, // TriangleFan + Polygon, // Quads + Polygon, // QuadStrip + Polygon, // Polygon + Line, // LinesAdjacency + Line, // LineStripAdjacency + Polygon, // TrianglesAdjacency + Polygon, // TriangleStripAdjacency + Polygon, // Patches +}; + +constexpr FixedPipelineState::Rasterizer GetRasterizerState(const Maxwell& regs) { + const std::array enabled_lut = {regs.polygon_offset_point_enable, + regs.polygon_offset_line_enable, + regs.polygon_offset_fill_enable}; + const auto topology = static_cast<std::size_t>(regs.draw.topology.Value()); + const bool depth_bias_enabled = enabled_lut[PolygonOffsetEnableLUT[topology]]; + + Maxwell::Cull::FrontFace front_face = regs.cull.front_face; + if (regs.screen_y_control.triangle_rast_flip != 0 && + regs.viewport_transform[0].scale_y > 0.0f) { + if (front_face == Maxwell::Cull::FrontFace::CounterClockWise) + front_face = Maxwell::Cull::FrontFace::ClockWise; + else if (front_face == Maxwell::Cull::FrontFace::ClockWise) + front_face = Maxwell::Cull::FrontFace::CounterClockWise; + } + + const bool gl_ndc = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne; + return FixedPipelineState::Rasterizer(regs.cull.enabled, depth_bias_enabled, gl_ndc, + regs.cull.cull_face, front_face); +} + +} // Anonymous namespace + +std::size_t FixedPipelineState::VertexBinding::Hash() const noexcept { + return (index << stride) ^ divisor; +} + +bool FixedPipelineState::VertexBinding::operator==(const VertexBinding& rhs) const noexcept { + return std::tie(index, stride, divisor) == std::tie(rhs.index, rhs.stride, rhs.divisor); +} + +std::size_t FixedPipelineState::VertexAttribute::Hash() const noexcept { + return static_cast<std::size_t>(index) ^ (static_cast<std::size_t>(buffer) << 13) ^ + (static_cast<std::size_t>(type) << 22) ^ (static_cast<std::size_t>(size) << 31) ^ + (static_cast<std::size_t>(offset) << 36); +} + +bool FixedPipelineState::VertexAttribute::operator==(const VertexAttribute& rhs) const noexcept { + return std::tie(index, buffer, type, size, offset) == + std::tie(rhs.index, rhs.buffer, rhs.type, rhs.size, rhs.offset); +} + +std::size_t FixedPipelineState::StencilFace::Hash() const noexcept { + return static_cast<std::size_t>(action_stencil_fail) ^ + (static_cast<std::size_t>(action_depth_fail) << 4) ^ + (static_cast<std::size_t>(action_depth_fail) << 20) ^ + (static_cast<std::size_t>(action_depth_pass) << 36); +} + +bool FixedPipelineState::StencilFace::operator==(const StencilFace& rhs) const noexcept { + return std::tie(action_stencil_fail, action_depth_fail, action_depth_pass, test_func) == + std::tie(rhs.action_stencil_fail, rhs.action_depth_fail, rhs.action_depth_pass, + rhs.test_func); +} + +std::size_t FixedPipelineState::BlendingAttachment::Hash() const noexcept { + return static_cast<std::size_t>(enable) ^ (static_cast<std::size_t>(rgb_equation) << 5) ^ + (static_cast<std::size_t>(src_rgb_func) << 10) ^ + (static_cast<std::size_t>(dst_rgb_func) << 15) ^ + (static_cast<std::size_t>(a_equation) << 20) ^ + (static_cast<std::size_t>(src_a_func) << 25) ^ + (static_cast<std::size_t>(dst_a_func) << 30) ^ + (static_cast<std::size_t>(components[0]) << 35) ^ + (static_cast<std::size_t>(components[1]) << 36) ^ + (static_cast<std::size_t>(components[2]) << 37) ^ + (static_cast<std::size_t>(components[3]) << 38); +} + +bool FixedPipelineState::BlendingAttachment::operator==(const BlendingAttachment& rhs) const + noexcept { + return std::tie(enable, rgb_equation, src_rgb_func, dst_rgb_func, a_equation, 
src_a_func, + dst_a_func, components) == + std::tie(rhs.enable, rhs.rgb_equation, rhs.src_rgb_func, rhs.dst_rgb_func, + rhs.a_equation, rhs.src_a_func, rhs.dst_a_func, rhs.components); +} + +std::size_t FixedPipelineState::VertexInput::Hash() const noexcept { + std::size_t hash = num_bindings ^ (num_attributes << 32); + for (std::size_t i = 0; i < num_bindings; ++i) { + boost::hash_combine(hash, bindings[i].Hash()); + } + for (std::size_t i = 0; i < num_attributes; ++i) { + boost::hash_combine(hash, attributes[i].Hash()); + } + return hash; +} + +bool FixedPipelineState::VertexInput::operator==(const VertexInput& rhs) const noexcept { + return std::equal(bindings.begin(), bindings.begin() + num_bindings, rhs.bindings.begin(), + rhs.bindings.begin() + rhs.num_bindings) && + std::equal(attributes.begin(), attributes.begin() + num_attributes, + rhs.attributes.begin(), rhs.attributes.begin() + rhs.num_attributes); +} + +std::size_t FixedPipelineState::InputAssembly::Hash() const noexcept { + std::size_t point_size_int = 0; + std::memcpy(&point_size_int, &point_size, sizeof(point_size)); + return (static_cast<std::size_t>(topology) << 24) ^ (point_size_int << 32) ^ + static_cast<std::size_t>(primitive_restart_enable); +} + +bool FixedPipelineState::InputAssembly::operator==(const InputAssembly& rhs) const noexcept { + return std::tie(topology, primitive_restart_enable, point_size) == + std::tie(rhs.topology, rhs.primitive_restart_enable, rhs.point_size); +} + +std::size_t FixedPipelineState::Tessellation::Hash() const noexcept { + return static_cast<std::size_t>(patch_control_points) ^ + (static_cast<std::size_t>(primitive) << 6) ^ (static_cast<std::size_t>(spacing) << 8) ^ + (static_cast<std::size_t>(clockwise) << 10); +} + +bool FixedPipelineState::Tessellation::operator==(const Tessellation& rhs) const noexcept { + return std::tie(patch_control_points, primitive, spacing, clockwise) == + std::tie(rhs.patch_control_points, rhs.primitive, rhs.spacing, rhs.clockwise); +} + +std::size_t FixedPipelineState::Rasterizer::Hash() const noexcept { + return static_cast<std::size_t>(cull_enable) ^ + (static_cast<std::size_t>(depth_bias_enable) << 1) ^ + (static_cast<std::size_t>(ndc_minus_one_to_one) << 2) ^ + (static_cast<std::size_t>(cull_face) << 24) ^ + (static_cast<std::size_t>(front_face) << 48); +} + +bool FixedPipelineState::Rasterizer::operator==(const Rasterizer& rhs) const noexcept { + return std::tie(cull_enable, depth_bias_enable, ndc_minus_one_to_one, cull_face, front_face) == + std::tie(rhs.cull_enable, rhs.depth_bias_enable, rhs.ndc_minus_one_to_one, rhs.cull_face, + rhs.front_face); +} + +std::size_t FixedPipelineState::DepthStencil::Hash() const noexcept { + std::size_t hash = static_cast<std::size_t>(depth_test_enable) ^ + (static_cast<std::size_t>(depth_write_enable) << 1) ^ + (static_cast<std::size_t>(depth_bounds_enable) << 2) ^ + (static_cast<std::size_t>(stencil_enable) << 3) ^ + (static_cast<std::size_t>(depth_test_function) << 4); + boost::hash_combine(hash, front_stencil.Hash()); + boost::hash_combine(hash, back_stencil.Hash()); + return hash; +} + +bool FixedPipelineState::DepthStencil::operator==(const DepthStencil& rhs) const noexcept { + return std::tie(depth_test_enable, depth_write_enable, depth_bounds_enable, depth_test_function, + stencil_enable, front_stencil, back_stencil) == + std::tie(rhs.depth_test_enable, rhs.depth_write_enable, rhs.depth_bounds_enable, + rhs.depth_test_function, rhs.stencil_enable, rhs.front_stencil, + rhs.back_stencil); +} + +std::size_t 
FixedPipelineState::ColorBlending::Hash() const noexcept { + std::size_t hash = attachments_count << 13; + for (std::size_t rt = 0; rt < static_cast<std::size_t>(attachments_count); ++rt) { + boost::hash_combine(hash, attachments[rt].Hash()); + } + return hash; +} + +bool FixedPipelineState::ColorBlending::operator==(const ColorBlending& rhs) const noexcept { + return std::equal(attachments.begin(), attachments.begin() + attachments_count, + rhs.attachments.begin(), rhs.attachments.begin() + rhs.attachments_count); +} + +std::size_t FixedPipelineState::Hash() const noexcept { + std::size_t hash = 0; + boost::hash_combine(hash, vertex_input.Hash()); + boost::hash_combine(hash, input_assembly.Hash()); + boost::hash_combine(hash, tessellation.Hash()); + boost::hash_combine(hash, rasterizer.Hash()); + boost::hash_combine(hash, depth_stencil.Hash()); + boost::hash_combine(hash, color_blending.Hash()); + return hash; +} + +bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcept { + return std::tie(vertex_input, input_assembly, tessellation, rasterizer, depth_stencil, + color_blending) == std::tie(rhs.vertex_input, rhs.input_assembly, + rhs.tessellation, rhs.rasterizer, rhs.depth_stencil, + rhs.color_blending); +} + +FixedPipelineState GetFixedPipelineState(const Maxwell& regs) { + FixedPipelineState fixed_state; + fixed_state.input_assembly = GetInputAssemblyState(regs); + fixed_state.tessellation = GetTessellationState(regs); + fixed_state.rasterizer = GetRasterizerState(regs); + fixed_state.depth_stencil = GetDepthStencilState(regs); + fixed_state.color_blending = GetColorBlendingState(regs); + return fixed_state; +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h new file mode 100644 index 000000000..04152c0d4 --- /dev/null +++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h @@ -0,0 +1,282 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <array> +#include <type_traits> + +#include "common/common_types.h" + +#include "video_core/engines/maxwell_3d.h" +#include "video_core/surface.h" + +namespace Vulkan { + +using Maxwell = Tegra::Engines::Maxwell3D::Regs; + +// TODO(Rodrigo): Optimize this structure. 
+ +struct FixedPipelineState { + using PixelFormat = VideoCore::Surface::PixelFormat; + + struct VertexBinding { + constexpr VertexBinding(u32 index, u32 stride, u32 divisor) + : index{index}, stride{stride}, divisor{divisor} {} + VertexBinding() = default; + + u32 index; + u32 stride; + u32 divisor; + + std::size_t Hash() const noexcept; + + bool operator==(const VertexBinding& rhs) const noexcept; + + bool operator!=(const VertexBinding& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct VertexAttribute { + constexpr VertexAttribute(u32 index, u32 buffer, Maxwell::VertexAttribute::Type type, + Maxwell::VertexAttribute::Size size, u32 offset) + : index{index}, buffer{buffer}, type{type}, size{size}, offset{offset} {} + VertexAttribute() = default; + + u32 index; + u32 buffer; + Maxwell::VertexAttribute::Type type; + Maxwell::VertexAttribute::Size size; + u32 offset; + + std::size_t Hash() const noexcept; + + bool operator==(const VertexAttribute& rhs) const noexcept; + + bool operator!=(const VertexAttribute& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct StencilFace { + constexpr StencilFace(Maxwell::StencilOp action_stencil_fail, + Maxwell::StencilOp action_depth_fail, + Maxwell::StencilOp action_depth_pass, Maxwell::ComparisonOp test_func) + : action_stencil_fail{action_stencil_fail}, action_depth_fail{action_depth_fail}, + action_depth_pass{action_depth_pass}, test_func{test_func} {} + StencilFace() = default; + + Maxwell::StencilOp action_stencil_fail; + Maxwell::StencilOp action_depth_fail; + Maxwell::StencilOp action_depth_pass; + Maxwell::ComparisonOp test_func; + + std::size_t Hash() const noexcept; + + bool operator==(const StencilFace& rhs) const noexcept; + + bool operator!=(const StencilFace& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct BlendingAttachment { + constexpr BlendingAttachment(bool enable, Maxwell::Blend::Equation rgb_equation, + Maxwell::Blend::Factor src_rgb_func, + Maxwell::Blend::Factor dst_rgb_func, + Maxwell::Blend::Equation a_equation, + Maxwell::Blend::Factor src_a_func, + Maxwell::Blend::Factor dst_a_func, + std::array<bool, 4> components) + : enable{enable}, rgb_equation{rgb_equation}, src_rgb_func{src_rgb_func}, + dst_rgb_func{dst_rgb_func}, a_equation{a_equation}, src_a_func{src_a_func}, + dst_a_func{dst_a_func}, components{components} {} + BlendingAttachment() = default; + + bool enable; + Maxwell::Blend::Equation rgb_equation; + Maxwell::Blend::Factor src_rgb_func; + Maxwell::Blend::Factor dst_rgb_func; + Maxwell::Blend::Equation a_equation; + Maxwell::Blend::Factor src_a_func; + Maxwell::Blend::Factor dst_a_func; + std::array<bool, 4> components; + + std::size_t Hash() const noexcept; + + bool operator==(const BlendingAttachment& rhs) const noexcept; + + bool operator!=(const BlendingAttachment& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct VertexInput { + std::size_t num_bindings = 0; + std::size_t num_attributes = 0; + std::array<VertexBinding, Maxwell::NumVertexArrays> bindings; + std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes; + + std::size_t Hash() const noexcept; + + bool operator==(const VertexInput& rhs) const noexcept; + + bool operator!=(const VertexInput& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct InputAssembly { + constexpr InputAssembly(Maxwell::PrimitiveTopology topology, bool primitive_restart_enable, + float point_size) + : topology{topology}, primitive_restart_enable{primitive_restart_enable}, + 
point_size{point_size} {} + InputAssembly() = default; + + Maxwell::PrimitiveTopology topology; + bool primitive_restart_enable; + float point_size; + + std::size_t Hash() const noexcept; + + bool operator==(const InputAssembly& rhs) const noexcept; + + bool operator!=(const InputAssembly& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct Tessellation { + constexpr Tessellation(u32 patch_control_points, Maxwell::TessellationPrimitive primitive, + Maxwell::TessellationSpacing spacing, bool clockwise) + : patch_control_points{patch_control_points}, primitive{primitive}, spacing{spacing}, + clockwise{clockwise} {} + Tessellation() = default; + + u32 patch_control_points; + Maxwell::TessellationPrimitive primitive; + Maxwell::TessellationSpacing spacing; + bool clockwise; + + std::size_t Hash() const noexcept; + + bool operator==(const Tessellation& rhs) const noexcept; + + bool operator!=(const Tessellation& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct Rasterizer { + constexpr Rasterizer(bool cull_enable, bool depth_bias_enable, bool ndc_minus_one_to_one, + Maxwell::Cull::CullFace cull_face, Maxwell::Cull::FrontFace front_face) + : cull_enable{cull_enable}, depth_bias_enable{depth_bias_enable}, + ndc_minus_one_to_one{ndc_minus_one_to_one}, cull_face{cull_face}, front_face{ + front_face} {} + Rasterizer() = default; + + bool cull_enable; + bool depth_bias_enable; + bool ndc_minus_one_to_one; + Maxwell::Cull::CullFace cull_face; + Maxwell::Cull::FrontFace front_face; + + std::size_t Hash() const noexcept; + + bool operator==(const Rasterizer& rhs) const noexcept; + + bool operator!=(const Rasterizer& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct DepthStencil { + constexpr DepthStencil(bool depth_test_enable, bool depth_write_enable, + bool depth_bounds_enable, bool stencil_enable, + Maxwell::ComparisonOp depth_test_function, StencilFace front_stencil, + StencilFace back_stencil) + : depth_test_enable{depth_test_enable}, depth_write_enable{depth_write_enable}, + depth_bounds_enable{depth_bounds_enable}, stencil_enable{stencil_enable}, + depth_test_function{depth_test_function}, front_stencil{front_stencil}, + back_stencil{back_stencil} {} + DepthStencil() = default; + + bool depth_test_enable; + bool depth_write_enable; + bool depth_bounds_enable; + bool stencil_enable; + Maxwell::ComparisonOp depth_test_function; + StencilFace front_stencil; + StencilFace back_stencil; + + std::size_t Hash() const noexcept; + + bool operator==(const DepthStencil& rhs) const noexcept; + + bool operator!=(const DepthStencil& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct ColorBlending { + constexpr ColorBlending( + std::array<float, 4> blend_constants, std::size_t attachments_count, + std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments) + : attachments_count{attachments_count}, attachments{attachments} {} + ColorBlending() = default; + + std::size_t attachments_count; + std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments; + + std::size_t Hash() const noexcept; + + bool operator==(const ColorBlending& rhs) const noexcept; + + bool operator!=(const ColorBlending& rhs) const noexcept { + return !operator==(rhs); + } + }; + + std::size_t Hash() const noexcept; + + bool operator==(const FixedPipelineState& rhs) const noexcept; + + bool operator!=(const FixedPipelineState& rhs) const noexcept { + return !operator==(rhs); + } + + VertexInput vertex_input; + InputAssembly input_assembly; + 
Tessellation tessellation; + Rasterizer rasterizer; + DepthStencil depth_stencil; + ColorBlending color_blending; +}; +static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexBinding>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexAttribute>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::StencilFace>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::BlendingAttachment>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexInput>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::InputAssembly>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::Tessellation>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::Rasterizer>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::DepthStencil>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::ColorBlending>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState>); + +FixedPipelineState GetFixedPipelineState(const Maxwell& regs); + +} // namespace Vulkan + +namespace std { + +template <> +struct hash<Vulkan::FixedPipelineState> { + std::size_t operator()(const Vulkan::FixedPipelineState& k) const noexcept { + return k.Hash(); + } +}; + +} // namespace std diff --git a/src/video_core/renderer_vulkan/vk_image.cpp b/src/video_core/renderer_vulkan/vk_image.cpp new file mode 100644 index 000000000..4bcbef959 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_image.cpp @@ -0,0 +1,106 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <memory> +#include <vector> + +#include "common/assert.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/renderer_vulkan/vk_image.h" +#include "video_core/renderer_vulkan/vk_scheduler.h" + +namespace Vulkan { + +VKImage::VKImage(const VKDevice& device, VKScheduler& scheduler, + const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask) + : device{device}, scheduler{scheduler}, format{image_ci.format}, aspect_mask{aspect_mask}, + image_num_layers{image_ci.arrayLayers}, image_num_levels{image_ci.mipLevels} { + UNIMPLEMENTED_IF_MSG(image_ci.queueFamilyIndexCount != 0, + "Queue family tracking is not implemented"); + + const auto dev = device.GetLogical(); + image = dev.createImageUnique(image_ci, nullptr, device.GetDispatchLoader()); + + const u32 num_ranges = image_num_layers * image_num_levels; + barriers.resize(num_ranges); + subrange_states.resize(num_ranges, {{}, image_ci.initialLayout}); +} + +VKImage::~VKImage() = default; + +void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels, + vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access, + vk::ImageLayout new_layout) { + if (!HasChanged(base_layer, num_layers, base_level, num_levels, new_access, new_layout)) { + return; + } + + std::size_t cursor = 0; + for (u32 layer_it = 0; layer_it < num_layers; ++layer_it) { + for (u32 level_it = 0; level_it < num_levels; ++level_it, ++cursor) { + const u32 layer = base_layer + layer_it; + const u32 level = base_level + level_it; + auto& state = GetSubrangeState(layer, level); + barriers[cursor] = vk::ImageMemoryBarrier( + state.access, new_access, state.layout, new_layout, VK_QUEUE_FAMILY_IGNORED, + VK_QUEUE_FAMILY_IGNORED, *image, {aspect_mask, level, 1, layer, 1}); + state.access = new_access; + state.layout = new_layout; + } + } 
+ + scheduler.RequestOutsideRenderPassOperationContext(); + + scheduler.Record([barriers = barriers, cursor](auto cmdbuf, auto& dld) { + // TODO(Rodrigo): Implement a way to use the latest stage across subresources. + constexpr auto stage_stub = vk::PipelineStageFlagBits::eAllCommands; + cmdbuf.pipelineBarrier(stage_stub, stage_stub, {}, 0, nullptr, 0, nullptr, + static_cast<u32>(cursor), barriers.data(), dld); + }); +} + +bool VKImage::HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels, + vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept { + const bool is_full_range = base_layer == 0 && num_layers == image_num_layers && + base_level == 0 && num_levels == image_num_levels; + if (!is_full_range) { + state_diverged = true; + } + + if (!state_diverged) { + auto& state = GetSubrangeState(0, 0); + if (state.access != new_access || state.layout != new_layout) { + return true; + } + } + + for (u32 layer_it = 0; layer_it < num_layers; ++layer_it) { + for (u32 level_it = 0; level_it < num_levels; ++level_it) { + const u32 layer = base_layer + layer_it; + const u32 level = base_level + level_it; + auto& state = GetSubrangeState(layer, level); + if (state.access != new_access || state.layout != new_layout) { + return true; + } + } + } + return false; +} + +void VKImage::CreatePresentView() { + // Image type has to be 2D to be presented. + const vk::ImageViewCreateInfo image_view_ci({}, *image, vk::ImageViewType::e2D, format, {}, + {aspect_mask, 0, 1, 0, 1}); + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + present_view = dev.createImageViewUnique(image_view_ci, nullptr, dld); +} + +VKImage::SubrangeState& VKImage::GetSubrangeState(u32 layer, u32 level) noexcept { + return subrange_states[static_cast<std::size_t>(layer * image_num_levels) + + static_cast<std::size_t>(level)]; +} + +} // namespace Vulkan
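A hypothetical call site for the per-subresource tracking in vk_image.cpp above; the 1-layer, 4-level image and the chosen transfer stage/access/layout values are illustrative, not taken from this commit:

#include "video_core/renderer_vulkan/vk_image.h"

namespace Vulkan {

// Transition every subresource of a 1-layer, 4-level image for a transfer write.
// Repeating the same call is a no-op: HasChanged() finds no access/layout delta,
// so no barriers are recorded.
void PrepareForUpload(VKImage& image) {
    image.Transition(/*base_layer=*/0, /*num_layers=*/1, /*base_level=*/0, /*num_levels=*/4,
                     vk::PipelineStageFlagBits::eTransfer, vk::AccessFlagBits::eTransferWrite,
                     vk::ImageLayout::eTransferDstOptimal);
}

} // namespace Vulkan
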
\ No newline at end of file diff --git a/src/video_core/renderer_vulkan/vk_image.h b/src/video_core/renderer_vulkan/vk_image.h new file mode 100644 index 000000000..b78242512 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_image.h @@ -0,0 +1,84 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <memory> +#include <vector> + +#include "common/common_types.h" +#include "video_core/renderer_vulkan/declarations.h" + +namespace Vulkan { + +class VKDevice; +class VKScheduler; + +class VKImage { +public: + explicit VKImage(const VKDevice& device, VKScheduler& scheduler, + const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask); + ~VKImage(); + + /// Records in the passed command buffer an image transition and updates the state of the image. + void Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels, + vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access, + vk::ImageLayout new_layout); + + /// Returns a view compatible with presentation, the image has to be 2D. + vk::ImageView GetPresentView() { + if (!present_view) { + CreatePresentView(); + } + return *present_view; + } + + /// Returns the Vulkan image handler. + vk::Image GetHandle() const { + return *image; + } + + /// Returns the Vulkan format for this image. + vk::Format GetFormat() const { + return format; + } + + /// Returns the Vulkan aspect mask. + vk::ImageAspectFlags GetAspectMask() const { + return aspect_mask; + } + +private: + struct SubrangeState final { + vk::AccessFlags access{}; ///< Current access bits. + vk::ImageLayout layout = vk::ImageLayout::eUndefined; ///< Current image layout. + }; + + bool HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels, + vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept; + + /// Creates a presentation view. + void CreatePresentView(); + + /// Returns the subrange state for a layer and layer. + SubrangeState& GetSubrangeState(u32 layer, u32 level) noexcept; + + const VKDevice& device; ///< Device handler. + VKScheduler& scheduler; ///< Device scheduler. + + const vk::Format format; ///< Vulkan format. + const vk::ImageAspectFlags aspect_mask; ///< Vulkan aspect mask. + const u32 image_num_layers; ///< Number of layers. + const u32 image_num_levels; ///< Number of mipmap levels. + + UniqueImage image; ///< Image handle. + UniqueImageView present_view; ///< Image view compatible with presentation. + + std::vector<vk::ImageMemoryBarrier> barriers; ///< Pool of barriers. + std::vector<SubrangeState> subrange_states; ///< Current subrange state. + + bool state_diverged = false; ///< True when subresources mismatch in layout. +}; + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp new file mode 100644 index 000000000..171d78afc --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp @@ -0,0 +1,127 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include <algorithm> +#include <unordered_map> +#include <utility> +#include <vector> + +#include "common/bit_util.h" +#include "common/common_types.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/renderer_vulkan/vk_resource_manager.h" +#include "video_core/renderer_vulkan/vk_scheduler.h" +#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" + +namespace Vulkan { + +VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr<VKBuffer> buffer, VKFence& fence, + u64 last_epoch) + : buffer{std::move(buffer)}, watch{fence}, last_epoch{last_epoch} {} + +VKStagingBufferPool::StagingBuffer::StagingBuffer(StagingBuffer&& rhs) noexcept { + buffer = std::move(rhs.buffer); + watch = std::move(rhs.watch); + last_epoch = rhs.last_epoch; +} + +VKStagingBufferPool::StagingBuffer::~StagingBuffer() = default; + +VKStagingBufferPool::StagingBuffer& VKStagingBufferPool::StagingBuffer::operator=( + StagingBuffer&& rhs) noexcept { + buffer = std::move(rhs.buffer); + watch = std::move(rhs.watch); + last_epoch = rhs.last_epoch; + return *this; +} + +VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager, + VKScheduler& scheduler) + : device{device}, memory_manager{memory_manager}, scheduler{scheduler}, + is_device_integrated{device.IsIntegrated()} {} + +VKStagingBufferPool::~VKStagingBufferPool() = default; + +VKBuffer& VKStagingBufferPool::GetUnusedBuffer(std::size_t size, bool host_visible) { + if (const auto buffer = TryGetReservedBuffer(size, host_visible)) { + return *buffer; + } + return CreateStagingBuffer(size, host_visible); +} + +void VKStagingBufferPool::TickFrame() { + ++epoch; + current_delete_level = (current_delete_level + 1) % NumLevels; + + ReleaseCache(true); + if (!is_device_integrated) { + ReleaseCache(false); + } +} + +VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) { + for (auto& entry : GetCache(host_visible)[Common::Log2Ceil64(size)].entries) { + if (entry.watch.TryWatch(scheduler.GetFence())) { + entry.last_epoch = epoch; + return &*entry.buffer; + } + } + return nullptr; +} + +VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) { + const auto usage = + vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst | + vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eIndexBuffer; + const u32 log2 = Common::Log2Ceil64(size); + const vk::BufferCreateInfo buffer_ci({}, 1ULL << log2, usage, vk::SharingMode::eExclusive, 0, + nullptr); + const auto dev = device.GetLogical(); + auto buffer = std::make_unique<VKBuffer>(); + buffer->handle = dev.createBufferUnique(buffer_ci, nullptr, device.GetDispatchLoader()); + buffer->commit = memory_manager.Commit(*buffer->handle, host_visible); + + auto& entries = GetCache(host_visible)[log2].entries; + return *entries.emplace_back(std::move(buffer), scheduler.GetFence(), epoch).buffer; +} + +VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) { + return is_device_integrated || host_visible ? 
host_staging_buffers : device_staging_buffers; +} + +void VKStagingBufferPool::ReleaseCache(bool host_visible) { + auto& cache = GetCache(host_visible); + const u64 size = ReleaseLevel(cache, current_delete_level); + if (size == 0) { + return; + } +} + +u64 VKStagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, std::size_t log2) { + static constexpr u64 epochs_to_destroy = 180; + static constexpr std::size_t deletions_per_tick = 16; + + auto& staging = cache[log2]; + auto& entries = staging.entries; + const std::size_t old_size = entries.size(); + + const auto is_deleteable = [this](const auto& entry) { + return entry.last_epoch + epochs_to_destroy < epoch && !entry.watch.IsUsed(); + }; + const std::size_t begin_offset = staging.delete_index; + const std::size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size); + const auto begin = std::begin(entries) + begin_offset; + const auto end = std::begin(entries) + end_offset; + entries.erase(std::remove_if(begin, end, is_deleteable), end); + + const std::size_t new_size = entries.size(); + staging.delete_index += deletions_per_tick; + if (staging.delete_index >= new_size) { + staging.delete_index = 0; + } + + return (1ULL << log2) * (old_size - new_size); +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h new file mode 100644 index 000000000..02310375f --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h @@ -0,0 +1,83 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <climits> +#include <unordered_map> +#include <utility> +#include <vector> + +#include "common/common_types.h" + +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_memory_manager.h" + +namespace Vulkan { + +class VKDevice; +class VKFenceWatch; +class VKScheduler; + +struct VKBuffer final { + UniqueBuffer handle; + VKMemoryCommit commit; +}; + +class VKStagingBufferPool final { +public: + explicit VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager, + VKScheduler& scheduler); + ~VKStagingBufferPool(); + + VKBuffer& GetUnusedBuffer(std::size_t size, bool host_visible); + + void TickFrame(); + +private: + struct StagingBuffer final { + explicit StagingBuffer(std::unique_ptr<VKBuffer> buffer, VKFence& fence, u64 last_epoch); + StagingBuffer(StagingBuffer&& rhs) noexcept; + StagingBuffer(const StagingBuffer&) = delete; + ~StagingBuffer(); + + StagingBuffer& operator=(StagingBuffer&& rhs) noexcept; + + std::unique_ptr<VKBuffer> buffer; + VKFenceWatch watch; + u64 last_epoch = 0; + }; + + struct StagingBuffers final { + std::vector<StagingBuffer> entries; + std::size_t delete_index = 0; + }; + + static constexpr std::size_t NumLevels = sizeof(std::size_t) * CHAR_BIT; + using StagingBuffersCache = std::array<StagingBuffers, NumLevels>; + + VKBuffer* TryGetReservedBuffer(std::size_t size, bool host_visible); + + VKBuffer& CreateStagingBuffer(std::size_t size, bool host_visible); + + StagingBuffersCache& GetCache(bool host_visible); + + void ReleaseCache(bool host_visible); + + u64 ReleaseLevel(StagingBuffersCache& cache, std::size_t log2); + + const VKDevice& device; + VKMemoryManager& memory_manager; + VKScheduler& scheduler; + const bool is_device_integrated; + + StagingBuffersCache host_staging_buffers; + StagingBuffersCache device_staging_buffers; + + u64 epoch = 
0; + + std::size_t current_delete_level = 0; +}; + +} // namespace Vulkan diff --git a/src/video_core/shader/decode/register_set_predicate.cpp b/src/video_core/shader/decode/register_set_predicate.cpp index e6c9d287e..8d54cce34 100644 --- a/src/video_core/shader/decode/register_set_predicate.cpp +++ b/src/video_core/shader/decode/register_set_predicate.cpp @@ -13,37 +13,65 @@ namespace VideoCommon::Shader { using Tegra::Shader::Instruction; using Tegra::Shader::OpCode; +namespace { +constexpr u64 NUM_PROGRAMMABLE_PREDICATES = 7; +} + u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; const auto opcode = OpCode::Decode(instr); - UNIMPLEMENTED_IF(instr.r2p.mode != Tegra::Shader::R2pMode::Pr); + UNIMPLEMENTED_IF(instr.p2r_r2p.mode != Tegra::Shader::R2pMode::Pr); - const Node apply_mask = [&]() { + const Node apply_mask = [&] { switch (opcode->get().GetId()) { case OpCode::Id::R2P_IMM: - return Immediate(static_cast<u32>(instr.r2p.immediate_mask)); + case OpCode::Id::P2R_IMM: + return Immediate(static_cast<u32>(instr.p2r_r2p.immediate_mask)); default: UNREACHABLE(); - return Immediate(static_cast<u32>(instr.r2p.immediate_mask)); + return Immediate(0); } }(); - const Node mask = GetRegister(instr.gpr8); - const auto offset = static_cast<u32>(instr.r2p.byte) * 8; - constexpr u32 programmable_preds = 7; - for (u64 pred = 0; pred < programmable_preds; ++pred) { - const auto shift = static_cast<u32>(pred); + const auto offset = static_cast<u32>(instr.p2r_r2p.byte) * 8; + + switch (opcode->get().GetId()) { + case OpCode::Id::R2P_IMM: { + const Node mask = GetRegister(instr.gpr8); - const Node apply_compare = BitfieldExtract(apply_mask, shift, 1); - const Node condition = - Operation(OperationCode::LogicalUNotEqual, apply_compare, Immediate(0)); + for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) { + const auto shift = static_cast<u32>(pred); - const Node value_compare = BitfieldExtract(mask, offset + shift, 1); - const Node value = Operation(OperationCode::LogicalUNotEqual, value_compare, Immediate(0)); + const Node apply_compare = BitfieldExtract(apply_mask, shift, 1); + const Node condition = + Operation(OperationCode::LogicalUNotEqual, apply_compare, Immediate(0)); - const Node code = Operation(OperationCode::LogicalAssign, GetPredicate(pred), value); - bb.push_back(Conditional(condition, {code})); + const Node value_compare = BitfieldExtract(mask, offset + shift, 1); + const Node value = + Operation(OperationCode::LogicalUNotEqual, value_compare, Immediate(0)); + + const Node code = Operation(OperationCode::LogicalAssign, GetPredicate(pred), value); + bb.push_back(Conditional(condition, {code})); + } + break; + } + case OpCode::Id::P2R_IMM: { + Node value = Immediate(0); + for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) { + Node bit = Operation(OperationCode::Select, GetPredicate(pred), Immediate(1U << pred), + Immediate(0)); + value = Operation(OperationCode::UBitwiseOr, std::move(value), std::move(bit)); + } + value = Operation(OperationCode::UBitwiseAnd, std::move(value), apply_mask); + value = BitfieldInsert(GetRegister(instr.gpr8), std::move(value), offset, 8); + + SetRegister(bb, instr.gpr0, std::move(value)); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled P2R/R2R instruction: {}", opcode->get().GetName()); + break; } return pc; diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp index dff01a541..4b14cdf58 100644 --- 
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index dff01a541..4b14cdf58 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -89,59 +89,62 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
         [[fallthrough]];
     }
     case OpCode::Id::TLD4: {
-        ASSERT(instr.tld4.array == 0);
         UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV),
                              "NDV is not implemented");
-        UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::PTP),
-                             "PTP is not implemented");
-
         const auto texture_type = instr.tld4.texture_type.Value();
         const bool depth_compare = is_bindless ? instr.tld4_b.UsesMiscMode(TextureMiscMode::DC)
                                                : instr.tld4.UsesMiscMode(TextureMiscMode::DC);
         const bool is_array = instr.tld4.array != 0;
         const bool is_aoffi = is_bindless ? instr.tld4_b.UsesMiscMode(TextureMiscMode::AOFFI)
                                           : instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI);
-        WriteTexInstructionFloat(
-            bb, instr,
-            GetTld4Code(instr, texture_type, depth_compare, is_array, is_aoffi, is_bindless));
+        const bool is_ptp = is_bindless ? instr.tld4_b.UsesMiscMode(TextureMiscMode::PTP)
+                                        : instr.tld4.UsesMiscMode(TextureMiscMode::PTP);
+        WriteTexInstructionFloat(bb, instr,
+                                 GetTld4Code(instr, texture_type, depth_compare, is_array, is_aoffi,
+                                             is_ptp, is_bindless));
         break;
     }
     case OpCode::Id::TLD4S: {
-        const bool uses_aoffi = instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI);
-        UNIMPLEMENTED_IF_MSG(uses_aoffi, "AOFFI is not implemented");
-
-        const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC);
+        constexpr std::size_t num_coords = 2;
+        const bool is_aoffi = instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI);
+        const bool is_depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC);
         const Node op_a = GetRegister(instr.gpr8);
         const Node op_b = GetRegister(instr.gpr20);
 
         // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction.
         std::vector<Node> coords;
-        Node dc_reg;
-        if (depth_compare) {
+        std::vector<Node> aoffi;
+        Node depth_compare;
+        if (is_depth_compare) {
             // Note: TLD4S coordinate encoding works just like TEXS's
             const Node op_y = GetRegister(instr.gpr8.Value() + 1);
             coords.push_back(op_a);
             coords.push_back(op_y);
-            dc_reg = uses_aoffi ? GetRegister(instr.gpr20.Value() + 1) : op_b;
+            if (is_aoffi) {
+                aoffi = GetAoffiCoordinates(op_b, num_coords, true);
+                depth_compare = GetRegister(instr.gpr20.Value() + 1);
+            } else {
+                depth_compare = op_b;
+            }
         } else {
+            // There's no depth compare
             coords.push_back(op_a);
-            if (uses_aoffi) {
-                const Node op_y = GetRegister(instr.gpr8.Value() + 1);
-                coords.push_back(op_y);
+            if (is_aoffi) {
+                coords.push_back(GetRegister(instr.gpr8.Value() + 1));
+                aoffi = GetAoffiCoordinates(op_b, num_coords, true);
             } else {
                 coords.push_back(op_b);
             }
-            dc_reg = {};
         }
         const Node component = Immediate(static_cast<u32>(instr.tld4s.component));
 
-        const SamplerInfo info{TextureType::Texture2D, false, depth_compare};
+        const SamplerInfo info{TextureType::Texture2D, false, is_depth_compare};
         const Sampler& sampler = *GetSampler(instr.sampler, info);
 
         Node4 values;
         for (u32 element = 0; element < values.size(); ++element) {
             auto coords_copy = coords;
-            MetaTexture meta{sampler, {}, dc_reg, {}, {}, {}, {}, component, element};
+            MetaTexture meta{sampler, {}, depth_compare, aoffi, {}, {}, {}, {}, component, element};
             values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
         }
@@ -190,7 +193,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
         }
 
         for (u32 element = 0; element < values.size(); ++element) {
-            MetaTexture meta{*sampler, {}, {}, {}, derivates, {}, {}, {}, element};
+            MetaTexture meta{*sampler, {}, {}, {}, {}, derivates, {}, {}, {}, element};
             values[element] = Operation(OperationCode::TextureGradient, std::move(meta), coords);
         }
 
@@ -230,7 +233,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
             if (!instr.txq.IsComponentEnabled(element)) {
                 continue;
             }
-            MetaTexture meta{*sampler, {}, {}, {}, {}, {}, {}, {}, element};
+            MetaTexture meta{*sampler, {}, {}, {}, {}, {}, {}, {}, {}, element};
             const Node value =
                 Operation(OperationCode::TextureQueryDimensions, meta,
                           GetRegister(instr.gpr8.Value() + (is_bindless ? 1 : 0)));
@@ -299,7 +302,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
                 continue;
             }
             auto params = coords;
-            MetaTexture meta{*sampler, {}, {}, {}, {}, {}, {}, {}, element};
+            MetaTexture meta{*sampler, {}, {}, {}, {}, {}, {}, {}, {}, element};
             const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params));
             SetTemporary(bb, indexer++, value);
         }
@@ -367,7 +370,7 @@ const Sampler* ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler,
     if (it != used_samplers.end()) {
         ASSERT(!it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array &&
                it->IsShadow() == info.is_shadow && it->IsBuffer() == info.is_buffer);
-        return &(*it);
+        return &*it;
     }
 
     // Otherwise create a new mapping for this sampler
@@ -397,7 +400,7 @@ const Sampler* ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg,
     if (it != used_samplers.end()) {
         ASSERT(it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array &&
                it->IsShadow() == info.is_shadow);
-        return &(*it);
+        return &*it;
     }
 
     // Otherwise create a new mapping for this sampler
@@ -538,7 +541,7 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
 
     for (u32 element = 0; element < values.size(); ++element) {
         auto copy_coords = coords;
-        MetaTexture meta{*sampler, array, depth_compare, aoffi, {}, bias, lod, {}, element};
+        MetaTexture meta{*sampler, array, depth_compare, aoffi, {}, {}, bias, lod, {}, element};
         values[element] = Operation(read_method, meta, std::move(copy_coords));
     }
 
@@ -635,7 +638,9 @@ Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type,
 }
 
 Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare,
-                            bool is_array, bool is_aoffi, bool is_bindless) {
+                            bool is_array, bool is_aoffi, bool is_ptp, bool is_bindless) {
+    ASSERT_MSG(!(is_aoffi && is_ptp), "AOFFI and PTP can't be enabled at the same time");
+
     const std::size_t coord_count = GetCoordCount(texture_type);
 
     // If enabled arrays index is always stored in the gpr8 field
@@ -661,12 +666,15 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de
         return values;
     }
 
-    std::vector<Node> aoffi;
+    std::vector<Node> aoffi, ptp;
     if (is_aoffi) {
         aoffi = GetAoffiCoordinates(GetRegister(parameter_register++), coord_count, true);
+    } else if (is_ptp) {
+        ptp = GetPtpCoordinates(
+            {GetRegister(parameter_register++), GetRegister(parameter_register++)});
     }
 
-    Node dc{};
+    Node dc;
     if (depth_compare) {
         dc = GetRegister(parameter_register++);
     }
@@ -676,8 +684,8 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de
 
     for (u32 element = 0; element < values.size(); ++element) {
         auto coords_copy = coords;
-        MetaTexture meta{*sampler, GetRegister(array_register), dc, aoffi, {}, {}, {}, component,
-                         element};
+        MetaTexture meta{
+            *sampler, GetRegister(array_register), dc, aoffi, ptp, {}, {}, {}, component, element};
         values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
     }
 
@@ -710,7 +718,7 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
     Node4 values;
     for (u32 element = 0; element < values.size(); ++element) {
         auto coords_copy = coords;
-        MetaTexture meta{sampler, array_register, {}, {}, {}, {}, lod, {}, element};
+        MetaTexture meta{sampler, array_register, {}, {}, {}, {}, {}, lod, {}, element};
         values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
     }
 
@@ -760,7 +768,7 @@ Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is
     Node4 values;
     for (u32 element = 0; element < values.size(); ++element) {
         auto coords_copy = coords;
-        MetaTexture meta{sampler, array, {}, {}, {}, {}, lod, {}, element};
+        MetaTexture meta{sampler, array, {}, {}, {}, {}, {}, lod, {}, element};
        values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
     }
     return values;
@@ -825,4 +833,38 @@ std::vector<Node> ShaderIR::GetAoffiCoordinates(Node aoffi_reg, std::size_t coor
     return aoffi;
 }
 
+std::vector<Node> ShaderIR::GetPtpCoordinates(std::array<Node, 2> ptp_regs) {
+    static constexpr u32 num_entries = 8;
+
+    std::vector<Node> ptp;
+    ptp.reserve(num_entries);
+
+    const auto global_size = static_cast<s64>(global_code.size());
+    const std::optional low = TrackImmediate(ptp_regs[0], global_code, global_size);
+    const std::optional high = TrackImmediate(ptp_regs[1], global_code, global_size);
+    if (!low || !high) {
+        for (u32 entry = 0; entry < num_entries; ++entry) {
+            const u32 reg = entry / 4;
+            const u32 offset = entry % 4;
+            const Node value = BitfieldExtract(ptp_regs[reg], offset * 8, 6);
+            const Node condition =
+                Operation(OperationCode::LogicalIGreaterEqual, value, Immediate(32));
+            const Node negative = Operation(OperationCode::IAdd, value, Immediate(-64));
+            ptp.push_back(Operation(OperationCode::Select, condition, negative, value));
+        }
+        return ptp;
+    }
+
+    const u64 immediate = (static_cast<u64>(*high) << 32) | static_cast<u64>(*low);
+    for (u32 entry = 0; entry < num_entries; ++entry) {
+        s32 value = (immediate >> (entry * 8)) & 0b111111;
+        if (value >= 32) {
+            value -= 64;
+        }
+        ptp.push_back(Immediate(value));
+    }
+
+    return ptp;
+}
+
 } // namespace VideoCommon::Shader
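Reviewer note: to make the constant-folded PTP path easier to check by eye, here is a host-side sketch of the same offset decoding, assuming both source registers are known immediates. DecodePtpOffsets is an illustrative name, not part of the shader IR.

#include <array>
#include <cstdint>

// Decodes the eight TLD4.PTP offsets packed one per byte across two 32-bit
// registers; each offset is the low 6 bits of its byte, sign-extended to -32..31.
std::array<int, 8> DecodePtpOffsets(std::uint32_t low, std::uint32_t high) {
    const std::uint64_t immediate = (static_cast<std::uint64_t>(high) << 32) | low;
    std::array<int, 8> offsets{};
    for (int entry = 0; entry < 8; ++entry) {
        int value = static_cast<int>((immediate >> (entry * 8)) & 0b111111);
        if (value >= 32) {
            value -= 64; // manual sign extension of the 6-bit field
        }
        offsets[entry] = value;
    }
    return offsets;
}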
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h
index abd40f582..4d2f4d6a8 100644
--- a/src/video_core/shader/node.h
+++ b/src/video_core/shader/node.h
@@ -374,6 +374,7 @@ struct MetaTexture {
     Node array;
     Node depth_compare;
     std::vector<Node> aoffi;
+    std::vector<Node> ptp;
     std::vector<Node> derivates;
     Node bias;
     Node lod;
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index 04ae5f822..baed06ccd 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -350,7 +350,8 @@ private:
                       bool is_array);
 
     Node4 GetTld4Code(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type,
-                      bool depth_compare, bool is_array, bool is_aoffi, bool is_bindless);
+                      bool depth_compare, bool is_array, bool is_aoffi, bool is_ptp,
+                      bool is_bindless);
 
     Node4 GetTldCode(Tegra::Shader::Instruction instr);
 
@@ -363,6 +364,8 @@ private:
     std::vector<Node> GetAoffiCoordinates(Node aoffi_reg, std::size_t coord_count, bool is_tld4);
 
+    std::vector<Node> GetPtpCoordinates(std::array<Node, 2> ptp_regs);
+
     Node4 GetTextureCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type,
                          Tegra::Shader::TextureProcessMode process_mode, std::vector<Node> coords,
                          Node array, Node depth_compare, u32 bias_offset, std::vector<Node> aoffi,