Diffstat (limited to 'src')
194 files changed, 6879 insertions, 3201 deletions
diff --git a/src/audio_core/audio_renderer.h b/src/audio_core/audio_renderer.h index 201ec7a3c..b2e5d336c 100644 --- a/src/audio_core/audio_renderer.h +++ b/src/audio_core/audio_renderer.h @@ -46,16 +46,18 @@ struct AudioRendererParameter { u32_le sample_rate; u32_le sample_count; u32_le mix_buffer_count; - u32_le unknown_c; + u32_le submix_count; u32_le voice_count; u32_le sink_count; u32_le effect_count; - u32_le unknown_1c; - u8 unknown_20; - INSERT_PADDING_BYTES(3); + u32_le performance_frame_count; + u8 is_voice_drop_enabled; + u8 unknown_21; + u8 unknown_22; + u8 execution_mode; u32_le splitter_count; - u32_le unknown_2c; - INSERT_PADDING_WORDS(1); + u32_le num_splitter_send_channels; + u32_le unknown_30; u32_le revision; }; static_assert(sizeof(AudioRendererParameter) == 52, "AudioRendererParameter is an invalid size"); diff --git a/src/audio_core/buffer.h b/src/audio_core/buffer.h index a323b23ec..5ee09e9aa 100644 --- a/src/audio_core/buffer.h +++ b/src/audio_core/buffer.h @@ -21,7 +21,7 @@ public: Buffer(Tag tag, std::vector<s16>&& samples) : tag{tag}, samples{std::move(samples)} {} /// Returns the raw audio data for the buffer - std::vector<s16>& Samples() { + std::vector<s16>& GetSamples() { return samples; } diff --git a/src/audio_core/codec.cpp b/src/audio_core/codec.cpp index 454de798b..c5a0d98ce 100644 --- a/src/audio_core/codec.cpp +++ b/src/audio_core/codec.cpp @@ -68,8 +68,8 @@ std::vector<s16> DecodeADPCM(const u8* const data, std::size_t size, const ADPCM } } - state.yn1 = yn1; - state.yn2 = yn2; + state.yn1 = static_cast<s16>(yn1); + state.yn2 = static_cast<s16>(yn2); return ret; } diff --git a/src/audio_core/cubeb_sink.cpp b/src/audio_core/cubeb_sink.cpp index 097328901..7047ed9cf 100644 --- a/src/audio_core/cubeb_sink.cpp +++ b/src/audio_core/cubeb_sink.cpp @@ -12,6 +12,10 @@ #include "common/ring_buffer.h" #include "core/settings.h" +#ifdef _WIN32 +#include <objbase.h> +#endif + namespace AudioCore { class CubebSinkStream final : public SinkStream { @@ -46,7 +50,7 @@ public: } } - ~CubebSinkStream() { + ~CubebSinkStream() override { if (!ctx) { return; } @@ -75,11 +79,11 @@ public: queue.Push(samples); } - std::size_t SamplesInQueue(u32 num_channels) const override { + std::size_t SamplesInQueue(u32 channel_count) const override { if (!ctx) return 0; - return queue.Size() / num_channels; + return queue.Size() / channel_count; } void Flush() override { @@ -98,7 +102,7 @@ private: u32 num_channels{}; Common::RingBuffer<s16, 0x10000> queue; - std::array<s16, 2> last_frame; + std::array<s16, 2> last_frame{}; std::atomic<bool> should_flush{}; TimeStretcher time_stretch; @@ -108,6 +112,11 @@ private: }; CubebSink::CubebSink(std::string_view target_device_name) { + // Cubeb requires COM to be initialized on the thread calling cubeb_init on Windows +#ifdef _WIN32 + com_init_result = CoInitializeEx(nullptr, COINIT_MULTITHREADED); +#endif + if (cubeb_init(&ctx, "yuzu", nullptr) != CUBEB_OK) { LOG_CRITICAL(Audio_Sink, "cubeb_init failed"); return; @@ -142,6 +151,12 @@ CubebSink::~CubebSink() { } cubeb_destroy(ctx); + +#ifdef _WIN32 + if (SUCCEEDED(com_init_result)) { + CoUninitialize(); + } +#endif } SinkStream& CubebSink::AcquireSinkStream(u32 sample_rate, u32 num_channels, diff --git a/src/audio_core/cubeb_sink.h b/src/audio_core/cubeb_sink.h index efb9d1634..7ce850f47 100644 --- a/src/audio_core/cubeb_sink.h +++ b/src/audio_core/cubeb_sink.h @@ -25,6 +25,10 @@ private: cubeb* ctx{}; cubeb_devid output_device{}; std::vector<SinkStreamPtr> sink_streams; + +#ifdef _WIN32 + u32 
com_init_result = 0; +#endif }; std::vector<std::string> ListCubebSinkDevices(); diff --git a/src/audio_core/stream.cpp b/src/audio_core/stream.cpp index d89ff30b7..4b66a6786 100644 --- a/src/audio_core/stream.cpp +++ b/src/audio_core/stream.cpp @@ -95,7 +95,7 @@ void Stream::PlayNextBuffer() { active_buffer = queued_buffers.front(); queued_buffers.pop(); - VolumeAdjustSamples(active_buffer->Samples()); + VolumeAdjustSamples(active_buffer->GetSamples()); sink_stream.EnqueueSamples(GetNumChannels(), active_buffer->GetSamples()); diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index b0174b445..c538c6415 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -47,6 +47,7 @@ add_custom_command(OUTPUT scm_rev.cpp "${VIDEO_CORE}/shader/decode/integer_set.cpp" "${VIDEO_CORE}/shader/decode/integer_set_predicate.cpp" "${VIDEO_CORE}/shader/decode/memory.cpp" + "${VIDEO_CORE}/shader/decode/texture.cpp" "${VIDEO_CORE}/shader/decode/other.cpp" "${VIDEO_CORE}/shader/decode/predicate_set_predicate.cpp" "${VIDEO_CORE}/shader/decode/predicate_set_register.cpp" diff --git a/src/common/bit_field.h b/src/common/bit_field.h index 21e07925d..7433c39ba 100644 --- a/src/common/bit_field.h +++ b/src/common/bit_field.h @@ -111,12 +111,6 @@ template <std::size_t Position, std::size_t Bits, typename T> struct BitField { private: - // We hide the copy assigment operator here, because the default copy - // assignment would copy the full storage value, rather than just the bits - // relevant to this particular bit field. - // We don't delete it because we want BitField to be trivially copyable. - constexpr BitField& operator=(const BitField&) = default; - // UnderlyingType is T for non-enum types and the underlying type of T if // T is an enumeration. 
Note that T is wrapped within an enable_if in the // former case to workaround compile errors which arise when using @@ -163,9 +157,13 @@ public: BitField(T val) = delete; BitField& operator=(T val) = delete; - // Force default constructor to be created - // so that we can use this within unions - constexpr BitField() = default; + constexpr BitField() noexcept = default; + + constexpr BitField(const BitField&) noexcept = default; + constexpr BitField& operator=(const BitField&) noexcept = default; + + constexpr BitField(BitField&&) noexcept = default; + constexpr BitField& operator=(BitField&&) noexcept = default; constexpr FORCE_INLINE operator T() const { return Value(); diff --git a/src/common/color.h b/src/common/color.h index 0379040be..3a2222077 100644 --- a/src/common/color.h +++ b/src/common/color.h @@ -55,36 +55,36 @@ constexpr u8 Convert8To6(u8 value) { /** * Decode a color stored in RGBA8 format * @param bytes Pointer to encoded source color - * @return Result color decoded as Math::Vec4<u8> + * @return Result color decoded as Common::Vec4<u8> */ -inline Math::Vec4<u8> DecodeRGBA8(const u8* bytes) { +inline Common::Vec4<u8> DecodeRGBA8(const u8* bytes) { return {bytes[3], bytes[2], bytes[1], bytes[0]}; } /** * Decode a color stored in RGB8 format * @param bytes Pointer to encoded source color - * @return Result color decoded as Math::Vec4<u8> + * @return Result color decoded as Common::Vec4<u8> */ -inline Math::Vec4<u8> DecodeRGB8(const u8* bytes) { +inline Common::Vec4<u8> DecodeRGB8(const u8* bytes) { return {bytes[2], bytes[1], bytes[0], 255}; } /** * Decode a color stored in RG8 (aka HILO8) format * @param bytes Pointer to encoded source color - * @return Result color decoded as Math::Vec4<u8> + * @return Result color decoded as Common::Vec4<u8> */ -inline Math::Vec4<u8> DecodeRG8(const u8* bytes) { +inline Common::Vec4<u8> DecodeRG8(const u8* bytes) { return {bytes[1], bytes[0], 0, 255}; } /** * Decode a color stored in RGB565 format * @param bytes Pointer to encoded source color - * @return Result color decoded as Math::Vec4<u8> + * @return Result color decoded as Common::Vec4<u8> */ -inline Math::Vec4<u8> DecodeRGB565(const u8* bytes) { +inline Common::Vec4<u8> DecodeRGB565(const u8* bytes) { u16_le pixel; std::memcpy(&pixel, bytes, sizeof(pixel)); return {Convert5To8((pixel >> 11) & 0x1F), Convert6To8((pixel >> 5) & 0x3F), @@ -94,9 +94,9 @@ inline Math::Vec4<u8> DecodeRGB565(const u8* bytes) { /** * Decode a color stored in RGB5A1 format * @param bytes Pointer to encoded source color - * @return Result color decoded as Math::Vec4<u8> + * @return Result color decoded as Common::Vec4<u8> */ -inline Math::Vec4<u8> DecodeRGB5A1(const u8* bytes) { +inline Common::Vec4<u8> DecodeRGB5A1(const u8* bytes) { u16_le pixel; std::memcpy(&pixel, bytes, sizeof(pixel)); return {Convert5To8((pixel >> 11) & 0x1F), Convert5To8((pixel >> 6) & 0x1F), @@ -106,9 +106,9 @@ inline Math::Vec4<u8> DecodeRGB5A1(const u8* bytes) { /** * Decode a color stored in RGBA4 format * @param bytes Pointer to encoded source color - * @return Result color decoded as Math::Vec4<u8> + * @return Result color decoded as Common::Vec4<u8> */ -inline Math::Vec4<u8> DecodeRGBA4(const u8* bytes) { +inline Common::Vec4<u8> DecodeRGBA4(const u8* bytes) { u16_le pixel; std::memcpy(&pixel, bytes, sizeof(pixel)); return {Convert4To8((pixel >> 12) & 0xF), Convert4To8((pixel >> 8) & 0xF), @@ -138,9 +138,9 @@ inline u32 DecodeD24(const u8* bytes) { /** * Decode a depth value and a stencil value stored in D24S8 format * @param 
bytes Pointer to encoded source values - * @return Resulting values stored as a Math::Vec2 + * @return Resulting values stored as a Common::Vec2 */ -inline Math::Vec2<u32> DecodeD24S8(const u8* bytes) { +inline Common::Vec2<u32> DecodeD24S8(const u8* bytes) { return {static_cast<u32>((bytes[2] << 16) | (bytes[1] << 8) | bytes[0]), bytes[3]}; } @@ -149,7 +149,7 @@ inline Math::Vec2<u32> DecodeD24S8(const u8* bytes) { * @param color Source color to encode * @param bytes Destination pointer to store encoded color */ -inline void EncodeRGBA8(const Math::Vec4<u8>& color, u8* bytes) { +inline void EncodeRGBA8(const Common::Vec4<u8>& color, u8* bytes) { bytes[3] = color.r(); bytes[2] = color.g(); bytes[1] = color.b(); @@ -161,7 +161,7 @@ inline void EncodeRGBA8(const Math::Vec4<u8>& color, u8* bytes) { * @param color Source color to encode * @param bytes Destination pointer to store encoded color */ -inline void EncodeRGB8(const Math::Vec4<u8>& color, u8* bytes) { +inline void EncodeRGB8(const Common::Vec4<u8>& color, u8* bytes) { bytes[2] = color.r(); bytes[1] = color.g(); bytes[0] = color.b(); @@ -172,7 +172,7 @@ inline void EncodeRGB8(const Math::Vec4<u8>& color, u8* bytes) { * @param color Source color to encode * @param bytes Destination pointer to store encoded color */ -inline void EncodeRG8(const Math::Vec4<u8>& color, u8* bytes) { +inline void EncodeRG8(const Common::Vec4<u8>& color, u8* bytes) { bytes[1] = color.r(); bytes[0] = color.g(); } @@ -181,7 +181,7 @@ inline void EncodeRG8(const Math::Vec4<u8>& color, u8* bytes) { * @param color Source color to encode * @param bytes Destination pointer to store encoded color */ -inline void EncodeRGB565(const Math::Vec4<u8>& color, u8* bytes) { +inline void EncodeRGB565(const Common::Vec4<u8>& color, u8* bytes) { const u16_le data = (Convert8To5(color.r()) << 11) | (Convert8To6(color.g()) << 5) | Convert8To5(color.b()); @@ -193,7 +193,7 @@ inline void EncodeRGB565(const Math::Vec4<u8>& color, u8* bytes) { * @param color Source color to encode * @param bytes Destination pointer to store encoded color */ -inline void EncodeRGB5A1(const Math::Vec4<u8>& color, u8* bytes) { +inline void EncodeRGB5A1(const Common::Vec4<u8>& color, u8* bytes) { const u16_le data = (Convert8To5(color.r()) << 11) | (Convert8To5(color.g()) << 6) | (Convert8To5(color.b()) << 1) | Convert8To1(color.a()); @@ -205,7 +205,7 @@ inline void EncodeRGB5A1(const Math::Vec4<u8>& color, u8* bytes) { * @param color Source color to encode * @param bytes Destination pointer to store encoded color */ -inline void EncodeRGBA4(const Math::Vec4<u8>& color, u8* bytes) { +inline void EncodeRGBA4(const Common::Vec4<u8>& color, u8* bytes) { const u16 data = (Convert8To4(color.r()) << 12) | (Convert8To4(color.g()) << 8) | (Convert8To4(color.b()) << 4) | Convert8To4(color.a()); diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp index a5e031189..4462ff3fb 100644 --- a/src/common/logging/backend.cpp +++ b/src/common/logging/backend.cpp @@ -39,10 +39,10 @@ public: Impl(Impl const&) = delete; const Impl& operator=(Impl const&) = delete; - void PushEntry(Entry e) { - std::lock_guard<std::mutex> lock(message_mutex); - message_queue.Push(std::move(e)); - message_cv.notify_one(); + void PushEntry(Class log_class, Level log_level, const char* filename, unsigned int line_num, + const char* function, std::string message) { + message_queue.Push( + CreateEntry(log_class, log_level, filename, line_num, function, std::move(message))); } void AddBackend(std::unique_ptr<Backend> 
backend) { @@ -86,15 +86,13 @@ private: } }; while (true) { - { - std::unique_lock<std::mutex> lock(message_mutex); - message_cv.wait(lock, [&] { return !running || message_queue.Pop(entry); }); - } - if (!running) { + entry = message_queue.PopWait(); + if (entry.final_entry) { break; } write_logs(entry); } + // Drain the logging queue. Only writes out up to MAX_LOGS_TO_WRITE to prevent a case // where a system is repeatedly spamming logs even on close. const int MAX_LOGS_TO_WRITE = filter.IsDebug() ? INT_MAX : 100; @@ -106,18 +104,36 @@ private: } ~Impl() { - running = false; - message_cv.notify_one(); + Entry entry; + entry.final_entry = true; + message_queue.Push(entry); backend_thread.join(); } - std::atomic_bool running{true}; - std::mutex message_mutex, writing_mutex; - std::condition_variable message_cv; + Entry CreateEntry(Class log_class, Level log_level, const char* filename, unsigned int line_nr, + const char* function, std::string message) const { + using std::chrono::duration_cast; + using std::chrono::steady_clock; + + Entry entry; + entry.timestamp = + duration_cast<std::chrono::microseconds>(steady_clock::now() - time_origin); + entry.log_class = log_class; + entry.log_level = log_level; + entry.filename = Common::TrimSourcePath(filename); + entry.line_num = line_nr; + entry.function = function; + entry.message = std::move(message); + + return entry; + } + + std::mutex writing_mutex; std::thread backend_thread; std::vector<std::unique_ptr<Backend>> backends; Common::MPSCQueue<Log::Entry> message_queue; Filter filter; + std::chrono::steady_clock::time_point time_origin{std::chrono::steady_clock::now()}; }; void ConsoleBackend::Write(const Entry& entry) { @@ -276,25 +292,6 @@ const char* GetLevelName(Level log_level) { #undef LVL } -Entry CreateEntry(Class log_class, Level log_level, const char* filename, unsigned int line_nr, - const char* function, std::string message) { - using std::chrono::duration_cast; - using std::chrono::steady_clock; - - static steady_clock::time_point time_origin = steady_clock::now(); - - Entry entry; - entry.timestamp = duration_cast<std::chrono::microseconds>(steady_clock::now() - time_origin); - entry.log_class = log_class; - entry.log_level = log_level; - entry.filename = Common::TrimSourcePath(filename); - entry.line_num = line_nr; - entry.function = function; - entry.message = std::move(message); - - return entry; -} - void SetGlobalFilter(const Filter& filter) { Impl::Instance().SetGlobalFilter(filter); } @@ -319,9 +316,7 @@ void FmtLogMessageImpl(Class log_class, Level log_level, const char* filename, if (!filter.CheckMessage(log_class, log_level)) return; - Entry entry = - CreateEntry(log_class, log_level, filename, line_num, function, fmt::vformat(format, args)); - - instance.PushEntry(std::move(entry)); + instance.PushEntry(log_class, log_level, filename, line_num, function, + fmt::vformat(format, args)); } } // namespace Log diff --git a/src/common/logging/backend.h b/src/common/logging/backend.h index 91bb0c309..fca0267a1 100644 --- a/src/common/logging/backend.h +++ b/src/common/logging/backend.h @@ -27,6 +27,7 @@ struct Entry { unsigned int line_num; std::string function; std::string message; + bool final_entry = false; Entry() = default; Entry(Entry&& o) = default; @@ -134,10 +135,6 @@ const char* GetLogClassName(Class log_class); */ const char* GetLevelName(Level log_level); -/// Creates a log entry by formatting the given source location, and message. 
-Entry CreateEntry(Class log_class, Level log_level, const char* filename, unsigned int line_nr, - const char* function, std::string message); - /** * The global filter will prevent any messages from even being processed if they are filtered. Each * backend can have a filter, but if the level is lower than the global filter, the backend will diff --git a/src/common/math_util.h b/src/common/math_util.h index 94b4394c5..cff3d48c5 100644 --- a/src/common/math_util.h +++ b/src/common/math_util.h @@ -7,7 +7,7 @@ #include <cstdlib> #include <type_traits> -namespace MathUtil { +namespace Common { constexpr float PI = 3.14159265f; @@ -41,4 +41,4 @@ struct Rectangle { } }; -} // namespace MathUtil +} // namespace Common diff --git a/src/common/quaternion.h b/src/common/quaternion.h index c528c0b68..370198ae0 100644 --- a/src/common/quaternion.h +++ b/src/common/quaternion.h @@ -6,12 +6,12 @@ #include "common/vector_math.h" -namespace Math { +namespace Common { template <typename T> class Quaternion { public: - Math::Vec3<T> xyz; + Vec3<T> xyz; T w{}; Quaternion<decltype(-T{})> Inverse() const { @@ -38,12 +38,12 @@ public: }; template <typename T> -auto QuaternionRotate(const Quaternion<T>& q, const Math::Vec3<T>& v) { +auto QuaternionRotate(const Quaternion<T>& q, const Vec3<T>& v) { return v + 2 * Cross(q.xyz, Cross(q.xyz, v) + v * q.w); } -inline Quaternion<float> MakeQuaternion(const Math::Vec3<float>& axis, float angle) { +inline Quaternion<float> MakeQuaternion(const Vec3<float>& axis, float angle) { return {axis * std::sin(angle / 2), std::cos(angle / 2)}; } -} // namespace Math +} // namespace Common diff --git a/src/common/swap.h b/src/common/swap.h index 32af0b6ac..0e219747f 100644 --- a/src/common/swap.h +++ b/src/common/swap.h @@ -28,8 +28,8 @@ #include <cstring> #include "common/common_types.h" -// GCC 4.6+ -#if __GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +// GCC +#ifdef __GNUC__ #if __BYTE_ORDER__ && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) && !defined(COMMON_LITTLE_ENDIAN) #define COMMON_LITTLE_ENDIAN 1 @@ -38,7 +38,7 @@ #endif // LLVM/clang -#elif __clang__ +#elif defined(__clang__) #if __LITTLE_ENDIAN__ && !defined(COMMON_LITTLE_ENDIAN) #define COMMON_LITTLE_ENDIAN 1 diff --git a/src/common/threadsafe_queue.h b/src/common/threadsafe_queue.h index f553efdc9..821e8536a 100644 --- a/src/common/threadsafe_queue.h +++ b/src/common/threadsafe_queue.h @@ -8,6 +8,7 @@ // single reader, single writer queue #include <atomic> +#include <condition_variable> #include <cstddef> #include <mutex> #include <utility> @@ -45,6 +46,7 @@ public: ElementPtr* new_ptr = new ElementPtr(); write_ptr->next.store(new_ptr, std::memory_order_release); write_ptr = new_ptr; + cv.notify_one(); ++size; } @@ -74,6 +76,16 @@ public: return true; } + T PopWait() { + if (Empty()) { + std::unique_lock<std::mutex> lock(cv_mutex); + cv.wait(lock, [this]() { return !Empty(); }); + } + T t; + Pop(t); + return t; + } + // not thread-safe void Clear() { size.store(0); @@ -101,6 +113,8 @@ private: ElementPtr* write_ptr; ElementPtr* read_ptr; std::atomic_size_t size{0}; + std::mutex cv_mutex; + std::condition_variable cv; }; // a simple thread-safe, @@ -135,6 +149,10 @@ public: return spsc_queue.Pop(t); } + T PopWait() { + return spsc_queue.PopWait(); + } + // not thread-safe void Clear() { spsc_queue.Clear(); diff --git a/src/common/vector_math.h b/src/common/vector_math.h index 8feb49941..429485329 100644 --- a/src/common/vector_math.h +++ b/src/common/vector_math.h @@ -33,7 +33,7 @@ #include <cmath> #include 
<type_traits> -namespace Math { +namespace Common { template <typename T> class Vec2; @@ -690,4 +690,4 @@ constexpr Vec4<T> MakeVec(const T& x, const Vec3<T>& yzw) { return MakeVec(x, yzw[0], yzw[1], yzw[2]); } -} // namespace Math +} // namespace Common diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index f61bcd40d..8ccb2d5f0 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -217,6 +217,7 @@ add_library(core STATIC hle/service/audio/audren_u.h hle/service/audio/codecctl.cpp hle/service/audio/codecctl.h + hle/service/audio/errors.h hle/service/audio/hwopus.cpp hle/service/audio/hwopus.h hle/service/bcat/bcat.cpp @@ -400,6 +401,10 @@ add_library(core STATIC hle/service/time/time.h hle/service/usb/usb.cpp hle/service/usb/usb.h + hle/service/vi/display/vi_display.cpp + hle/service/vi/display/vi_display.h + hle/service/vi/layer/vi_layer.cpp + hle/service/vi/layer/vi_layer.h hle/service/vi/vi.cpp hle/service/vi/vi.h hle/service/vi/vi_m.cpp diff --git a/src/core/core.cpp b/src/core/core.cpp index 8aa0932c5..89b3fb418 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -36,7 +36,8 @@ #include "frontend/applets/software_keyboard.h" #include "frontend/applets/web_browser.h" #include "video_core/debug_utils/debug_utils.h" -#include "video_core/gpu.h" +#include "video_core/gpu_asynch.h" +#include "video_core/gpu_synch.h" #include "video_core/renderer_base.h" #include "video_core/video_core.h" @@ -78,6 +79,7 @@ FileSys::VirtualFile GetGameFileFromPath(const FileSys::VirtualFilesystem& vfs, return vfs->OpenFile(path, FileSys::Mode::Read); } struct System::Impl { + explicit Impl(System& system) : kernel{system} {} Cpu& CurrentCpuCore() { return cpu_core_manager.GetCurrentCore(); @@ -95,7 +97,7 @@ struct System::Impl { LOG_DEBUG(HW_Memory, "initialized OK"); core_timing.Initialize(); - kernel.Initialize(core_timing); + kernel.Initialize(); const auto current_time = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch()); @@ -114,7 +116,7 @@ struct System::Impl { if (web_browser == nullptr) web_browser = std::make_unique<Core::Frontend::DefaultWebBrowserApplet>(); - auto main_process = Kernel::Process::Create(kernel, "main"); + auto main_process = Kernel::Process::Create(system, "main"); kernel.MakeCurrentProcess(main_process.get()); telemetry_session = std::make_unique<Core::TelemetrySession>(); @@ -128,10 +130,16 @@ struct System::Impl { return ResultStatus::ErrorVideoCore; } - gpu_core = std::make_unique<Tegra::GPU>(renderer->Rasterizer()); + is_powered_on = true; + + if (Settings::values.use_asynchronous_gpu_emulation) { + gpu_core = std::make_unique<VideoCommon::GPUAsynch>(system, *renderer); + } else { + gpu_core = std::make_unique<VideoCommon::GPUSynch>(system, *renderer); + } cpu_core_manager.Initialize(system); - is_powered_on = true; + LOG_DEBUG(Core, "Initialized OK"); // Reset counters and set time origin to current frame @@ -182,13 +190,13 @@ struct System::Impl { void Shutdown() { // Log last frame performance stats - auto perf_results = GetAndResetPerfStats(); - Telemetry().AddField(Telemetry::FieldType::Performance, "Shutdown_EmulationSpeed", - perf_results.emulation_speed * 100.0); - Telemetry().AddField(Telemetry::FieldType::Performance, "Shutdown_Framerate", - perf_results.game_fps); - Telemetry().AddField(Telemetry::FieldType::Performance, "Shutdown_Frametime", - perf_results.frametime * 1000.0); + const auto perf_results = GetAndResetPerfStats(); + 
telemetry_session->AddField(Telemetry::FieldType::Performance, "Shutdown_EmulationSpeed", + perf_results.emulation_speed * 100.0); + telemetry_session->AddField(Telemetry::FieldType::Performance, "Shutdown_Framerate", + perf_results.game_fps); + telemetry_session->AddField(Telemetry::FieldType::Performance, "Shutdown_Frametime", + perf_results.frametime * 1000.0); is_powered_on = false; @@ -265,7 +273,7 @@ struct System::Impl { Core::FrameLimiter frame_limiter; }; -System::System() : impl{std::make_unique<Impl>()} {} +System::System() : impl{std::make_unique<Impl>(*this)} {} System::~System() = default; Cpu& System::CurrentCpuCore() { diff --git a/src/core/core.h b/src/core/core.h index d720013f7..ba76a41d8 100644 --- a/src/core/core.h +++ b/src/core/core.h @@ -293,10 +293,6 @@ inline ARM_Interface& CurrentArmInterface() { return System::GetInstance().CurrentArmInterface(); } -inline TelemetrySession& Telemetry() { - return System::GetInstance().TelemetrySession(); -} - inline Kernel::Process* CurrentProcess() { return System::GetInstance().CurrentProcess(); } diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp index 54aa21a3a..1eefed6d0 100644 --- a/src/core/core_cpu.cpp +++ b/src/core/core_cpu.cpp @@ -11,6 +11,7 @@ #endif #include "core/arm/exclusive_monitor.h" #include "core/arm/unicorn/arm_unicorn.h" +#include "core/core.h" #include "core/core_cpu.h" #include "core/core_timing.h" #include "core/hle/kernel/scheduler.h" @@ -49,9 +50,9 @@ bool CpuBarrier::Rendezvous() { return false; } -Cpu::Cpu(Timing::CoreTiming& core_timing, ExclusiveMonitor& exclusive_monitor, - CpuBarrier& cpu_barrier, std::size_t core_index) - : cpu_barrier{cpu_barrier}, core_timing{core_timing}, core_index{core_index} { +Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier, + std::size_t core_index) + : cpu_barrier{cpu_barrier}, core_timing{system.CoreTiming()}, core_index{core_index} { if (Settings::values.use_cpu_jit) { #ifdef ARCHITECTURE_x86_64 arm_interface = std::make_unique<ARM_Dynarmic>(core_timing, exclusive_monitor, core_index); @@ -63,7 +64,7 @@ Cpu::Cpu(Timing::CoreTiming& core_timing, ExclusiveMonitor& exclusive_monitor, arm_interface = std::make_unique<ARM_Unicorn>(core_timing); } - scheduler = std::make_unique<Kernel::Scheduler>(*arm_interface); + scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface); } Cpu::~Cpu() = default; diff --git a/src/core/core_cpu.h b/src/core/core_cpu.h index e2204c6b0..7589beb8c 100644 --- a/src/core/core_cpu.h +++ b/src/core/core_cpu.h @@ -15,6 +15,10 @@ namespace Kernel { class Scheduler; } +namespace Core { +class System; +} + namespace Core::Timing { class CoreTiming; } @@ -45,8 +49,8 @@ private: class Cpu { public: - Cpu(Timing::CoreTiming& core_timing, ExclusiveMonitor& exclusive_monitor, - CpuBarrier& cpu_barrier, std::size_t core_index); + Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier, + std::size_t core_index); ~Cpu(); void RunLoop(bool tight_loop = true); diff --git a/src/core/cpu_core_manager.cpp b/src/core/cpu_core_manager.cpp index 2ddb3610d..93bc5619c 100644 --- a/src/core/cpu_core_manager.cpp +++ b/src/core/cpu_core_manager.cpp @@ -27,8 +27,7 @@ void CpuCoreManager::Initialize(System& system) { exclusive_monitor = Cpu::MakeExclusiveMonitor(cores.size()); for (std::size_t index = 0; index < cores.size(); ++index) { - cores[index] = - std::make_unique<Cpu>(system.CoreTiming(), *exclusive_monitor, *barrier, index); + cores[index] = std::make_unique<Cpu>(system, 
*exclusive_monitor, *barrier, index); } // Create threads for CPU cores 1-3, and build thread_to_cpu map diff --git a/src/core/crypto/key_manager.cpp b/src/core/crypto/key_manager.cpp index ca12fb4ab..dfac9a4b3 100644 --- a/src/core/crypto/key_manager.cpp +++ b/src/core/crypto/key_manager.cpp @@ -398,7 +398,8 @@ static bool ValidCryptoRevisionString(std::string_view base, size_t begin, size_ } void KeyManager::LoadFromFile(const std::string& filename, bool is_title_keys) { - std::ifstream file(filename); + std::ifstream file; + OpenFStream(file, filename, std::ios_base::in); if (!file.is_open()) return; diff --git a/src/core/file_sys/vfs_vector.cpp b/src/core/file_sys/vfs_vector.cpp index 515626658..75fc04302 100644 --- a/src/core/file_sys/vfs_vector.cpp +++ b/src/core/file_sys/vfs_vector.cpp @@ -47,7 +47,7 @@ std::size_t VectorVfsFile::Write(const u8* data_, std::size_t length, std::size_ if (offset + length > data.size()) data.resize(offset + length); const auto write = std::min(length, data.size() - offset); - std::memcpy(data.data(), data_, write); + std::memcpy(data.data() + offset, data_, write); return write; } diff --git a/src/core/frontend/emu_window.cpp b/src/core/frontend/emu_window.cpp index 9dd493efb..e29afd630 100644 --- a/src/core/frontend/emu_window.cpp +++ b/src/core/frontend/emu_window.cpp @@ -67,7 +67,7 @@ static bool IsWithinTouchscreen(const Layout::FramebufferLayout& layout, unsigne framebuffer_x >= layout.screen.left && framebuffer_x < layout.screen.right); } -std::tuple<unsigned, unsigned> EmuWindow::ClipToTouchScreen(unsigned new_x, unsigned new_y) { +std::tuple<unsigned, unsigned> EmuWindow::ClipToTouchScreen(unsigned new_x, unsigned new_y) const { new_x = std::max(new_x, framebuffer_layout.screen.left); new_x = std::min(new_x, framebuffer_layout.screen.right - 1); diff --git a/src/core/frontend/emu_window.h b/src/core/frontend/emu_window.h index 7006a37b3..d0bcb4660 100644 --- a/src/core/frontend/emu_window.h +++ b/src/core/frontend/emu_window.h @@ -166,7 +166,7 @@ private: /** * Clip the provided coordinates to be inside the touchscreen area. 
*/ - std::tuple<unsigned, unsigned> ClipToTouchScreen(unsigned new_x, unsigned new_y); + std::tuple<unsigned, unsigned> ClipToTouchScreen(unsigned new_x, unsigned new_y) const; }; } // namespace Core::Frontend diff --git a/src/core/frontend/framebuffer_layout.cpp b/src/core/frontend/framebuffer_layout.cpp index f8662d193..a1357179f 100644 --- a/src/core/frontend/framebuffer_layout.cpp +++ b/src/core/frontend/framebuffer_layout.cpp @@ -12,12 +12,12 @@ namespace Layout { // Finds the largest size subrectangle contained in window area that is confined to the aspect ratio template <class T> -static MathUtil::Rectangle<T> maxRectangle(MathUtil::Rectangle<T> window_area, - float screen_aspect_ratio) { +static Common::Rectangle<T> MaxRectangle(Common::Rectangle<T> window_area, + float screen_aspect_ratio) { float scale = std::min(static_cast<float>(window_area.GetWidth()), window_area.GetHeight() / screen_aspect_ratio); - return MathUtil::Rectangle<T>{0, 0, static_cast<T>(std::round(scale)), - static_cast<T>(std::round(scale * screen_aspect_ratio))}; + return Common::Rectangle<T>{0, 0, static_cast<T>(std::round(scale)), + static_cast<T>(std::round(scale * screen_aspect_ratio))}; } FramebufferLayout DefaultFrameLayout(unsigned width, unsigned height) { @@ -29,8 +29,8 @@ FramebufferLayout DefaultFrameLayout(unsigned width, unsigned height) { const float emulation_aspect_ratio{static_cast<float>(ScreenUndocked::Height) / ScreenUndocked::Width}; - MathUtil::Rectangle<unsigned> screen_window_area{0, 0, width, height}; - MathUtil::Rectangle<unsigned> screen = maxRectangle(screen_window_area, emulation_aspect_ratio); + Common::Rectangle<unsigned> screen_window_area{0, 0, width, height}; + Common::Rectangle<unsigned> screen = MaxRectangle(screen_window_area, emulation_aspect_ratio); float window_aspect_ratio = static_cast<float>(height) / width; diff --git a/src/core/frontend/framebuffer_layout.h b/src/core/frontend/framebuffer_layout.h index e06647794..c2c63d08c 100644 --- a/src/core/frontend/framebuffer_layout.h +++ b/src/core/frontend/framebuffer_layout.h @@ -16,7 +16,7 @@ struct FramebufferLayout { unsigned width{ScreenUndocked::Width}; unsigned height{ScreenUndocked::Height}; - MathUtil::Rectangle<unsigned> screen; + Common::Rectangle<unsigned> screen; /** * Returns the ration of pixel size of the screen, compared to the native size of the undocked diff --git a/src/core/frontend/input.h b/src/core/frontend/input.h index 16fdcd376..7c11d7546 100644 --- a/src/core/frontend/input.h +++ b/src/core/frontend/input.h @@ -124,7 +124,7 @@ using AnalogDevice = InputDevice<std::tuple<float, float>>; * Orientation is determined by right-hand rule. * Units: deg/sec */ -using MotionDevice = InputDevice<std::tuple<Math::Vec3<float>, Math::Vec3<float>>>; +using MotionDevice = InputDevice<std::tuple<Common::Vec3<float>, Common::Vec3<float>>>; /** * A touch device is an input device that returns a tuple of two floats and a bool. 
The floats are diff --git a/src/core/hle/ipc.h b/src/core/hle/ipc.h index ed84197b3..455d1f346 100644 --- a/src/core/hle/ipc.h +++ b/src/core/hle/ipc.h @@ -4,10 +4,10 @@ #pragma once +#include "common/bit_field.h" +#include "common/common_funcs.h" #include "common/common_types.h" #include "common/swap.h" -#include "core/hle/kernel/errors.h" -#include "core/memory.h" namespace IPC { diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h index 90f276ee8..a1e4be070 100644 --- a/src/core/hle/ipc_helpers.h +++ b/src/core/hle/ipc_helpers.h @@ -19,9 +19,12 @@ #include "core/hle/kernel/hle_ipc.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/server_session.h" +#include "core/hle/result.h" namespace IPC { +constexpr ResultCode ERR_REMOTE_PROCESS_DEAD{ErrorModule::HIPC, 301}; + class RequestHelperBase { protected: Kernel::HLERequestContext* context = nullptr; @@ -350,7 +353,7 @@ public: template <class T> std::shared_ptr<T> PopIpcInterface() { ASSERT(context->Session()->IsDomain()); - ASSERT(context->GetDomainMessageHeader()->input_object_count > 0); + ASSERT(context->GetDomainMessageHeader().input_object_count > 0); return context->GetDomainRequestHandler<T>(Pop<u32>() - 1); } }; @@ -362,6 +365,11 @@ inline u32 RequestParser::Pop() { return cmdbuf[index++]; } +template <> +inline s32 RequestParser::Pop() { + return static_cast<s32>(Pop<u32>()); +} + template <typename T> void RequestParser::PopRaw(T& value) { std::memcpy(&value, cmdbuf + index, sizeof(T)); @@ -393,6 +401,16 @@ inline u64 RequestParser::Pop() { } template <> +inline s8 RequestParser::Pop() { + return static_cast<s8>(Pop<u8>()); +} + +template <> +inline s16 RequestParser::Pop() { + return static_cast<s16>(Pop<u16>()); +} + +template <> inline s64 RequestParser::Pop() { return static_cast<s64>(Pop<u64>()); } diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index 57157beb4..352190da8 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -9,6 +9,7 @@ #include "common/common_types.h" #include "core/core.h" #include "core/core_cpu.h" +#include "core/hle/kernel/address_arbiter.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" @@ -18,58 +19,15 @@ #include "core/memory.h" namespace Kernel { -namespace AddressArbiter { - -// Performs actual address waiting logic. -static ResultCode WaitForAddress(VAddr address, s64 timeout) { - SharedPtr<Thread> current_thread = GetCurrentThread(); - current_thread->SetArbiterWaitAddress(address); - current_thread->SetStatus(ThreadStatus::WaitArb); - current_thread->InvalidateWakeupCallback(); - - current_thread->WakeAfterDelay(timeout); - - Core::System::GetInstance().CpuCore(current_thread->GetProcessorID()).PrepareReschedule(); - return RESULT_TIMEOUT; -} - -// Gets the threads waiting on an address. -static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) { - const auto RetrieveWaitingThreads = [](std::size_t core_index, - std::vector<SharedPtr<Thread>>& waiting_threads, - VAddr arb_addr) { - const auto& scheduler = Core::System::GetInstance().Scheduler(core_index); - const auto& thread_list = scheduler.GetThreadList(); - - for (const auto& thread : thread_list) { - if (thread->GetArbiterWaitAddress() == arb_addr) - waiting_threads.push_back(thread); - } - }; - - // Retrieve all threads that are waiting for this address. 
- std::vector<SharedPtr<Thread>> threads; - RetrieveWaitingThreads(0, threads, address); - RetrieveWaitingThreads(1, threads, address); - RetrieveWaitingThreads(2, threads, address); - RetrieveWaitingThreads(3, threads, address); - - // Sort them by priority, such that the highest priority ones come first. - std::sort(threads.begin(), threads.end(), - [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) { - return lhs->GetPriority() < rhs->GetPriority(); - }); - - return threads; -} - +namespace { // Wake up num_to_wake (or all) threads in a vector. -static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) { +void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) { // Only process up to 'target' threads, unless 'target' is <= 0, in which case process // them all. std::size_t last = waiting_threads.size(); - if (num_to_wake > 0) + if (num_to_wake > 0) { last = num_to_wake; + } // Signal the waiting threads. for (std::size_t i = 0; i < last; i++) { @@ -79,42 +37,55 @@ static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num waiting_threads[i]->ResumeFromWait(); } } +} // Anonymous namespace + +AddressArbiter::AddressArbiter(Core::System& system) : system{system} {} +AddressArbiter::~AddressArbiter() = default; + +ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value, + s32 num_to_wake) { + switch (type) { + case SignalType::Signal: + return SignalToAddressOnly(address, num_to_wake); + case SignalType::IncrementAndSignalIfEqual: + return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake); + case SignalType::ModifyByWaitingCountAndSignalIfEqual: + return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake); + default: + return ERR_INVALID_ENUM_VALUE; + } +} -// Signals an address being waited on. -ResultCode SignalToAddress(VAddr address, s32 num_to_wake) { - std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address); - +ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) { + const std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address); WakeThreads(waiting_threads, num_to_wake); return RESULT_SUCCESS; } -// Signals an address being waited on and increments its value if equal to the value argument. -ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake) { +ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, + s32 num_to_wake) { // Ensure that we can write to the address. if (!Memory::IsValidVirtualAddress(address)) { return ERR_INVALID_ADDRESS_STATE; } - if (static_cast<s32>(Memory::Read32(address)) == value) { - Memory::Write32(address, static_cast<u32>(value + 1)); - } else { + if (static_cast<s32>(Memory::Read32(address)) != value) { return ERR_INVALID_STATE; } - return SignalToAddress(address, num_to_wake); + Memory::Write32(address, static_cast<u32>(value + 1)); + return SignalToAddressOnly(address, num_to_wake); } -// Signals an address being waited on and modifies its value based on waiting thread count if equal -// to the value argument. -ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, - s32 num_to_wake) { +ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, + s32 num_to_wake) { // Ensure that we can write to the address. 
if (!Memory::IsValidVirtualAddress(address)) { return ERR_INVALID_ADDRESS_STATE; } // Get threads waiting on the address. - std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address); + const std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address); // Determine the modified value depending on the waiting count. s32 updated_value; @@ -126,41 +97,54 @@ ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 valu updated_value = value; } - if (static_cast<s32>(Memory::Read32(address)) == value) { - Memory::Write32(address, static_cast<u32>(updated_value)); - } else { + if (static_cast<s32>(Memory::Read32(address)) != value) { return ERR_INVALID_STATE; } + Memory::Write32(address, static_cast<u32>(updated_value)); WakeThreads(waiting_threads, num_to_wake); return RESULT_SUCCESS; } -// Waits on an address if the value passed is less than the argument value, optionally decrementing. -ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, bool should_decrement) { +ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value, + s64 timeout_ns) { + switch (type) { + case ArbitrationType::WaitIfLessThan: + return WaitForAddressIfLessThan(address, value, timeout_ns, false); + case ArbitrationType::DecrementAndWaitIfLessThan: + return WaitForAddressIfLessThan(address, value, timeout_ns, true); + case ArbitrationType::WaitIfEqual: + return WaitForAddressIfEqual(address, value, timeout_ns); + default: + return ERR_INVALID_ENUM_VALUE; + } +} + +ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, + bool should_decrement) { // Ensure that we can read the address. if (!Memory::IsValidVirtualAddress(address)) { return ERR_INVALID_ADDRESS_STATE; } - s32 cur_value = static_cast<s32>(Memory::Read32(address)); - if (cur_value < value) { - if (should_decrement) { - Memory::Write32(address, static_cast<u32>(cur_value - 1)); - } - } else { + const s32 cur_value = static_cast<s32>(Memory::Read32(address)); + if (cur_value >= value) { return ERR_INVALID_STATE; } + + if (should_decrement) { + Memory::Write32(address, static_cast<u32>(cur_value - 1)); + } + // Short-circuit without rescheduling, if timeout is zero. if (timeout == 0) { return RESULT_TIMEOUT; } - return WaitForAddress(address, timeout); + return WaitForAddressImpl(address, timeout); } -// Waits on an address if the value passed is equal to the argument value. -ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { +ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { // Ensure that we can read the address. 
if (!Memory::IsValidVirtualAddress(address)) { return ERR_INVALID_ADDRESS_STATE; @@ -174,7 +158,48 @@ ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { return RESULT_TIMEOUT; } - return WaitForAddress(address, timeout); + return WaitForAddressImpl(address, timeout); +} + +ResultCode AddressArbiter::WaitForAddressImpl(VAddr address, s64 timeout) { + SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread(); + current_thread->SetArbiterWaitAddress(address); + current_thread->SetStatus(ThreadStatus::WaitArb); + current_thread->InvalidateWakeupCallback(); + + current_thread->WakeAfterDelay(timeout); + + system.CpuCore(current_thread->GetProcessorID()).PrepareReschedule(); + return RESULT_TIMEOUT; +} + +std::vector<SharedPtr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(VAddr address) const { + const auto RetrieveWaitingThreads = [this](std::size_t core_index, + std::vector<SharedPtr<Thread>>& waiting_threads, + VAddr arb_addr) { + const auto& scheduler = system.Scheduler(core_index); + const auto& thread_list = scheduler.GetThreadList(); + + for (const auto& thread : thread_list) { + if (thread->GetArbiterWaitAddress() == arb_addr) { + waiting_threads.push_back(thread); + } + } + }; + + // Retrieve all threads that are waiting for this address. + std::vector<SharedPtr<Thread>> threads; + RetrieveWaitingThreads(0, threads, address); + RetrieveWaitingThreads(1, threads, address); + RetrieveWaitingThreads(2, threads, address); + RetrieveWaitingThreads(3, threads, address); + + // Sort them by priority, such that the highest priority ones come first. + std::sort(threads.begin(), threads.end(), + [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) { + return lhs->GetPriority() < rhs->GetPriority(); + }); + + return threads; } -} // namespace AddressArbiter } // namespace Kernel diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h index e3657b8e9..ed0d0e69f 100644 --- a/src/core/hle/kernel/address_arbiter.h +++ b/src/core/hle/kernel/address_arbiter.h @@ -4,31 +4,77 @@ #pragma once +#include <vector> + #include "common/common_types.h" +#include "core/hle/kernel/object.h" union ResultCode; +namespace Core { +class System; +} + namespace Kernel { -namespace AddressArbiter { -enum class ArbitrationType { - WaitIfLessThan = 0, - DecrementAndWaitIfLessThan = 1, - WaitIfEqual = 2, -}; +class Thread; -enum class SignalType { - Signal = 0, - IncrementAndSignalIfEqual = 1, - ModifyByWaitingCountAndSignalIfEqual = 2, -}; +class AddressArbiter { +public: + enum class ArbitrationType { + WaitIfLessThan = 0, + DecrementAndWaitIfLessThan = 1, + WaitIfEqual = 2, + }; + + enum class SignalType { + Signal = 0, + IncrementAndSignalIfEqual = 1, + ModifyByWaitingCountAndSignalIfEqual = 2, + }; + + explicit AddressArbiter(Core::System& system); + ~AddressArbiter(); + + AddressArbiter(const AddressArbiter&) = delete; + AddressArbiter& operator=(const AddressArbiter&) = delete; + + AddressArbiter(AddressArbiter&&) = default; + AddressArbiter& operator=(AddressArbiter&&) = delete; + + /// Signals an address being waited on with a particular signaling type. 
+ ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake); -ResultCode SignalToAddress(VAddr address, s32 num_to_wake); -ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake); -ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake); + /// Waits on an address with a particular arbitration type. + ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns); -ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, bool should_decrement); -ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout); -} // namespace AddressArbiter +private: + /// Signals an address being waited on. + ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake); + + /// Signals an address being waited on and increments its value if equal to the value argument. + ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake); + + /// Signals an address being waited on and modifies its value based on waiting thread count if + /// equal to the value argument. + ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, + s32 num_to_wake); + + /// Waits on an address if the value passed is less than the argument value, + /// optionally decrementing. + ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, + bool should_decrement); + + /// Waits on an address if the value passed is equal to the argument value. + ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout); + + // Waits on the given address with a timeout in nanoseconds + ResultCode WaitForAddressImpl(VAddr address, s64 timeout); + + // Gets the threads waiting on an address. + std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const; + + Core::System& system; +}; } // namespace Kernel diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp index d4c91d529..aa432658e 100644 --- a/src/core/hle/kernel/client_port.cpp +++ b/src/core/hle/kernel/client_port.cpp @@ -33,10 +33,11 @@ ResultVal<SharedPtr<ClientSession>> ClientPort::Connect() { // Create a new session pair, let the created sessions inherit the parent port's HLE handler. auto sessions = ServerSession::CreateSessionPair(kernel, server_port->GetName(), this); - if (server_port->hle_handler) - server_port->hle_handler->ClientConnected(std::get<SharedPtr<ServerSession>>(sessions)); - else - server_port->pending_sessions.push_back(std::get<SharedPtr<ServerSession>>(sessions)); + if (server_port->HasHLEHandler()) { + server_port->GetHLEHandler()->ClientConnected(std::get<SharedPtr<ServerSession>>(sessions)); + } else { + server_port->AppendPendingSession(std::get<SharedPtr<ServerSession>>(sessions)); + } // Wake the threads waiting on the ServerPort server_port->WakeupAllWaitingThreads(); diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp index 704e82824..c17baa50a 100644 --- a/src/core/hle/kernel/client_session.cpp +++ b/src/core/hle/kernel/client_session.cpp @@ -17,21 +17,11 @@ ClientSession::~ClientSession() { // This destructor will be called automatically when the last ClientSession handle is closed by // the emulated application. 
- // Local references to ServerSession and SessionRequestHandler are necessary to guarantee they + // A local reference to the ServerSession is necessary to guarantee it // will be kept alive until after ClientDisconnected() returns. SharedPtr<ServerSession> server = parent->server; if (server) { - std::shared_ptr<SessionRequestHandler> hle_handler = server->hle_handler; - if (hle_handler) - hle_handler->ClientDisconnected(server); - - // TODO(Subv): Force a wake up of all the ServerSession's waiting threads and set - // their WaitSynchronization result to 0xC920181A. - - // Clean up the list of client threads with pending requests, they are unneeded now that the - // client endpoint is closed. - server->pending_requesting_threads.clear(); - server->currently_handling = nullptr; + server->ClientDisconnected(); } parent->client = nullptr; diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h index 4c18de69c..b1f39aad7 100644 --- a/src/core/hle/kernel/client_session.h +++ b/src/core/hle/kernel/client_session.h @@ -36,14 +36,15 @@ public: ResultCode SendSyncRequest(SharedPtr<Thread> thread); - std::string name; ///< Name of client port (optional) +private: + explicit ClientSession(KernelCore& kernel); + ~ClientSession() override; /// The parent session, which links to the server endpoint. std::shared_ptr<Session> parent; -private: - explicit ClientSession(KernelCore& kernel); - ~ClientSession() override; + /// Name of the client session (optional) + std::string name; }; } // namespace Kernel diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h index d17eb0cb6..8097b3863 100644 --- a/src/core/hle/kernel/errors.h +++ b/src/core/hle/kernel/errors.h @@ -14,6 +14,7 @@ constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7}; constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14}; constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101}; constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102}; +constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104}; constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105}; constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106}; constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108}; diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp index c8acde5b1..bdfaa977f 100644 --- a/src/core/hle/kernel/handle_table.cpp +++ b/src/core/hle/kernel/handle_table.cpp @@ -14,32 +14,47 @@ namespace Kernel { namespace { constexpr u16 GetSlot(Handle handle) { - return handle >> 15; + return static_cast<u16>(handle >> 15); } constexpr u16 GetGeneration(Handle handle) { - return handle & 0x7FFF; + return static_cast<u16>(handle & 0x7FFF); } } // Anonymous namespace HandleTable::HandleTable() { - next_generation = 1; Clear(); } HandleTable::~HandleTable() = default; +ResultCode HandleTable::SetSize(s32 handle_table_size) { + if (static_cast<u32>(handle_table_size) > MAX_COUNT) { + return ERR_OUT_OF_MEMORY; + } + + // Values less than or equal to zero indicate to use the maximum allowable + // size for the handle table in the actual kernel, so we ignore the given + // value in that case, since we assume this by default unless this function + // is called. 
+ if (handle_table_size > 0) { + table_size = static_cast<u16>(handle_table_size); + } + + return RESULT_SUCCESS; +} + ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) { DEBUG_ASSERT(obj != nullptr); - u16 slot = next_free_slot; - if (slot >= generations.size()) { + const u16 slot = next_free_slot; + if (slot >= table_size) { LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use."); return ERR_HANDLE_TABLE_FULL; } next_free_slot = generations[slot]; - u16 generation = next_generation++; + const u16 generation = next_generation++; // Overflow count so it fits in the 15 bits dedicated to the generation in the handle. // Horizon OS uses zero to represent an invalid handle, so skip to 1. @@ -64,10 +79,11 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) { } ResultCode HandleTable::Close(Handle handle) { - if (!IsValid(handle)) + if (!IsValid(handle)) { return ERR_INVALID_HANDLE; + } - u16 slot = GetSlot(handle); + const u16 slot = GetSlot(handle); objects[slot] = nullptr; @@ -77,10 +93,10 @@ ResultCode HandleTable::Close(Handle handle) { } bool HandleTable::IsValid(Handle handle) const { - std::size_t slot = GetSlot(handle); - u16 generation = GetGeneration(handle); + const std::size_t slot = GetSlot(handle); + const u16 generation = GetGeneration(handle); - return slot < MAX_COUNT && objects[slot] != nullptr && generations[slot] == generation; + return slot < table_size && objects[slot] != nullptr && generations[slot] == generation; } SharedPtr<Object> HandleTable::GetGeneric(Handle handle) const { @@ -97,7 +113,7 @@ SharedPtr<Object> HandleTable::GetGeneric(Handle handle) const { } void HandleTable::Clear() { - for (u16 i = 0; i < MAX_COUNT; ++i) { + for (u16 i = 0; i < table_size; ++i) { generations[i] = i + 1; objects[i] = nullptr; } diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h index 89a3bc740..44901391b 100644 --- a/src/core/hle/kernel/handle_table.h +++ b/src/core/hle/kernel/handle_table.h @@ -50,6 +50,20 @@ public: ~HandleTable(); /** + * Sets the number of handles that may be in use at one time + * for this handle table. + * + * @param handle_table_size The desired size to limit the handle table to. + * + * @returns an error code indicating if initialization was successful. + * If initialization was not successful, then ERR_OUT_OF_MEMORY + * will be returned. + * + * @pre handle_table_size must be within the range [0, 1024] + */ + ResultCode SetSize(s32 handle_table_size); + + /** * Allocates a handle for the given object. * @return The created Handle or one of the following errors: * - `ERR_HANDLE_TABLE_FULL`: the maximum number of handles has been exceeded. @@ -104,13 +118,20 @@ private: std::array<u16, MAX_COUNT> generations; /** + * The limited size of the handle table. This can be specified by process + * capabilities in order to restrict the overall number of handles that + * can be created in a process instance + */ + u16 table_size = static_cast<u16>(MAX_COUNT); + + /** * Global counter of the number of created handles. Stored in `generations` when a handle is * created, and wraps around to 1 when it hits 0x8000. */ - u16 next_generation; + u16 next_generation = 1; /// Head of the free slots linked list. 
- u16 next_free_slot; + u16 next_free_slot = 0; }; } // namespace Kernel diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 5dd855db8..fe710eb6e 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -86,7 +86,7 @@ HLERequestContext::~HLERequestContext() = default; void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, bool incoming) { IPC::RequestParser rp(src_cmdbuf); - command_header = std::make_shared<IPC::CommandHeader>(rp.PopRaw<IPC::CommandHeader>()); + command_header = rp.PopRaw<IPC::CommandHeader>(); if (command_header->type == IPC::CommandType::Close) { // Close does not populate the rest of the IPC header @@ -95,8 +95,7 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_ // If handle descriptor is present, add size of it if (command_header->enable_handle_descriptor) { - handle_descriptor_header = - std::make_shared<IPC::HandleDescriptorHeader>(rp.PopRaw<IPC::HandleDescriptorHeader>()); + handle_descriptor_header = rp.PopRaw<IPC::HandleDescriptorHeader>(); if (handle_descriptor_header->send_current_pid) { rp.Skip(2, false); } @@ -140,16 +139,15 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_ // If this is an incoming message, only CommandType "Request" has a domain header // All outgoing domain messages have the domain header, if only incoming has it if (incoming || domain_message_header) { - domain_message_header = - std::make_shared<IPC::DomainMessageHeader>(rp.PopRaw<IPC::DomainMessageHeader>()); + domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>(); } else { - if (Session()->IsDomain()) + if (Session()->IsDomain()) { LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!"); + } } } - data_payload_header = - std::make_shared<IPC::DataPayloadHeader>(rp.PopRaw<IPC::DataPayloadHeader>()); + data_payload_header = rp.PopRaw<IPC::DataPayloadHeader>(); data_payload_offset = rp.GetCurrentOffset(); @@ -264,11 +262,11 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) { // Write the domain objects to the command buffer, these go after the raw untranslated data. // TODO(Subv): This completely ignores C buffers. 
std::size_t domain_offset = size - domain_message_header->num_objects; - auto& request_handlers = server_session->domain_request_handlers; - for (auto& object : domain_objects) { - request_handlers.emplace_back(object); - dst_cmdbuf[domain_offset++] = static_cast<u32_le>(request_handlers.size()); + for (const auto& object : domain_objects) { + server_session->AppendDomainRequestHandler(object); + dst_cmdbuf[domain_offset++] = + static_cast<u32_le>(server_session->NumDomainRequestHandlers()); } } diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index cb1c5aff3..2bdd9f02c 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h @@ -6,6 +6,7 @@ #include <array> #include <memory> +#include <optional> #include <string> #include <type_traits> #include <vector> @@ -15,6 +16,8 @@ #include "core/hle/ipc.h" #include "core/hle/kernel/object.h" +union ResultCode; + namespace Service { class ServiceFrameworkBase; } @@ -166,12 +169,12 @@ public: return buffer_c_desciptors; } - const IPC::DomainMessageHeader* GetDomainMessageHeader() const { - return domain_message_header.get(); + const IPC::DomainMessageHeader& GetDomainMessageHeader() const { + return domain_message_header.value(); } bool HasDomainMessageHeader() const { - return domain_message_header != nullptr; + return domain_message_header.has_value(); } /// Helper function to read a buffer using the appropriate buffer descriptor @@ -208,14 +211,12 @@ public: template <typename T> SharedPtr<T> GetCopyObject(std::size_t index) { - ASSERT(index < copy_objects.size()); - return DynamicObjectCast<T>(copy_objects[index]); + return DynamicObjectCast<T>(copy_objects.at(index)); } template <typename T> SharedPtr<T> GetMoveObject(std::size_t index) { - ASSERT(index < move_objects.size()); - return DynamicObjectCast<T>(move_objects[index]); + return DynamicObjectCast<T>(move_objects.at(index)); } void AddMoveObject(SharedPtr<Object> object) { @@ -232,7 +233,7 @@ public: template <typename T> std::shared_ptr<T> GetDomainRequestHandler(std::size_t index) const { - return std::static_pointer_cast<T>(domain_request_handlers[index]); + return std::static_pointer_cast<T>(domain_request_handlers.at(index)); } void SetDomainRequestHandlers( @@ -272,10 +273,10 @@ private: boost::container::small_vector<SharedPtr<Object>, 8> copy_objects; boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects; - std::shared_ptr<IPC::CommandHeader> command_header; - std::shared_ptr<IPC::HandleDescriptorHeader> handle_descriptor_header; - std::shared_ptr<IPC::DataPayloadHeader> data_payload_header; - std::shared_ptr<IPC::DomainMessageHeader> domain_message_header; + std::optional<IPC::CommandHeader> command_header; + std::optional<IPC::HandleDescriptorHeader> handle_descriptor_header; + std::optional<IPC::DataPayloadHeader> data_payload_header; + std::optional<IPC::DomainMessageHeader> domain_message_header; std::vector<IPC::BufferDescriptorX> buffer_x_desciptors; std::vector<IPC::BufferDescriptorABW> buffer_a_desciptors; std::vector<IPC::BufferDescriptorABW> buffer_b_desciptors; diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index dd749eed4..4d224d01d 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -12,6 +12,7 @@ #include "core/core.h" #include "core/core_timing.h" +#include "core/hle/kernel/address_arbiter.h" #include "core/hle/kernel/client_port.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/kernel.h" @@ -86,11 
+87,13 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_ } struct KernelCore::Impl { - void Initialize(KernelCore& kernel, Core::Timing::CoreTiming& core_timing) { + explicit Impl(Core::System& system) : system{system} {} + + void Initialize(KernelCore& kernel) { Shutdown(); InitializeSystemResourceLimit(kernel); - InitializeThreads(core_timing); + InitializeThreads(); } void Shutdown() { @@ -122,9 +125,9 @@ struct KernelCore::Impl { ASSERT(system_resource_limit->SetLimitValue(ResourceType::Sessions, 900).IsSuccess()); } - void InitializeThreads(Core::Timing::CoreTiming& core_timing) { + void InitializeThreads() { thread_wakeup_event_type = - core_timing.RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback); + system.CoreTiming().RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback); } std::atomic<u32> next_object_id{0}; @@ -145,15 +148,18 @@ struct KernelCore::Impl { /// Map of named ports managed by the kernel, which can be retrieved using /// the ConnectToPort SVC. NamedPortTable named_ports; + + // System context + Core::System& system; }; -KernelCore::KernelCore() : impl{std::make_unique<Impl>()} {} +KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system)} {} KernelCore::~KernelCore() { Shutdown(); } -void KernelCore::Initialize(Core::Timing::CoreTiming& core_timing) { - impl->Initialize(*this, core_timing); +void KernelCore::Initialize() { + impl->Initialize(*this); } void KernelCore::Shutdown() { diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 154bced42..ff17ff865 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -11,6 +11,10 @@ template <typename T> class ResultVal; +namespace Core { +class System; +} + namespace Core::Timing { class CoreTiming; struct EventType; @@ -18,6 +22,7 @@ struct EventType; namespace Kernel { +class AddressArbiter; class ClientPort; class HandleTable; class Process; @@ -30,7 +35,14 @@ private: using NamedPortTable = std::unordered_map<std::string, SharedPtr<ClientPort>>; public: - KernelCore(); + /// Constructs an instance of the kernel using the given System + /// instance as a context for any necessary system-related state, + /// such as threads, CPU core state, etc. + /// + /// @post After execution of the constructor, the provided System + /// object *must* outlive the kernel instance itself. + /// + explicit KernelCore(Core::System& system); ~KernelCore(); KernelCore(const KernelCore&) = delete; @@ -40,11 +52,7 @@ public: KernelCore& operator=(KernelCore&&) = delete; /// Resets the kernel to a clean slate for use. - /// - /// @param core_timing CoreTiming instance used to create any necessary - /// kernel-specific callback events. - /// - void Initialize(Core::Timing::CoreTiming& core_timing); + void Initialize(); /// Clears all resources in use by the kernel instance. 
void Shutdown(); diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index c5aa19afa..49fced7b1 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -53,9 +53,10 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_poi CodeSet::CodeSet() = default; CodeSet::~CodeSet() = default; -SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) { - SharedPtr<Process> process(new Process(kernel)); +SharedPtr<Process> Process::Create(Core::System& system, std::string&& name) { + auto& kernel = system.Kernel(); + SharedPtr<Process> process(new Process(system)); process->name = std::move(name); process->resource_limit = kernel.GetSystemResourceLimit(); process->status = ProcessStatus::Created; @@ -99,7 +100,13 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) { vm_manager.Reset(metadata.GetAddressSpaceType()); const auto& caps = metadata.GetKernelCapabilities(); - return capabilities.InitializeForUserProcess(caps.data(), caps.size(), vm_manager); + const auto capability_init_result = + capabilities.InitializeForUserProcess(caps.data(), caps.size(), vm_manager); + if (capability_init_result.IsError()) { + return capability_init_result; + } + + return handle_table.SetSize(capabilities.GetHandleTableSize()); } void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) { @@ -126,7 +133,7 @@ void Process::PrepareForTermination() { if (thread->GetOwnerProcess() != this) continue; - if (thread == GetCurrentThread()) + if (thread == system.CurrentScheduler().GetCurrentThread()) continue; // TODO(Subv): When are the other running/ready threads terminated? @@ -138,7 +145,6 @@ void Process::PrepareForTermination() { } }; - const auto& system = Core::System::GetInstance(); stop_threads(system.Scheduler(0).GetThreadList()); stop_threads(system.Scheduler(1).GetThreadList()); stop_threads(system.Scheduler(2).GetThreadList()); @@ -221,14 +227,12 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) { MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeMutable); // Clear instruction cache in CPU JIT - Core::System::GetInstance().ArmInterface(0).ClearInstructionCache(); - Core::System::GetInstance().ArmInterface(1).ClearInstructionCache(); - Core::System::GetInstance().ArmInterface(2).ClearInstructionCache(); - Core::System::GetInstance().ArmInterface(3).ClearInstructionCache(); + system.InvalidateCpuInstructionCaches(); } -Kernel::Process::Process(KernelCore& kernel) : WaitObject{kernel} {} -Kernel::Process::~Process() {} +Process::Process(Core::System& system) + : WaitObject{system.Kernel()}, address_arbiter{system}, system{system} {} +Process::~Process() = default; void Process::Acquire(Thread* thread) { ASSERT_MSG(!ShouldWait(thread), "Object unavailable!"); diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index dcc57ae9f..47ffd4ad3 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -12,12 +12,17 @@ #include <vector> #include <boost/container/static_vector.hpp> #include "common/common_types.h" +#include "core/hle/kernel/address_arbiter.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/process_capability.h" #include "core/hle/kernel/vm_manager.h" #include "core/hle/kernel/wait_object.h" #include "core/hle/result.h" +namespace Core { +class System; +} + namespace FileSys { class ProgramMetadata; } @@ -116,7 +121,7 @@ public: static constexpr std::size_t 
RANDOM_ENTROPY_SIZE = 4; - static SharedPtr<Process> Create(KernelCore& kernel, std::string&& name); + static SharedPtr<Process> Create(Core::System& system, std::string&& name); std::string GetTypeName() const override { return "Process"; @@ -150,6 +155,16 @@ public: return handle_table; } + /// Gets a reference to the process' address arbiter. + AddressArbiter& GetAddressArbiter() { + return address_arbiter; + } + + /// Gets a const reference to the process' address arbiter. + const AddressArbiter& GetAddressArbiter() const { + return address_arbiter; + } + /// Gets the current status of the process ProcessStatus GetStatus() const { return status; @@ -251,7 +266,7 @@ public: void FreeTLSSlot(VAddr tls_address); private: - explicit Process(KernelCore& kernel); + explicit Process(Core::System& system); ~Process() override; /// Checks if the specified thread should wait until this process is available. @@ -309,9 +324,16 @@ private: /// Per-process handle table for storing created object handles in. HandleTable handle_table; + /// Per-process address arbiter. + AddressArbiter address_arbiter; + /// Random values for svcGetInfo RandomEntropy std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy; + /// System context + Core::System& system; + + /// Name of this process std::string name; }; diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp index 3a2164b25..583e35b79 100644 --- a/src/core/hle/kernel/process_capability.cpp +++ b/src/core/hle/kernel/process_capability.cpp @@ -96,7 +96,7 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() { interrupt_capabilities.set(); // Allow using the maximum possible amount of handles - handle_table_size = static_cast<u32>(HandleTable::MAX_COUNT); + handle_table_size = static_cast<s32>(HandleTable::MAX_COUNT); // Allow all debugging capabilities. is_debuggable = true; @@ -337,7 +337,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) { return ERR_RESERVED_VALUE; } - handle_table_size = (flags >> 16) & 0x3FF; + handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF); return RESULT_SUCCESS; } diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h index fbc8812a3..5cdd80747 100644 --- a/src/core/hle/kernel/process_capability.h +++ b/src/core/hle/kernel/process_capability.h @@ -156,7 +156,7 @@ public: } /// Gets the number of total allowable handles for the process' handle table. 
- u32 GetHandleTableSize() const { + s32 GetHandleTableSize() const { return handle_table_size; } @@ -252,7 +252,7 @@ private: u64 core_mask = 0; u64 priority_mask = 0; - u32 handle_table_size = 0; + s32 handle_table_size = 0; u32 kernel_version = 0; ProgramType program_type = ProgramType::SysModule; diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 44f30d070..e524509df 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp @@ -19,7 +19,8 @@ namespace Kernel { std::mutex Scheduler::scheduler_mutex; -Scheduler::Scheduler(Core::ARM_Interface& cpu_core) : cpu_core(cpu_core) {} +Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core) + : cpu_core{cpu_core}, system{system} {} Scheduler::~Scheduler() { for (auto& thread : thread_list) { @@ -61,7 +62,7 @@ Thread* Scheduler::PopNextReadyThread() { void Scheduler::SwitchContext(Thread* new_thread) { Thread* const previous_thread = GetCurrentThread(); - Process* const previous_process = Core::CurrentProcess(); + Process* const previous_process = system.Kernel().CurrentProcess(); UpdateLastContextSwitchTime(previous_thread, previous_process); @@ -94,8 +95,8 @@ void Scheduler::SwitchContext(Thread* new_thread) { auto* const thread_owner_process = current_thread->GetOwnerProcess(); if (previous_process != thread_owner_process) { - Core::System::GetInstance().Kernel().MakeCurrentProcess(thread_owner_process); - SetCurrentPageTable(&Core::CurrentProcess()->VMManager().page_table); + system.Kernel().MakeCurrentProcess(thread_owner_process); + SetCurrentPageTable(&thread_owner_process->VMManager().page_table); } cpu_core.LoadContext(new_thread->GetContext()); @@ -111,7 +112,7 @@ void Scheduler::SwitchContext(Thread* new_thread) { void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { const u64 prev_switch_ticks = last_context_switch_time; - const u64 most_recent_switch_ticks = Core::System::GetInstance().CoreTiming().GetTicks(); + const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks(); const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; if (thread != nullptr) { @@ -198,8 +199,7 @@ void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { ASSERT(thread->GetPriority() < THREADPRIO_COUNT); // Yield this thread -- sleep for zero time and force reschedule to different thread - WaitCurrentThread_Sleep(); - GetCurrentThread()->WakeAfterDelay(0); + GetCurrentThread()->Sleep(0); } void Scheduler::YieldWithLoadBalancing(Thread* thread) { @@ -214,8 +214,7 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) { ASSERT(priority < THREADPRIO_COUNT); // Sleep for zero time to be able to force reschedule to different thread - WaitCurrentThread_Sleep(); - GetCurrentThread()->WakeAfterDelay(0); + GetCurrentThread()->Sleep(0); Thread* suggested_thread = nullptr; @@ -223,8 +222,7 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) { // Take the first non-nullptr one for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) { const auto res = - Core::System::GetInstance().CpuCore(cur_core).Scheduler().GetNextSuggestedThread( - core, priority); + system.CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core, priority); // If scheduler provides a suggested thread if (res != nullptr) { diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h index 97ced4dfc..1c5bf57d9 100644 --- a/src/core/hle/kernel/scheduler.h +++ b/src/core/hle/kernel/scheduler.h @@ -13,7 +13,8 @@ namespace Core { 
class ARM_Interface; -} +class System; +} // namespace Core namespace Kernel { @@ -21,7 +22,7 @@ class Process; class Scheduler final { public: - explicit Scheduler(Core::ARM_Interface& cpu_core); + explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core); ~Scheduler(); /// Returns whether there are any threads that are ready to run. @@ -162,6 +163,7 @@ private: Core::ARM_Interface& cpu_core; u64 last_context_switch_time = 0; + Core::System& system; static std::mutex scheduler_mutex; }; diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp index d6ceeb2da..0e1515c89 100644 --- a/src/core/hle/kernel/server_port.cpp +++ b/src/core/hle/kernel/server_port.cpp @@ -26,6 +26,10 @@ ResultVal<SharedPtr<ServerSession>> ServerPort::Accept() { return MakeResult(std::move(session)); } +void ServerPort::AppendPendingSession(SharedPtr<ServerSession> pending_session) { + pending_sessions.push_back(std::move(pending_session)); +} + bool ServerPort::ShouldWait(Thread* thread) const { // If there are no pending sessions, we wait until a new one is added. return pending_sessions.empty(); diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h index e52f8245f..9bc667cf2 100644 --- a/src/core/hle/kernel/server_port.h +++ b/src/core/hle/kernel/server_port.h @@ -22,6 +22,8 @@ class SessionRequestHandler; class ServerPort final : public WaitObject { public: + using HLEHandler = std::shared_ptr<SessionRequestHandler>; + /** * Creates a pair of ServerPort and an associated ClientPort. * @@ -51,22 +53,27 @@ public: */ ResultVal<SharedPtr<ServerSession>> Accept(); + /// Whether or not this server port has an HLE handler available. + bool HasHLEHandler() const { + return hle_handler != nullptr; + } + + /// Gets the HLE handler for this port. + HLEHandler GetHLEHandler() const { + return hle_handler; + } + /** * Sets the HLE handler template for the port. ServerSessions crated by connecting to this port * will inherit a reference to this handler. */ - void SetHleHandler(std::shared_ptr<SessionRequestHandler> hle_handler_) { + void SetHleHandler(HLEHandler hle_handler_) { hle_handler = std::move(hle_handler_); } - std::string name; ///< Name of port (optional) - - /// ServerSessions waiting to be accepted by the port - std::vector<SharedPtr<ServerSession>> pending_sessions; - - /// This session's HLE request handler template (optional) - /// ServerSessions created from this port inherit a reference to this handler. - std::shared_ptr<SessionRequestHandler> hle_handler; + /// Appends a ServerSession to the collection of ServerSessions + /// waiting to be accepted by this port. + void AppendPendingSession(SharedPtr<ServerSession> pending_session); bool ShouldWait(Thread* thread) const override; void Acquire(Thread* thread) override; @@ -74,6 +81,16 @@ public: private: explicit ServerPort(KernelCore& kernel); ~ServerPort() override; + + /// ServerSessions waiting to be accepted by the port + std::vector<SharedPtr<ServerSession>> pending_sessions; + + /// This session's HLE request handler template (optional) + /// ServerSessions created from this port inherit a reference to this handler. 
+ HLEHandler hle_handler; + + /// Name of the port (optional) + std::string name; }; } // namespace Kernel diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index 027434f92..4d8a337a7 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -63,42 +63,71 @@ void ServerSession::Acquire(Thread* thread) { pending_requesting_threads.pop_back(); } +void ServerSession::ClientDisconnected() { + // We keep a shared pointer to the hle handler to keep it alive throughout + // the call to ClientDisconnected, as ClientDisconnected invalidates the + // hle_handler member itself during the course of the function executing. + std::shared_ptr<SessionRequestHandler> handler = hle_handler; + if (handler) { + // Note that after this returns, this server session's hle_handler is + // invalidated (set to null). + handler->ClientDisconnected(this); + } + + // TODO(Subv): Force a wake up of all the ServerSession's waiting threads and set + // their WaitSynchronization result to 0xC920181A. + + // Clean up the list of client threads with pending requests, they are unneeded now that the + // client endpoint is closed. + pending_requesting_threads.clear(); + currently_handling = nullptr; +} + +void ServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) { + domain_request_handlers.push_back(std::move(handler)); +} + +std::size_t ServerSession::NumDomainRequestHandlers() const { + return domain_request_handlers.size(); +} + ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) { - auto* const domain_message_header = context.GetDomainMessageHeader(); - if (domain_message_header) { - // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs - context.SetDomainRequestHandlers(domain_request_handlers); - - // If there is a DomainMessageHeader, then this is CommandType "Request" - const u32 object_id{context.GetDomainMessageHeader()->object_id}; - switch (domain_message_header->command) { - case IPC::DomainMessageHeader::CommandType::SendMessage: - if (object_id > domain_request_handlers.size()) { - LOG_CRITICAL(IPC, - "object_id {} is too big! This probably means a recent service call " - "to {} needed to return a new interface!", - object_id, name); - UNREACHABLE(); - return RESULT_SUCCESS; // Ignore error if asserts are off - } - return domain_request_handlers[object_id - 1]->HandleSyncRequest(context); - - case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { - LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); - - domain_request_handlers[object_id - 1] = nullptr; - - IPC::ResponseBuilder rb{context, 2}; - rb.Push(RESULT_SUCCESS); - return RESULT_SUCCESS; - } + if (!context.HasDomainMessageHeader()) { + return RESULT_SUCCESS; + } + + // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs + context.SetDomainRequestHandlers(domain_request_handlers); + + // If there is a DomainMessageHeader, then this is CommandType "Request" + const auto& domain_message_header = context.GetDomainMessageHeader(); + const u32 object_id{domain_message_header.object_id}; + switch (domain_message_header.command) { + case IPC::DomainMessageHeader::CommandType::SendMessage: + if (object_id > domain_request_handlers.size()) { + LOG_CRITICAL(IPC, + "object_id {} is too big! 
This probably means a recent service call " + "to {} needed to return a new interface!", + object_id, name); + UNREACHABLE(); + return RESULT_SUCCESS; // Ignore error if asserts are off } + return domain_request_handlers[object_id - 1]->HandleSyncRequest(context); - LOG_CRITICAL(IPC, "Unknown domain command={}", - static_cast<int>(domain_message_header->command.Value())); - ASSERT(false); + case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { + LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); + + domain_request_handlers[object_id - 1] = nullptr; + + IPC::ResponseBuilder rb{context, 2}; + rb.Push(RESULT_SUCCESS); + return RESULT_SUCCESS; + } } + LOG_CRITICAL(IPC, "Unknown domain command={}", + static_cast<int>(domain_message_header.command.Value())); + ASSERT(false); return RESULT_SUCCESS; } diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h index e0e9d64c8..aea4ccfeb 100644 --- a/src/core/hle/kernel/server_session.h +++ b/src/core/hle/kernel/server_session.h @@ -46,6 +46,14 @@ public: return HANDLE_TYPE; } + Session* GetParent() { + return parent.get(); + } + + const Session* GetParent() const { + return parent.get(); + } + using SessionPair = std::tuple<SharedPtr<ServerSession>, SharedPtr<ClientSession>>; /** @@ -78,23 +86,16 @@ public: void Acquire(Thread* thread) override; - std::string name; ///< The name of this session (optional) - std::shared_ptr<Session> parent; ///< The parent session, which links to the client endpoint. - std::shared_ptr<SessionRequestHandler> - hle_handler; ///< This session's HLE request handler (applicable when not a domain) + /// Called when a client disconnection occurs. + void ClientDisconnected(); - /// This is the list of domain request handlers (after conversion to a domain) - std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers; - - /// List of threads that are pending a response after a sync request. This list is processed in - /// a LIFO manner, thus, the last request will be dispatched first. - /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test. - std::vector<SharedPtr<Thread>> pending_requesting_threads; + /// Adds a new domain request handler to the collection of request handlers within + /// this ServerSession instance. + void AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler); - /// Thread whose request is currently being handled. A request is considered "handled" when a - /// response is sent via svcReplyAndReceive. - /// TODO(Subv): Find a better name for this. - SharedPtr<Thread> currently_handling; + /// Retrieves the total number of domain request handlers that have been + /// appended to this ServerSession instance. + std::size_t NumDomainRequestHandlers() const; /// Returns true if the session has been converted to a domain, otherwise False bool IsDomain() const { @@ -129,8 +130,30 @@ private: /// object handle. ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context); + /// The parent session, which links to the client endpoint. + std::shared_ptr<Session> parent; + + /// This session's HLE request handler (applicable when not a domain) + std::shared_ptr<SessionRequestHandler> hle_handler; + + /// This is the list of domain request handlers (after conversion to a domain) + std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers; + + /// List of threads that are pending a response after a sync request. 
This list is processed in + /// a LIFO manner, thus, the last request will be dispatched first. + /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test. + std::vector<SharedPtr<Thread>> pending_requesting_threads; + + /// Thread whose request is currently being handled. A request is considered "handled" when a + /// response is sent via svcReplyAndReceive. + /// TODO(Subv): Find a better name for this. + SharedPtr<Thread> currently_handling; + /// When set to True, converts the session to a domain at the end of the command bool convert_to_domain{}; + + /// The name of this session (optional) + std::string name; }; } // namespace Kernel diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp index 22d0c1dd5..62861da36 100644 --- a/src/core/hle/kernel/shared_memory.cpp +++ b/src/core/hle/kernel/shared_memory.cpp @@ -6,7 +6,6 @@ #include "common/assert.h" #include "common/logging/log.h" -#include "core/core.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/shared_memory.h" @@ -34,8 +33,8 @@ SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_ shared_memory->backing_block_offset = 0; // Refresh the address mappings for the current process. - if (Core::CurrentProcess() != nullptr) { - Core::CurrentProcess()->VMManager().RefreshMemoryBlockMappings( + if (kernel.CurrentProcess() != nullptr) { + kernel.CurrentProcess()->VMManager().RefreshMemoryBlockMappings( shared_memory->backing_block.get()); } } else { diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index c5d399bab..047fa0c19 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -20,6 +20,7 @@ #include "core/hle/kernel/address_arbiter.h" #include "core/hle/kernel/client_port.h" #include "core/hle/kernel/client_session.h" +#include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/mutex.h" @@ -47,23 +48,6 @@ constexpr bool IsValidAddressRange(VAddr address, u64 size) { return address + size > address; } -// Checks if a given address range lies within a larger address range. 
-constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin, - VAddr address_range_end) { - const VAddr end_address = address + size - 1; - return address_range_begin <= address && end_address <= address_range_end - 1; -} - -bool IsInsideAddressSpace(const VMManager& vm, VAddr address, u64 size) { - return IsInsideAddressRange(address, size, vm.GetAddressSpaceBaseAddress(), - vm.GetAddressSpaceEndAddress()); -} - -bool IsInsideNewMapRegion(const VMManager& vm, VAddr address, u64 size) { - return IsInsideAddressRange(address, size, vm.GetNewMapRegionBaseAddress(), - vm.GetNewMapRegionEndAddress()); -} - // 8 GiB constexpr u64 MAIN_MEMORY_SIZE = 0x200000000; @@ -105,14 +89,14 @@ ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_add return ERR_INVALID_ADDRESS_STATE; } - if (!IsInsideAddressSpace(vm_manager, src_addr, size)) { + if (!vm_manager.IsWithinAddressSpace(src_addr, size)) { LOG_ERROR(Kernel_SVC, "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", src_addr, size); return ERR_INVALID_ADDRESS_STATE; } - if (!IsInsideNewMapRegion(vm_manager, dst_addr, size)) { + if (!vm_manager.IsWithinNewMapRegion(dst_addr, size)) { LOG_ERROR(Kernel_SVC, "Destination is not within the new map region, addr=0x{:016X}, size=0x{:016X}", dst_addr, size); @@ -238,7 +222,7 @@ static ResultCode SetMemoryPermission(VAddr addr, u64 size, u32 prot) { auto* const current_process = Core::CurrentProcess(); auto& vm_manager = current_process->VMManager(); - if (!IsInsideAddressSpace(vm_manager, addr, size)) { + if (!vm_manager.IsWithinAddressSpace(addr, size)) { LOG_ERROR(Kernel_SVC, "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, size); @@ -299,7 +283,7 @@ static ResultCode SetMemoryAttribute(VAddr address, u64 size, u32 mask, u32 attr } auto& vm_manager = Core::CurrentProcess()->VMManager(); - if (!IsInsideAddressSpace(vm_manager, address, size)) { + if (!vm_manager.IsWithinAddressSpace(address, size)) { LOG_ERROR(Kernel_SVC, "Given address (0x{:016X}) is outside the bounds of the address space.", address); return ERR_INVALID_ADDRESS_STATE; @@ -1300,10 +1284,14 @@ static ResultCode StartThread(Handle thread_handle) { /// Called when a thread exits static void ExitThread() { - LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", Core::CurrentArmInterface().GetPC()); + auto& system = Core::System::GetInstance(); - ExitCurrentThread(); - Core::System::GetInstance().PrepareReschedule(); + LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC()); + + auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); + current_thread->Stop(); + system.CurrentScheduler().RemoveThread(current_thread); + system.PrepareReschedule(); } /// Sleep the current thread @@ -1316,32 +1304,32 @@ static void SleepThread(s64 nanoseconds) { YieldAndWaitForLoadBalancing = -2, }; + auto& system = Core::System::GetInstance(); + auto& scheduler = system.CurrentScheduler(); + auto* const current_thread = scheduler.GetCurrentThread(); + if (nanoseconds <= 0) { - auto& scheduler{Core::System::GetInstance().CurrentScheduler()}; switch (static_cast<SleepType>(nanoseconds)) { case SleepType::YieldWithoutLoadBalancing: - scheduler.YieldWithoutLoadBalancing(GetCurrentThread()); + scheduler.YieldWithoutLoadBalancing(current_thread); break; case SleepType::YieldWithLoadBalancing: - scheduler.YieldWithLoadBalancing(GetCurrentThread()); + scheduler.YieldWithLoadBalancing(current_thread); break; case 
SleepType::YieldAndWaitForLoadBalancing: - scheduler.YieldAndWaitForLoadBalancing(GetCurrentThread()); + scheduler.YieldAndWaitForLoadBalancing(current_thread); break; default: UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); } } else { - // Sleep current thread and check for next thread to schedule - WaitCurrentThread_Sleep(); - - // Create an event to wake the thread up after the specified nanosecond delay has passed - GetCurrentThread()->WakeAfterDelay(nanoseconds); + current_thread->Sleep(nanoseconds); } // Reschedule all CPU cores - for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) - Core::System::GetInstance().CpuCore(i).PrepareReschedule(); + for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) { + system.CpuCore(i).PrepareReschedule(); + } } /// Wait process wide key atomic @@ -1495,20 +1483,10 @@ static ResultCode WaitForAddress(VAddr address, u32 type, s32 value, s64 timeout return ERR_INVALID_ADDRESS; } - switch (static_cast<AddressArbiter::ArbitrationType>(type)) { - case AddressArbiter::ArbitrationType::WaitIfLessThan: - return AddressArbiter::WaitForAddressIfLessThan(address, value, timeout, false); - case AddressArbiter::ArbitrationType::DecrementAndWaitIfLessThan: - return AddressArbiter::WaitForAddressIfLessThan(address, value, timeout, true); - case AddressArbiter::ArbitrationType::WaitIfEqual: - return AddressArbiter::WaitForAddressIfEqual(address, value, timeout); - default: - LOG_ERROR(Kernel_SVC, - "Invalid arbitration type, expected WaitIfLessThan, DecrementAndWaitIfLessThan " - "or WaitIfEqual but got {}", - type); - return ERR_INVALID_ENUM_VALUE; - } + const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type); + auto& address_arbiter = + Core::System::GetInstance().Kernel().CurrentProcess()->GetAddressArbiter(); + return address_arbiter.WaitForAddress(address, arbitration_type, value, timeout); } // Signals to an address (via Address Arbiter) @@ -1526,21 +1504,10 @@ static ResultCode SignalToAddress(VAddr address, u32 type, s32 value, s32 num_to return ERR_INVALID_ADDRESS; } - switch (static_cast<AddressArbiter::SignalType>(type)) { - case AddressArbiter::SignalType::Signal: - return AddressArbiter::SignalToAddress(address, num_to_wake); - case AddressArbiter::SignalType::IncrementAndSignalIfEqual: - return AddressArbiter::IncrementAndSignalToAddressIfEqual(address, value, num_to_wake); - case AddressArbiter::SignalType::ModifyByWaitingCountAndSignalIfEqual: - return AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, - num_to_wake); - default: - LOG_ERROR(Kernel_SVC, - "Invalid signal type, expected Signal, IncrementAndSignalIfEqual " - "or ModifyByWaitingCountAndSignalIfEqual but got {}", - type); - return ERR_INVALID_ENUM_VALUE; - } + const auto signal_type = static_cast<AddressArbiter::SignalType>(type); + auto& address_arbiter = + Core::System::GetInstance().Kernel().CurrentProcess()->GetAddressArbiter(); + return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake); } /// This returns the total CPU ticks elapsed since the CPU was powered-on diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 6661e2130..2e712c9cb 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -68,11 +68,6 @@ void Thread::Stop() { owner_process->FreeTLSSlot(tls_address); } -void WaitCurrentThread_Sleep() { - Thread* thread = GetCurrentThread(); - thread->SetStatus(ThreadStatus::WaitSleep); -} - void ExitCurrentThread() { Thread* 
thread = GetCurrentThread(); thread->Stop(); @@ -184,8 +179,6 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name return ERR_INVALID_PROCESSOR_ID; } - // TODO(yuriks): Other checks, returning 0xD9001BEA - if (!Memory::IsValidVirtualAddress(owner_process, entry_point)) { LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point); // TODO (bunnei): Find the correct error code to use here @@ -393,6 +386,14 @@ void Thread::SetActivity(ThreadActivity value) { } } +void Thread::Sleep(s64 nanoseconds) { + // Sleep current thread and check for next thread to schedule + SetStatus(ThreadStatus::WaitSleep); + + // Create an event to wake the thread up after the specified nanosecond delay has passed + WakeAfterDelay(nanoseconds); +} + //////////////////////////////////////////////////////////////////////////////////////////////////// /** diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index c48b21aba..ccdefeecc 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -383,6 +383,9 @@ public: void SetActivity(ThreadActivity value); + /// Sleeps this thread for the given amount of nanoseconds. + void Sleep(s64 nanoseconds); + private: explicit Thread(KernelCore& kernel); ~Thread() override; @@ -460,14 +463,4 @@ private: */ Thread* GetCurrentThread(); -/** - * Waits the current thread on a sleep - */ -void WaitCurrentThread_Sleep(); - -/** - * Stops the current thread and removes it from the thread_list - */ -void ExitCurrentThread(); - } // namespace Kernel diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp index 10ad94aa6..05c59af34 100644 --- a/src/core/hle/kernel/vm_manager.cpp +++ b/src/core/hle/kernel/vm_manager.cpp @@ -17,8 +17,8 @@ #include "core/memory_setup.h" namespace Kernel { - -static const char* GetMemoryStateName(MemoryState state) { +namespace { +const char* GetMemoryStateName(MemoryState state) { static constexpr const char* names[] = { "Unmapped", "Io", "Normal", "CodeStatic", @@ -35,6 +35,14 @@ static const char* GetMemoryStateName(MemoryState state) { return names[ToSvcMemoryState(state)]; } +// Checks if a given address range lies within a larger address range. 
+constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin, + VAddr address_range_end) { + const VAddr end_address = address + size - 1; + return address_range_begin <= address && end_address <= address_range_end - 1; +} +} // Anonymous namespace + bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const { ASSERT(base + size == next.base); if (permissions != next.permissions || state != next.state || attribute != next.attribute || @@ -249,8 +257,7 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p } ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) { - if (target < GetHeapRegionBaseAddress() || target + size > GetHeapRegionEndAddress() || - target + size < target) { + if (!IsWithinHeapRegion(target, size)) { return ERR_INVALID_ADDRESS; } @@ -285,8 +292,7 @@ ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission p } ResultCode VMManager::HeapFree(VAddr target, u64 size) { - if (target < GetHeapRegionBaseAddress() || target + size > GetHeapRegionEndAddress() || - target + size < target) { + if (!IsWithinHeapRegion(target, size)) { return ERR_INVALID_ADDRESS; } @@ -706,6 +712,11 @@ u64 VMManager::GetAddressSpaceWidth() const { return address_space_width; } +bool VMManager::IsWithinAddressSpace(VAddr address, u64 size) const { + return IsInsideAddressRange(address, size, GetAddressSpaceBaseAddress(), + GetAddressSpaceEndAddress()); +} + VAddr VMManager::GetASLRRegionBaseAddress() const { return aslr_region_base; } @@ -750,6 +761,11 @@ u64 VMManager::GetCodeRegionSize() const { return code_region_end - code_region_base; } +bool VMManager::IsWithinCodeRegion(VAddr address, u64 size) const { + return IsInsideAddressRange(address, size, GetCodeRegionBaseAddress(), + GetCodeRegionEndAddress()); +} + VAddr VMManager::GetHeapRegionBaseAddress() const { return heap_region_base; } @@ -762,6 +778,11 @@ u64 VMManager::GetHeapRegionSize() const { return heap_region_end - heap_region_base; } +bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const { + return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(), + GetHeapRegionEndAddress()); +} + VAddr VMManager::GetMapRegionBaseAddress() const { return map_region_base; } @@ -774,6 +795,10 @@ u64 VMManager::GetMapRegionSize() const { return map_region_end - map_region_base; } +bool VMManager::IsWithinMapRegion(VAddr address, u64 size) const { + return IsInsideAddressRange(address, size, GetMapRegionBaseAddress(), GetMapRegionEndAddress()); +} + VAddr VMManager::GetNewMapRegionBaseAddress() const { return new_map_region_base; } @@ -786,6 +811,11 @@ u64 VMManager::GetNewMapRegionSize() const { return new_map_region_end - new_map_region_base; } +bool VMManager::IsWithinNewMapRegion(VAddr address, u64 size) const { + return IsInsideAddressRange(address, size, GetNewMapRegionBaseAddress(), + GetNewMapRegionEndAddress()); +} + VAddr VMManager::GetTLSIORegionBaseAddress() const { return tls_io_region_base; } @@ -798,4 +828,9 @@ u64 VMManager::GetTLSIORegionSize() const { return tls_io_region_end - tls_io_region_base; } +bool VMManager::IsWithinTLSIORegion(VAddr address, u64 size) const { + return IsInsideAddressRange(address, size, GetTLSIORegionBaseAddress(), + GetTLSIORegionEndAddress()); +} + } // namespace Kernel diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h index 6091533bc..88e0b3c02 100644 --- a/src/core/hle/kernel/vm_manager.h +++ 
b/src/core/hle/kernel/vm_manager.h @@ -432,18 +432,21 @@ public: /// Gets the address space width in bits. u64 GetAddressSpaceWidth() const; + /// Determines whether or not the given address range lies within the address space. + bool IsWithinAddressSpace(VAddr address, u64 size) const; + /// Gets the base address of the ASLR region. VAddr GetASLRRegionBaseAddress() const; /// Gets the end address of the ASLR region. VAddr GetASLRRegionEndAddress() const; - /// Determines whether or not the specified address range is within the ASLR region. - bool IsWithinASLRRegion(VAddr address, u64 size) const; - /// Gets the size of the ASLR region u64 GetASLRRegionSize() const; + /// Determines whether or not the specified address range is within the ASLR region. + bool IsWithinASLRRegion(VAddr address, u64 size) const; + /// Gets the base address of the code region. VAddr GetCodeRegionBaseAddress() const; @@ -453,6 +456,9 @@ public: /// Gets the total size of the code region in bytes. u64 GetCodeRegionSize() const; + /// Determines whether or not the specified range is within the code region. + bool IsWithinCodeRegion(VAddr address, u64 size) const; + /// Gets the base address of the heap region. VAddr GetHeapRegionBaseAddress() const; @@ -462,6 +468,9 @@ public: /// Gets the total size of the heap region in bytes. u64 GetHeapRegionSize() const; + /// Determines whether or not the specified range is within the heap region. + bool IsWithinHeapRegion(VAddr address, u64 size) const; + /// Gets the base address of the map region. VAddr GetMapRegionBaseAddress() const; @@ -471,6 +480,9 @@ public: /// Gets the total size of the map region in bytes. u64 GetMapRegionSize() const; + /// Determines whether or not the specified range is within the map region. + bool IsWithinMapRegion(VAddr address, u64 size) const; + /// Gets the base address of the new map region. VAddr GetNewMapRegionBaseAddress() const; @@ -480,6 +492,9 @@ public: /// Gets the total size of the new map region in bytes. u64 GetNewMapRegionSize() const; + /// Determines whether or not the given address range is within the new map region + bool IsWithinNewMapRegion(VAddr address, u64 size) const; + /// Gets the base address of the TLS IO region. VAddr GetTLSIORegionBaseAddress() const; @@ -489,6 +504,9 @@ public: /// Gets the total size of the TLS IO region in bytes. u64 GetTLSIORegionSize() const; + /// Determines if the given address range is within the TLS IO region. + bool IsWithinTLSIORegion(VAddr address, u64 size) const; + /// Each VMManager has its own page table, which is set as the main one when the owning process /// is scheduled. Memory::PageTable page_table; diff --git a/src/core/hle/result.h b/src/core/hle/result.h index bfb77cc31..ab84f5ddc 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h @@ -8,20 +8,11 @@ #include <utility> #include "common/assert.h" #include "common/bit_field.h" -#include "common/common_funcs.h" #include "common/common_types.h" // All the constants in this file come from http://switchbrew.org/index.php?title=Error_codes /** - * Detailed description of the error. Code 0 always means success. - */ -enum class ErrorDescription : u32 { - Success = 0, - RemoteProcessDead = 301, -}; - -/** * Identifies the module which caused the error. Error codes can be propagated through a call * chain, meaning that this doesn't always correspond to the module where the API call made is * contained. 
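For readers skimming the result.h portion of this diff, a minimal compile-time sketch of how the retained constexpr (module, description) constructor and IsSuccess() behave; it is illustrative only and not part of the change itself, and the description value 8 is simply borrowed from ERR_BUFFER_COUNT_EXCEEDED introduced further down in this diff:

    // Illustrative sketch, not diff content: build an audio-module error with the
    // constexpr (ErrorModule, u32) constructor shown above and check it at compile
    // time through the constexpr IsSuccess() accessor.
    #include "core/hle/result.h"

    constexpr ResultCode example_error{ErrorModule::Audio, 8};
    static_assert(!example_error.IsSuccess(), "non-zero description codes are failures");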
@@ -121,7 +112,7 @@ enum class ErrorModule : u32 { ShopN = 811, }; -/// Encapsulates a CTR-OS error code, allowing it to be separated into its constituent fields. +/// Encapsulates a Horizon OS error code, allowing it to be separated into its constituent fields. union ResultCode { u32 raw; @@ -134,17 +125,9 @@ union ResultCode { constexpr explicit ResultCode(u32 raw) : raw(raw) {} - constexpr ResultCode(ErrorModule module, ErrorDescription description) - : ResultCode(module, static_cast<u32>(description)) {} - constexpr ResultCode(ErrorModule module_, u32 description_) : raw(module.FormatValue(module_) | description.FormatValue(description_)) {} - constexpr ResultCode& operator=(const ResultCode& o) { - raw = o.raw; - return *this; - } - constexpr bool IsSuccess() const { return raw == 0; } diff --git a/src/core/hle/service/am/applets/software_keyboard.cpp b/src/core/hle/service/am/applets/software_keyboard.cpp index f255f74b5..8c5bd6059 100644 --- a/src/core/hle/service/am/applets/software_keyboard.cpp +++ b/src/core/hle/service/am/applets/software_keyboard.cpp @@ -7,6 +7,7 @@ #include "common/string_util.h" #include "core/core.h" #include "core/frontend/applets/software_keyboard.h" +#include "core/hle/result.h" #include "core/hle/service/am/am.h" #include "core/hle/service/am/applets/software_keyboard.h" diff --git a/src/core/hle/service/am/applets/software_keyboard.h b/src/core/hle/service/am/applets/software_keyboard.h index efd5753a1..b93a30d28 100644 --- a/src/core/hle/service/am/applets/software_keyboard.h +++ b/src/core/hle/service/am/applets/software_keyboard.h @@ -9,10 +9,13 @@ #include <vector> #include "common/common_funcs.h" +#include "common/common_types.h" #include "common/swap.h" #include "core/hle/service/am/am.h" #include "core/hle/service/am/applets/applets.h" +union ResultCode; + namespace Service::AM::Applets { enum class KeysetDisable : u32 { diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp index 6831c0735..21f5e64c7 100644 --- a/src/core/hle/service/audio/audout_u.cpp +++ b/src/core/hle/service/audio/audout_u.cpp @@ -18,17 +18,11 @@ #include "core/hle/kernel/readable_event.h" #include "core/hle/kernel/writable_event.h" #include "core/hle/service/audio/audout_u.h" +#include "core/hle/service/audio/errors.h" #include "core/memory.h" namespace Service::Audio { -namespace ErrCodes { -enum { - ErrorUnknown = 2, - BufferCountExceeded = 8, -}; -} - constexpr std::array<char, 10> DefaultDevice{{"DeviceOut"}}; constexpr int DefaultSampleRate{48000}; @@ -100,7 +94,7 @@ private: if (stream->IsPlaying()) { IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultCode(ErrorModule::Audio, ErrCodes::ErrorUnknown)); + rb.Push(ERR_OPERATION_FAILED); return; } @@ -113,7 +107,9 @@ private: void StopAudioOut(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_Audio, "called"); - audio_core.StopStream(stream); + if (stream->IsPlaying()) { + audio_core.StopStream(stream); + } IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); @@ -143,7 +139,8 @@ private: if (!audio_core.QueueBuffer(stream, tag, std::move(samples))) { IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultCode(ErrorModule::Audio, ErrCodes::BufferCountExceeded)); + rb.Push(ERR_BUFFER_COUNT_EXCEEDED); + return; } IPC::ResponseBuilder rb{ctx, 2}; diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp index 7e0cc64a8..c9de10a24 100644 --- a/src/core/hle/service/audio/audren_u.cpp +++ b/src/core/hle/service/audio/audren_u.cpp @@ -17,6 +17,7 @@ 
#include "core/hle/kernel/readable_event.h" #include "core/hle/kernel/writable_event.h" #include "core/hle/service/audio/audren_u.h" +#include "core/hle/service/audio/errors.h" namespace Service::Audio { @@ -37,7 +38,7 @@ public: {8, &IAudioRenderer::SetRenderingTimeLimit, "SetRenderingTimeLimit"}, {9, &IAudioRenderer::GetRenderingTimeLimit, "GetRenderingTimeLimit"}, {10, &IAudioRenderer::RequestUpdateImpl, "RequestUpdateAuto"}, - {11, nullptr, "ExecuteAudioRendererRendering"}, + {11, &IAudioRenderer::ExecuteAudioRendererRendering, "ExecuteAudioRendererRendering"}, }; // clang-format on RegisterHandlers(functions); @@ -138,6 +139,17 @@ private: rb.Push(rendering_time_limit_percent); } + void ExecuteAudioRendererRendering(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_Audio, "called"); + + // This service command currently only reports an unsupported operation + // error code, or aborts. Given that, we just always return an error + // code in this case. + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ERR_NOT_SUPPORTED); + } + Kernel::EventPair system_event; std::unique_ptr<AudioCore::AudioRenderer> renderer; u32 rendering_time_limit_percent = 100; @@ -235,7 +247,7 @@ AudRenU::AudRenU() : ServiceFramework("audren:u") { {0, &AudRenU::OpenAudioRenderer, "OpenAudioRenderer"}, {1, &AudRenU::GetAudioRendererWorkBufferSize, "GetAudioRendererWorkBufferSize"}, {2, &AudRenU::GetAudioDeviceService, "GetAudioDeviceService"}, - {3, nullptr, "OpenAudioRendererAuto"}, + {3, &AudRenU::OpenAudioRendererAuto, "OpenAudioRendererAuto"}, {4, &AudRenU::GetAudioDeviceServiceWithRevisionInfo, "GetAudioDeviceServiceWithRevisionInfo"}, }; // clang-format on @@ -248,12 +260,7 @@ AudRenU::~AudRenU() = default; void AudRenU::OpenAudioRenderer(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_Audio, "called"); - IPC::RequestParser rp{ctx}; - auto params = rp.PopRaw<AudioCore::AudioRendererParameter>(); - IPC::ResponseBuilder rb{ctx, 2, 0, 1}; - - rb.Push(RESULT_SUCCESS); - rb.PushIpcInterface<Audio::IAudioRenderer>(std::move(params)); + OpenAudioRendererImpl(ctx); } void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) { @@ -262,20 +269,20 @@ void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_Audio, "called"); u64 buffer_sz = Common::AlignUp(4 * params.mix_buffer_count, 0x40); - buffer_sz += params.unknown_c * 1024; - buffer_sz += 0x940 * (params.unknown_c + 1); + buffer_sz += params.submix_count * 1024; + buffer_sz += 0x940 * (params.submix_count + 1); buffer_sz += 0x3F0 * params.voice_count; - buffer_sz += Common::AlignUp(8 * (params.unknown_c + 1), 0x10); + buffer_sz += Common::AlignUp(8 * (params.submix_count + 1), 0x10); buffer_sz += Common::AlignUp(8 * params.voice_count, 0x10); - buffer_sz += - Common::AlignUp((0x3C0 * (params.sink_count + params.unknown_c) + 4 * params.sample_count) * - (params.mix_buffer_count + 6), - 0x40); + buffer_sz += Common::AlignUp( + (0x3C0 * (params.sink_count + params.submix_count) + 4 * params.sample_count) * + (params.mix_buffer_count + 6), + 0x40); if (IsFeatureSupported(AudioFeatures::Splitter, params.revision)) { - u32 count = params.unknown_c + 1; + const u32 count = params.submix_count + 1; u64 node_count = Common::AlignUp(count, 0x40); - u64 node_state_buffer_sz = + const u64 node_state_buffer_sz = 4 * (node_count * node_count) + 0xC * node_count + 2 * (node_count / 8); u64 edge_matrix_buffer_sz = 0; node_count = Common::AlignUp(count * count, 0x40); @@ -289,19 +296,19 @@ void 
AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) { buffer_sz += 0x20 * (params.effect_count + 4 * params.voice_count) + 0x50; if (IsFeatureSupported(AudioFeatures::Splitter, params.revision)) { - buffer_sz += 0xE0 * params.unknown_2c; + buffer_sz += 0xE0 * params.num_splitter_send_channels; buffer_sz += 0x20 * params.splitter_count; - buffer_sz += Common::AlignUp(4 * params.unknown_2c, 0x10); + buffer_sz += Common::AlignUp(4 * params.num_splitter_send_channels, 0x10); } buffer_sz = Common::AlignUp(buffer_sz, 0x40) + 0x170 * params.sink_count; u64 output_sz = buffer_sz + 0x280 * params.sink_count + 0x4B0 * params.effect_count + ((params.voice_count * 256) | 0x40); - if (params.unknown_1c >= 1) { + if (params.performance_frame_count >= 1) { output_sz = Common::AlignUp(((16 * params.sink_count + 16 * params.effect_count + 16 * params.voice_count + 16) + 0x658) * - (params.unknown_1c + 1) + + (params.performance_frame_count + 1) + 0xc0, 0x40) + output_sz; @@ -325,6 +332,12 @@ void AudRenU::GetAudioDeviceService(Kernel::HLERequestContext& ctx) { rb.PushIpcInterface<Audio::IAudioDevice>(); } +void AudRenU::OpenAudioRendererAuto(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_Audio, "called"); + + OpenAudioRendererImpl(ctx); +} + void AudRenU::GetAudioDeviceServiceWithRevisionInfo(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_Audio, "(STUBBED) called"); @@ -335,6 +348,15 @@ void AudRenU::GetAudioDeviceServiceWithRevisionInfo(Kernel::HLERequestContext& c // based on the current revision } +void AudRenU::OpenAudioRendererImpl(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto params = rp.PopRaw<AudioCore::AudioRendererParameter>(); + IPC::ResponseBuilder rb{ctx, 2, 0, 1}; + + rb.Push(RESULT_SUCCESS); + rb.PushIpcInterface<IAudioRenderer>(params); +} + bool AudRenU::IsFeatureSupported(AudioFeatures feature, u32_le revision) const { u32_be version_num = (revision - Common::MakeMagic('R', 'E', 'V', '0')); // Byte swap switch (feature) { diff --git a/src/core/hle/service/audio/audren_u.h b/src/core/hle/service/audio/audren_u.h index 3d63388fb..e55d25973 100644 --- a/src/core/hle/service/audio/audren_u.h +++ b/src/core/hle/service/audio/audren_u.h @@ -21,8 +21,11 @@ private: void OpenAudioRenderer(Kernel::HLERequestContext& ctx); void GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx); void GetAudioDeviceService(Kernel::HLERequestContext& ctx); + void OpenAudioRendererAuto(Kernel::HLERequestContext& ctx); void GetAudioDeviceServiceWithRevisionInfo(Kernel::HLERequestContext& ctx); + void OpenAudioRendererImpl(Kernel::HLERequestContext& ctx); + enum class AudioFeatures : u32 { Splitter, }; diff --git a/src/core/hle/service/audio/errors.h b/src/core/hle/service/audio/errors.h new file mode 100644 index 000000000..6f8c09bcf --- /dev/null +++ b/src/core/hle/service/audio/errors.h @@ -0,0 +1,15 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include "core/hle/result.h" + +namespace Service::Audio { + +constexpr ResultCode ERR_OPERATION_FAILED{ErrorModule::Audio, 2}; +constexpr ResultCode ERR_BUFFER_COUNT_EXCEEDED{ErrorModule::Audio, 8}; +constexpr ResultCode ERR_NOT_SUPPORTED{ErrorModule::Audio, 513}; + +} // namespace Service::Audio diff --git a/src/core/hle/service/audio/hwopus.cpp b/src/core/hle/service/audio/hwopus.cpp index 11eba4a12..377e12cfa 100644 --- a/src/core/hle/service/audio/hwopus.cpp +++ b/src/core/hle/service/audio/hwopus.cpp @@ -9,43 +9,32 @@ #include <opus.h> -#include "common/common_funcs.h" +#include "common/assert.h" #include "common/logging/log.h" #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/hle_ipc.h" #include "core/hle/service/audio/hwopus.h" namespace Service::Audio { - +namespace { struct OpusDeleter { void operator()(void* ptr) const { operator delete(ptr); } }; -class IHardwareOpusDecoderManager final : public ServiceFramework<IHardwareOpusDecoderManager> { -public: - IHardwareOpusDecoderManager(std::unique_ptr<OpusDecoder, OpusDeleter> decoder, u32 sample_rate, - u32 channel_count) - : ServiceFramework("IHardwareOpusDecoderManager"), decoder(std::move(decoder)), - sample_rate(sample_rate), channel_count(channel_count) { - // clang-format off - static const FunctionInfo functions[] = { - {0, &IHardwareOpusDecoderManager::DecodeInterleavedOld, "DecodeInterleavedOld"}, - {1, nullptr, "SetContext"}, - {2, nullptr, "DecodeInterleavedForMultiStreamOld"}, - {3, nullptr, "SetContextForMultiStream"}, - {4, &IHardwareOpusDecoderManager::DecodeInterleavedWithPerfOld, "DecodeInterleavedWithPerfOld"}, - {5, nullptr, "DecodeInterleavedForMultiStreamWithPerfOld"}, - {6, &IHardwareOpusDecoderManager::DecodeInterleaved, "DecodeInterleaved"}, - {7, nullptr, "DecodeInterleavedForMultiStream"}, - }; - // clang-format on +using OpusDecoderPtr = std::unique_ptr<OpusDecoder, OpusDeleter>; - RegisterHandlers(functions); - } +struct OpusPacketHeader { + // Packet size in bytes. + u32_be size; + // Indicates the final range of the codec's entropy coder. + u32_be final_range; +}; +static_assert(sizeof(OpusPacketHeader) == 0x8, "OpusHeader is an invalid size"); -private: +class OpusDecoderStateBase { +public: /// Describes extra behavior that may be asked of the decoding context. enum class ExtraBehavior { /// No extra behavior. @@ -55,30 +44,36 @@ private: ResetContext, }; - void DecodeInterleavedOld(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Audio, "called"); - - DecodeInterleavedHelper(ctx, nullptr, ExtraBehavior::None); - } - - void DecodeInterleavedWithPerfOld(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Audio, "called"); + enum class PerfTime { + Disabled, + Enabled, + }; - u64 performance = 0; - DecodeInterleavedHelper(ctx, &performance, ExtraBehavior::None); - } + virtual ~OpusDecoderStateBase() = default; - void DecodeInterleaved(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Audio, "called"); - - IPC::RequestParser rp{ctx}; - const auto extra_behavior = - rp.Pop<bool>() ? ExtraBehavior::ResetContext : ExtraBehavior::None; + // Decodes interleaved Opus packets. Optionally allows reporting time taken to + // perform the decoding, as well as any relevant extra behavior. + virtual void DecodeInterleaved(Kernel::HLERequestContext& ctx, PerfTime perf_time, + ExtraBehavior extra_behavior) = 0; +}; - u64 performance = 0; - DecodeInterleavedHelper(ctx, &performance, extra_behavior); +// Represents the decoder state for a non-multistream decoder. 
+class OpusDecoderState final : public OpusDecoderStateBase { +public: + explicit OpusDecoderState(OpusDecoderPtr decoder, u32 sample_rate, u32 channel_count) + : decoder{std::move(decoder)}, sample_rate{sample_rate}, channel_count{channel_count} {} + + void DecodeInterleaved(Kernel::HLERequestContext& ctx, PerfTime perf_time, + ExtraBehavior extra_behavior) override { + if (perf_time == PerfTime::Disabled) { + DecodeInterleavedHelper(ctx, nullptr, extra_behavior); + } else { + u64 performance = 0; + DecodeInterleavedHelper(ctx, &performance, extra_behavior); + } } +private: void DecodeInterleavedHelper(Kernel::HLERequestContext& ctx, u64* performance, ExtraBehavior extra_behavior) { u32 consumed = 0; @@ -89,8 +84,7 @@ private: ResetDecoderContext(); } - if (!Decoder_DecodeInterleaved(consumed, sample_count, ctx.ReadBuffer(), samples, - performance)) { + if (!DecodeOpusData(consumed, sample_count, ctx.ReadBuffer(), samples, performance)) { LOG_ERROR(Audio, "Failed to decode opus data"); IPC::ResponseBuilder rb{ctx, 2}; // TODO(ogniK): Use correct error code @@ -109,27 +103,27 @@ private: ctx.WriteBuffer(samples.data(), samples.size() * sizeof(s16)); } - bool Decoder_DecodeInterleaved(u32& consumed, u32& sample_count, const std::vector<u8>& input, - std::vector<opus_int16>& output, u64* out_performance_time) { + bool DecodeOpusData(u32& consumed, u32& sample_count, const std::vector<u8>& input, + std::vector<opus_int16>& output, u64* out_performance_time) const { const auto start_time = std::chrono::high_resolution_clock::now(); const std::size_t raw_output_sz = output.size() * sizeof(opus_int16); - if (sizeof(OpusHeader) > input.size()) { + if (sizeof(OpusPacketHeader) > input.size()) { LOG_ERROR(Audio, "Input is smaller than the header size, header_sz={}, input_sz={}", - sizeof(OpusHeader), input.size()); + sizeof(OpusPacketHeader), input.size()); return false; } - OpusHeader hdr{}; - std::memcpy(&hdr, input.data(), sizeof(OpusHeader)); - if (sizeof(OpusHeader) + static_cast<u32>(hdr.sz) > input.size()) { + OpusPacketHeader hdr{}; + std::memcpy(&hdr, input.data(), sizeof(OpusPacketHeader)); + if (sizeof(OpusPacketHeader) + static_cast<u32>(hdr.size) > input.size()) { LOG_ERROR(Audio, "Input does not fit in the opus header size. 
data_sz={}, input_sz={}", - sizeof(OpusHeader) + static_cast<u32>(hdr.sz), input.size()); + sizeof(OpusPacketHeader) + static_cast<u32>(hdr.size), input.size()); return false; } - const auto frame = input.data() + sizeof(OpusHeader); + const auto frame = input.data() + sizeof(OpusPacketHeader); const auto decoded_sample_count = opus_packet_get_nb_samples( - frame, static_cast<opus_int32>(input.size() - sizeof(OpusHeader)), + frame, static_cast<opus_int32>(input.size() - sizeof(OpusPacketHeader)), static_cast<opus_int32>(sample_rate)); if (decoded_sample_count * channel_count * sizeof(u16) > raw_output_sz) { LOG_ERROR( @@ -141,18 +135,18 @@ private: const int frame_size = (static_cast<int>(raw_output_sz / sizeof(s16) / channel_count)); const auto out_sample_count = - opus_decode(decoder.get(), frame, hdr.sz, output.data(), frame_size, 0); + opus_decode(decoder.get(), frame, hdr.size, output.data(), frame_size, 0); if (out_sample_count < 0) { LOG_ERROR(Audio, "Incorrect sample count received from opus_decode, " "output_sample_count={}, frame_size={}, data_sz_from_hdr={}", - out_sample_count, frame_size, static_cast<u32>(hdr.sz)); + out_sample_count, frame_size, static_cast<u32>(hdr.size)); return false; } const auto end_time = std::chrono::high_resolution_clock::now() - start_time; sample_count = out_sample_count; - consumed = static_cast<u32>(sizeof(OpusHeader) + hdr.sz); + consumed = static_cast<u32>(sizeof(OpusPacketHeader) + hdr.size); if (out_performance_time != nullptr) { *out_performance_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time).count(); @@ -167,21 +161,66 @@ private: opus_decoder_ctl(decoder.get(), OPUS_RESET_STATE); } - struct OpusHeader { - u32_be sz; // Needs to be BE for some odd reason - INSERT_PADDING_WORDS(1); - }; - static_assert(sizeof(OpusHeader) == 0x8, "OpusHeader is an invalid size"); - - std::unique_ptr<OpusDecoder, OpusDeleter> decoder; + OpusDecoderPtr decoder; u32 sample_rate; u32 channel_count; }; -static std::size_t WorkerBufferSize(u32 channel_count) { +class IHardwareOpusDecoderManager final : public ServiceFramework<IHardwareOpusDecoderManager> { +public: + explicit IHardwareOpusDecoderManager(std::unique_ptr<OpusDecoderStateBase> decoder_state) + : ServiceFramework("IHardwareOpusDecoderManager"), decoder_state{std::move(decoder_state)} { + // clang-format off + static const FunctionInfo functions[] = { + {0, &IHardwareOpusDecoderManager::DecodeInterleavedOld, "DecodeInterleavedOld"}, + {1, nullptr, "SetContext"}, + {2, nullptr, "DecodeInterleavedForMultiStreamOld"}, + {3, nullptr, "SetContextForMultiStream"}, + {4, &IHardwareOpusDecoderManager::DecodeInterleavedWithPerfOld, "DecodeInterleavedWithPerfOld"}, + {5, nullptr, "DecodeInterleavedForMultiStreamWithPerfOld"}, + {6, &IHardwareOpusDecoderManager::DecodeInterleaved, "DecodeInterleaved"}, + {7, nullptr, "DecodeInterleavedForMultiStream"}, + }; + // clang-format on + + RegisterHandlers(functions); + } + +private: + void DecodeInterleavedOld(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Audio, "called"); + + decoder_state->DecodeInterleaved(ctx, OpusDecoderStateBase::PerfTime::Disabled, + OpusDecoderStateBase::ExtraBehavior::None); + } + + void DecodeInterleavedWithPerfOld(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Audio, "called"); + + decoder_state->DecodeInterleaved(ctx, OpusDecoderStateBase::PerfTime::Enabled, + OpusDecoderStateBase::ExtraBehavior::None); + } + + void DecodeInterleaved(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Audio, "called"); + + IPC::RequestParser 
rp{ctx}; + const auto extra_behavior = rp.Pop<bool>() + ? OpusDecoderStateBase::ExtraBehavior::ResetContext + : OpusDecoderStateBase::ExtraBehavior::None; + + decoder_state->DecodeInterleaved(ctx, OpusDecoderStateBase::PerfTime::Enabled, + extra_behavior); + } + + std::unique_ptr<OpusDecoderStateBase> decoder_state; +}; + +std::size_t WorkerBufferSize(u32 channel_count) { ASSERT_MSG(channel_count == 1 || channel_count == 2, "Invalid channel count"); return opus_decoder_get_size(static_cast<int>(channel_count)); } +} // Anonymous namespace void HwOpus::GetWorkBufferSize(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; @@ -220,8 +259,7 @@ void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) { const std::size_t worker_sz = WorkerBufferSize(channel_count); ASSERT_MSG(buffer_sz >= worker_sz, "Worker buffer too large"); - std::unique_ptr<OpusDecoder, OpusDeleter> decoder{ - static_cast<OpusDecoder*>(operator new(worker_sz))}; + OpusDecoderPtr decoder{static_cast<OpusDecoder*>(operator new(worker_sz))}; if (const int err = opus_decoder_init(decoder.get(), sample_rate, channel_count)) { LOG_ERROR(Audio, "Failed to init opus decoder with error={}", err); IPC::ResponseBuilder rb{ctx, 2}; @@ -232,8 +270,8 @@ void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) { IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(RESULT_SUCCESS); - rb.PushIpcInterface<IHardwareOpusDecoderManager>(std::move(decoder), sample_rate, - channel_count); + rb.PushIpcInterface<IHardwareOpusDecoderManager>( + std::make_unique<OpusDecoderState>(std::move(decoder), sample_rate, channel_count)); } HwOpus::HwOpus() : ServiceFramework("hwopus") { diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h index 6d897c842..7cc58db4c 100644 --- a/src/core/hle/service/hid/hid.h +++ b/src/core/hle/service/hid/hid.h @@ -15,7 +15,7 @@ namespace Kernel { class SharedMemory; } -namespace SM { +namespace Service::SM { class ServiceManager; } diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index 21ccfe1f8..20c7c39aa 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp @@ -23,7 +23,7 @@ u32 nvdisp_disp0::ioctl(Ioctl command, const std::vector<u8>& input, std::vector void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height, u32 stride, NVFlinger::BufferQueue::BufferTransformFlags transform, - const MathUtil::Rectangle<int>& crop_rect) { + const Common::Rectangle<int>& crop_rect) { VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle); LOG_TRACE(Service, "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", @@ -36,7 +36,7 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u3 auto& instance = Core::System::GetInstance(); instance.GetPerfStats().EndGameFrame(); - instance.Renderer().SwapBuffers(framebuffer); + instance.GPU().SwapBuffers(framebuffer); } } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h index a45086e45..ace71169f 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h @@ -25,7 +25,7 @@ public: /// Performs a screen flip, drawing the buffer pointed to by the handle. 
void flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height, u32 stride, NVFlinger::BufferQueue::BufferTransformFlags transform, - const MathUtil::Rectangle<int>& crop_rect); + const Common::Rectangle<int>& crop_rect); private: std::shared_ptr<nvmap> nvmap_dev; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 466db7ccd..b031ebc66 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -10,6 +10,7 @@ #include "core/core.h" #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" #include "core/hle/service/nvdrv/devices/nvmap.h" +#include "core/memory.h" #include "video_core/memory_manager.h" #include "video_core/rasterizer_interface.h" #include "video_core/renderer_base.h" @@ -178,7 +179,7 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou auto& gpu = system_instance.GPU(); auto cpu_addr = gpu.MemoryManager().GpuToCpuAddress(params.offset); ASSERT(cpu_addr); - system_instance.Renderer().Rasterizer().FlushAndInvalidateRegion(*cpu_addr, itr->second.size); + gpu.FlushAndInvalidateRegion(ToCacheAddr(Memory::GetPointer(*cpu_addr)), itr->second.size); params.offset = gpu.MemoryManager().UnmapBuffer(params.offset, itr->second.size); diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index 0a650f36c..8ce7bc7a5 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -136,16 +136,6 @@ u32 nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::vector< return 0; } -static void PushGPUEntries(Tegra::CommandList&& entries) { - if (entries.empty()) { - return; - } - - auto& dma_pusher{Core::System::GetInstance().GPU().DmaPusher()}; - dma_pusher.Push(std::move(entries)); - dma_pusher.DispatchCalls(); -} - u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& output) { if (input.size() < sizeof(IoctlSubmitGpfifo)) { UNIMPLEMENTED(); @@ -163,7 +153,7 @@ u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& outp std::memcpy(entries.data(), &input[sizeof(IoctlSubmitGpfifo)], params.num_entries * sizeof(Tegra::CommandListHeader)); - PushGPUEntries(std::move(entries)); + Core::System::GetInstance().GPU().PushGPUEntries(std::move(entries)); params.fence_out.id = 0; params.fence_out.value = 0; @@ -184,7 +174,7 @@ u32 nvhost_gpu::KickoffPB(const std::vector<u8>& input, std::vector<u8>& output) Memory::ReadBlock(params.address, entries.data(), params.num_entries * sizeof(Tegra::CommandListHeader)); - PushGPUEntries(std::move(entries)); + Core::System::GetInstance().GPU().PushGPUEntries(std::move(entries)); params.fence_out.id = 0; params.fence_out.value = 0; diff --git a/src/core/hle/service/nvflinger/buffer_queue.cpp b/src/core/hle/service/nvflinger/buffer_queue.cpp index fc07d9bb8..4d150fc71 100644 --- a/src/core/hle/service/nvflinger/buffer_queue.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue.cpp @@ -63,7 +63,7 @@ const IGBPBuffer& BufferQueue::RequestBuffer(u32 slot) const { } void BufferQueue::QueueBuffer(u32 slot, BufferTransformFlags transform, - const MathUtil::Rectangle<int>& crop_rect) { + const Common::Rectangle<int>& crop_rect) { auto itr = std::find_if(queue.begin(), queue.end(), [&](const Buffer& buffer) { return buffer.slot == slot; }); ASSERT(itr != queue.end()); diff --git 
a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h index ab90d591e..e1ccb6171 100644 --- a/src/core/hle/service/nvflinger/buffer_queue.h +++ b/src/core/hle/service/nvflinger/buffer_queue.h @@ -67,14 +67,14 @@ public: Status status = Status::Free; IGBPBuffer igbp_buffer; BufferTransformFlags transform; - MathUtil::Rectangle<int> crop_rect; + Common::Rectangle<int> crop_rect; }; void SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer); std::optional<u32> DequeueBuffer(u32 width, u32 height); const IGBPBuffer& RequestBuffer(u32 slot) const; void QueueBuffer(u32 slot, BufferTransformFlags transform, - const MathUtil::Rectangle<int>& crop_rect); + const Common::Rectangle<int>& crop_rect); std::optional<std::reference_wrapper<const Buffer>> AcquireBuffer(); void ReleaseBuffer(u32 slot); u32 Query(QueryType type); diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index 3babc3f7c..fc496b654 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp @@ -14,11 +14,12 @@ #include "core/core_timing_util.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/readable_event.h" -#include "core/hle/kernel/writable_event.h" #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" #include "core/hle/service/nvdrv/nvdrv.h" #include "core/hle/service/nvflinger/buffer_queue.h" #include "core/hle/service/nvflinger/nvflinger.h" +#include "core/hle/service/vi/display/vi_display.h" +#include "core/hle/service/vi/layer/vi_layer.h" #include "core/perf_stats.h" #include "video_core/renderer_base.h" @@ -28,6 +29,12 @@ constexpr std::size_t SCREEN_REFRESH_RATE = 60; constexpr u64 frame_ticks = static_cast<u64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE); NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_timing} { + displays.emplace_back(0, "Default"); + displays.emplace_back(1, "External"); + displays.emplace_back(2, "Edid"); + displays.emplace_back(3, "Internal"); + displays.emplace_back(4, "Null"); + // Schedule the screen composition events composition_event = core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, int cycles_late) { @@ -52,13 +59,14 @@ std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) { // TODO(Subv): Currently we only support the Default display. 
ASSERT(name == "Default"); - const auto itr = std::find_if(displays.begin(), displays.end(), - [&](const Display& display) { return display.name == name; }); + const auto itr = + std::find_if(displays.begin(), displays.end(), + [&](const VI::Display& display) { return display.GetName() == name; }); if (itr == displays.end()) { return {}; } - return itr->id; + return itr->GetID(); } std::optional<u64> NVFlinger::CreateLayer(u64 display_id) { @@ -68,13 +76,10 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) { return {}; } - ASSERT_MSG(display->layers.empty(), "Only one layer is supported per display at the moment"); - const u64 layer_id = next_layer_id++; const u32 buffer_queue_id = next_buffer_queue_id++; - auto buffer_queue = std::make_shared<BufferQueue>(buffer_queue_id, layer_id); - display->layers.emplace_back(layer_id, buffer_queue); - buffer_queues.emplace_back(std::move(buffer_queue)); + buffer_queues.emplace_back(buffer_queue_id, layer_id); + display->CreateLayer(layer_id, buffer_queues.back()); return layer_id; } @@ -85,7 +90,7 @@ std::optional<u32> NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) co return {}; } - return layer->buffer_queue->GetId(); + return layer->GetBufferQueue().GetId(); } Kernel::SharedPtr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_id) const { @@ -95,20 +100,29 @@ Kernel::SharedPtr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_i return nullptr; } - return display->vsync_event.readable; + return display->GetVSyncEvent(); +} + +BufferQueue& NVFlinger::FindBufferQueue(u32 id) { + const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(), + [id](const auto& queue) { return queue.GetId() == id; }); + + ASSERT(itr != buffer_queues.end()); + return *itr; } -std::shared_ptr<BufferQueue> NVFlinger::FindBufferQueue(u32 id) const { +const BufferQueue& NVFlinger::FindBufferQueue(u32 id) const { const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(), - [&](const auto& queue) { return queue->GetId() == id; }); + [id](const auto& queue) { return queue.GetId() == id; }); ASSERT(itr != buffer_queues.end()); return *itr; } -Display* NVFlinger::FindDisplay(u64 display_id) { - const auto itr = std::find_if(displays.begin(), displays.end(), - [&](const Display& display) { return display.id == display_id; }); +VI::Display* NVFlinger::FindDisplay(u64 display_id) { + const auto itr = + std::find_if(displays.begin(), displays.end(), + [&](const VI::Display& display) { return display.GetID() == display_id; }); if (itr == displays.end()) { return nullptr; @@ -117,9 +131,10 @@ Display* NVFlinger::FindDisplay(u64 display_id) { return &*itr; } -const Display* NVFlinger::FindDisplay(u64 display_id) const { - const auto itr = std::find_if(displays.begin(), displays.end(), - [&](const Display& display) { return display.id == display_id; }); +const VI::Display* NVFlinger::FindDisplay(u64 display_id) const { + const auto itr = + std::find_if(displays.begin(), displays.end(), + [&](const VI::Display& display) { return display.GetID() == display_id; }); if (itr == displays.end()) { return nullptr; @@ -128,57 +143,41 @@ const Display* NVFlinger::FindDisplay(u64 display_id) const { return &*itr; } -Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) { +VI::Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) { auto* const display = FindDisplay(display_id); if (display == nullptr) { return nullptr; } - const auto itr = std::find_if(display->layers.begin(), display->layers.end(), - [&](const Layer& 
layer) { return layer.id == layer_id; }); - - if (itr == display->layers.end()) { - return nullptr; - } - - return &*itr; + return display->FindLayer(layer_id); } -const Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) const { +const VI::Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) const { const auto* const display = FindDisplay(display_id); if (display == nullptr) { return nullptr; } - const auto itr = std::find_if(display->layers.begin(), display->layers.end(), - [&](const Layer& layer) { return layer.id == layer_id; }); - - if (itr == display->layers.end()) { - return nullptr; - } - - return &*itr; + return display->FindLayer(layer_id); } void NVFlinger::Compose() { for (auto& display : displays) { // Trigger vsync for this display at the end of drawing - SCOPE_EXIT({ display.vsync_event.writable->Signal(); }); + SCOPE_EXIT({ display.SignalVSyncEvent(); }); // Don't do anything for displays without layers. - if (display.layers.empty()) + if (!display.HasLayers()) continue; // TODO(Subv): Support more than 1 layer. - ASSERT_MSG(display.layers.size() == 1, "Max 1 layer per display is supported"); - - Layer& layer = display.layers[0]; - auto& buffer_queue = layer.buffer_queue; + VI::Layer& layer = display.GetLayer(0); + auto& buffer_queue = layer.GetBufferQueue(); // Search for a queued buffer and acquire it - auto buffer = buffer_queue->AcquireBuffer(); + auto buffer = buffer_queue.AcquireBuffer(); MicroProfileFlip(); @@ -187,7 +186,7 @@ void NVFlinger::Compose() { // There was no queued buffer to draw, render previous frame system_instance.GetPerfStats().EndGameFrame(); - system_instance.Renderer().SwapBuffers({}); + system_instance.GPU().SwapBuffers({}); continue; } @@ -203,19 +202,8 @@ void NVFlinger::Compose() { igbp_buffer.width, igbp_buffer.height, igbp_buffer.stride, buffer->get().transform, buffer->get().crop_rect); - buffer_queue->ReleaseBuffer(buffer->get().slot); + buffer_queue.ReleaseBuffer(buffer->get().slot); } } -Layer::Layer(u64 id, std::shared_ptr<BufferQueue> queue) : id(id), buffer_queue(std::move(queue)) {} -Layer::~Layer() = default; - -Display::Display(u64 id, std::string name) : id(id), name(std::move(name)) { - auto& kernel = Core::System::GetInstance().Kernel(); - vsync_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Sticky, - fmt::format("Display VSync Event {}", id)); -} - -Display::~Display() = default; - } // namespace Service::NVFlinger diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h index 437aa592d..c0a83fffb 100644 --- a/src/core/hle/service/nvflinger/nvflinger.h +++ b/src/core/hle/service/nvflinger/nvflinger.h @@ -4,7 +4,6 @@ #pragma once -#include <array> #include <memory> #include <optional> #include <string> @@ -26,31 +25,17 @@ class WritableEvent; namespace Service::Nvidia { class Module; -} +} // namespace Service::Nvidia + +namespace Service::VI { +class Display; +class Layer; +} // namespace Service::VI namespace Service::NVFlinger { class BufferQueue; -struct Layer { - Layer(u64 id, std::shared_ptr<BufferQueue> queue); - ~Layer(); - - u64 id; - std::shared_ptr<BufferQueue> buffer_queue; -}; - -struct Display { - Display(u64 id, std::string name); - ~Display(); - - u64 id; - std::string name; - - std::vector<Layer> layers; - Kernel::EventPair vsync_event; -}; - class NVFlinger final { public: explicit NVFlinger(Core::Timing::CoreTiming& core_timing); @@ -80,7 +65,10 @@ public: Kernel::SharedPtr<Kernel::ReadableEvent> FindVsyncEvent(u64 display_id) 
const; /// Obtains a buffer queue identified by the ID. - std::shared_ptr<BufferQueue> FindBufferQueue(u32 id) const; + BufferQueue& FindBufferQueue(u32 id); + + /// Obtains a buffer queue identified by the ID. + const BufferQueue& FindBufferQueue(u32 id) const; /// Performs a composition request to the emulated nvidia GPU and triggers the vsync events when /// finished. @@ -88,27 +76,21 @@ public: private: /// Finds the display identified by the specified ID. - Display* FindDisplay(u64 display_id); + VI::Display* FindDisplay(u64 display_id); /// Finds the display identified by the specified ID. - const Display* FindDisplay(u64 display_id) const; + const VI::Display* FindDisplay(u64 display_id) const; /// Finds the layer identified by the specified ID in the desired display. - Layer* FindLayer(u64 display_id, u64 layer_id); + VI::Layer* FindLayer(u64 display_id, u64 layer_id); /// Finds the layer identified by the specified ID in the desired display. - const Layer* FindLayer(u64 display_id, u64 layer_id) const; + const VI::Layer* FindLayer(u64 display_id, u64 layer_id) const; std::shared_ptr<Nvidia::Module> nvdrv; - std::array<Display, 5> displays{{ - {0, "Default"}, - {1, "External"}, - {2, "Edid"}, - {3, "Internal"}, - {4, "Null"}, - }}; - std::vector<std::shared_ptr<BufferQueue>> buffer_queues; + std::vector<VI::Display> displays; + std::vector<BufferQueue> buffer_queues; /// Id to use for the next layer that is created, this counter is shared among all displays. u64 next_layer_id = 1; diff --git a/src/core/hle/service/service.cpp b/src/core/hle/service/service.cpp index 117f87a45..00806b0ed 100644 --- a/src/core/hle/service/service.cpp +++ b/src/core/hle/service/service.cpp @@ -11,7 +11,6 @@ #include "core/hle/ipc.h" #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/client_port.h" -#include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/server_port.h" @@ -76,7 +75,8 @@ namespace Service { * Creates a function string for logging, complete with the name (or header code, depending * on what's passed in) the port name, and all the cmd_buff arguments. 
*/ -[[maybe_unused]] static std::string MakeFunctionString(const char* name, const char* port_name, +[[maybe_unused]] static std::string MakeFunctionString(std::string_view name, + std::string_view port_name, const u32* cmd_buff) { // Number of params == bits 0-5 + bits 6-11 int num_params = (cmd_buff[0] & 0x3F) + ((cmd_buff[0] >> 6) & 0x3F); @@ -158,9 +158,7 @@ void ServiceFrameworkBase::InvokeRequest(Kernel::HLERequestContext& ctx) { return ReportUnimplementedFunction(ctx, info); } - LOG_TRACE( - Service, "{}", - MakeFunctionString(info->name, GetServiceName().c_str(), ctx.CommandBuffer()).c_str()); + LOG_TRACE(Service, "{}", MakeFunctionString(info->name, GetServiceName(), ctx.CommandBuffer())); handler_invoker(this, info->handler_callback, ctx); } @@ -169,7 +167,7 @@ ResultCode ServiceFrameworkBase::HandleSyncRequest(Kernel::HLERequestContext& co case IPC::CommandType::Close: { IPC::ResponseBuilder rb{context, 2}; rb.Push(RESULT_SUCCESS); - return ResultCode(ErrorModule::HIPC, ErrorDescription::RemoteProcessDead); + return IPC::ERR_REMOTE_PROCESS_DEAD; } case IPC::CommandType::ControlWithContext: case IPC::CommandType::Control: { diff --git a/src/core/hle/service/sm/controller.cpp b/src/core/hle/service/sm/controller.cpp index 74da4d5e6..e9ee73710 100644 --- a/src/core/hle/service/sm/controller.cpp +++ b/src/core/hle/service/sm/controller.cpp @@ -30,7 +30,7 @@ void Controller::DuplicateSession(Kernel::HLERequestContext& ctx) { IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles}; rb.Push(RESULT_SUCCESS); - Kernel::SharedPtr<Kernel::ClientSession> session{ctx.Session()->parent->client}; + Kernel::SharedPtr<Kernel::ClientSession> session{ctx.Session()->GetParent()->client}; rb.PushMoveObjects(session); LOG_DEBUG(Service, "session={}", session->GetObjectId()); diff --git a/src/core/hle/service/sm/sm.h b/src/core/hle/service/sm/sm.h index bef25433e..b9d6381b4 100644 --- a/src/core/hle/service/sm/sm.h +++ b/src/core/hle/service/sm/sm.h @@ -67,7 +67,7 @@ public: if (port == nullptr) { return nullptr; } - return std::static_pointer_cast<T>(port->hle_handler); + return std::static_pointer_cast<T>(port->GetHLEHandler()); } void InvokeControlRequest(Kernel::HLERequestContext& context); diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp new file mode 100644 index 000000000..01d80311b --- /dev/null +++ b/src/core/hle/service/vi/display/vi_display.cpp @@ -0,0 +1,71 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include <algorithm> +#include <utility> + +#include <fmt/format.h> + +#include "common/assert.h" +#include "core/core.h" +#include "core/hle/kernel/readable_event.h" +#include "core/hle/service/vi/display/vi_display.h" +#include "core/hle/service/vi/layer/vi_layer.h" + +namespace Service::VI { + +Display::Display(u64 id, std::string name) : id{id}, name{std::move(name)} { + auto& kernel = Core::System::GetInstance().Kernel(); + vsync_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Sticky, + fmt::format("Display VSync Event {}", id)); +} + +Display::~Display() = default; + +Layer& Display::GetLayer(std::size_t index) { + return layers.at(index); +} + +const Layer& Display::GetLayer(std::size_t index) const { + return layers.at(index); +} + +Kernel::SharedPtr<Kernel::ReadableEvent> Display::GetVSyncEvent() const { + return vsync_event.readable; +} + +void Display::SignalVSyncEvent() { + vsync_event.writable->Signal(); +} + +void Display::CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue) { + // TODO(Subv): Support more than 1 layer. + ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment"); + + layers.emplace_back(id, buffer_queue); +} + +Layer* Display::FindLayer(u64 id) { + const auto itr = std::find_if(layers.begin(), layers.end(), + [id](const VI::Layer& layer) { return layer.GetID() == id; }); + + if (itr == layers.end()) { + return nullptr; + } + + return &*itr; +} + +const Layer* Display::FindLayer(u64 id) const { + const auto itr = std::find_if(layers.begin(), layers.end(), + [id](const VI::Layer& layer) { return layer.GetID() == id; }); + + if (itr == layers.end()) { + return nullptr; + } + + return &*itr; +} + +} // namespace Service::VI diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h new file mode 100644 index 000000000..2acd46ff8 --- /dev/null +++ b/src/core/hle/service/vi/display/vi_display.h @@ -0,0 +1,98 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <string> +#include <vector> + +#include "common/common_types.h" +#include "core/hle/kernel/writable_event.h" + +namespace Service::NVFlinger { +class BufferQueue; +} + +namespace Service::VI { + +class Layer; + +/// Represents a single display type +class Display { +public: + /// Constructs a display with a given unique ID and name. + /// + /// @param id The unique ID for this display. + /// @param name The name for this display. + /// + Display(u64 id, std::string name); + ~Display(); + + Display(const Display&) = delete; + Display& operator=(const Display&) = delete; + + Display(Display&&) = default; + Display& operator=(Display&&) = default; + + /// Gets the unique ID assigned to this display. + u64 GetID() const { + return id; + } + + /// Gets the name of this display + const std::string& GetName() const { + return name; + } + + /// Whether or not this display has any layers added to it. + bool HasLayers() const { + return !layers.empty(); + } + + /// Gets a layer for this display based off an index. + Layer& GetLayer(std::size_t index); + + /// Gets a layer for this display based off an index. + const Layer& GetLayer(std::size_t index) const; + + /// Gets the readable vsync event. + Kernel::SharedPtr<Kernel::ReadableEvent> GetVSyncEvent() const; + + /// Signals the internal vsync event. + void SignalVSyncEvent(); + + /// Creates and adds a layer to this display with the given ID. 
+ /// + /// @param id The ID to assign to the created layer. + /// @param buffer_queue The buffer queue for the layer instance to use. + /// + void CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue); + + /// Attempts to find a layer with the given ID. + /// + /// @param id The layer ID. + /// + /// @returns If found, the Layer instance with the given ID. + /// If not found, then nullptr is returned. + /// + Layer* FindLayer(u64 id); + + /// Attempts to find a layer with the given ID. + /// + /// @param id The layer ID. + /// + /// @returns If found, the Layer instance with the given ID. + /// If not found, then nullptr is returned. + /// + const Layer* FindLayer(u64 id) const; + +private: + u64 id; + std::string name; + + std::vector<Layer> layers; + Kernel::EventPair vsync_event; +}; + +} // namespace Service::VI diff --git a/src/core/hle/service/vi/layer/vi_layer.cpp b/src/core/hle/service/vi/layer/vi_layer.cpp new file mode 100644 index 000000000..954225c26 --- /dev/null +++ b/src/core/hle/service/vi/layer/vi_layer.cpp @@ -0,0 +1,13 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/hle/service/vi/layer/vi_layer.h" + +namespace Service::VI { + +Layer::Layer(u64 id, NVFlinger::BufferQueue& queue) : id{id}, buffer_queue{queue} {} + +Layer::~Layer() = default; + +} // namespace Service::VI diff --git a/src/core/hle/service/vi/layer/vi_layer.h b/src/core/hle/service/vi/layer/vi_layer.h new file mode 100644 index 000000000..c6bfd01f6 --- /dev/null +++ b/src/core/hle/service/vi/layer/vi_layer.h @@ -0,0 +1,52 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_types.h" + +namespace Service::NVFlinger { +class BufferQueue; +} + +namespace Service::VI { + +/// Represents a single display layer. +class Layer { +public: + /// Constructs a layer with a given ID and buffer queue. + /// + /// @param id The ID to assign to this layer. + /// @param queue The buffer queue for this layer to use. + /// + Layer(u64 id, NVFlinger::BufferQueue& queue); + ~Layer(); + + Layer(const Layer&) = delete; + Layer& operator=(const Layer&) = delete; + + Layer(Layer&&) = default; + Layer& operator=(Layer&&) = delete; + + /// Gets the ID for this layer. + u64 GetID() const { + return id; + } + + /// Gets a reference to the buffer queue this layer is using. + NVFlinger::BufferQueue& GetBufferQueue() { + return buffer_queue; + } + + /// Gets a const reference to the buffer queue this layer is using. 
+ const NVFlinger::BufferQueue& GetBufferQueue() const { + return buffer_queue; + } + +private: + u64 id; + NVFlinger::BufferQueue& buffer_queue; +}; + +} // namespace Service::VI diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp index a317a2885..566cd6006 100644 --- a/src/core/hle/service/vi/vi.cpp +++ b/src/core/hle/service/vi/vi.cpp @@ -24,6 +24,7 @@ #include "core/hle/service/nvdrv/nvdrv.h" #include "core/hle/service/nvflinger/buffer_queue.h" #include "core/hle/service/nvflinger/nvflinger.h" +#include "core/hle/service/service.h" #include "core/hle/service/vi/vi.h" #include "core/hle/service/vi/vi_m.h" #include "core/hle/service/vi/vi_s.h" @@ -33,6 +34,7 @@ namespace Service::VI { constexpr ResultCode ERR_OPERATION_FAILED{ErrorModule::VI, 1}; +constexpr ResultCode ERR_PERMISSION_DENIED{ErrorModule::VI, 5}; constexpr ResultCode ERR_UNSUPPORTED{ErrorModule::VI, 6}; constexpr ResultCode ERR_NOT_FOUND{ErrorModule::VI, 7}; @@ -420,7 +422,7 @@ public: u32_le fence_is_valid; std::array<Fence, 2> fences; - MathUtil::Rectangle<int> GetCropRect() const { + Common::Rectangle<int> GetCropRect() const { return {crop_left, crop_top, crop_right, crop_bottom}; } }; @@ -525,7 +527,7 @@ private: LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id, static_cast<u32>(transaction), flags); - auto buffer_queue = nv_flinger->FindBufferQueue(id); + auto& buffer_queue = nv_flinger->FindBufferQueue(id); if (transaction == TransactionId::Connect) { IGBPConnectRequestParcel request{ctx.ReadBuffer()}; @@ -538,7 +540,7 @@ private: } else if (transaction == TransactionId::SetPreallocatedBuffer) { IGBPSetPreallocatedBufferRequestParcel request{ctx.ReadBuffer()}; - buffer_queue->SetPreallocatedBuffer(request.data.slot, request.buffer); + buffer_queue.SetPreallocatedBuffer(request.data.slot, request.buffer); IGBPSetPreallocatedBufferResponseParcel response{}; ctx.WriteBuffer(response.Serialize()); @@ -546,7 +548,7 @@ private: IGBPDequeueBufferRequestParcel request{ctx.ReadBuffer()}; const u32 width{request.data.width}; const u32 height{request.data.height}; - std::optional<u32> slot = buffer_queue->DequeueBuffer(width, height); + std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height); if (slot) { // Buffer is available @@ -559,8 +561,8 @@ private: [=](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx, Kernel::ThreadWakeupReason reason) { // Repeat TransactParcel DequeueBuffer when a buffer is available - auto buffer_queue = nv_flinger->FindBufferQueue(id); - std::optional<u32> slot = buffer_queue->DequeueBuffer(width, height); + auto& buffer_queue = nv_flinger->FindBufferQueue(id); + std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height); ASSERT_MSG(slot != std::nullopt, "Could not dequeue buffer."); IGBPDequeueBufferResponseParcel response{*slot}; @@ -568,28 +570,28 @@ private: IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); }, - buffer_queue->GetWritableBufferWaitEvent()); + buffer_queue.GetWritableBufferWaitEvent()); } } else if (transaction == TransactionId::RequestBuffer) { IGBPRequestBufferRequestParcel request{ctx.ReadBuffer()}; - auto& buffer = buffer_queue->RequestBuffer(request.slot); + auto& buffer = buffer_queue.RequestBuffer(request.slot); IGBPRequestBufferResponseParcel response{buffer}; ctx.WriteBuffer(response.Serialize()); } else if (transaction == TransactionId::QueueBuffer) { IGBPQueueBufferRequestParcel request{ctx.ReadBuffer()}; - buffer_queue->QueueBuffer(request.data.slot, 
request.data.transform, - request.data.GetCropRect()); + buffer_queue.QueueBuffer(request.data.slot, request.data.transform, + request.data.GetCropRect()); IGBPQueueBufferResponseParcel response{1280, 720}; ctx.WriteBuffer(response.Serialize()); } else if (transaction == TransactionId::Query) { IGBPQueryRequestParcel request{ctx.ReadBuffer()}; - u32 value = - buffer_queue->Query(static_cast<NVFlinger::BufferQueue::QueryType>(request.type)); + const u32 value = + buffer_queue.Query(static_cast<NVFlinger::BufferQueue::QueryType>(request.type)); IGBPQueryResponseParcel response{value}; ctx.WriteBuffer(response.Serialize()); @@ -629,12 +631,12 @@ private: LOG_WARNING(Service_VI, "(STUBBED) called id={}, unknown={:08X}", id, unknown); - const auto buffer_queue = nv_flinger->FindBufferQueue(id); + const auto& buffer_queue = nv_flinger->FindBufferQueue(id); // TODO(Subv): Find out what this actually is. IPC::ResponseBuilder rb{ctx, 2, 1}; rb.Push(RESULT_SUCCESS); - rb.PushCopyObjects(buffer_queue->GetBufferWaitEvent()); + rb.PushCopyObjects(buffer_queue.GetBufferWaitEvent()); } std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; @@ -752,6 +754,7 @@ public: {1102, nullptr, "GetDisplayResolution"}, {2010, &IManagerDisplayService::CreateManagedLayer, "CreateManagedLayer"}, {2011, nullptr, "DestroyManagedLayer"}, + {2012, nullptr, "CreateStrayLayer"}, {2050, nullptr, "CreateIndirectLayer"}, {2051, nullptr, "DestroyIndirectLayer"}, {2052, nullptr, "CreateIndirectProducerEndPoint"}, @@ -1202,26 +1205,40 @@ IApplicationDisplayService::IApplicationDisplayService( RegisterHandlers(functions); } -Module::Interface::Interface(std::shared_ptr<Module> module, const char* name, - std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) - : ServiceFramework(name), module(std::move(module)), nv_flinger(std::move(nv_flinger)) {} +static bool IsValidServiceAccess(Permission permission, Policy policy) { + if (permission == Permission::User) { + return policy == Policy::User; + } + + if (permission == Permission::System || permission == Permission::Manager) { + return policy == Policy::User || policy == Policy::Compositor; + } -Module::Interface::~Interface() = default; + return false; +} -void Module::Interface::GetDisplayService(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_VI, "(STUBBED) called"); +void detail::GetDisplayServiceImpl(Kernel::HLERequestContext& ctx, + std::shared_ptr<NVFlinger::NVFlinger> nv_flinger, + Permission permission) { + IPC::RequestParser rp{ctx}; + const auto policy = rp.PopEnum<Policy>(); + + if (!IsValidServiceAccess(permission, policy)) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ERR_PERMISSION_DENIED); + return; + } IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(RESULT_SUCCESS); - rb.PushIpcInterface<IApplicationDisplayService>(nv_flinger); + rb.PushIpcInterface<IApplicationDisplayService>(std::move(nv_flinger)); } void InstallInterfaces(SM::ServiceManager& service_manager, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) { - auto module = std::make_shared<Module>(); - std::make_shared<VI_M>(module, nv_flinger)->InstallAsService(service_manager); - std::make_shared<VI_S>(module, nv_flinger)->InstallAsService(service_manager); - std::make_shared<VI_U>(module, nv_flinger)->InstallAsService(service_manager); + std::make_shared<VI_M>(nv_flinger)->InstallAsService(service_manager); + std::make_shared<VI_S>(nv_flinger)->InstallAsService(service_manager); + std::make_shared<VI_U>(nv_flinger)->InstallAsService(service_manager); } } // namespace Service::VI diff --git 
a/src/core/hle/service/vi/vi.h b/src/core/hle/service/vi/vi.h index e3963502a..6b66f8b81 100644 --- a/src/core/hle/service/vi/vi.h +++ b/src/core/hle/service/vi/vi.h @@ -4,12 +4,21 @@ #pragma once -#include "core/hle/service/service.h" +#include <memory> +#include "common/common_types.h" + +namespace Kernel { +class HLERequestContext; +} namespace Service::NVFlinger { class NVFlinger; } +namespace Service::SM { +class ServiceManager; +} + namespace Service::VI { enum class DisplayResolution : u32 { @@ -19,22 +28,25 @@ enum class DisplayResolution : u32 { UndockedHeight = 720, }; -class Module final { -public: - class Interface : public ServiceFramework<Interface> { - public: - explicit Interface(std::shared_ptr<Module> module, const char* name, - std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); - ~Interface() override; - - void GetDisplayService(Kernel::HLERequestContext& ctx); +/// Permission level for a particular VI service instance +enum class Permission { + User, + System, + Manager, +}; - protected: - std::shared_ptr<Module> module; - std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; - }; +/// A policy type that may be requested via GetDisplayService and +/// GetDisplayServiceWithProxyNameExchange +enum class Policy { + User, + Compositor, }; +namespace detail { +void GetDisplayServiceImpl(Kernel::HLERequestContext& ctx, + std::shared_ptr<NVFlinger::NVFlinger> nv_flinger, Permission permission); +} // namespace detail + /// Registers all VI services with the specified service manager. void InstallInterfaces(SM::ServiceManager& service_manager, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); diff --git a/src/core/hle/service/vi/vi_m.cpp b/src/core/hle/service/vi/vi_m.cpp index 207c06b16..06070087f 100644 --- a/src/core/hle/service/vi/vi_m.cpp +++ b/src/core/hle/service/vi/vi_m.cpp @@ -2,12 +2,14 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
+#include "common/logging/log.h" +#include "core/hle/service/vi/vi.h" #include "core/hle/service/vi/vi_m.h" namespace Service::VI { -VI_M::VI_M(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) - : Module::Interface(std::move(module), "vi:m", std::move(nv_flinger)) { +VI_M::VI_M(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) + : ServiceFramework{"vi:m"}, nv_flinger{std::move(nv_flinger)} { static const FunctionInfo functions[] = { {2, &VI_M::GetDisplayService, "GetDisplayService"}, {3, nullptr, "GetDisplayServiceWithProxyNameExchange"}, @@ -17,4 +19,10 @@ VI_M::VI_M(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> VI_M::~VI_M() = default; +void VI_M::GetDisplayService(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_VI, "called"); + + detail::GetDisplayServiceImpl(ctx, nv_flinger, Permission::Manager); +} + } // namespace Service::VI diff --git a/src/core/hle/service/vi/vi_m.h b/src/core/hle/service/vi/vi_m.h index 487d58d50..290e06689 100644 --- a/src/core/hle/service/vi/vi_m.h +++ b/src/core/hle/service/vi/vi_m.h @@ -4,14 +4,27 @@ #pragma once -#include "core/hle/service/vi/vi.h" +#include "core/hle/service/service.h" + +namespace Kernel { +class HLERequestContext; +} + +namespace Service::NVFlinger { +class NVFlinger; +} namespace Service::VI { -class VI_M final : public Module::Interface { +class VI_M final : public ServiceFramework<VI_M> { public: - explicit VI_M(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); + explicit VI_M(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); ~VI_M() override; + +private: + void GetDisplayService(Kernel::HLERequestContext& ctx); + + std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; }; } // namespace Service::VI diff --git a/src/core/hle/service/vi/vi_s.cpp b/src/core/hle/service/vi/vi_s.cpp index 920e6a1f6..57c596cc4 100644 --- a/src/core/hle/service/vi/vi_s.cpp +++ b/src/core/hle/service/vi/vi_s.cpp @@ -2,12 +2,14 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
+#include "common/logging/log.h" +#include "core/hle/service/vi/vi.h" #include "core/hle/service/vi/vi_s.h" namespace Service::VI { -VI_S::VI_S(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) - : Module::Interface(std::move(module), "vi:s", std::move(nv_flinger)) { +VI_S::VI_S(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) + : ServiceFramework{"vi:s"}, nv_flinger{std::move(nv_flinger)} { static const FunctionInfo functions[] = { {1, &VI_S::GetDisplayService, "GetDisplayService"}, {3, nullptr, "GetDisplayServiceWithProxyNameExchange"}, @@ -17,4 +19,10 @@ VI_S::VI_S(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> VI_S::~VI_S() = default; +void VI_S::GetDisplayService(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_VI, "called"); + + detail::GetDisplayServiceImpl(ctx, nv_flinger, Permission::System); +} + } // namespace Service::VI diff --git a/src/core/hle/service/vi/vi_s.h b/src/core/hle/service/vi/vi_s.h index bbc31148f..47804dc0b 100644 --- a/src/core/hle/service/vi/vi_s.h +++ b/src/core/hle/service/vi/vi_s.h @@ -4,14 +4,27 @@ #pragma once -#include "core/hle/service/vi/vi.h" +#include "core/hle/service/service.h" + +namespace Kernel { +class HLERequestContext; +} + +namespace Service::NVFlinger { +class NVFlinger; +} namespace Service::VI { -class VI_S final : public Module::Interface { +class VI_S final : public ServiceFramework<VI_S> { public: - explicit VI_S(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); + explicit VI_S(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); ~VI_S() override; + +private: + void GetDisplayService(Kernel::HLERequestContext& ctx); + + std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; }; } // namespace Service::VI diff --git a/src/core/hle/service/vi/vi_u.cpp b/src/core/hle/service/vi/vi_u.cpp index d81e410d6..9d5ceb608 100644 --- a/src/core/hle/service/vi/vi_u.cpp +++ b/src/core/hle/service/vi/vi_u.cpp @@ -2,12 +2,14 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
+#include "common/logging/log.h" +#include "core/hle/service/vi/vi.h" #include "core/hle/service/vi/vi_u.h" namespace Service::VI { -VI_U::VI_U(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) - : Module::Interface(std::move(module), "vi:u", std::move(nv_flinger)) { +VI_U::VI_U(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) + : ServiceFramework{"vi:u"}, nv_flinger{std::move(nv_flinger)} { static const FunctionInfo functions[] = { {0, &VI_U::GetDisplayService, "GetDisplayService"}, }; @@ -16,4 +18,10 @@ VI_U::VI_U(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> VI_U::~VI_U() = default; +void VI_U::GetDisplayService(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_VI, "called"); + + detail::GetDisplayServiceImpl(ctx, nv_flinger, Permission::User); +} + } // namespace Service::VI diff --git a/src/core/hle/service/vi/vi_u.h b/src/core/hle/service/vi/vi_u.h index b92f28c92..19bdb73b0 100644 --- a/src/core/hle/service/vi/vi_u.h +++ b/src/core/hle/service/vi/vi_u.h @@ -4,14 +4,27 @@ #pragma once -#include "core/hle/service/vi/vi.h" +#include "core/hle/service/service.h" + +namespace Kernel { +class HLERequestContext; +} + +namespace Service::NVFlinger { +class NVFlinger; +} namespace Service::VI { -class VI_U final : public Module::Interface { +class VI_U final : public ServiceFramework<VI_U> { public: - explicit VI_U(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); + explicit VI_U(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); ~VI_U() override; + +private: + void GetDisplayService(Kernel::HLERequestContext& ctx); + + std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; }; } // namespace Service::VI diff --git a/src/core/memory.cpp b/src/core/memory.cpp index e9166dbd9..4fde53033 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -67,19 +67,27 @@ static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, Pa LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE, (base + size) * PAGE_SIZE); - RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE, - FlushMode::FlushAndInvalidate); + // During boot, current_page_table might not be set yet, in which case we need not flush + if (current_page_table) { + RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE, + FlushMode::FlushAndInvalidate); + } VAddr end = base + size; - while (base != end) { - ASSERT_MSG(base < page_table.pointers.size(), "out of range mapping at {:016X}", base); + ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", + base + page_table.pointers.size()); + + std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type); - page_table.attributes[base] = type; - page_table.pointers[base] = memory; + if (memory == nullptr) { + std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, memory); + } else { + while (base != end) { + page_table.pointers[base] = memory; - base += 1; - if (memory != nullptr) + base += 1; memory += PAGE_SIZE; + } } } @@ -166,9 +174,6 @@ T Read(const VAddr vaddr) { return value; } - // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state - std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); - PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; switch (type) { case PageType::Unmapped: @@ -199,9 +204,6 @@ void Write(const VAddr vaddr, const T data) { return; } - // The memory access might do 
an MMIO or cached access, so we have to lock the HLE kernel state - std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); - PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; switch (type) { case PageType::Unmapped: @@ -357,16 +359,16 @@ void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) { const VAddr overlap_end = std::min(end, region_end); const VAddr overlap_size = overlap_end - overlap_start; - auto& rasterizer = system_instance.Renderer().Rasterizer(); + auto& gpu = system_instance.GPU(); switch (mode) { case FlushMode::Flush: - rasterizer.FlushRegion(overlap_start, overlap_size); + gpu.FlushRegion(ToCacheAddr(GetPointer(overlap_start)), overlap_size); break; case FlushMode::Invalidate: - rasterizer.InvalidateRegion(overlap_start, overlap_size); + gpu.InvalidateRegion(ToCacheAddr(GetPointer(overlap_start)), overlap_size); break; case FlushMode::FlushAndInvalidate: - rasterizer.FlushAndInvalidateRegion(overlap_start, overlap_size); + gpu.FlushAndInvalidateRegion(ToCacheAddr(GetPointer(overlap_start)), overlap_size); break; } }; diff --git a/src/core/settings.cpp b/src/core/settings.cpp index 2e232e1e7..6dd3139cc 100644 --- a/src/core/settings.cpp +++ b/src/core/settings.cpp @@ -91,7 +91,10 @@ void LogSettings() { LogSetting("Renderer_UseResolutionFactor", Settings::values.resolution_factor); LogSetting("Renderer_UseFrameLimit", Settings::values.use_frame_limit); LogSetting("Renderer_FrameLimit", Settings::values.frame_limit); + LogSetting("Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache); LogSetting("Renderer_UseAccurateGpuEmulation", Settings::values.use_accurate_gpu_emulation); + LogSetting("Renderer_UseAsynchronousGpuEmulation", + Settings::values.use_asynchronous_gpu_emulation); LogSetting("Audio_OutputEngine", Settings::values.sink_id); LogSetting("Audio_EnableAudioStretching", Settings::values.enable_audio_stretching); LogSetting("Audio_OutputDevice", Settings::values.audio_device_id); diff --git a/src/core/settings.h b/src/core/settings.h index 7e76e0466..cdfb2f742 100644 --- a/src/core/settings.h +++ b/src/core/settings.h @@ -393,6 +393,7 @@ struct Values { u16 frame_limit; bool use_disk_shader_cache; bool use_accurate_gpu_emulation; + bool use_asynchronous_gpu_emulation; float bg_red; float bg_green; diff --git a/src/core/telemetry_session.cpp b/src/core/telemetry_session.cpp index 58dfcc4df..e1db06811 100644 --- a/src/core/telemetry_session.cpp +++ b/src/core/telemetry_session.cpp @@ -162,6 +162,8 @@ TelemetrySession::TelemetrySession() { Settings::values.use_disk_shader_cache); AddField(Telemetry::FieldType::UserConfig, "Renderer_UseAccurateGpuEmulation", Settings::values.use_accurate_gpu_emulation); + AddField(Telemetry::FieldType::UserConfig, "Renderer_UseAsynchronousGpuEmulation", + Settings::values.use_asynchronous_gpu_emulation); AddField(Telemetry::FieldType::UserConfig, "System_UseDockedMode", Settings::values.use_docked_mode); } diff --git a/src/input_common/CMakeLists.txt b/src/input_common/CMakeLists.txt index 1c7db28c0..5b4e032bd 100644 --- a/src/input_common/CMakeLists.txt +++ b/src/input_common/CMakeLists.txt @@ -7,15 +7,18 @@ add_library(input_common STATIC main.h motion_emu.cpp motion_emu.h - - $<$<BOOL:${SDL2_FOUND}>:sdl/sdl.cpp sdl/sdl.h> + sdl/sdl.cpp + sdl/sdl.h ) -create_target_directory_groups(input_common) - -target_link_libraries(input_common PUBLIC core PRIVATE common) - if(SDL2_FOUND) + target_sources(input_common PRIVATE + sdl/sdl_impl.cpp + sdl/sdl_impl.h + ) 
target_link_libraries(input_common PRIVATE SDL2) target_compile_definitions(input_common PRIVATE HAVE_SDL2) endif() + +create_target_directory_groups(input_common) +target_link_libraries(input_common PUBLIC core PRIVATE common) diff --git a/src/input_common/main.cpp b/src/input_common/main.cpp index 37f572853..8e66c1b15 100644 --- a/src/input_common/main.cpp +++ b/src/input_common/main.cpp @@ -17,10 +17,7 @@ namespace InputCommon { static std::shared_ptr<Keyboard> keyboard; static std::shared_ptr<MotionEmu> motion_emu; - -#ifdef HAVE_SDL2 -static std::thread poll_thread; -#endif +static std::unique_ptr<SDL::State> sdl; void Init() { keyboard = std::make_shared<Keyboard>(); @@ -30,15 +27,7 @@ void Init() { motion_emu = std::make_shared<MotionEmu>(); Input::RegisterFactory<Input::MotionDevice>("motion_emu", motion_emu); -#ifdef HAVE_SDL2 - SDL::Init(); -#endif -} - -void StartJoystickEventHandler() { -#ifdef HAVE_SDL2 - poll_thread = std::thread(SDL::PollLoop); -#endif + sdl = SDL::Init(); } void Shutdown() { @@ -47,11 +36,7 @@ void Shutdown() { Input::UnregisterFactory<Input::AnalogDevice>("analog_from_button"); Input::UnregisterFactory<Input::MotionDevice>("motion_emu"); motion_emu.reset(); - -#ifdef HAVE_SDL2 - SDL::Shutdown(); - poll_thread.join(); -#endif + sdl.reset(); } Keyboard* GetKeyboard() { @@ -88,7 +73,7 @@ namespace Polling { std::vector<std::unique_ptr<DevicePoller>> GetPollers(DeviceType type) { #ifdef HAVE_SDL2 - return SDL::Polling::GetPollers(type); + return sdl->GetPollers(type); #else return {}; #endif diff --git a/src/input_common/main.h b/src/input_common/main.h index 9eb13106e..77a0ce90b 100644 --- a/src/input_common/main.h +++ b/src/input_common/main.h @@ -20,8 +20,6 @@ void Init(); /// Deregisters all built-in input device factories and shuts them down. void Shutdown(); -void StartJoystickEventHandler(); - class Keyboard; /// Gets the keyboard button device factory. 
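
The input_common changes above replace the module-level SDL poll thread and the paired Init()/Shutdown()/StartJoystickEventHandler() calls with a single owning handle: SDL::Init() now returns a std::unique_ptr<SDL::State> whose lifetime drives startup and teardown. Below is a minimal sketch of that ownership pattern, assuming a simplified State class with a trivial poll loop; the names and members are illustrative stand-ins, not the actual yuzu implementation in sdl_impl.cpp.

```cpp
// A minimal sketch, not yuzu's sdl_impl.cpp: an Init() that returns an owning
// handle whose constructor starts the event thread and whose destructor joins it.
#include <atomic>
#include <chrono>
#include <memory>
#include <thread>

namespace SketchSDL {

class State {
public:
    State() : poll_thread{[this] { PollLoop(); }} {}

    ~State() {
        // Replaces the old explicit Shutdown() + poll_thread.join() pair.
        running = false;
        poll_thread.join();
    }

private:
    void PollLoop() {
        while (running) {
            // The real implementation pumps SDL joystick events here.
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
    }

    std::atomic<bool> running{true};
    std::thread poll_thread;
};

// Callers own the subsystem through the returned pointer; resetting it shuts
// everything down, which is what InputCommon::Shutdown() now relies on.
std::unique_ptr<State> Init() {
    return std::make_unique<State>();
}

} // namespace SketchSDL

int main() {
    auto sdl = SketchSDL::Init(); // was: SDL::Init(); plus a separate poll thread
    sdl.reset();                  // was: SDL::Shutdown(); poll_thread.join();
}
```

One consequence of this design is that construction order and destruction order are handled by the unique_ptr itself, so the HAVE_SDL2 guards and the standalone poll_thread global in main.cpp become unnecessary.
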
diff --git a/src/input_common/motion_emu.cpp b/src/input_common/motion_emu.cpp index 9570c060e..6d96d4019 100644 --- a/src/input_common/motion_emu.cpp +++ b/src/input_common/motion_emu.cpp @@ -32,12 +32,12 @@ public: } void BeginTilt(int x, int y) { - mouse_origin = Math::MakeVec(x, y); + mouse_origin = Common::MakeVec(x, y); is_tilting = true; } void Tilt(int x, int y) { - auto mouse_move = Math::MakeVec(x, y) - mouse_origin; + auto mouse_move = Common::MakeVec(x, y) - mouse_origin; if (is_tilting) { std::lock_guard<std::mutex> guard(tilt_mutex); if (mouse_move.x == 0 && mouse_move.y == 0) { @@ -45,7 +45,7 @@ public: } else { tilt_direction = mouse_move.Cast<float>(); tilt_angle = - std::clamp(tilt_direction.Normalize() * sensitivity, 0.0f, MathUtil::PI * 0.5f); + std::clamp(tilt_direction.Normalize() * sensitivity, 0.0f, Common::PI * 0.5f); } } } @@ -56,7 +56,7 @@ public: is_tilting = false; } - std::tuple<Math::Vec3<float>, Math::Vec3<float>> GetStatus() { + std::tuple<Common::Vec3<float>, Common::Vec3<float>> GetStatus() { std::lock_guard<std::mutex> guard(status_mutex); return status; } @@ -66,17 +66,17 @@ private: const std::chrono::steady_clock::duration update_duration; const float sensitivity; - Math::Vec2<int> mouse_origin; + Common::Vec2<int> mouse_origin; std::mutex tilt_mutex; - Math::Vec2<float> tilt_direction; + Common::Vec2<float> tilt_direction; float tilt_angle = 0; bool is_tilting = false; Common::Event shutdown_event; - std::tuple<Math::Vec3<float>, Math::Vec3<float>> status; + std::tuple<Common::Vec3<float>, Common::Vec3<float>> status; std::mutex status_mutex; // Note: always keep the thread declaration at the end so that other objects are initialized @@ -85,8 +85,8 @@ private: void MotionEmuThread() { auto update_time = std::chrono::steady_clock::now(); - Math::Quaternion<float> q = MakeQuaternion(Math::Vec3<float>(), 0); - Math::Quaternion<float> old_q; + Common::Quaternion<float> q = Common::MakeQuaternion(Common::Vec3<float>(), 0); + Common::Quaternion<float> old_q; while (!shutdown_event.WaitUntil(update_time)) { update_time += update_duration; @@ -96,18 +96,18 @@ private: std::lock_guard<std::mutex> guard(tilt_mutex); // Find the quaternion describing current 3DS tilting - q = MakeQuaternion(Math::MakeVec(-tilt_direction.y, 0.0f, tilt_direction.x), - tilt_angle); + q = Common::MakeQuaternion( + Common::MakeVec(-tilt_direction.y, 0.0f, tilt_direction.x), tilt_angle); } auto inv_q = q.Inverse(); // Set the gravity vector in world space - auto gravity = Math::MakeVec(0.0f, -1.0f, 0.0f); + auto gravity = Common::MakeVec(0.0f, -1.0f, 0.0f); // Find the angular rate vector in world space auto angular_rate = ((q - old_q) * inv_q).xyz * 2; - angular_rate *= 1000 / update_millisecond / MathUtil::PI * 180; + angular_rate *= 1000 / update_millisecond / Common::PI * 180; // Transform the two vectors from world space to 3DS space gravity = QuaternionRotate(inv_q, gravity); @@ -131,7 +131,7 @@ public: device = std::make_shared<MotionEmuDevice>(update_millisecond, sensitivity); } - std::tuple<Math::Vec3<float>, Math::Vec3<float>> GetStatus() const override { + std::tuple<Common::Vec3<float>, Common::Vec3<float>> GetStatus() const override { return device->GetStatus(); } diff --git a/src/input_common/sdl/sdl.cpp b/src/input_common/sdl/sdl.cpp index faf3c1fa3..644db3448 100644 --- a/src/input_common/sdl/sdl.cpp +++ b/src/input_common/sdl/sdl.cpp @@ -1,631 +1,19 @@ -// Copyright 2017 Citra Emulator Project +// Copyright 2018 Citra Emulator Project // Licensed under GPLv2 or any 
later version // Refer to the license.txt file included. -#include <algorithm> -#include <atomic> -#include <cmath> -#include <functional> -#include <iterator> -#include <mutex> -#include <string> -#include <thread> -#include <tuple> -#include <unordered_map> -#include <utility> -#include <vector> -#include <SDL.h> -#include "common/assert.h" -#include "common/logging/log.h" -#include "common/math_util.h" -#include "common/param_package.h" -#include "common/threadsafe_queue.h" -#include "input_common/main.h" #include "input_common/sdl/sdl.h" +#ifdef HAVE_SDL2 +#include "input_common/sdl/sdl_impl.h" +#endif -namespace InputCommon { +namespace InputCommon::SDL { -namespace SDL { - -class SDLJoystick; -class SDLButtonFactory; -class SDLAnalogFactory; - -/// Map of GUID of a list of corresponding virtual Joysticks -static std::unordered_map<std::string, std::vector<std::shared_ptr<SDLJoystick>>> joystick_map; -static std::mutex joystick_map_mutex; - -static std::shared_ptr<SDLButtonFactory> button_factory; -static std::shared_ptr<SDLAnalogFactory> analog_factory; - -/// Used by the Pollers during config -static std::atomic<bool> polling; -static Common::SPSCQueue<SDL_Event> event_queue; - -static std::atomic<bool> initialized = false; - -static std::string GetGUID(SDL_Joystick* joystick) { - SDL_JoystickGUID guid = SDL_JoystickGetGUID(joystick); - char guid_str[33]; - SDL_JoystickGetGUIDString(guid, guid_str, sizeof(guid_str)); - return guid_str; -} - -class SDLJoystick { -public: - SDLJoystick(std::string guid_, int port_, SDL_Joystick* joystick, - decltype(&SDL_JoystickClose) deleter = &SDL_JoystickClose) - : guid{std::move(guid_)}, port{port_}, sdl_joystick{joystick, deleter} {} - - void SetButton(int button, bool value) { - std::lock_guard<std::mutex> lock(mutex); - state.buttons[button] = value; - } - - bool GetButton(int button) const { - std::lock_guard<std::mutex> lock(mutex); - return state.buttons.at(button); - } - - void SetAxis(int axis, Sint16 value) { - std::lock_guard<std::mutex> lock(mutex); - state.axes[axis] = value; - } - - float GetAxis(int axis) const { - std::lock_guard<std::mutex> lock(mutex); - return state.axes.at(axis) / 32767.0f; - } - - std::tuple<float, float> GetAnalog(int axis_x, int axis_y) const { - float x = GetAxis(axis_x); - float y = GetAxis(axis_y); - y = -y; // 3DS uses an y-axis inverse from SDL - - // Make sure the coordinates are in the unit circle, - // otherwise normalize it. 
- float r = x * x + y * y; - if (r > 1.0f) { - r = std::sqrt(r); - x /= r; - y /= r; - } - - return std::make_tuple(x, y); - } - - void SetHat(int hat, Uint8 direction) { - std::lock_guard<std::mutex> lock(mutex); - state.hats[hat] = direction; - } - - bool GetHatDirection(int hat, Uint8 direction) const { - std::lock_guard<std::mutex> lock(mutex); - return (state.hats.at(hat) & direction) != 0; - } - /** - * The guid of the joystick - */ - const std::string& GetGUID() const { - return guid; - } - - /** - * The number of joystick from the same type that were connected before this joystick - */ - int GetPort() const { - return port; - } - - SDL_Joystick* GetSDLJoystick() const { - return sdl_joystick.get(); - } - - void SetSDLJoystick(SDL_Joystick* joystick, - decltype(&SDL_JoystickClose) deleter = &SDL_JoystickClose) { - sdl_joystick = - std::unique_ptr<SDL_Joystick, decltype(&SDL_JoystickClose)>(joystick, deleter); - } - -private: - struct State { - std::unordered_map<int, bool> buttons; - std::unordered_map<int, Sint16> axes; - std::unordered_map<int, Uint8> hats; - } state; - std::string guid; - int port; - std::unique_ptr<SDL_Joystick, decltype(&SDL_JoystickClose)> sdl_joystick; - mutable std::mutex mutex; -}; - -/** - * Get the nth joystick with the corresponding GUID - */ -static std::shared_ptr<SDLJoystick> GetSDLJoystickByGUID(const std::string& guid, int port) { - std::lock_guard<std::mutex> lock(joystick_map_mutex); - const auto it = joystick_map.find(guid); - if (it != joystick_map.end()) { - while (it->second.size() <= port) { - auto joystick = std::make_shared<SDLJoystick>(guid, it->second.size(), nullptr, - [](SDL_Joystick*) {}); - it->second.emplace_back(std::move(joystick)); - } - return it->second[port]; - } - auto joystick = std::make_shared<SDLJoystick>(guid, 0, nullptr, [](SDL_Joystick*) {}); - return joystick_map[guid].emplace_back(std::move(joystick)); -} - -/** - * Check how many identical joysticks (by guid) were connected before the one with sdl_id and so tie - * it to a SDLJoystick with the same guid and that port - */ -static std::shared_ptr<SDLJoystick> GetSDLJoystickBySDLID(SDL_JoystickID sdl_id) { - std::lock_guard<std::mutex> lock(joystick_map_mutex); - auto sdl_joystick = SDL_JoystickFromInstanceID(sdl_id); - const std::string guid = GetGUID(sdl_joystick); - auto map_it = joystick_map.find(guid); - if (map_it != joystick_map.end()) { - auto vec_it = std::find_if(map_it->second.begin(), map_it->second.end(), - [&sdl_joystick](const std::shared_ptr<SDLJoystick>& joystick) { - return sdl_joystick == joystick->GetSDLJoystick(); - }); - if (vec_it != map_it->second.end()) { - // This is the common case: There is already an existing SDL_Joystick maped to a - // SDLJoystick. return the SDLJoystick - return *vec_it; - } - // Search for a SDLJoystick without a mapped SDL_Joystick... - auto nullptr_it = std::find_if(map_it->second.begin(), map_it->second.end(), - [](const std::shared_ptr<SDLJoystick>& joystick) { - return !joystick->GetSDLJoystick(); - }); - if (nullptr_it != map_it->second.end()) { - // ... 
and map it - (*nullptr_it)->SetSDLJoystick(sdl_joystick); - return *nullptr_it; - } - // There is no SDLJoystick without a mapped SDL_Joystick - // Create a new SDLJoystick - auto joystick = std::make_shared<SDLJoystick>(guid, map_it->second.size(), sdl_joystick); - return map_it->second.emplace_back(std::move(joystick)); - } - auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick); - return joystick_map[guid].emplace_back(std::move(joystick)); -} - -void InitJoystick(int joystick_index) { - std::lock_guard<std::mutex> lock(joystick_map_mutex); - SDL_Joystick* sdl_joystick = SDL_JoystickOpen(joystick_index); - if (!sdl_joystick) { - LOG_ERROR(Input, "failed to open joystick {}", joystick_index); - return; - } - std::string guid = GetGUID(sdl_joystick); - if (joystick_map.find(guid) == joystick_map.end()) { - auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick); - joystick_map[guid].emplace_back(std::move(joystick)); - return; - } - auto& joystick_guid_list = joystick_map[guid]; - const auto it = std::find_if( - joystick_guid_list.begin(), joystick_guid_list.end(), - [](const std::shared_ptr<SDLJoystick>& joystick) { return !joystick->GetSDLJoystick(); }); - if (it != joystick_guid_list.end()) { - (*it)->SetSDLJoystick(sdl_joystick); - return; - } - auto joystick = std::make_shared<SDLJoystick>(guid, joystick_guid_list.size(), sdl_joystick); - joystick_guid_list.emplace_back(std::move(joystick)); -} - -void CloseJoystick(SDL_Joystick* sdl_joystick) { - std::lock_guard<std::mutex> lock(joystick_map_mutex); - std::string guid = GetGUID(sdl_joystick); - // This call to guid is save since the joystick is guranteed to be in that map - auto& joystick_guid_list = joystick_map[guid]; - const auto joystick_it = - std::find_if(joystick_guid_list.begin(), joystick_guid_list.end(), - [&sdl_joystick](const std::shared_ptr<SDLJoystick>& joystick) { - return joystick->GetSDLJoystick() == sdl_joystick; - }); - (*joystick_it)->SetSDLJoystick(nullptr, [](SDL_Joystick*) {}); -} - -void HandleGameControllerEvent(const SDL_Event& event) { - switch (event.type) { - case SDL_JOYBUTTONUP: { - auto joystick = GetSDLJoystickBySDLID(event.jbutton.which); - if (joystick) { - joystick->SetButton(event.jbutton.button, false); - } - break; - } - case SDL_JOYBUTTONDOWN: { - auto joystick = GetSDLJoystickBySDLID(event.jbutton.which); - if (joystick) { - joystick->SetButton(event.jbutton.button, true); - } - break; - } - case SDL_JOYHATMOTION: { - auto joystick = GetSDLJoystickBySDLID(event.jhat.which); - if (joystick) { - joystick->SetHat(event.jhat.hat, event.jhat.value); - } - break; - } - case SDL_JOYAXISMOTION: { - auto joystick = GetSDLJoystickBySDLID(event.jaxis.which); - if (joystick) { - joystick->SetAxis(event.jaxis.axis, event.jaxis.value); - } - break; - } - case SDL_JOYDEVICEREMOVED: - LOG_DEBUG(Input, "Controller removed with Instance_ID {}", event.jdevice.which); - CloseJoystick(SDL_JoystickFromInstanceID(event.jdevice.which)); - break; - case SDL_JOYDEVICEADDED: - LOG_DEBUG(Input, "Controller connected with device index {}", event.jdevice.which); - InitJoystick(event.jdevice.which); - break; - } -} - -void CloseSDLJoysticks() { - std::lock_guard<std::mutex> lock(joystick_map_mutex); - joystick_map.clear(); -} - -void PollLoop() { - if (SDL_Init(SDL_INIT_JOYSTICK) < 0) { - LOG_CRITICAL(Input, "SDL_Init(SDL_INIT_JOYSTICK) failed with: {}", SDL_GetError()); - return; - } - - SDL_Event event; - while (initialized) { - // Wait for 10 ms or until an event happens - if 
(SDL_WaitEventTimeout(&event, 10)) { - // Don't handle the event if we are configuring - if (polling) { - event_queue.Push(event); - } else { - HandleGameControllerEvent(event); - } - } - } - CloseSDLJoysticks(); - SDL_QuitSubSystem(SDL_INIT_JOYSTICK); -} - -class SDLButton final : public Input::ButtonDevice { -public: - explicit SDLButton(std::shared_ptr<SDLJoystick> joystick_, int button_) - : joystick(std::move(joystick_)), button(button_) {} - - bool GetStatus() const override { - return joystick->GetButton(button); - } - -private: - std::shared_ptr<SDLJoystick> joystick; - int button; -}; - -class SDLDirectionButton final : public Input::ButtonDevice { -public: - explicit SDLDirectionButton(std::shared_ptr<SDLJoystick> joystick_, int hat_, Uint8 direction_) - : joystick(std::move(joystick_)), hat(hat_), direction(direction_) {} - - bool GetStatus() const override { - return joystick->GetHatDirection(hat, direction); - } - -private: - std::shared_ptr<SDLJoystick> joystick; - int hat; - Uint8 direction; -}; - -class SDLAxisButton final : public Input::ButtonDevice { -public: - explicit SDLAxisButton(std::shared_ptr<SDLJoystick> joystick_, int axis_, float threshold_, - bool trigger_if_greater_) - : joystick(std::move(joystick_)), axis(axis_), threshold(threshold_), - trigger_if_greater(trigger_if_greater_) {} - - bool GetStatus() const override { - float axis_value = joystick->GetAxis(axis); - if (trigger_if_greater) - return axis_value > threshold; - return axis_value < threshold; - } - -private: - std::shared_ptr<SDLJoystick> joystick; - int axis; - float threshold; - bool trigger_if_greater; -}; - -class SDLAnalog final : public Input::AnalogDevice { -public: - SDLAnalog(std::shared_ptr<SDLJoystick> joystick_, int axis_x_, int axis_y_) - : joystick(std::move(joystick_)), axis_x(axis_x_), axis_y(axis_y_) {} - - std::tuple<float, float> GetStatus() const override { - return joystick->GetAnalog(axis_x, axis_y); - } - -private: - std::shared_ptr<SDLJoystick> joystick; - int axis_x; - int axis_y; -}; - -/// A button device factory that creates button devices from SDL joystick -class SDLButtonFactory final : public Input::Factory<Input::ButtonDevice> { -public: - /** - * Creates a button device from a joystick button - * @param params contains parameters for creating the device: - * - "guid": the guid of the joystick to bind - * - "port": the nth joystick of the same type to bind - * - "button"(optional): the index of the button to bind - * - "hat"(optional): the index of the hat to bind as direction buttons - * - "axis"(optional): the index of the axis to bind - * - "direction"(only used for hat): the direction name of the hat to bind. 
Can be "up", - * "down", "left" or "right" - * - "threshold"(only used for axis): a float value in (-1.0, 1.0) which the button is - * triggered if the axis value crosses - * - "direction"(only used for axis): "+" means the button is triggered when the axis - * value is greater than the threshold; "-" means the button is triggered when the axis - * value is smaller than the threshold - */ - std::unique_ptr<Input::ButtonDevice> Create(const Common::ParamPackage& params) override { - const std::string guid = params.Get("guid", "0"); - const int port = params.Get("port", 0); - - auto joystick = GetSDLJoystickByGUID(guid, port); - - if (params.Has("hat")) { - const int hat = params.Get("hat", 0); - const std::string direction_name = params.Get("direction", ""); - Uint8 direction; - if (direction_name == "up") { - direction = SDL_HAT_UP; - } else if (direction_name == "down") { - direction = SDL_HAT_DOWN; - } else if (direction_name == "left") { - direction = SDL_HAT_LEFT; - } else if (direction_name == "right") { - direction = SDL_HAT_RIGHT; - } else { - direction = 0; - } - // This is necessary so accessing GetHat with hat won't crash - joystick->SetHat(hat, SDL_HAT_CENTERED); - return std::make_unique<SDLDirectionButton>(joystick, hat, direction); - } - - if (params.Has("axis")) { - const int axis = params.Get("axis", 0); - const float threshold = params.Get("threshold", 0.5f); - const std::string direction_name = params.Get("direction", ""); - bool trigger_if_greater; - if (direction_name == "+") { - trigger_if_greater = true; - } else if (direction_name == "-") { - trigger_if_greater = false; - } else { - trigger_if_greater = true; - LOG_ERROR(Input, "Unknown direction '{}'", direction_name); - } - // This is necessary so accessing GetAxis with axis won't crash - joystick->SetAxis(axis, 0); - return std::make_unique<SDLAxisButton>(joystick, axis, threshold, trigger_if_greater); - } - - const int button = params.Get("button", 0); - // This is necessary so accessing GetButton with button won't crash - joystick->SetButton(button, false); - return std::make_unique<SDLButton>(joystick, button); - } -}; - -/// An analog device factory that creates analog devices from SDL joystick -class SDLAnalogFactory final : public Input::Factory<Input::AnalogDevice> { -public: - /** - * Creates analog device from joystick axes - * @param params contains parameters for creating the device: - * - "guid": the guid of the joystick to bind - * - "port": the nth joystick of the same type - * - "axis_x": the index of the axis to be bind as x-axis - * - "axis_y": the index of the axis to be bind as y-axis - */ - std::unique_ptr<Input::AnalogDevice> Create(const Common::ParamPackage& params) override { - const std::string guid = params.Get("guid", "0"); - const int port = params.Get("port", 0); - const int axis_x = params.Get("axis_x", 0); - const int axis_y = params.Get("axis_y", 1); - - auto joystick = GetSDLJoystickByGUID(guid, port); - - // This is necessary so accessing GetAxis with axis_x and axis_y won't crash - joystick->SetAxis(axis_x, 0); - joystick->SetAxis(axis_y, 0); - return std::make_unique<SDLAnalog>(joystick, axis_x, axis_y); - } -}; - -void Init() { - using namespace Input; - RegisterFactory<ButtonDevice>("sdl", std::make_shared<SDLButtonFactory>()); - RegisterFactory<AnalogDevice>("sdl", std::make_shared<SDLAnalogFactory>()); - polling = false; - initialized = true; -} - -void Shutdown() { - if (initialized) { - using namespace Input; - UnregisterFactory<ButtonDevice>("sdl"); - 
UnregisterFactory<AnalogDevice>("sdl"); - initialized = false; - } -} - -Common::ParamPackage SDLEventToButtonParamPackage(const SDL_Event& event) { - Common::ParamPackage params({{"engine", "sdl"}}); - switch (event.type) { - case SDL_JOYAXISMOTION: { - auto joystick = GetSDLJoystickBySDLID(event.jaxis.which); - params.Set("port", joystick->GetPort()); - params.Set("guid", joystick->GetGUID()); - params.Set("axis", event.jaxis.axis); - if (event.jaxis.value > 0) { - params.Set("direction", "+"); - params.Set("threshold", "0.5"); - } else { - params.Set("direction", "-"); - params.Set("threshold", "-0.5"); - } - break; - } - case SDL_JOYBUTTONUP: { - auto joystick = GetSDLJoystickBySDLID(event.jbutton.which); - params.Set("port", joystick->GetPort()); - params.Set("guid", joystick->GetGUID()); - params.Set("button", event.jbutton.button); - break; - } - case SDL_JOYHATMOTION: { - auto joystick = GetSDLJoystickBySDLID(event.jhat.which); - params.Set("port", joystick->GetPort()); - params.Set("guid", joystick->GetGUID()); - params.Set("hat", event.jhat.hat); - switch (event.jhat.value) { - case SDL_HAT_UP: - params.Set("direction", "up"); - break; - case SDL_HAT_DOWN: - params.Set("direction", "down"); - break; - case SDL_HAT_LEFT: - params.Set("direction", "left"); - break; - case SDL_HAT_RIGHT: - params.Set("direction", "right"); - break; - default: - return {}; - } - break; - } - } - return params; -} - -namespace Polling { - -class SDLPoller : public InputCommon::Polling::DevicePoller { -public: - void Start() override { - event_queue.Clear(); - polling = true; - } - - void Stop() override { - polling = false; - } -}; - -class SDLButtonPoller final : public SDLPoller { -public: - Common::ParamPackage GetNextInput() override { - SDL_Event event; - while (event_queue.Pop(event)) { - switch (event.type) { - case SDL_JOYAXISMOTION: - if (std::abs(event.jaxis.value / 32767.0) < 0.5) { - break; - } - case SDL_JOYBUTTONUP: - case SDL_JOYHATMOTION: - return SDLEventToButtonParamPackage(event); - } - } - return {}; - } -}; - -class SDLAnalogPoller final : public SDLPoller { -public: - void Start() override { - SDLPoller::Start(); - - // Reset stored axes - analog_xaxis = -1; - analog_yaxis = -1; - analog_axes_joystick = -1; - } - - Common::ParamPackage GetNextInput() override { - SDL_Event event; - while (event_queue.Pop(event)) { - if (event.type != SDL_JOYAXISMOTION || std::abs(event.jaxis.value / 32767.0) < 0.5) { - continue; - } - // An analog device needs two axes, so we need to store the axis for later and wait for - // a second SDL event. The axes also must be from the same joystick. 
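The comment above states the pairing rule the analog poller applies (both here and in the reworked implementation later in this diff): remember the first axis that moves, then wait for a second, different axis on the same joystick before emitting a binding; the poller's own code follows directly. As a standalone illustration only, with invented names and no SDL types, the rule can be written as:

    #include <optional>
    #include <utility>

    // Sketch of the axis-pairing rule: remember the first axis that moves, then
    // emit a pair once a second, different axis moves on the same joystick.
    class AxisPairTracker {
    public:
        std::optional<std::pair<int, int>> Feed(int joystick_id, int axis) {
            if (first_axis == -1) {
                first_axis = axis;
                first_joystick = joystick_id;
            } else if (axis != first_axis && joystick_id == first_joystick) {
                const auto pair = std::make_pair(first_axis, axis);
                first_axis = -1;
                first_joystick = -1;
                return pair;
            }
            return std::nullopt;
        }

    private:
        int first_axis = -1;
        int first_joystick = -1;
    };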
- int axis = event.jaxis.axis; - if (analog_xaxis == -1) { - analog_xaxis = axis; - analog_axes_joystick = event.jaxis.which; - } else if (analog_yaxis == -1 && analog_xaxis != axis && - analog_axes_joystick == event.jaxis.which) { - analog_yaxis = axis; - } - } - Common::ParamPackage params; - if (analog_xaxis != -1 && analog_yaxis != -1) { - auto joystick = GetSDLJoystickBySDLID(event.jaxis.which); - params.Set("engine", "sdl"); - params.Set("port", joystick->GetPort()); - params.Set("guid", joystick->GetGUID()); - params.Set("axis_x", analog_xaxis); - params.Set("axis_y", analog_yaxis); - analog_xaxis = -1; - analog_yaxis = -1; - analog_axes_joystick = -1; - return params; - } - return params; - } - -private: - int analog_xaxis = -1; - int analog_yaxis = -1; - SDL_JoystickID analog_axes_joystick = -1; -}; - -std::vector<std::unique_ptr<InputCommon::Polling::DevicePoller>> GetPollers( - InputCommon::Polling::DeviceType type) { - std::vector<std::unique_ptr<InputCommon::Polling::DevicePoller>> pollers; - switch (type) { - case InputCommon::Polling::DeviceType::Analog: - pollers.push_back(std::make_unique<SDLAnalogPoller>()); - break; - case InputCommon::Polling::DeviceType::Button: - pollers.push_back(std::make_unique<SDLButtonPoller>()); - break; - } - return pollers +std::unique_ptr<State> Init() { +#ifdef HAVE_SDL2 + return std::make_unique<SDLState>(); +#else + return std::make_unique<NullState>(); +#endif } -} // namespace Polling -} // namespace SDL -} // namespace InputCommon +} // namespace InputCommon::SDL diff --git a/src/input_common/sdl/sdl.h b/src/input_common/sdl/sdl.h index 0206860d3..02a8d2e2c 100644 --- a/src/input_common/sdl/sdl.h +++ b/src/input_common/sdl/sdl.h @@ -1,4 +1,4 @@ -// Copyright 2017 Citra Emulator Project +// Copyright 2018 Citra Emulator Project // Licensed under GPLv2 or any later version // Refer to the license.txt file included. @@ -7,45 +7,36 @@ #include <memory> #include <vector> #include "core/frontend/input.h" +#include "input_common/main.h" union SDL_Event; + namespace Common { class ParamPackage; -} -namespace InputCommon { -namespace Polling { +} // namespace Common + +namespace InputCommon::Polling { class DevicePoller; enum class DeviceType; -} // namespace Polling -} // namespace InputCommon - -namespace InputCommon { -namespace SDL { - -/// Initializes and registers SDL device factories -void Init(); - -/// Unresisters SDL device factories and shut them down. -void Shutdown(); - -/// Needs to be called before SDL_QuitSubSystem. -void CloseSDLJoysticks(); +} // namespace InputCommon::Polling -/// Handle SDL_Events for joysticks from SDL_PollEvent -void HandleGameControllerEvent(const SDL_Event& event); +namespace InputCommon::SDL { -/// A Loop that calls HandleGameControllerEvent until Shutdown is called -void PollLoop(); +class State { +public: + /// Unregisters SDL device factories and shuts them down.
+ virtual ~State() = default; -/// Creates a ParamPackage from an SDL_Event that can directly be used to create a ButtonDevice -Common::ParamPackage SDLEventToButtonParamPackage(const SDL_Event& event); + virtual std::vector<std::unique_ptr<InputCommon::Polling::DevicePoller>> GetPollers( + InputCommon::Polling::DeviceType type) = 0; +}; -namespace Polling { +class NullState : public State { +public: + std::vector<std::unique_ptr<InputCommon::Polling::DevicePoller>> GetPollers( + InputCommon::Polling::DeviceType type) override {} +}; -/// Get all DevicePoller that use the SDL backend for a specific device type -std::vector<std::unique_ptr<InputCommon::Polling::DevicePoller>> GetPollers( - InputCommon::Polling::DeviceType type); +std::unique_ptr<State> Init(); -} // namespace Polling -} // namespace SDL -} // namespace InputCommon +} // namespace InputCommon::SDL diff --git a/src/input_common/sdl/sdl_impl.cpp b/src/input_common/sdl/sdl_impl.cpp new file mode 100644 index 000000000..934339d3b --- /dev/null +++ b/src/input_common/sdl/sdl_impl.cpp @@ -0,0 +1,669 @@ +// Copyright 2018 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <algorithm> +#include <atomic> +#include <cmath> +#include <functional> +#include <iterator> +#include <mutex> +#include <string> +#include <thread> +#include <tuple> +#include <unordered_map> +#include <utility> +#include <vector> +#include <SDL.h> +#include "common/assert.h" +#include "common/logging/log.h" +#include "common/math_util.h" +#include "common/param_package.h" +#include "common/threadsafe_queue.h" +#include "core/frontend/input.h" +#include "input_common/sdl/sdl_impl.h" + +namespace InputCommon { + +namespace SDL { + +static std::string GetGUID(SDL_Joystick* joystick) { + SDL_JoystickGUID guid = SDL_JoystickGetGUID(joystick); + char guid_str[33]; + SDL_JoystickGetGUIDString(guid, guid_str, sizeof(guid_str)); + return guid_str; +} + +/// Creates a ParamPackage from an SDL_Event that can directly be used to create a ButtonDevice +static Common::ParamPackage SDLEventToButtonParamPackage(SDLState& state, const SDL_Event& event); + +static int SDLEventWatcher(void* userdata, SDL_Event* event) { + SDLState* sdl_state = reinterpret_cast<SDLState*>(userdata); + // Don't handle the event if we are configuring + if (sdl_state->polling) { + sdl_state->event_queue.Push(*event); + } else { + sdl_state->HandleGameControllerEvent(*event); + } + return 0; +} + +class SDLJoystick { +public: + SDLJoystick(std::string guid_, int port_, SDL_Joystick* joystick, + decltype(&SDL_JoystickClose) deleter = &SDL_JoystickClose) + : guid{std::move(guid_)}, port{port_}, sdl_joystick{joystick, deleter} {} + + void SetButton(int button, bool value) { + std::lock_guard<std::mutex> lock(mutex); + state.buttons[button] = value; + } + + bool GetButton(int button) const { + std::lock_guard<std::mutex> lock(mutex); + return state.buttons.at(button); + } + + void SetAxis(int axis, Sint16 value) { + std::lock_guard<std::mutex> lock(mutex); + state.axes[axis] = value; + } + + float GetAxis(int axis) const { + std::lock_guard<std::mutex> lock(mutex); + return state.axes.at(axis) / 32767.0f; + } + + std::tuple<float, float> GetAnalog(int axis_x, int axis_y) const { + float x = GetAxis(axis_x); + float y = GetAxis(axis_y); + y = -y; // 3DS uses an y-axis inverse from SDL + + // Make sure the coordinates are in the unit circle, + // otherwise normalize it. 
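The normalization this comment describes is implemented in the lines that follow. Taken on its own, clamping a stick position back onto the unit circle looks like the sketch below (plain C++, no SDL types; the function name is illustrative):

    #include <cmath>
    #include <utility>

    // Keep (x, y) inside the unit circle; if the squared length exceeds 1,
    // divide both components by the actual length.
    std::pair<float, float> ClampToUnitCircle(float x, float y) {
        const float squared_length = x * x + y * y;
        if (squared_length > 1.0f) {
            const float length = std::sqrt(squared_length);
            x /= length;
            y /= length;
        }
        return {x, y};
    }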
+ float r = x * x + y * y; + if (r > 1.0f) { + r = std::sqrt(r); + x /= r; + y /= r; + } + + return std::make_tuple(x, y); + } + + void SetHat(int hat, Uint8 direction) { + std::lock_guard<std::mutex> lock(mutex); + state.hats[hat] = direction; + } + + bool GetHatDirection(int hat, Uint8 direction) const { + std::lock_guard<std::mutex> lock(mutex); + return (state.hats.at(hat) & direction) != 0; + } + /** + * The guid of the joystick + */ + const std::string& GetGUID() const { + return guid; + } + + /** + * The number of joystick from the same type that were connected before this joystick + */ + int GetPort() const { + return port; + } + + SDL_Joystick* GetSDLJoystick() const { + return sdl_joystick.get(); + } + + void SetSDLJoystick(SDL_Joystick* joystick, + decltype(&SDL_JoystickClose) deleter = &SDL_JoystickClose) { + sdl_joystick = + std::unique_ptr<SDL_Joystick, decltype(&SDL_JoystickClose)>(joystick, deleter); + } + +private: + struct State { + std::unordered_map<int, bool> buttons; + std::unordered_map<int, Sint16> axes; + std::unordered_map<int, Uint8> hats; + } state; + std::string guid; + int port; + std::unique_ptr<SDL_Joystick, decltype(&SDL_JoystickClose)> sdl_joystick; + mutable std::mutex mutex; +}; + +/** + * Get the nth joystick with the corresponding GUID + */ +std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickByGUID(const std::string& guid, int port) { + std::lock_guard<std::mutex> lock(joystick_map_mutex); + const auto it = joystick_map.find(guid); + if (it != joystick_map.end()) { + while (it->second.size() <= port) { + auto joystick = std::make_shared<SDLJoystick>(guid, it->second.size(), nullptr, + [](SDL_Joystick*) {}); + it->second.emplace_back(std::move(joystick)); + } + return it->second[port]; + } + auto joystick = std::make_shared<SDLJoystick>(guid, 0, nullptr, [](SDL_Joystick*) {}); + return joystick_map[guid].emplace_back(std::move(joystick)); +} + +/** + * Check how many identical joysticks (by guid) were connected before the one with sdl_id and so tie + * it to a SDLJoystick with the same guid and that port + */ +std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickBySDLID(SDL_JoystickID sdl_id) { + auto sdl_joystick = SDL_JoystickFromInstanceID(sdl_id); + const std::string guid = GetGUID(sdl_joystick); + std::lock_guard<std::mutex> lock(joystick_map_mutex); + auto map_it = joystick_map.find(guid); + if (map_it != joystick_map.end()) { + auto vec_it = std::find_if(map_it->second.begin(), map_it->second.end(), + [&sdl_joystick](const std::shared_ptr<SDLJoystick>& joystick) { + return sdl_joystick == joystick->GetSDLJoystick(); + }); + if (vec_it != map_it->second.end()) { + // This is the common case: There is already an existing SDL_Joystick maped to a + // SDLJoystick. return the SDLJoystick + return *vec_it; + } + // Search for a SDLJoystick without a mapped SDL_Joystick... + auto nullptr_it = std::find_if(map_it->second.begin(), map_it->second.end(), + [](const std::shared_ptr<SDLJoystick>& joystick) { + return !joystick->GetSDLJoystick(); + }); + if (nullptr_it != map_it->second.end()) { + // ... 
and map it + (*nullptr_it)->SetSDLJoystick(sdl_joystick); + return *nullptr_it; + } + // There is no SDLJoystick without a mapped SDL_Joystick + // Create a new SDLJoystick + auto joystick = std::make_shared<SDLJoystick>(guid, map_it->second.size(), sdl_joystick); + return map_it->second.emplace_back(std::move(joystick)); + } + auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick); + return joystick_map[guid].emplace_back(std::move(joystick)); +} + +void SDLState::InitJoystick(int joystick_index) { + SDL_Joystick* sdl_joystick = SDL_JoystickOpen(joystick_index); + if (!sdl_joystick) { + LOG_ERROR(Input, "failed to open joystick {}", joystick_index); + return; + } + std::string guid = GetGUID(sdl_joystick); + std::lock_guard<std::mutex> lock(joystick_map_mutex); + if (joystick_map.find(guid) == joystick_map.end()) { + auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick); + joystick_map[guid].emplace_back(std::move(joystick)); + return; + } + auto& joystick_guid_list = joystick_map[guid]; + const auto it = std::find_if( + joystick_guid_list.begin(), joystick_guid_list.end(), + [](const std::shared_ptr<SDLJoystick>& joystick) { return !joystick->GetSDLJoystick(); }); + if (it != joystick_guid_list.end()) { + (*it)->SetSDLJoystick(sdl_joystick); + return; + } + auto joystick = std::make_shared<SDLJoystick>(guid, joystick_guid_list.size(), sdl_joystick); + joystick_guid_list.emplace_back(std::move(joystick)); +} + +void SDLState::CloseJoystick(SDL_Joystick* sdl_joystick) { + std::string guid = GetGUID(sdl_joystick); + std::shared_ptr<SDLJoystick> joystick; + { + std::lock_guard<std::mutex> lock(joystick_map_mutex); + // This call to guid is safe since the joystick is guaranteed to be in the map + auto& joystick_guid_list = joystick_map[guid]; + const auto joystick_it = + std::find_if(joystick_guid_list.begin(), joystick_guid_list.end(), + [&sdl_joystick](const std::shared_ptr<SDLJoystick>& joystick) { + return joystick->GetSDLJoystick() == sdl_joystick; + }); + joystick = *joystick_it; + } + // Destruct SDL_Joystick outside the lock guard because SDL can internally call event calback + // which locks the mutex again + joystick->SetSDLJoystick(nullptr, [](SDL_Joystick*) {}); +} + +void SDLState::HandleGameControllerEvent(const SDL_Event& event) { + switch (event.type) { + case SDL_JOYBUTTONUP: { + if (auto joystick = GetSDLJoystickBySDLID(event.jbutton.which)) { + joystick->SetButton(event.jbutton.button, false); + } + break; + } + case SDL_JOYBUTTONDOWN: { + if (auto joystick = GetSDLJoystickBySDLID(event.jbutton.which)) { + joystick->SetButton(event.jbutton.button, true); + } + break; + } + case SDL_JOYHATMOTION: { + if (auto joystick = GetSDLJoystickBySDLID(event.jhat.which)) { + joystick->SetHat(event.jhat.hat, event.jhat.value); + } + break; + } + case SDL_JOYAXISMOTION: { + if (auto joystick = GetSDLJoystickBySDLID(event.jaxis.which)) { + joystick->SetAxis(event.jaxis.axis, event.jaxis.value); + } + break; + } + case SDL_JOYDEVICEREMOVED: + LOG_DEBUG(Input, "Controller removed with Instance_ID {}", event.jdevice.which); + CloseJoystick(SDL_JoystickFromInstanceID(event.jdevice.which)); + break; + case SDL_JOYDEVICEADDED: + LOG_DEBUG(Input, "Controller connected with device index {}", event.jdevice.which); + InitJoystick(event.jdevice.which); + break; + } +} + +void SDLState::CloseJoysticks() { + std::lock_guard<std::mutex> lock(joystick_map_mutex); + joystick_map.clear(); +} + +class SDLButton final : public Input::ButtonDevice { +public: + explicit 
SDLButton(std::shared_ptr<SDLJoystick> joystick_, int button_) + : joystick(std::move(joystick_)), button(button_) {} + + bool GetStatus() const override { + return joystick->GetButton(button); + } + +private: + std::shared_ptr<SDLJoystick> joystick; + int button; +}; + +class SDLDirectionButton final : public Input::ButtonDevice { +public: + explicit SDLDirectionButton(std::shared_ptr<SDLJoystick> joystick_, int hat_, Uint8 direction_) + : joystick(std::move(joystick_)), hat(hat_), direction(direction_) {} + + bool GetStatus() const override { + return joystick->GetHatDirection(hat, direction); + } + +private: + std::shared_ptr<SDLJoystick> joystick; + int hat; + Uint8 direction; +}; + +class SDLAxisButton final : public Input::ButtonDevice { +public: + explicit SDLAxisButton(std::shared_ptr<SDLJoystick> joystick_, int axis_, float threshold_, + bool trigger_if_greater_) + : joystick(std::move(joystick_)), axis(axis_), threshold(threshold_), + trigger_if_greater(trigger_if_greater_) {} + + bool GetStatus() const override { + float axis_value = joystick->GetAxis(axis); + if (trigger_if_greater) + return axis_value > threshold; + return axis_value < threshold; + } + +private: + std::shared_ptr<SDLJoystick> joystick; + int axis; + float threshold; + bool trigger_if_greater; +}; + +class SDLAnalog final : public Input::AnalogDevice { +public: + SDLAnalog(std::shared_ptr<SDLJoystick> joystick_, int axis_x_, int axis_y_, float deadzone_) + : joystick(std::move(joystick_)), axis_x(axis_x_), axis_y(axis_y_), deadzone(deadzone_) {} + + std::tuple<float, float> GetStatus() const override { + const auto [x, y] = joystick->GetAnalog(axis_x, axis_y); + const float r = std::sqrt((x * x) + (y * y)); + if (r > deadzone) { + return std::make_tuple(x / r * (r - deadzone) / (1 - deadzone), + y / r * (r - deadzone) / (1 - deadzone)); + } + return std::make_tuple<float, float>(0.0f, 0.0f); + } + +private: + std::shared_ptr<SDLJoystick> joystick; + const int axis_x; + const int axis_y; + const float deadzone; +}; + +/// A button device factory that creates button devices from SDL joystick +class SDLButtonFactory final : public Input::Factory<Input::ButtonDevice> { +public: + explicit SDLButtonFactory(SDLState& state_) : state(state_) {} + + /** + * Creates a button device from a joystick button + * @param params contains parameters for creating the device: + * - "guid": the guid of the joystick to bind + * - "port": the nth joystick of the same type to bind + * - "button"(optional): the index of the button to bind + * - "hat"(optional): the index of the hat to bind as direction buttons + * - "axis"(optional): the index of the axis to bind + * - "direction"(only used for hat): the direction name of the hat to bind. 
Can be "up", + * "down", "left" or "right" + * - "threshold"(only used for axis): a float value in (-1.0, 1.0) which the button is + * triggered if the axis value crosses + * - "direction"(only used for axis): "+" means the button is triggered when the axis + * value is greater than the threshold; "-" means the button is triggered when the axis + * value is smaller than the threshold + */ + std::unique_ptr<Input::ButtonDevice> Create(const Common::ParamPackage& params) override { + const std::string guid = params.Get("guid", "0"); + const int port = params.Get("port", 0); + + auto joystick = state.GetSDLJoystickByGUID(guid, port); + + if (params.Has("hat")) { + const int hat = params.Get("hat", 0); + const std::string direction_name = params.Get("direction", ""); + Uint8 direction; + if (direction_name == "up") { + direction = SDL_HAT_UP; + } else if (direction_name == "down") { + direction = SDL_HAT_DOWN; + } else if (direction_name == "left") { + direction = SDL_HAT_LEFT; + } else if (direction_name == "right") { + direction = SDL_HAT_RIGHT; + } else { + direction = 0; + } + // This is necessary so accessing GetHat with hat won't crash + joystick->SetHat(hat, SDL_HAT_CENTERED); + return std::make_unique<SDLDirectionButton>(joystick, hat, direction); + } + + if (params.Has("axis")) { + const int axis = params.Get("axis", 0); + const float threshold = params.Get("threshold", 0.5f); + const std::string direction_name = params.Get("direction", ""); + bool trigger_if_greater; + if (direction_name == "+") { + trigger_if_greater = true; + } else if (direction_name == "-") { + trigger_if_greater = false; + } else { + trigger_if_greater = true; + LOG_ERROR(Input, "Unknown direction {}", direction_name); + } + // This is necessary so accessing GetAxis with axis won't crash + joystick->SetAxis(axis, 0); + return std::make_unique<SDLAxisButton>(joystick, axis, threshold, trigger_if_greater); + } + + const int button = params.Get("button", 0); + // This is necessary so accessing GetButton with button won't crash + joystick->SetButton(button, false); + return std::make_unique<SDLButton>(joystick, button); + } + +private: + SDLState& state; +}; + +/// An analog device factory that creates analog devices from SDL joystick +class SDLAnalogFactory final : public Input::Factory<Input::AnalogDevice> { +public: + explicit SDLAnalogFactory(SDLState& state_) : state(state_) {} + /** + * Creates analog device from joystick axes + * @param params contains parameters for creating the device: + * - "guid": the guid of the joystick to bind + * - "port": the nth joystick of the same type + * - "axis_x": the index of the axis to be bind as x-axis + * - "axis_y": the index of the axis to be bind as y-axis + */ + std::unique_ptr<Input::AnalogDevice> Create(const Common::ParamPackage& params) override { + const std::string guid = params.Get("guid", "0"); + const int port = params.Get("port", 0); + const int axis_x = params.Get("axis_x", 0); + const int axis_y = params.Get("axis_y", 1); + float deadzone = std::clamp(params.Get("deadzone", 0.0f), 0.0f, .99f); + + auto joystick = state.GetSDLJoystickByGUID(guid, port); + + // This is necessary so accessing GetAxis with axis_x and axis_y won't crash + joystick->SetAxis(axis_x, 0); + joystick->SetAxis(axis_y, 0); + return std::make_unique<SDLAnalog>(joystick, axis_x, axis_y, deadzone); + } + +private: + SDLState& state; +}; + +SDLState::SDLState() { + using namespace Input; + RegisterFactory<ButtonDevice>("sdl", std::make_shared<SDLButtonFactory>(*this)); + 
RegisterFactory<AnalogDevice>("sdl", std::make_shared<SDLAnalogFactory>(*this)); + + // If the frontend is going to manage the event loop, then we dont start one here + start_thread = !SDL_WasInit(SDL_INIT_JOYSTICK); + if (start_thread && SDL_Init(SDL_INIT_JOYSTICK) < 0) { + LOG_CRITICAL(Input, "SDL_Init(SDL_INIT_JOYSTICK) failed with: {}", SDL_GetError()); + return; + } + if (SDL_SetHint(SDL_HINT_JOYSTICK_ALLOW_BACKGROUND_EVENTS, "1") == SDL_FALSE) { + LOG_ERROR(Input, "Failed to set Hint for background events", SDL_GetError()); + } + + SDL_AddEventWatch(&SDLEventWatcher, this); + + initialized = true; + if (start_thread) { + poll_thread = std::thread([&] { + using namespace std::chrono_literals; + SDL_Event event; + while (initialized) { + SDL_PumpEvents(); + std::this_thread::sleep_for(std::chrono::duration(10ms)); + } + }); + } + // Because the events for joystick connection happens before we have our event watcher added, we + // can just open all the joysticks right here + for (int i = 0; i < SDL_NumJoysticks(); ++i) { + InitJoystick(i); + } +} + +SDLState::~SDLState() { + using namespace Input; + UnregisterFactory<ButtonDevice>("sdl"); + UnregisterFactory<AnalogDevice>("sdl"); + + CloseJoysticks(); + SDL_DelEventWatch(&SDLEventWatcher, this); + + initialized = false; + if (start_thread) { + poll_thread.join(); + SDL_QuitSubSystem(SDL_INIT_JOYSTICK); + } +} + +Common::ParamPackage SDLEventToButtonParamPackage(SDLState& state, const SDL_Event& event) { + Common::ParamPackage params({{"engine", "sdl"}}); + + switch (event.type) { + case SDL_JOYAXISMOTION: { + auto joystick = state.GetSDLJoystickBySDLID(event.jaxis.which); + params.Set("port", joystick->GetPort()); + params.Set("guid", joystick->GetGUID()); + params.Set("axis", event.jaxis.axis); + if (event.jaxis.value > 0) { + params.Set("direction", "+"); + params.Set("threshold", "0.5"); + } else { + params.Set("direction", "-"); + params.Set("threshold", "-0.5"); + } + break; + } + case SDL_JOYBUTTONUP: { + auto joystick = state.GetSDLJoystickBySDLID(event.jbutton.which); + params.Set("port", joystick->GetPort()); + params.Set("guid", joystick->GetGUID()); + params.Set("button", event.jbutton.button); + break; + } + case SDL_JOYHATMOTION: { + auto joystick = state.GetSDLJoystickBySDLID(event.jhat.which); + params.Set("port", joystick->GetPort()); + params.Set("guid", joystick->GetGUID()); + params.Set("hat", event.jhat.hat); + switch (event.jhat.value) { + case SDL_HAT_UP: + params.Set("direction", "up"); + break; + case SDL_HAT_DOWN: + params.Set("direction", "down"); + break; + case SDL_HAT_LEFT: + params.Set("direction", "left"); + break; + case SDL_HAT_RIGHT: + params.Set("direction", "right"); + break; + default: + return {}; + } + break; + } + } + return params; +} + +namespace Polling { + +class SDLPoller : public InputCommon::Polling::DevicePoller { +public: + explicit SDLPoller(SDLState& state_) : state(state_) {} + + void Start() override { + state.event_queue.Clear(); + state.polling = true; + } + + void Stop() override { + state.polling = false; + } + +protected: + SDLState& state; +}; + +class SDLButtonPoller final : public SDLPoller { +public: + explicit SDLButtonPoller(SDLState& state_) : SDLPoller(state_) {} + + Common::ParamPackage GetNextInput() override { + SDL_Event event; + while (state.event_queue.Pop(event)) { + switch (event.type) { + case SDL_JOYAXISMOTION: + if (std::abs(event.jaxis.value / 32767.0) < 0.5) { + break; + } + case SDL_JOYBUTTONUP: + case SDL_JOYHATMOTION: + return 
SDLEventToButtonParamPackage(state, event); + } + } + return {}; + } +}; + +class SDLAnalogPoller final : public SDLPoller { +public: + explicit SDLAnalogPoller(SDLState& state_) : SDLPoller(state_) {} + + void Start() override { + SDLPoller::Start(); + + // Reset stored axes + analog_xaxis = -1; + analog_yaxis = -1; + analog_axes_joystick = -1; + } + + Common::ParamPackage GetNextInput() override { + SDL_Event event; + while (state.event_queue.Pop(event)) { + if (event.type != SDL_JOYAXISMOTION || std::abs(event.jaxis.value / 32767.0) < 0.5) { + continue; + } + // An analog device needs two axes, so we need to store the axis for later and wait for + // a second SDL event. The axes also must be from the same joystick. + int axis = event.jaxis.axis; + if (analog_xaxis == -1) { + analog_xaxis = axis; + analog_axes_joystick = event.jaxis.which; + } else if (analog_yaxis == -1 && analog_xaxis != axis && + analog_axes_joystick == event.jaxis.which) { + analog_yaxis = axis; + } + } + Common::ParamPackage params; + if (analog_xaxis != -1 && analog_yaxis != -1) { + auto joystick = state.GetSDLJoystickBySDLID(event.jaxis.which); + params.Set("engine", "sdl"); + params.Set("port", joystick->GetPort()); + params.Set("guid", joystick->GetGUID()); + params.Set("axis_x", analog_xaxis); + params.Set("axis_y", analog_yaxis); + analog_xaxis = -1; + analog_yaxis = -1; + analog_axes_joystick = -1; + return params; + } + return params; + } + +private: + int analog_xaxis = -1; + int analog_yaxis = -1; + SDL_JoystickID analog_axes_joystick = -1; +}; +} // namespace Polling + +std::vector<std::unique_ptr<InputCommon::Polling::DevicePoller>> SDLState::GetPollers( + InputCommon::Polling::DeviceType type) { + std::vector<std::unique_ptr<InputCommon::Polling::DevicePoller>> pollers; + switch (type) { + case InputCommon::Polling::DeviceType::Analog: + pollers.emplace_back(std::make_unique<Polling::SDLAnalogPoller>(*this)); + break; + case InputCommon::Polling::DeviceType::Button: + pollers.emplace_back(std::make_unique<Polling::SDLButtonPoller>(*this)); + break; + } + return pollers; +} + +} // namespace SDL +} // namespace InputCommon diff --git a/src/input_common/sdl/sdl_impl.h b/src/input_common/sdl/sdl_impl.h new file mode 100644 index 000000000..fec82fbe6 --- /dev/null +++ b/src/input_common/sdl/sdl_impl.h @@ -0,0 +1,64 @@ +// Copyright 2018 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <atomic> +#include <memory> +#include <thread> +#include "common/threadsafe_queue.h" +#include "input_common/sdl/sdl.h" + +union SDL_Event; +using SDL_Joystick = struct _SDL_Joystick; +using SDL_JoystickID = s32; + +namespace InputCommon::SDL { + +class SDLJoystick; +class SDLButtonFactory; +class SDLAnalogFactory; + +class SDLState : public State { +public: + /// Initializes and registers SDL device factories + SDLState(); + + /// Unregisters SDL device factories and shuts them down.
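The rest of the SDLState declaration continues below. Built on the State interface from sdl.h above, a configuration UI could obtain a button binding roughly like this; the function name, the retry loop and the timeout policy are invented for illustration and are not taken from the codebase:

    #include <chrono>
    #include <thread>
    #include "common/param_package.h"
    #include "input_common/sdl/sdl.h"

    // Hypothetical frontend flow: create the SDL backend, start its button
    // pollers, and wait a few seconds for the user to press something.
    Common::ParamPackage WaitForButtonBinding() {
        const auto sdl_state = InputCommon::SDL::Init();
        auto pollers = sdl_state->GetPollers(InputCommon::Polling::DeviceType::Button);

        for (auto& poller : pollers) {
            poller->Start();
        }
        for (int attempt = 0; attempt < 500; ++attempt) {
            for (auto& poller : pollers) {
                Common::ParamPackage params = poller->GetNextInput();
                if (params.Has("engine")) {
                    for (auto& p : pollers) {
                        p->Stop();
                    }
                    return params; // e.g. engine/guid/port plus a button, hat or axis entry
                }
            }
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
        for (auto& poller : pollers) {
            poller->Stop();
        }
        return {};
    }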
+ ~SDLState() override; + + /// Handle SDL_Events for joysticks from SDL_PollEvent + void HandleGameControllerEvent(const SDL_Event& event); + + std::shared_ptr<SDLJoystick> GetSDLJoystickBySDLID(SDL_JoystickID sdl_id); + std::shared_ptr<SDLJoystick> GetSDLJoystickByGUID(const std::string& guid, int port); + + /// Get all DevicePoller that use the SDL backend for a specific device type + std::vector<std::unique_ptr<InputCommon::Polling::DevicePoller>> GetPollers( + InputCommon::Polling::DeviceType type) override; + + /// Used by the Pollers during config + std::atomic<bool> polling = false; + Common::SPSCQueue<SDL_Event> event_queue; + +private: + void InitJoystick(int joystick_index); + void CloseJoystick(SDL_Joystick* sdl_joystick); + + /// Needs to be called before SDL_QuitSubSystem. + void CloseJoysticks(); + + /// Map of GUID of a list of corresponding virtual Joysticks + std::unordered_map<std::string, std::vector<std::shared_ptr<SDLJoystick>>> joystick_map; + std::mutex joystick_map_mutex; + + std::shared_ptr<SDLButtonFactory> button_factory; + std::shared_ptr<SDLAnalogFactory> analog_factory; + + bool start_thread = false; + std::atomic<bool> initialized = false; + + std::thread poll_thread; +}; +} // namespace InputCommon::SDL diff --git a/src/tests/core/arm/arm_test_common.cpp b/src/tests/core/arm/arm_test_common.cpp index 9b8a44fa1..6fe56833d 100644 --- a/src/tests/core/arm/arm_test_common.cpp +++ b/src/tests/core/arm/arm_test_common.cpp @@ -13,11 +13,11 @@ namespace ArmTests { TestEnvironment::TestEnvironment(bool mutable_memory_) - : mutable_memory(mutable_memory_), test_memory(std::make_shared<TestMemory>(this)) { - - auto process = Kernel::Process::Create(kernel, ""); + : mutable_memory(mutable_memory_), + test_memory(std::make_shared<TestMemory>(this)), kernel{Core::System::GetInstance()} { + auto process = Kernel::Process::Create(Core::System::GetInstance(), ""); kernel.MakeCurrentProcess(process.get()); - page_table = &Core::CurrentProcess()->VMManager().page_table; + page_table = &process->VMManager().page_table; std::fill(page_table->pointers.begin(), page_table->pointers.end(), nullptr); page_table->special_regions.clear(); diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index d35a738d5..14b76680f 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -17,6 +17,12 @@ add_library(video_core STATIC engines/shader_header.h gpu.cpp gpu.h + gpu_asynch.cpp + gpu_asynch.h + gpu_synch.cpp + gpu_synch.h + gpu_thread.cpp + gpu_thread.h macro_interpreter.cpp macro_interpreter.h memory_manager.cpp @@ -74,6 +80,7 @@ add_library(video_core STATIC shader/decode/hfma2.cpp shader/decode/conversion.cpp shader/decode/memory.cpp + shader/decode/texture.cpp shader/decode/float_set_predicate.cpp shader/decode/integer_set_predicate.cpp shader/decode/half_set_predicate.cpp @@ -94,6 +101,8 @@ add_library(video_core STATIC surface.h textures/astc.cpp textures/astc.h + textures/convert.cpp + textures/convert.h textures/decoders.cpp textures/decoders.h textures/texture.h @@ -104,8 +113,22 @@ add_library(video_core STATIC if (ENABLE_VULKAN) target_sources(video_core PRIVATE renderer_vulkan/declarations.h + renderer_vulkan/maxwell_to_vk.cpp + renderer_vulkan/maxwell_to_vk.h + renderer_vulkan/vk_buffer_cache.cpp + renderer_vulkan/vk_buffer_cache.h renderer_vulkan/vk_device.cpp - renderer_vulkan/vk_device.h) + renderer_vulkan/vk_device.h + renderer_vulkan/vk_memory_manager.cpp + renderer_vulkan/vk_memory_manager.h + 
renderer_vulkan/vk_resource_manager.cpp + renderer_vulkan/vk_resource_manager.h + renderer_vulkan/vk_sampler_cache.cpp + renderer_vulkan/vk_sampler_cache.h + renderer_vulkan/vk_scheduler.cpp + renderer_vulkan/vk_scheduler.h + renderer_vulkan/vk_stream_buffer.cpp + renderer_vulkan/vk_stream_buffer.h) target_include_directories(video_core PRIVATE ../../externals/Vulkan-Headers/include) target_compile_definitions(video_core PRIVATE HAS_VULKAN) diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp index eb9bf1878..bff1a37ff 100644 --- a/src/video_core/dma_pusher.cpp +++ b/src/video_core/dma_pusher.cpp @@ -33,18 +33,36 @@ void DmaPusher::DispatchCalls() { } bool DmaPusher::Step() { - if (dma_get != dma_put) { - // Push buffer non-empty, read a word - const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get); - ASSERT_MSG(address, "Invalid GPU address"); + if (!ib_enable || dma_pushbuffer.empty()) { + // pushbuffer empty and IB empty or nonexistent - nothing to do + return false; + } - const CommandHeader command_header{Memory::Read32(*address)}; + const CommandList& command_list{dma_pushbuffer.front()}; + const CommandListHeader command_list_header{command_list[dma_pushbuffer_subindex++]}; + GPUVAddr dma_get = command_list_header.addr; + GPUVAddr dma_put = dma_get + command_list_header.size * sizeof(u32); + bool non_main = command_list_header.is_non_main; - dma_get += sizeof(u32); + if (dma_pushbuffer_subindex >= command_list.size()) { + // We've gone through the current list, remove it from the queue + dma_pushbuffer.pop(); + dma_pushbuffer_subindex = 0; + } - if (!non_main) { - dma_mget = dma_get; - } + if (command_list_header.size == 0) { + return true; + } + + // Push buffer non-empty, read a word + const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get); + ASSERT_MSG(address, "Invalid GPU address"); + + command_headers.resize(command_list_header.size); + + Memory::ReadBlock(*address, command_headers.data(), command_list_header.size * sizeof(u32)); + + for (const CommandHeader& command_header : command_headers) { // now, see if we're in the middle of a command if (dma_state.length_pending) { @@ -91,22 +109,11 @@ bool DmaPusher::Step() { break; } } - } else if (ib_enable && !dma_pushbuffer.empty()) { - // Current pushbuffer empty, but we have more IB entries to read - const CommandList& command_list{dma_pushbuffer.front()}; - const CommandListHeader& command_list_header{command_list[dma_pushbuffer_subindex++]}; - dma_get = command_list_header.addr; - dma_put = dma_get + command_list_header.size * sizeof(u32); - non_main = command_list_header.is_non_main; - - if (dma_pushbuffer_subindex >= command_list.size()) { - // We've gone through the current list, remove it from the queue - dma_pushbuffer.pop(); - dma_pushbuffer_subindex = 0; - } - } else { - // Otherwise, pushbuffer empty and IB empty or nonexistent - nothing to do - return {}; + } + + if (!non_main) { + // TODO (degasus): This is dead code, as dma_mget is never read. 
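The hunk continues below. The reworked Step() above no longer reads the pushbuffer one word at a time: it takes the front command list, copies the whole list out with a single block read, and then decodes the headers from local memory. A minimal sketch of that batching idea, with a stand-in header layout and memory callback rather than the engine's real types, is:

    #include <cstdint>
    #include <vector>

    // Stand-in for the fields actually consumed here: a GPU address and a size
    // in 32-bit words.
    struct CommandListHeader {
        std::uint64_t addr;
        std::uint32_t size;
    };

    // Fetch an entire command list with one block read instead of per-word reads.
    template <typename ReadBlockFn>
    std::vector<std::uint32_t> FetchCommandList(const CommandListHeader& header,
                                                ReadBlockFn&& read_block) {
        std::vector<std::uint32_t> words(header.size);
        read_block(header.addr, words.data(), words.size() * sizeof(std::uint32_t));
        return words;
    }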
+ dma_mget = dma_put; } return true; diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h index 1097e5c49..27a36348c 100644 --- a/src/video_core/dma_pusher.h +++ b/src/video_core/dma_pusher.h @@ -75,6 +75,8 @@ private: GPU& gpu; + std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once + std::queue<CommandList> dma_pushbuffer; ///< Queue of command lists to be processed std::size_t dma_pushbuffer_subindex{}; ///< Index within a command list within the pushbuffer @@ -89,11 +91,8 @@ private: DmaState dma_state{}; bool dma_increment_once{}; - GPUVAddr dma_put{}; ///< pushbuffer current end address - GPUVAddr dma_get{}; ///< pushbuffer current read address GPUVAddr dma_mget{}; ///< main pushbuffer last read address bool ib_enable{true}; ///< IB mode enabled - bool non_main{}; ///< non-main pushbuffer active }; } // namespace Tegra diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp index ec1a57226..03b7ee5d8 100644 --- a/src/video_core/engines/fermi_2d.cpp +++ b/src/video_core/engines/fermi_2d.cpp @@ -2,12 +2,11 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -#include "core/core.h" -#include "core/memory.h" +#include "common/assert.h" +#include "common/logging/log.h" +#include "common/math_util.h" #include "video_core/engines/fermi_2d.h" -#include "video_core/engines/maxwell_3d.h" #include "video_core/rasterizer_interface.h" -#include "video_core/textures/decoders.h" namespace Tegra::Engines { @@ -44,10 +43,10 @@ void Fermi2D::HandleSurfaceCopy() { const u32 src_blit_y2{ static_cast<u32>((regs.blit_src_y + (regs.blit_dst_height * regs.blit_dv_dy)) >> 32)}; - const MathUtil::Rectangle<u32> src_rect{src_blit_x1, src_blit_y1, src_blit_x2, src_blit_y2}; - const MathUtil::Rectangle<u32> dst_rect{regs.blit_dst_x, regs.blit_dst_y, - regs.blit_dst_x + regs.blit_dst_width, - regs.blit_dst_y + regs.blit_dst_height}; + const Common::Rectangle<u32> src_rect{src_blit_x1, src_blit_y1, src_blit_x2, src_blit_y2}; + const Common::Rectangle<u32> dst_rect{regs.blit_dst_x, regs.blit_dst_y, + regs.blit_dst_x + regs.blit_dst_width, + regs.blit_dst_y + regs.blit_dst_height}; if (!rasterizer.AccelerateSurfaceCopy(regs.src, regs.dst, src_rect, dst_rect)) { UNIMPLEMENTED(); diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h index c69f74cc5..80523e320 100644 --- a/src/video_core/engines/fermi_2d.h +++ b/src/video_core/engines/fermi_2d.h @@ -5,7 +5,7 @@ #pragma once #include <array> -#include "common/assert.h" +#include <cstddef> #include "common/bit_field.h" #include "common/common_funcs.h" #include "common/common_types.h" diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp index 4ca856b6b..b1d950460 100644 --- a/src/video_core/engines/kepler_compute.cpp +++ b/src/video_core/engines/kepler_compute.cpp @@ -2,9 +2,8 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
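A couple of hunks above, the Fermi2D blit computes its source rectangle by shifting the blit registers right by 32, which suggests they are treated as 32.32 fixed-point values. Assuming that interpretation, the arithmetic can be illustrated in isolation like this (helper names are invented):

    #include <cstdint>

    // 32.32 fixed point: high 32 bits are the integer part, low 32 bits the fraction.
    using Fixed32_32 = std::uint64_t;

    constexpr Fixed32_32 ToFixed(std::uint32_t integer) {
        return static_cast<Fixed32_32>(integer) << 32;
    }

    constexpr std::uint32_t ToInteger(Fixed32_32 value) {
        return static_cast<std::uint32_t>(value >> 32);
    }

    // End coordinate of a blit: fixed-point start plus an integer extent scaled by
    // a fixed-point step, truncated back to an integer texel coordinate.
    constexpr std::uint32_t BlitEnd(Fixed32_32 start, std::uint32_t extent, Fixed32_32 step) {
        return ToInteger(start + extent * step);
    }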
+#include "common/assert.h" #include "common/logging/log.h" -#include "core/core.h" -#include "core/memory.h" #include "video_core/engines/kepler_compute.h" #include "video_core/memory_manager.h" diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h index df0a32e0f..6575afd0f 100644 --- a/src/video_core/engines/kepler_compute.h +++ b/src/video_core/engines/kepler_compute.h @@ -5,8 +5,7 @@ #pragma once #include <array> -#include "common/assert.h" -#include "common/bit_field.h" +#include <cstddef> #include "common/common_funcs.h" #include "common/common_types.h" #include "video_core/gpu.h" diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp index 5c1029ddf..daefa43a6 100644 --- a/src/video_core/engines/kepler_memory.cpp +++ b/src/video_core/engines/kepler_memory.cpp @@ -2,18 +2,20 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +#include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" #include "core/memory.h" #include "video_core/engines/kepler_memory.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/rasterizer_interface.h" +#include "video_core/renderer_base.h" namespace Tegra::Engines { -KeplerMemory::KeplerMemory(VideoCore::RasterizerInterface& rasterizer, +KeplerMemory::KeplerMemory(Core::System& system, VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager) - : memory_manager(memory_manager), rasterizer{rasterizer} {} + : system{system}, memory_manager(memory_manager), rasterizer{rasterizer} {} KeplerMemory::~KeplerMemory() = default; @@ -47,10 +49,11 @@ void KeplerMemory::ProcessData(u32 data) { // We have to invalidate the destination region to evict any outdated surfaces from the cache. // We do this before actually writing the new data because the destination address might contain // a dirty surface that will have to be written back to memory. - rasterizer.InvalidateRegion(*dest_address, sizeof(u32)); + system.Renderer().Rasterizer().InvalidateRegion(ToCacheAddr(Memory::GetPointer(*dest_address)), + sizeof(u32)); Memory::Write32(*dest_address, data); - Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite(); + system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite(); state.write_offset++; } diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h index fe9ebc5b9..9181e9d80 100644 --- a/src/video_core/engines/kepler_memory.h +++ b/src/video_core/engines/kepler_memory.h @@ -5,13 +5,17 @@ #pragma once #include <array> -#include "common/assert.h" +#include <cstddef> #include "common/bit_field.h" #include "common/common_funcs.h" #include "common/common_types.h" #include "video_core/gpu.h" #include "video_core/memory_manager.h" +namespace Core { +class System; +} + namespace VideoCore { class RasterizerInterface; } @@ -23,7 +27,8 @@ namespace Tegra::Engines { class KeplerMemory final { public: - KeplerMemory(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager); + KeplerMemory(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + MemoryManager& memory_manager); ~KeplerMemory(); /// Write the value to the register identified by method. 
@@ -76,6 +81,7 @@ public: } state{}; private: + Core::System& system; MemoryManager& memory_manager; VideoCore::RasterizerInterface& rasterizer; diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 86ede5faa..49979694e 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -19,8 +19,10 @@ namespace Tegra::Engines { /// First register id that is actually a Macro call. constexpr u32 MacroRegistersStart = 0xE00; -Maxwell3D::Maxwell3D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager) - : memory_manager(memory_manager), rasterizer{rasterizer}, macro_interpreter(*this) { +Maxwell3D::Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + MemoryManager& memory_manager) + : memory_manager(memory_manager), system{system}, rasterizer{rasterizer}, + macro_interpreter(*this) { InitializeRegisterDefaults(); } @@ -103,23 +105,25 @@ void Maxwell3D::CallMacroMethod(u32 method, std::vector<u32> parameters) { } void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { - auto debug_context = Core::System::GetInstance().GetGPUDebugContext(); + auto debug_context = system.GetGPUDebugContext(); + + const u32 method = method_call.method; // It is an error to write to a register other than the current macro's ARG register before it // has finished execution. if (executing_macro != 0) { - ASSERT(method_call.method == executing_macro + 1); + ASSERT(method == executing_macro + 1); } // Methods after 0xE00 are special, they're actually triggers for some microcode that was // uploaded to the GPU during initialization. - if (method_call.method >= MacroRegistersStart) { + if (method >= MacroRegistersStart) { // We're trying to execute a macro if (executing_macro == 0) { // A macro call must begin by writing the macro method's register, not its argument. 
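The assertions that follow enforce the rule just stated: writes at or above 0xE00 either select a macro (an even register) or feed arguments to the macro currently executing (the register right after it). A simplified standalone model of that protocol, without the engine's real dispatch or execution step, might look like:

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Illustrative tracker for the macro-call write protocol described above.
    class MacroCallTracker {
    public:
        static constexpr std::uint32_t MacroRegistersStart = 0xE00;

        void Write(std::uint32_t method, std::uint32_t argument) {
            if (method < MacroRegistersStart) {
                return; // ordinary register write, not part of a macro call
            }
            if (executing_macro == 0) {
                if (method % 2 != 0) {
                    throw std::logic_error("macro execution must start on the macro register, not ARGS");
                }
                executing_macro = method;
            } else if (method != executing_macro + 1) {
                throw std::logic_error("only the running macro's ARGS register may be written");
            }
            macro_params.push_back(argument);
        }

    private:
        std::uint32_t executing_macro = 0;
        std::vector<std::uint32_t> macro_params;
    };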
- ASSERT_MSG((method_call.method % 2) == 0, + ASSERT_MSG((method % 2) == 0, "Can't start macro execution by writing to the ARGS register"); - executing_macro = method_call.method; + executing_macro = method; } macro_params.push_back(method_call.argument); @@ -131,66 +135,62 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { return; } - ASSERT_MSG(method_call.method < Regs::NUM_REGS, + ASSERT_MSG(method < Regs::NUM_REGS, "Invalid Maxwell3D register, increase the size of the Regs structure"); if (debug_context) { debug_context->OnEvent(Tegra::DebugContext::Event::MaxwellCommandLoaded, nullptr); } - if (regs.reg_array[method_call.method] != method_call.argument) { - regs.reg_array[method_call.method] = method_call.argument; + if (regs.reg_array[method] != method_call.argument) { + regs.reg_array[method] = method_call.argument; // Color buffers constexpr u32 first_rt_reg = MAXWELL3D_REG_INDEX(rt); constexpr u32 registers_per_rt = sizeof(regs.rt[0]) / sizeof(u32); - if (method_call.method >= first_rt_reg && - method_call.method < first_rt_reg + registers_per_rt * Regs::NumRenderTargets) { - const std::size_t rt_index = (method_call.method - first_rt_reg) / registers_per_rt; - dirty_flags.color_buffer |= 1u << static_cast<u32>(rt_index); + if (method >= first_rt_reg && + method < first_rt_reg + registers_per_rt * Regs::NumRenderTargets) { + const std::size_t rt_index = (method - first_rt_reg) / registers_per_rt; + dirty_flags.color_buffer.set(rt_index); } // Zeta buffer constexpr u32 registers_in_zeta = sizeof(regs.zeta) / sizeof(u32); - if (method_call.method == MAXWELL3D_REG_INDEX(zeta_enable) || - method_call.method == MAXWELL3D_REG_INDEX(zeta_width) || - method_call.method == MAXWELL3D_REG_INDEX(zeta_height) || - (method_call.method >= MAXWELL3D_REG_INDEX(zeta) && - method_call.method < MAXWELL3D_REG_INDEX(zeta) + registers_in_zeta)) { + if (method == MAXWELL3D_REG_INDEX(zeta_enable) || + method == MAXWELL3D_REG_INDEX(zeta_width) || + method == MAXWELL3D_REG_INDEX(zeta_height) || + (method >= MAXWELL3D_REG_INDEX(zeta) && + method < MAXWELL3D_REG_INDEX(zeta) + registers_in_zeta)) { dirty_flags.zeta_buffer = true; } // Shader constexpr u32 shader_registers_count = sizeof(regs.shader_config[0]) * Regs::MaxShaderProgram / sizeof(u32); - if (method_call.method >= MAXWELL3D_REG_INDEX(shader_config[0]) && - method_call.method < MAXWELL3D_REG_INDEX(shader_config[0]) + shader_registers_count) { + if (method >= MAXWELL3D_REG_INDEX(shader_config[0]) && + method < MAXWELL3D_REG_INDEX(shader_config[0]) + shader_registers_count) { dirty_flags.shaders = true; } // Vertex format - if (method_call.method >= MAXWELL3D_REG_INDEX(vertex_attrib_format) && - method_call.method < - MAXWELL3D_REG_INDEX(vertex_attrib_format) + regs.vertex_attrib_format.size()) { + if (method >= MAXWELL3D_REG_INDEX(vertex_attrib_format) && + method < MAXWELL3D_REG_INDEX(vertex_attrib_format) + regs.vertex_attrib_format.size()) { dirty_flags.vertex_attrib_format = true; } // Vertex buffer - if (method_call.method >= MAXWELL3D_REG_INDEX(vertex_array) && - method_call.method < MAXWELL3D_REG_INDEX(vertex_array) + 4 * 32) { - dirty_flags.vertex_array |= - 1u << ((method_call.method - MAXWELL3D_REG_INDEX(vertex_array)) >> 2); - } else if (method_call.method >= MAXWELL3D_REG_INDEX(vertex_array_limit) && - method_call.method < MAXWELL3D_REG_INDEX(vertex_array_limit) + 2 * 32) { - dirty_flags.vertex_array |= - 1u << ((method_call.method - MAXWELL3D_REG_INDEX(vertex_array_limit)) >> 1); - } else if (method_call.method >= 
MAXWELL3D_REG_INDEX(instanced_arrays) && - method_call.method < MAXWELL3D_REG_INDEX(instanced_arrays) + 32) { - dirty_flags.vertex_array |= - 1u << (method_call.method - MAXWELL3D_REG_INDEX(instanced_arrays)); + if (method >= MAXWELL3D_REG_INDEX(vertex_array) && + method < MAXWELL3D_REG_INDEX(vertex_array) + 4 * 32) { + dirty_flags.vertex_array.set((method - MAXWELL3D_REG_INDEX(vertex_array)) >> 2); + } else if (method >= MAXWELL3D_REG_INDEX(vertex_array_limit) && + method < MAXWELL3D_REG_INDEX(vertex_array_limit) + 2 * 32) { + dirty_flags.vertex_array.set((method - MAXWELL3D_REG_INDEX(vertex_array_limit)) >> 1); + } else if (method >= MAXWELL3D_REG_INDEX(instanced_arrays) && + method < MAXWELL3D_REG_INDEX(instanced_arrays) + 32) { + dirty_flags.vertex_array.set(method - MAXWELL3D_REG_INDEX(instanced_arrays)); } } - switch (method_call.method) { + switch (method) { case MAXWELL3D_REG_INDEX(macros.data): { ProcessMacroUpload(method_call.argument); break; @@ -317,7 +317,7 @@ void Maxwell3D::ProcessQueryGet() { LongQueryResult query_result{}; query_result.value = result; // TODO(Subv): Generate a real GPU timestamp and write it here instead of CoreTiming - query_result.timestamp = Core::System::GetInstance().CoreTiming().GetTicks(); + query_result.timestamp = system.CoreTiming().GetTicks(); Memory::WriteBlock(*address, &query_result, sizeof(query_result)); } dirty_flags.OnMemoryWrite(); @@ -334,7 +334,7 @@ void Maxwell3D::DrawArrays() { regs.vertex_buffer.count); ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?"); - auto debug_context = Core::System::GetInstance().GetGPUDebugContext(); + auto debug_context = system.GetGPUDebugContext(); if (debug_context) { debug_context->OnEvent(Tegra::DebugContext::Event::IncomingPrimitiveBatch, nullptr); @@ -396,7 +396,10 @@ void Maxwell3D::ProcessCBData(u32 value) { const auto address = memory_manager.GpuToCpuAddress(buffer_address + regs.const_buffer.cb_pos); ASSERT_MSG(address, "Invalid GPU address"); - Memory::Write32(*address, value); + u8* ptr{Memory::GetPointer(*address)}; + rasterizer.InvalidateRegion(ToCacheAddr(ptr), sizeof(u32)); + std::memcpy(ptr, &value, sizeof(u32)); + dirty_flags.OnMemoryWrite(); // Increment the current buffer position. diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index 1f76aa670..7fbf1026e 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -5,8 +5,10 @@ #pragma once #include <array> +#include <bitset> #include <unordered_map> #include <vector> + #include "common/assert.h" #include "common/bit_field.h" #include "common/common_funcs.h" @@ -17,6 +19,10 @@ #include "video_core/memory_manager.h" #include "video_core/textures/texture.h" +namespace Core { +class System; +} + namespace VideoCore { class RasterizerInterface; } @@ -28,7 +34,8 @@ namespace Tegra::Engines { class Maxwell3D final { public: - explicit Maxwell3D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager); + explicit Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + MemoryManager& memory_manager); ~Maxwell3D() = default; /// Register structure of the Maxwell3D engine. 
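// A minimal sketch of the constructor-injection pattern introduced by the new
// Maxwell3D/MaxwellDMA signatures: each engine stores the Core::System& it was
// constructed with instead of reaching for Core::System::GetInstance(). The
// System and Engine types below are stand-ins, not the real classes.
#include <cstdint>
#include <iostream>

namespace sketch {

class System {
public:
    std::uint64_t GetTicks() const { return ticks; }
    void AdvanceTick() { ++ticks; }
private:
    std::uint64_t ticks{};
};

class Engine {
public:
    explicit Engine(System& system) : system{system} {}

    std::uint64_t Timestamp() const {
        // Before the change this would have gone through a global singleton.
        return system.GetTicks();
    }

private:
    System& system; // injected dependency, same lifetime as the owning GPU
};

} // namespace sketch

int main() {
    sketch::System system;
    sketch::Engine engine{system};
    system.AdvanceTick();
    std::cout << engine.Timestamp() << '\n'; // prints 1
}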
@@ -498,7 +505,7 @@ public: f32 translate_z; INSERT_PADDING_WORDS(2); - MathUtil::Rectangle<s32> GetRect() const { + Common::Rectangle<s32> GetRect() const { return { GetX(), // left GetY() + GetHeight(), // top @@ -1089,19 +1096,18 @@ public: MemoryManager& memory_manager; struct DirtyFlags { - u8 color_buffer = 0xFF; - bool zeta_buffer = true; - - bool shaders = true; + std::bitset<8> color_buffer{0xFF}; + std::bitset<32> vertex_array{0xFFFFFFFF}; bool vertex_attrib_format = true; - u32 vertex_array = 0xFFFFFFFF; + bool zeta_buffer = true; + bool shaders = true; void OnMemoryWrite() { - color_buffer = 0xFF; zeta_buffer = true; shaders = true; - vertex_array = 0xFFFFFFFF; + color_buffer.set(); + vertex_array.set(); } }; @@ -1131,6 +1137,8 @@ public: private: void InitializeRegisterDefaults(); + Core::System& system; + VideoCore::RasterizerInterface& rasterizer; /// Start offsets of each macro in macro_memory diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index d6c41a5ae..415a6319a 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp @@ -2,17 +2,21 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +#include "common/assert.h" +#include "common/logging/log.h" #include "core/core.h" #include "core/memory.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/engines/maxwell_dma.h" #include "video_core/rasterizer_interface.h" +#include "video_core/renderer_base.h" #include "video_core/textures/decoders.h" namespace Tegra::Engines { -MaxwellDMA::MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager) - : memory_manager(memory_manager), rasterizer{rasterizer} {} +MaxwellDMA::MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + MemoryManager& memory_manager) + : memory_manager(memory_manager), system{system}, rasterizer{rasterizer} {} void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) { ASSERT_MSG(method_call.method < Regs::NUM_REGS, @@ -59,7 +63,7 @@ void MaxwellDMA::HandleCopy() { } // All copies here update the main memory, so mark all rasterizer states as invalid. - Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite(); + system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite(); if (regs.exec.is_dst_linear && regs.exec.is_src_linear) { // When the enable_2d bit is disabled, the copy is performed as if we were copying a 1D @@ -89,12 +93,14 @@ void MaxwellDMA::HandleCopy() { const auto FlushAndInvalidate = [&](u32 src_size, u64 dst_size) { // TODO(Subv): For now, manually flush the regions until we implement GPU-accelerated // copying. - rasterizer.FlushRegion(*source_cpu, src_size); + Core::System::GetInstance().Renderer().Rasterizer().FlushRegion( + ToCacheAddr(Memory::GetPointer(*source_cpu)), src_size); // We have to invalidate the destination region to evict any outdated surfaces from the // cache. We do this before actually writing the new data because the destination address // might contain a dirty surface that will have to be written back to memory. 
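// A small sketch of the CacheAddr convention behind these FlushRegion/InvalidateRegion
// calls: cached regions are addressed by the host pointer backing guest memory,
// reinterpreted as an integer. ToCacheAddr mirrors the helper added in video_core/gpu.h;
// the backing array and the printed ranges below are purely illustrative.
#include <cstdint>
#include <cstdio>

using CacheAddr = std::uintptr_t;

inline CacheAddr ToCacheAddr(const void* host_ptr) {
    return reinterpret_cast<CacheAddr>(host_ptr);
}

int main() {
    unsigned char backing[64]{}; // stands in for the host memory behind a guest region
    const CacheAddr addr = ToCacheAddr(backing);

    // A DMA copy would flush the source range and invalidate the destination range,
    // both expressed as [addr, addr + size) in cache-address space.
    std::printf("flush      [%#zx, %#zx)\n", static_cast<std::size_t>(addr),
                static_cast<std::size_t>(addr + sizeof(backing)));
    std::printf("invalidate [%#zx, %#zx)\n", static_cast<std::size_t>(addr),
                static_cast<std::size_t>(addr + sizeof(backing)));
}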
- rasterizer.InvalidateRegion(*dest_cpu, dst_size); + Core::System::GetInstance().Renderer().Rasterizer().InvalidateRegion( + ToCacheAddr(Memory::GetPointer(*dest_cpu)), dst_size); }; if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) { diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h index 1f8cd65d2..34c369320 100644 --- a/src/video_core/engines/maxwell_dma.h +++ b/src/video_core/engines/maxwell_dma.h @@ -5,13 +5,17 @@ #pragma once #include <array> -#include "common/assert.h" +#include <cstddef> #include "common/bit_field.h" #include "common/common_funcs.h" #include "common/common_types.h" #include "video_core/gpu.h" #include "video_core/memory_manager.h" +namespace Core { +class System; +} + namespace VideoCore { class RasterizerInterface; } @@ -20,7 +24,8 @@ namespace Tegra::Engines { class MaxwellDMA final { public: - explicit MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager); + explicit MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + MemoryManager& memory_manager); ~MaxwellDMA() = default; /// Write the value to the register identified by method. @@ -137,6 +142,8 @@ public: MemoryManager& memory_manager; private: + Core::System& system; + VideoCore::RasterizerInterface& rasterizer; /// Performs the copy from the source buffer to the destination buffer as configured in the diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h index 1f425f90b..7f613370b 100644 --- a/src/video_core/engines/shader_bytecode.h +++ b/src/video_core/engines/shader_bytecode.h @@ -6,7 +6,6 @@ #include <bitset> #include <optional> -#include <string> #include <tuple> #include <vector> @@ -325,11 +324,11 @@ enum class TextureQueryType : u64 { enum class TextureProcessMode : u64 { None = 0, - LZ = 1, // Unknown, appears to be the same as none. + LZ = 1, // Load LOD of zero. LB = 2, // Load Bias. - LL = 3, // Load LOD (LevelOfDetail) - LBA = 6, // Load Bias. The A is unknown, does not appear to differ with LB - LLA = 7 // Load LOD. The A is unknown, does not appear to differ with LL + LL = 3, // Load LOD. + LBA = 6, // Load Bias. The A is unknown, does not appear to differ with LB. + LLA = 7 // Load LOD. The A is unknown, does not appear to differ with LL. 
}; enum class TextureMiscMode : u64 { @@ -376,9 +375,9 @@ enum class R2pMode : u64 { }; enum class IpaInterpMode : u64 { - Linear = 0, - Perspective = 1, - Flat = 2, + Pass = 0, + Multiply = 1, + Constant = 2, Sc = 3, }; @@ -1446,6 +1445,7 @@ public: Flow, Synch, Memory, + Texture, FloatSet, FloatSetPredicate, IntegerSet, @@ -1576,14 +1576,14 @@ private: INST("1110111101010---", Id::ST_L, Type::Memory, "ST_L"), INST("1110111011010---", Id::LDG, Type::Memory, "LDG"), INST("1110111011011---", Id::STG, Type::Memory, "STG"), - INST("110000----111---", Id::TEX, Type::Memory, "TEX"), - INST("1101111101001---", Id::TXQ, Type::Memory, "TXQ"), - INST("1101-00---------", Id::TEXS, Type::Memory, "TEXS"), - INST("1101101---------", Id::TLDS, Type::Memory, "TLDS"), - INST("110010----111---", Id::TLD4, Type::Memory, "TLD4"), - INST("1101111100------", Id::TLD4S, Type::Memory, "TLD4S"), - INST("110111110110----", Id::TMML_B, Type::Memory, "TMML_B"), - INST("1101111101011---", Id::TMML, Type::Memory, "TMML"), + INST("110000----111---", Id::TEX, Type::Texture, "TEX"), + INST("1101111101001---", Id::TXQ, Type::Texture, "TXQ"), + INST("1101-00---------", Id::TEXS, Type::Texture, "TEXS"), + INST("1101101---------", Id::TLDS, Type::Texture, "TLDS"), + INST("110010----111---", Id::TLD4, Type::Texture, "TLD4"), + INST("1101111100------", Id::TLD4S, Type::Texture, "TLD4S"), + INST("110111110110----", Id::TMML_B, Type::Texture, "TMML_B"), + INST("1101111101011---", Id::TMML, Type::Texture, "TMML"), INST("111000110000----", Id::EXIT, Type::Trivial, "EXIT"), INST("11100000--------", Id::IPA, Type::Trivial, "IPA"), INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"), diff --git a/src/video_core/engines/shader_header.h b/src/video_core/engines/shader_header.h index cf2b76ff6..e86a7f04a 100644 --- a/src/video_core/engines/shader_header.h +++ b/src/video_core/engines/shader_header.h @@ -16,6 +16,13 @@ enum class OutputTopology : u32 { TriangleStrip = 7, }; +enum class AttributeUse : u8 { + Unused = 0, + Constant = 1, + Perspective = 2, + ScreenLinear = 3, +}; + // Documentation in: // http://download.nvidia.com/open-gpu-doc/Shader-Program-Header/1/Shader-Program-Header.html#ImapTexture struct Header { @@ -84,9 +91,15 @@ struct Header { } vtg; struct { - INSERT_PADDING_BYTES(3); // ImapSystemValuesA - INSERT_PADDING_BYTES(1); // ImapSystemValuesB - INSERT_PADDING_BYTES(32); // ImapGenericVector[32] + INSERT_PADDING_BYTES(3); // ImapSystemValuesA + INSERT_PADDING_BYTES(1); // ImapSystemValuesB + union { + BitField<0, 2, AttributeUse> x; + BitField<2, 2, AttributeUse> y; + BitField<4, 2, AttributeUse> w; + BitField<6, 2, AttributeUse> z; + u8 raw; + } imap_generic_vector[32]; INSERT_PADDING_BYTES(2); // ImapColor INSERT_PADDING_BYTES(2); // ImapSystemValuesC INSERT_PADDING_BYTES(10); // ImapFixedFncTexture[10] @@ -103,6 +116,28 @@ struct Header { const u32 bit = render_target * 4 + component; return omap.target & (1 << bit); } + AttributeUse GetAttributeIndexUse(u32 attribute, u32 index) const { + return static_cast<AttributeUse>( + (imap_generic_vector[attribute].raw >> (index * 2)) & 0x03); + } + AttributeUse GetAttributeUse(u32 attribute) const { + AttributeUse result = AttributeUse::Unused; + for (u32 i = 0; i < 4; i++) { + const auto index = GetAttributeIndexUse(attribute, i); + if (index == AttributeUse::Unused) { + continue; + } + if (result == AttributeUse::Unused || result == index) { + result = index; + continue; + } + LOG_CRITICAL(HW_GPU, "Generic Attribute Conflict in Interpolation Mode"); + if (index 
== AttributeUse::Perspective) { + result = index; + } + } + return result; + } } ps; }; diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index b86265dfe..08abf8ac9 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -12,7 +12,7 @@ #include "video_core/engines/maxwell_3d.h" #include "video_core/engines/maxwell_dma.h" #include "video_core/gpu.h" -#include "video_core/rasterizer_interface.h" +#include "video_core/renderer_base.h" namespace Tegra { @@ -28,14 +28,15 @@ u32 FramebufferConfig::BytesPerPixel(PixelFormat format) { UNREACHABLE(); } -GPU::GPU(VideoCore::RasterizerInterface& rasterizer) { +GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer) : renderer{renderer} { + auto& rasterizer{renderer.Rasterizer()}; memory_manager = std::make_unique<Tegra::MemoryManager>(); dma_pusher = std::make_unique<Tegra::DmaPusher>(*this); - maxwell_3d = std::make_unique<Engines::Maxwell3D>(rasterizer, *memory_manager); + maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, rasterizer, *memory_manager); fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer, *memory_manager); kepler_compute = std::make_unique<Engines::KeplerCompute>(*memory_manager); - maxwell_dma = std::make_unique<Engines::MaxwellDMA>(rasterizer, *memory_manager); - kepler_memory = std::make_unique<Engines::KeplerMemory>(rasterizer, *memory_manager); + maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, rasterizer, *memory_manager); + kepler_memory = std::make_unique<Engines::KeplerMemory>(system, rasterizer, *memory_manager); } GPU::~GPU() = default; diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index a482196ea..a14b95c30 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -6,16 +6,24 @@ #include <array> #include <memory> -#include <vector> #include "common/common_types.h" #include "core/hle/service/nvflinger/buffer_queue.h" #include "video_core/dma_pusher.h" #include "video_core/memory_manager.h" -namespace VideoCore { -class RasterizerInterface; +using CacheAddr = std::uintptr_t; +inline CacheAddr ToCacheAddr(const void* host_ptr) { + return reinterpret_cast<CacheAddr>(host_ptr); +} + +namespace Core { +class System; } +namespace VideoCore { +class RendererBase; +} // namespace VideoCore + namespace Tegra { enum class RenderTargetFormat : u32 { @@ -97,7 +105,7 @@ struct FramebufferConfig { using TransformFlags = Service::NVFlinger::BufferQueue::BufferTransformFlags; TransformFlags transform_flags; - MathUtil::Rectangle<int> crop_rect; + Common::Rectangle<int> crop_rect; }; namespace Engines { @@ -116,10 +124,11 @@ enum class EngineID { MAXWELL_DMA_COPY_A = 0xB0B5, }; -class GPU final { +class GPU { public: - explicit GPU(VideoCore::RasterizerInterface& rasterizer); - ~GPU(); + explicit GPU(Core::System& system, VideoCore::RendererBase& renderer); + + virtual ~GPU(); struct MethodCall { u32 method{}; @@ -197,8 +206,42 @@ public: }; } regs{}; + /// Push GPU command entries to be processed + virtual void PushGPUEntries(Tegra::CommandList&& entries) = 0; + + /// Swap buffers (render frame) + virtual void SwapBuffers( + std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) = 0; + + /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory + virtual void FlushRegion(CacheAddr addr, u64 size) = 0; + + /// Notify rasterizer that any caches of the specified region should be invalidated + virtual void InvalidateRegion(CacheAddr addr, u64 size) = 0; + + /// Notify rasterizer that any caches of the 
specified region should be flushed and invalidated + virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0; + private: + void ProcessBindMethod(const MethodCall& method_call); + void ProcessSemaphoreTriggerMethod(); + void ProcessSemaphoreRelease(); + void ProcessSemaphoreAcquire(); + + /// Calls a GPU puller method. + void CallPullerMethod(const MethodCall& method_call); + + /// Calls a GPU engine method. + void CallEngineMethod(const MethodCall& method_call); + + /// Determines where the method should be executed. + bool ExecuteMethodOnEngine(const MethodCall& method_call); + +protected: std::unique_ptr<Tegra::DmaPusher> dma_pusher; + VideoCore::RendererBase& renderer; + +private: std::unique_ptr<Tegra::MemoryManager> memory_manager; /// Mapping of command subchannels to their bound engine ids. @@ -214,18 +257,6 @@ private: std::unique_ptr<Engines::MaxwellDMA> maxwell_dma; /// Inline memory engine std::unique_ptr<Engines::KeplerMemory> kepler_memory; - - void ProcessBindMethod(const MethodCall& method_call); - void ProcessSemaphoreTriggerMethod(); - void ProcessSemaphoreRelease(); - void ProcessSemaphoreAcquire(); - - // Calls a GPU puller method. - void CallPullerMethod(const MethodCall& method_call); - // Calls a GPU engine method. - void CallEngineMethod(const MethodCall& method_call); - // Determines where the method should be executed. - bool ExecuteMethodOnEngine(const MethodCall& method_call); }; #define ASSERT_REG_POSITION(field_name, position) \ diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp new file mode 100644 index 000000000..8b355cf7b --- /dev/null +++ b/src/video_core/gpu_asynch.cpp @@ -0,0 +1,37 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "video_core/gpu_asynch.h" +#include "video_core/gpu_thread.h" +#include "video_core/renderer_base.h" + +namespace VideoCommon { + +GPUAsynch::GPUAsynch(Core::System& system, VideoCore::RendererBase& renderer) + : Tegra::GPU(system, renderer), gpu_thread{renderer, *dma_pusher} {} + +GPUAsynch::~GPUAsynch() = default; + +void GPUAsynch::PushGPUEntries(Tegra::CommandList&& entries) { + gpu_thread.SubmitList(std::move(entries)); +} + +void GPUAsynch::SwapBuffers( + std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) { + gpu_thread.SwapBuffers(std::move(framebuffer)); +} + +void GPUAsynch::FlushRegion(CacheAddr addr, u64 size) { + gpu_thread.FlushRegion(addr, size); +} + +void GPUAsynch::InvalidateRegion(CacheAddr addr, u64 size) { + gpu_thread.InvalidateRegion(addr, size); +} + +void GPUAsynch::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { + gpu_thread.FlushAndInvalidateRegion(addr, size); +} + +} // namespace VideoCommon diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h new file mode 100644 index 000000000..1dcc61a6c --- /dev/null +++ b/src/video_core/gpu_asynch.h @@ -0,0 +1,37 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include "video_core/gpu.h" +#include "video_core/gpu_thread.h" + +namespace VideoCore { +class RendererBase; +} // namespace VideoCore + +namespace VideoCommon { + +namespace GPUThread { +class ThreadManager; +} // namespace GPUThread + +/// Implementation of GPU interface that runs the GPU asynchronously +class GPUAsynch : public Tegra::GPU { +public: + explicit GPUAsynch(Core::System& system, VideoCore::RendererBase& renderer); + ~GPUAsynch() override; + + void PushGPUEntries(Tegra::CommandList&& entries) override; + void SwapBuffers( + std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override; + void FlushRegion(CacheAddr addr, u64 size) override; + void InvalidateRegion(CacheAddr addr, u64 size) override; + void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override; + +private: + GPUThread::ThreadManager gpu_thread; +}; + +} // namespace VideoCommon diff --git a/src/video_core/gpu_synch.cpp b/src/video_core/gpu_synch.cpp new file mode 100644 index 000000000..2cfc900ed --- /dev/null +++ b/src/video_core/gpu_synch.cpp @@ -0,0 +1,37 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "video_core/gpu_synch.h" +#include "video_core/renderer_base.h" + +namespace VideoCommon { + +GPUSynch::GPUSynch(Core::System& system, VideoCore::RendererBase& renderer) + : Tegra::GPU(system, renderer) {} + +GPUSynch::~GPUSynch() = default; + +void GPUSynch::PushGPUEntries(Tegra::CommandList&& entries) { + dma_pusher->Push(std::move(entries)); + dma_pusher->DispatchCalls(); +} + +void GPUSynch::SwapBuffers( + std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) { + renderer.SwapBuffers(std::move(framebuffer)); +} + +void GPUSynch::FlushRegion(CacheAddr addr, u64 size) { + renderer.Rasterizer().FlushRegion(addr, size); +} + +void GPUSynch::InvalidateRegion(CacheAddr addr, u64 size) { + renderer.Rasterizer().InvalidateRegion(addr, size); +} + +void GPUSynch::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { + renderer.Rasterizer().FlushAndInvalidateRegion(addr, size); +} + +} // namespace VideoCommon diff --git a/src/video_core/gpu_synch.h b/src/video_core/gpu_synch.h new file mode 100644 index 000000000..766b5631c --- /dev/null +++ b/src/video_core/gpu_synch.h @@ -0,0 +1,29 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include "video_core/gpu.h" + +namespace VideoCore { +class RendererBase; +} // namespace VideoCore + +namespace VideoCommon { + +/// Implementation of GPU interface that runs the GPU synchronously +class GPUSynch : public Tegra::GPU { +public: + explicit GPUSynch(Core::System& system, VideoCore::RendererBase& renderer); + ~GPUSynch() override; + + void PushGPUEntries(Tegra::CommandList&& entries) override; + void SwapBuffers( + std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override; + void FlushRegion(CacheAddr addr, u64 size) override; + void InvalidateRegion(CacheAddr addr, u64 size) override; + void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override; +}; + +} // namespace VideoCommon diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp new file mode 100644 index 000000000..086b2f625 --- /dev/null +++ b/src/video_core/gpu_thread.cpp @@ -0,0 +1,98 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/microprofile.h" +#include "core/frontend/scope_acquire_window_context.h" +#include "video_core/dma_pusher.h" +#include "video_core/gpu.h" +#include "video_core/gpu_thread.h" +#include "video_core/renderer_base.h" + +namespace VideoCommon::GPUThread { + +/// Runs the GPU thread +static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher, + SynchState& state) { + MicroProfileOnThreadCreate("GpuThread"); + + // Wait for first GPU command before acquiring the window context + state.WaitForCommands(); + + // If emulation was stopped during disk shader loading, abort before trying to acquire context + if (!state.is_running) { + return; + } + + Core::Frontend::ScopeAcquireWindowContext acquire_context{renderer.GetRenderWindow()}; + + CommandDataContainer next; + while (state.is_running) { + state.WaitForCommands(); + while (!state.queue.Empty()) { + state.queue.Pop(next); + if (const auto submit_list = std::get_if<SubmitListCommand>(&next.data)) { + dma_pusher.Push(std::move(submit_list->entries)); + dma_pusher.DispatchCalls(); + } else if (const auto data = std::get_if<SwapBuffersCommand>(&next.data)) { + state.DecrementFramesCounter(); + renderer.SwapBuffers(std::move(data->framebuffer)); + } else if (const auto data = std::get_if<FlushRegionCommand>(&next.data)) { + renderer.Rasterizer().FlushRegion(data->addr, data->size); + } else if (const auto data = std::get_if<InvalidateRegionCommand>(&next.data)) { + renderer.Rasterizer().InvalidateRegion(data->addr, data->size); + } else if (const auto data = std::get_if<EndProcessingCommand>(&next.data)) { + return; + } else { + UNREACHABLE(); + } + } + } +} + +ThreadManager::ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher) + : renderer{renderer}, dma_pusher{dma_pusher}, thread{RunThread, std::ref(renderer), + std::ref(dma_pusher), std::ref(state)} {} + +ThreadManager::~ThreadManager() { + // Notify GPU thread that a shutdown is pending + PushCommand(EndProcessingCommand()); + thread.join(); +} + +void ThreadManager::SubmitList(Tegra::CommandList&& entries) { + PushCommand(SubmitListCommand(std::move(entries))); +} + +void ThreadManager::SwapBuffers( + std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) { + state.IncrementFramesCounter(); + PushCommand(SwapBuffersCommand(std::move(framebuffer))); + state.WaitForFrames(); +} + +void 
ThreadManager::FlushRegion(CacheAddr addr, u64 size) { + PushCommand(FlushRegionCommand(addr, size)); +} + +void ThreadManager::InvalidateRegion(CacheAddr addr, u64 size) { + if (state.queue.Empty()) { + // It's quicker to invalidate a single region on the CPU if the queue is already empty + renderer.Rasterizer().InvalidateRegion(addr, size); + } else { + PushCommand(InvalidateRegionCommand(addr, size)); + } +} + +void ThreadManager::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { + // Skip flush on asynch mode, as FlushAndInvalidateRegion is not used for anything too important + InvalidateRegion(addr, size); +} + +void ThreadManager::PushCommand(CommandData&& command_data) { + state.queue.Push(CommandDataContainer(std::move(command_data))); + state.SignalCommands(); +} + +} // namespace VideoCommon::GPUThread diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h new file mode 100644 index 000000000..8cd7db1c6 --- /dev/null +++ b/src/video_core/gpu_thread.h @@ -0,0 +1,185 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <array> +#include <atomic> +#include <condition_variable> +#include <memory> +#include <mutex> +#include <optional> +#include <thread> +#include <variant> + +#include "common/threadsafe_queue.h" +#include "video_core/gpu.h" + +namespace Tegra { +struct FramebufferConfig; +class DmaPusher; +} // namespace Tegra + +namespace VideoCore { +class RendererBase; +} // namespace VideoCore + +namespace VideoCommon::GPUThread { + +/// Command to signal to the GPU thread that processing has ended +struct EndProcessingCommand final {}; + +/// Command to signal to the GPU thread that a command list is ready for processing +struct SubmitListCommand final { + explicit SubmitListCommand(Tegra::CommandList&& entries) : entries{std::move(entries)} {} + + Tegra::CommandList entries; +}; + +/// Command to signal to the GPU thread that a swap buffers is pending +struct SwapBuffersCommand final { + explicit SwapBuffersCommand(std::optional<const Tegra::FramebufferConfig> framebuffer) + : framebuffer{std::move(framebuffer)} {} + + std::optional<Tegra::FramebufferConfig> framebuffer; +}; + +/// Command to signal to the GPU thread to flush a region +struct FlushRegionCommand final { + explicit constexpr FlushRegionCommand(CacheAddr addr, u64 size) : addr{addr}, size{size} {} + + CacheAddr addr; + u64 size; +}; + +/// Command to signal to the GPU thread to invalidate a region +struct InvalidateRegionCommand final { + explicit constexpr InvalidateRegionCommand(CacheAddr addr, u64 size) : addr{addr}, size{size} {} + + CacheAddr addr; + u64 size; +}; + +/// Command to signal to the GPU thread to flush and invalidate a region +struct FlushAndInvalidateRegionCommand final { + explicit constexpr FlushAndInvalidateRegionCommand(CacheAddr addr, u64 size) + : addr{addr}, size{size} {} + + CacheAddr addr; + u64 size; +}; + +using CommandData = + std::variant<EndProcessingCommand, SubmitListCommand, SwapBuffersCommand, FlushRegionCommand, + InvalidateRegionCommand, FlushAndInvalidateRegionCommand>; + +struct CommandDataContainer { + CommandDataContainer() = default; + + CommandDataContainer(CommandData&& data) : data{std::move(data)} {} + + CommandDataContainer& operator=(const CommandDataContainer& t) { + data = std::move(t.data); + return *this; + } + + CommandData data; +}; + +/// Struct used to synchronize the GPU thread +struct SynchState final { + std::atomic_bool 
is_running{true}; + std::atomic_int queued_frame_count{}; + std::mutex frames_mutex; + std::mutex commands_mutex; + std::condition_variable commands_condition; + std::condition_variable frames_condition; + + void IncrementFramesCounter() { + std::lock_guard<std::mutex> lock{frames_mutex}; + ++queued_frame_count; + } + + void DecrementFramesCounter() { + { + std::lock_guard<std::mutex> lock{frames_mutex}; + --queued_frame_count; + + if (queued_frame_count) { + return; + } + } + frames_condition.notify_one(); + } + + void WaitForFrames() { + { + std::lock_guard<std::mutex> lock{frames_mutex}; + if (!queued_frame_count) { + return; + } + } + + // Wait for the GPU to be idle (all commands to be executed) + { + std::unique_lock<std::mutex> lock{frames_mutex}; + frames_condition.wait(lock, [this] { return !queued_frame_count; }); + } + } + + void SignalCommands() { + { + std::unique_lock<std::mutex> lock{commands_mutex}; + if (queue.Empty()) { + return; + } + } + + commands_condition.notify_one(); + } + + void WaitForCommands() { + std::unique_lock<std::mutex> lock{commands_mutex}; + commands_condition.wait(lock, [this] { return !queue.Empty(); }); + } + + using CommandQueue = Common::SPSCQueue<CommandDataContainer>; + CommandQueue queue; +}; + +/// Class used to manage the GPU thread +class ThreadManager final { +public: + explicit ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher); + ~ThreadManager(); + + /// Push GPU command entries to be processed + void SubmitList(Tegra::CommandList&& entries); + + /// Swap buffers (render frame) + void SwapBuffers( + std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer); + + /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory + void FlushRegion(CacheAddr addr, u64 size); + + /// Notify rasterizer that any caches of the specified region should be invalidated + void InvalidateRegion(CacheAddr addr, u64 size); + + /// Notify rasterizer that any caches of the specified region should be flushed and invalidated + void FlushAndInvalidateRegion(CacheAddr addr, u64 size); + +private: + /// Pushes a command to be executed by the GPU thread + void PushCommand(CommandData&& command_data); + +private: + SynchState state; + VideoCore::RendererBase& renderer; + Tegra::DmaPusher& dma_pusher; + std::thread thread; + std::thread::id thread_id; +}; + +} // namespace VideoCommon::GPUThread diff --git a/src/video_core/morton.cpp b/src/video_core/morton.cpp index b68f4fb13..9692ce143 100644 --- a/src/video_core/morton.cpp +++ b/src/video_core/morton.cpp @@ -16,12 +16,12 @@ namespace VideoCore { using Surface::GetBytesPerPixel; using Surface::PixelFormat; -using MortonCopyFn = void (*)(u32, u32, u32, u32, u32, u32, u8*, std::size_t, VAddr); +using MortonCopyFn = void (*)(u32, u32, u32, u32, u32, u32, u8*, VAddr); using ConversionArray = std::array<MortonCopyFn, Surface::MaxPixelFormat>; template <bool morton_to_linear, PixelFormat format> static void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth, u32 depth, - u32 tile_width_spacing, u8* buffer, std::size_t buffer_size, VAddr addr) { + u32 tile_width_spacing, u8* buffer, VAddr addr) { constexpr u32 bytes_per_pixel = GetBytesPerPixel(format); // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual @@ -42,142 +42,138 @@ static void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth } static constexpr ConversionArray morton_to_linear_fns = { - // 
clang-format off - MortonCopy<true, PixelFormat::ABGR8U>, - MortonCopy<true, PixelFormat::ABGR8S>, - MortonCopy<true, PixelFormat::ABGR8UI>, - MortonCopy<true, PixelFormat::B5G6R5U>, - MortonCopy<true, PixelFormat::A2B10G10R10U>, - MortonCopy<true, PixelFormat::A1B5G5R5U>, - MortonCopy<true, PixelFormat::R8U>, - MortonCopy<true, PixelFormat::R8UI>, - MortonCopy<true, PixelFormat::RGBA16F>, - MortonCopy<true, PixelFormat::RGBA16U>, - MortonCopy<true, PixelFormat::RGBA16UI>, - MortonCopy<true, PixelFormat::R11FG11FB10F>, - MortonCopy<true, PixelFormat::RGBA32UI>, - MortonCopy<true, PixelFormat::DXT1>, - MortonCopy<true, PixelFormat::DXT23>, - MortonCopy<true, PixelFormat::DXT45>, - MortonCopy<true, PixelFormat::DXN1>, - MortonCopy<true, PixelFormat::DXN2UNORM>, - MortonCopy<true, PixelFormat::DXN2SNORM>, - MortonCopy<true, PixelFormat::BC7U>, - MortonCopy<true, PixelFormat::BC6H_UF16>, - MortonCopy<true, PixelFormat::BC6H_SF16>, - MortonCopy<true, PixelFormat::ASTC_2D_4X4>, - MortonCopy<true, PixelFormat::BGRA8>, - MortonCopy<true, PixelFormat::RGBA32F>, - MortonCopy<true, PixelFormat::RG32F>, - MortonCopy<true, PixelFormat::R32F>, - MortonCopy<true, PixelFormat::R16F>, - MortonCopy<true, PixelFormat::R16U>, - MortonCopy<true, PixelFormat::R16S>, - MortonCopy<true, PixelFormat::R16UI>, - MortonCopy<true, PixelFormat::R16I>, - MortonCopy<true, PixelFormat::RG16>, - MortonCopy<true, PixelFormat::RG16F>, - MortonCopy<true, PixelFormat::RG16UI>, - MortonCopy<true, PixelFormat::RG16I>, - MortonCopy<true, PixelFormat::RG16S>, - MortonCopy<true, PixelFormat::RGB32F>, - MortonCopy<true, PixelFormat::RGBA8_SRGB>, - MortonCopy<true, PixelFormat::RG8U>, - MortonCopy<true, PixelFormat::RG8S>, - MortonCopy<true, PixelFormat::RG32UI>, - MortonCopy<true, PixelFormat::R32UI>, - MortonCopy<true, PixelFormat::ASTC_2D_8X8>, - MortonCopy<true, PixelFormat::ASTC_2D_8X5>, - MortonCopy<true, PixelFormat::ASTC_2D_5X4>, - MortonCopy<true, PixelFormat::BGRA8_SRGB>, - MortonCopy<true, PixelFormat::DXT1_SRGB>, - MortonCopy<true, PixelFormat::DXT23_SRGB>, - MortonCopy<true, PixelFormat::DXT45_SRGB>, - MortonCopy<true, PixelFormat::BC7U_SRGB>, - MortonCopy<true, PixelFormat::ASTC_2D_4X4_SRGB>, - MortonCopy<true, PixelFormat::ASTC_2D_8X8_SRGB>, - MortonCopy<true, PixelFormat::ASTC_2D_8X5_SRGB>, - MortonCopy<true, PixelFormat::ASTC_2D_5X4_SRGB>, - MortonCopy<true, PixelFormat::ASTC_2D_5X5>, - MortonCopy<true, PixelFormat::ASTC_2D_5X5_SRGB>, - MortonCopy<true, PixelFormat::ASTC_2D_10X8>, - MortonCopy<true, PixelFormat::ASTC_2D_10X8_SRGB>, - MortonCopy<true, PixelFormat::Z32F>, - MortonCopy<true, PixelFormat::Z16>, - MortonCopy<true, PixelFormat::Z24S8>, - MortonCopy<true, PixelFormat::S8Z24>, - MortonCopy<true, PixelFormat::Z32FS8>, - // clang-format on + MortonCopy<true, PixelFormat::ABGR8U>, + MortonCopy<true, PixelFormat::ABGR8S>, + MortonCopy<true, PixelFormat::ABGR8UI>, + MortonCopy<true, PixelFormat::B5G6R5U>, + MortonCopy<true, PixelFormat::A2B10G10R10U>, + MortonCopy<true, PixelFormat::A1B5G5R5U>, + MortonCopy<true, PixelFormat::R8U>, + MortonCopy<true, PixelFormat::R8UI>, + MortonCopy<true, PixelFormat::RGBA16F>, + MortonCopy<true, PixelFormat::RGBA16U>, + MortonCopy<true, PixelFormat::RGBA16UI>, + MortonCopy<true, PixelFormat::R11FG11FB10F>, + MortonCopy<true, PixelFormat::RGBA32UI>, + MortonCopy<true, PixelFormat::DXT1>, + MortonCopy<true, PixelFormat::DXT23>, + MortonCopy<true, PixelFormat::DXT45>, + MortonCopy<true, PixelFormat::DXN1>, + MortonCopy<true, PixelFormat::DXN2UNORM>, + MortonCopy<true, 
PixelFormat::DXN2SNORM>, + MortonCopy<true, PixelFormat::BC7U>, + MortonCopy<true, PixelFormat::BC6H_UF16>, + MortonCopy<true, PixelFormat::BC6H_SF16>, + MortonCopy<true, PixelFormat::ASTC_2D_4X4>, + MortonCopy<true, PixelFormat::BGRA8>, + MortonCopy<true, PixelFormat::RGBA32F>, + MortonCopy<true, PixelFormat::RG32F>, + MortonCopy<true, PixelFormat::R32F>, + MortonCopy<true, PixelFormat::R16F>, + MortonCopy<true, PixelFormat::R16U>, + MortonCopy<true, PixelFormat::R16S>, + MortonCopy<true, PixelFormat::R16UI>, + MortonCopy<true, PixelFormat::R16I>, + MortonCopy<true, PixelFormat::RG16>, + MortonCopy<true, PixelFormat::RG16F>, + MortonCopy<true, PixelFormat::RG16UI>, + MortonCopy<true, PixelFormat::RG16I>, + MortonCopy<true, PixelFormat::RG16S>, + MortonCopy<true, PixelFormat::RGB32F>, + MortonCopy<true, PixelFormat::RGBA8_SRGB>, + MortonCopy<true, PixelFormat::RG8U>, + MortonCopy<true, PixelFormat::RG8S>, + MortonCopy<true, PixelFormat::RG32UI>, + MortonCopy<true, PixelFormat::R32UI>, + MortonCopy<true, PixelFormat::ASTC_2D_8X8>, + MortonCopy<true, PixelFormat::ASTC_2D_8X5>, + MortonCopy<true, PixelFormat::ASTC_2D_5X4>, + MortonCopy<true, PixelFormat::BGRA8_SRGB>, + MortonCopy<true, PixelFormat::DXT1_SRGB>, + MortonCopy<true, PixelFormat::DXT23_SRGB>, + MortonCopy<true, PixelFormat::DXT45_SRGB>, + MortonCopy<true, PixelFormat::BC7U_SRGB>, + MortonCopy<true, PixelFormat::ASTC_2D_4X4_SRGB>, + MortonCopy<true, PixelFormat::ASTC_2D_8X8_SRGB>, + MortonCopy<true, PixelFormat::ASTC_2D_8X5_SRGB>, + MortonCopy<true, PixelFormat::ASTC_2D_5X4_SRGB>, + MortonCopy<true, PixelFormat::ASTC_2D_5X5>, + MortonCopy<true, PixelFormat::ASTC_2D_5X5_SRGB>, + MortonCopy<true, PixelFormat::ASTC_2D_10X8>, + MortonCopy<true, PixelFormat::ASTC_2D_10X8_SRGB>, + MortonCopy<true, PixelFormat::Z32F>, + MortonCopy<true, PixelFormat::Z16>, + MortonCopy<true, PixelFormat::Z24S8>, + MortonCopy<true, PixelFormat::S8Z24>, + MortonCopy<true, PixelFormat::Z32FS8>, }; static constexpr ConversionArray linear_to_morton_fns = { - // clang-format off - MortonCopy<false, PixelFormat::ABGR8U>, - MortonCopy<false, PixelFormat::ABGR8S>, - MortonCopy<false, PixelFormat::ABGR8UI>, - MortonCopy<false, PixelFormat::B5G6R5U>, - MortonCopy<false, PixelFormat::A2B10G10R10U>, - MortonCopy<false, PixelFormat::A1B5G5R5U>, - MortonCopy<false, PixelFormat::R8U>, - MortonCopy<false, PixelFormat::R8UI>, - MortonCopy<false, PixelFormat::RGBA16F>, - MortonCopy<false, PixelFormat::RGBA16U>, - MortonCopy<false, PixelFormat::RGBA16UI>, - MortonCopy<false, PixelFormat::R11FG11FB10F>, - MortonCopy<false, PixelFormat::RGBA32UI>, - MortonCopy<false, PixelFormat::DXT1>, - MortonCopy<false, PixelFormat::DXT23>, - MortonCopy<false, PixelFormat::DXT45>, - MortonCopy<false, PixelFormat::DXN1>, - MortonCopy<false, PixelFormat::DXN2UNORM>, - MortonCopy<false, PixelFormat::DXN2SNORM>, - MortonCopy<false, PixelFormat::BC7U>, - MortonCopy<false, PixelFormat::BC6H_UF16>, - MortonCopy<false, PixelFormat::BC6H_SF16>, - // TODO(Subv): Swizzling ASTC formats are not supported - nullptr, - MortonCopy<false, PixelFormat::BGRA8>, - MortonCopy<false, PixelFormat::RGBA32F>, - MortonCopy<false, PixelFormat::RG32F>, - MortonCopy<false, PixelFormat::R32F>, - MortonCopy<false, PixelFormat::R16F>, - MortonCopy<false, PixelFormat::R16U>, - MortonCopy<false, PixelFormat::R16S>, - MortonCopy<false, PixelFormat::R16UI>, - MortonCopy<false, PixelFormat::R16I>, - MortonCopy<false, PixelFormat::RG16>, - MortonCopy<false, PixelFormat::RG16F>, - MortonCopy<false, PixelFormat::RG16UI>, - 
MortonCopy<false, PixelFormat::RG16I>, - MortonCopy<false, PixelFormat::RG16S>, - MortonCopy<false, PixelFormat::RGB32F>, - MortonCopy<false, PixelFormat::RGBA8_SRGB>, - MortonCopy<false, PixelFormat::RG8U>, - MortonCopy<false, PixelFormat::RG8S>, - MortonCopy<false, PixelFormat::RG32UI>, - MortonCopy<false, PixelFormat::R32UI>, - nullptr, - nullptr, - nullptr, - MortonCopy<false, PixelFormat::BGRA8_SRGB>, - MortonCopy<false, PixelFormat::DXT1_SRGB>, - MortonCopy<false, PixelFormat::DXT23_SRGB>, - MortonCopy<false, PixelFormat::DXT45_SRGB>, - MortonCopy<false, PixelFormat::BC7U_SRGB>, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - MortonCopy<false, PixelFormat::Z32F>, - MortonCopy<false, PixelFormat::Z16>, - MortonCopy<false, PixelFormat::Z24S8>, - MortonCopy<false, PixelFormat::S8Z24>, - MortonCopy<false, PixelFormat::Z32FS8>, - // clang-format on + MortonCopy<false, PixelFormat::ABGR8U>, + MortonCopy<false, PixelFormat::ABGR8S>, + MortonCopy<false, PixelFormat::ABGR8UI>, + MortonCopy<false, PixelFormat::B5G6R5U>, + MortonCopy<false, PixelFormat::A2B10G10R10U>, + MortonCopy<false, PixelFormat::A1B5G5R5U>, + MortonCopy<false, PixelFormat::R8U>, + MortonCopy<false, PixelFormat::R8UI>, + MortonCopy<false, PixelFormat::RGBA16F>, + MortonCopy<false, PixelFormat::RGBA16U>, + MortonCopy<false, PixelFormat::RGBA16UI>, + MortonCopy<false, PixelFormat::R11FG11FB10F>, + MortonCopy<false, PixelFormat::RGBA32UI>, + MortonCopy<false, PixelFormat::DXT1>, + MortonCopy<false, PixelFormat::DXT23>, + MortonCopy<false, PixelFormat::DXT45>, + MortonCopy<false, PixelFormat::DXN1>, + MortonCopy<false, PixelFormat::DXN2UNORM>, + MortonCopy<false, PixelFormat::DXN2SNORM>, + MortonCopy<false, PixelFormat::BC7U>, + MortonCopy<false, PixelFormat::BC6H_UF16>, + MortonCopy<false, PixelFormat::BC6H_SF16>, + // TODO(Subv): Swizzling ASTC formats are not supported + nullptr, + MortonCopy<false, PixelFormat::BGRA8>, + MortonCopy<false, PixelFormat::RGBA32F>, + MortonCopy<false, PixelFormat::RG32F>, + MortonCopy<false, PixelFormat::R32F>, + MortonCopy<false, PixelFormat::R16F>, + MortonCopy<false, PixelFormat::R16U>, + MortonCopy<false, PixelFormat::R16S>, + MortonCopy<false, PixelFormat::R16UI>, + MortonCopy<false, PixelFormat::R16I>, + MortonCopy<false, PixelFormat::RG16>, + MortonCopy<false, PixelFormat::RG16F>, + MortonCopy<false, PixelFormat::RG16UI>, + MortonCopy<false, PixelFormat::RG16I>, + MortonCopy<false, PixelFormat::RG16S>, + MortonCopy<false, PixelFormat::RGB32F>, + MortonCopy<false, PixelFormat::RGBA8_SRGB>, + MortonCopy<false, PixelFormat::RG8U>, + MortonCopy<false, PixelFormat::RG8S>, + MortonCopy<false, PixelFormat::RG32UI>, + MortonCopy<false, PixelFormat::R32UI>, + nullptr, + nullptr, + nullptr, + MortonCopy<false, PixelFormat::BGRA8_SRGB>, + MortonCopy<false, PixelFormat::DXT1_SRGB>, + MortonCopy<false, PixelFormat::DXT23_SRGB>, + MortonCopy<false, PixelFormat::DXT45_SRGB>, + MortonCopy<false, PixelFormat::BC7U_SRGB>, + nullptr, + nullptr, + nullptr, + nullptr, + nullptr, + nullptr, + nullptr, + nullptr, + MortonCopy<false, PixelFormat::Z32F>, + MortonCopy<false, PixelFormat::Z16>, + MortonCopy<false, PixelFormat::Z24S8>, + MortonCopy<false, PixelFormat::S8Z24>, + MortonCopy<false, PixelFormat::Z32FS8>, }; static MortonCopyFn GetSwizzleFunction(MortonSwizzleMode mode, Surface::PixelFormat format) { @@ -191,45 +187,6 @@ static MortonCopyFn GetSwizzleFunction(MortonSwizzleMode mode, Surface::PixelFor return morton_to_linear_fns[static_cast<std::size_t>(format)]; 
} -/// 8x8 Z-Order coordinate from 2D coordinates -static u32 MortonInterleave(u32 x, u32 y) { - static const u32 xlut[] = {0x00, 0x01, 0x04, 0x05, 0x10, 0x11, 0x14, 0x15}; - static const u32 ylut[] = {0x00, 0x02, 0x08, 0x0a, 0x20, 0x22, 0x28, 0x2a}; - return xlut[x % 8] + ylut[y % 8]; -} - -/// Calculates the offset of the position of the pixel in Morton order -static u32 GetMortonOffset(u32 x, u32 y, u32 bytes_per_pixel) { - // Images are split into 8x8 tiles. Each tile is composed of four 4x4 subtiles each - // of which is composed of four 2x2 subtiles each of which is composed of four texels. - // Each structure is embedded into the next-bigger one in a diagonal pattern, e.g. - // texels are laid out in a 2x2 subtile like this: - // 2 3 - // 0 1 - // - // The full 8x8 tile has the texels arranged like this: - // - // 42 43 46 47 58 59 62 63 - // 40 41 44 45 56 57 60 61 - // 34 35 38 39 50 51 54 55 - // 32 33 36 37 48 49 52 53 - // 10 11 14 15 26 27 30 31 - // 08 09 12 13 24 25 28 29 - // 02 03 06 07 18 19 22 23 - // 00 01 04 05 16 17 20 21 - // - // This pattern is what's called Z-order curve, or Morton order. - - const unsigned int block_height = 8; - const unsigned int coarse_x = x & ~7; - - u32 i = MortonInterleave(x, y); - - const unsigned int offset = coarse_x * block_height; - - return (i + offset) * bytes_per_pixel; -} - static u32 MortonInterleave128(u32 x, u32 y) { // 128x128 Z-Order coordinate from 2D coordinates static constexpr u32 xlut[] = { @@ -325,14 +282,14 @@ static u32 GetMortonOffset128(u32 x, u32 y, u32 bytes_per_pixel) { void MortonSwizzle(MortonSwizzleMode mode, Surface::PixelFormat format, u32 stride, u32 block_height, u32 height, u32 block_depth, u32 depth, u32 tile_width_spacing, - u8* buffer, std::size_t buffer_size, VAddr addr) { - + u8* buffer, VAddr addr) { GetSwizzleFunction(mode, format)(stride, block_height, height, block_depth, depth, - tile_width_spacing, buffer, buffer_size, addr); + tile_width_spacing, buffer, addr); } -void MortonCopyPixels128(u32 width, u32 height, u32 bytes_per_pixel, u32 linear_bytes_per_pixel, - u8* morton_data, u8* linear_data, bool morton_to_linear) { +void MortonCopyPixels128(MortonSwizzleMode mode, u32 width, u32 height, u32 bytes_per_pixel, + u32 linear_bytes_per_pixel, u8* morton_data, u8* linear_data) { + const bool morton_to_linear = mode == MortonSwizzleMode::MortonToLinear; u8* data_ptrs[2]; for (u32 y = 0; y < height; ++y) { for (u32 x = 0; x < width; ++x) { diff --git a/src/video_core/morton.h b/src/video_core/morton.h index 065f59ce3..b565204b5 100644 --- a/src/video_core/morton.h +++ b/src/video_core/morton.h @@ -13,9 +13,9 @@ enum class MortonSwizzleMode { MortonToLinear, LinearToMorton }; void MortonSwizzle(MortonSwizzleMode mode, VideoCore::Surface::PixelFormat format, u32 stride, u32 block_height, u32 height, u32 block_depth, u32 depth, u32 tile_width_spacing, - u8* buffer, std::size_t buffer_size, VAddr addr); + u8* buffer, VAddr addr); -void MortonCopyPixels128(u32 width, u32 height, u32 bytes_per_pixel, u32 linear_bytes_per_pixel, - u8* morton_data, u8* linear_data, bool morton_to_linear); +void MortonCopyPixels128(MortonSwizzleMode mode, u32 width, u32 height, u32 bytes_per_pixel, + u32 linear_bytes_per_pixel, u8* morton_data, u8* linear_data); } // namespace VideoCore diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h index bcf0c15a4..ecd9986a0 100644 --- a/src/video_core/rasterizer_cache.h +++ b/src/video_core/rasterizer_cache.h @@ -4,6 +4,7 @@ #pragma once +#include 
<mutex> #include <set> #include <unordered_map> @@ -12,14 +13,26 @@ #include "common/common_types.h" #include "core/settings.h" +#include "video_core/gpu.h" #include "video_core/rasterizer_interface.h" class RasterizerCacheObject { public: + explicit RasterizerCacheObject(const u8* host_ptr) + : host_ptr{host_ptr}, cache_addr{ToCacheAddr(host_ptr)} {} + virtual ~RasterizerCacheObject(); + CacheAddr GetCacheAddr() const { + return cache_addr; + } + + const u8* GetHostPtr() const { + return host_ptr; + } + /// Gets the address of the shader in guest memory, required for cache management - virtual VAddr GetAddr() const = 0; + virtual VAddr GetCpuAddr() const = 0; /// Gets the size of the shader in guest memory, required for cache management virtual std::size_t GetSizeInBytes() const = 0; @@ -58,6 +71,8 @@ private: bool is_registered{}; ///< Whether the object is currently registered with the cache bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory) u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing + CacheAddr cache_addr{}; ///< Cache address memory, unique from emulated virtual address space + const u8* host_ptr{}; ///< Pointer to the memory backing this cached region }; template <class T> @@ -68,7 +83,9 @@ public: explicit RasterizerCache(VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer} {} /// Write any cached resources overlapping the specified region back to memory - void FlushRegion(Tegra::GPUVAddr addr, size_t size) { + void FlushRegion(CacheAddr addr, std::size_t size) { + std::lock_guard<std::recursive_mutex> lock{mutex}; + const auto& objects{GetSortedObjectsFromRegion(addr, size)}; for (auto& object : objects) { FlushObject(object); @@ -76,7 +93,9 @@ public: } /// Mark the specified region as being invalidated - void InvalidateRegion(VAddr addr, u64 size) { + void InvalidateRegion(CacheAddr addr, u64 size) { + std::lock_guard<std::recursive_mutex> lock{mutex}; + const auto& objects{GetSortedObjectsFromRegion(addr, size)}; for (auto& object : objects) { if (!object->IsRegistered()) { @@ -89,49 +108,70 @@ public: /// Invalidates everything in the cache void InvalidateAll() { + std::lock_guard<std::recursive_mutex> lock{mutex}; + while (interval_cache.begin() != interval_cache.end()) { Unregister(*interval_cache.begin()->second.begin()); } } protected: - /// Tries to get an object from the cache with the specified address - T TryGet(VAddr addr) const { + /// Tries to get an object from the cache with the specified cache address + T TryGet(CacheAddr addr) const { const auto iter = map_cache.find(addr); if (iter != map_cache.end()) return iter->second; return nullptr; } + T TryGet(const void* addr) const { + const auto iter = map_cache.find(ToCacheAddr(addr)); + if (iter != map_cache.end()) + return iter->second; + return nullptr; + } + /// Register an object into the cache void Register(const T& object) { + std::lock_guard<std::recursive_mutex> lock{mutex}; + object->SetIsRegistered(true); interval_cache.add({GetInterval(object), ObjectSet{object}}); - map_cache.insert({object->GetAddr(), object}); - rasterizer.UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), 1); + map_cache.insert({object->GetCacheAddr(), object}); + rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), 1); } /// Unregisters an object from the cache void Unregister(const T& object) { - object->SetIsRegistered(false); - rasterizer.UpdatePagesCachedCount(object->GetAddr(), 
object->GetSizeInBytes(), -1); - // Only flush if use_accurate_gpu_emulation is enabled, as it incurs a performance hit - if (Settings::values.use_accurate_gpu_emulation) { - FlushObject(object); - } + std::lock_guard<std::recursive_mutex> lock{mutex}; + object->SetIsRegistered(false); + rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), -1); interval_cache.subtract({GetInterval(object), ObjectSet{object}}); - map_cache.erase(object->GetAddr()); + map_cache.erase(object->GetCacheAddr()); } /// Returns a ticks counter used for tracking when cached objects were last modified u64 GetModifiedTicks() { + std::lock_guard<std::recursive_mutex> lock{mutex}; + return ++modified_ticks; } + /// Flushes the specified object, updating appropriate cache state as needed + void FlushObject(const T& object) { + std::lock_guard<std::recursive_mutex> lock{mutex}; + + if (!object->IsDirty()) { + return; + } + object->Flush(); + object->MarkAsModified(false, *this); + } + private: /// Returns a list of cached objects from the specified memory region, ordered by access time - std::vector<T> GetSortedObjectsFromRegion(VAddr addr, u64 size) { + std::vector<T> GetSortedObjectsFromRegion(CacheAddr addr, u64 size) { if (size == 0) { return {}; } @@ -154,27 +194,19 @@ private: return objects; } - /// Flushes the specified object, updating appropriate cache state as needed - void FlushObject(const T& object) { - if (!object->IsDirty()) { - return; - } - object->Flush(); - object->MarkAsModified(false, *this); - } - using ObjectSet = std::set<T>; - using ObjectCache = std::unordered_map<VAddr, T>; - using IntervalCache = boost::icl::interval_map<VAddr, ObjectSet>; + using ObjectCache = std::unordered_map<CacheAddr, T>; + using IntervalCache = boost::icl::interval_map<CacheAddr, ObjectSet>; using ObjectInterval = typename IntervalCache::interval_type; static auto GetInterval(const T& object) { - return ObjectInterval::right_open(object->GetAddr(), - object->GetAddr() + object->GetSizeInBytes()); + return ObjectInterval::right_open(object->GetCacheAddr(), + object->GetCacheAddr() + object->GetSizeInBytes()); } ObjectCache map_cache; IntervalCache interval_cache; ///< Cache of objects u64 modified_ticks{}; ///< Counter of cache state ticks, used for in-order flushing VideoCore::RasterizerInterface& rasterizer; + std::recursive_mutex mutex; }; diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index b2a223705..76e292e87 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h @@ -35,20 +35,20 @@ public: virtual void FlushAll() = 0; /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory - virtual void FlushRegion(VAddr addr, u64 size) = 0; + virtual void FlushRegion(CacheAddr addr, u64 size) = 0; /// Notify rasterizer that any caches of the specified region should be invalidated - virtual void InvalidateRegion(VAddr addr, u64 size) = 0; + virtual void InvalidateRegion(CacheAddr addr, u64 size) = 0; /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory /// and invalidated - virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0; + virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0; /// Attempt to use a faster method to perform a surface copy virtual bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, const Tegra::Engines::Fermi2D::Regs::Surface& dst, - const MathUtil::Rectangle<u32>& 
src_rect, - const MathUtil::Rectangle<u32>& dst_rect) { + const Common::Rectangle<u32>& src_rect, + const Common::Rectangle<u32>& dst_rect) { return false; } @@ -63,7 +63,7 @@ public: } /// Increase/decrease the number of object in pages touching the specified region - virtual void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) {} + virtual void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {} /// Initialize disk cached resources for the game being emulated virtual void LoadDiskResources(const std::atomic_bool& stop_loading = false, diff --git a/src/video_core/renderer_base.cpp b/src/video_core/renderer_base.cpp index 94223f45f..919d1f2d4 100644 --- a/src/video_core/renderer_base.cpp +++ b/src/video_core/renderer_base.cpp @@ -2,6 +2,7 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +#include "common/logging/log.h" #include "core/frontend/emu_window.h" #include "core/settings.h" #include "video_core/renderer_base.h" diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp index b3062e5ba..a4eea61a6 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp @@ -13,6 +13,11 @@ namespace OpenGL { +CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset, + std::size_t alignment, u8* host_ptr) + : cpu_addr{cpu_addr}, size{size}, offset{offset}, alignment{alignment}, RasterizerCacheObject{ + host_ptr} {} + OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size) : RasterizerCache{rasterizer}, stream_buffer(size, true) {} @@ -26,11 +31,12 @@ GLintptr OGLBufferCache::UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size // TODO: Figure out which size is the best for given games. 
cache &= size >= 2048; + const auto& host_ptr{Memory::GetPointer(*cpu_addr)}; if (cache) { - auto entry = TryGet(*cpu_addr); + auto entry = TryGet(host_ptr); if (entry) { - if (entry->size >= size && entry->alignment == alignment) { - return entry->offset; + if (entry->GetSize() >= size && entry->GetAlignment() == alignment) { + return entry->GetOffset(); } Unregister(entry); } @@ -39,17 +45,17 @@ GLintptr OGLBufferCache::UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size AlignBuffer(alignment); const GLintptr uploaded_offset = buffer_offset; - Memory::ReadBlock(*cpu_addr, buffer_ptr, size); + if (!host_ptr) { + return uploaded_offset; + } + std::memcpy(buffer_ptr, host_ptr, size); buffer_ptr += size; buffer_offset += size; if (cache) { - auto entry = std::make_shared<CachedBufferEntry>(); - entry->offset = uploaded_offset; - entry->size = size; - entry->alignment = alignment; - entry->addr = *cpu_addr; + auto entry = std::make_shared<CachedBufferEntry>(*cpu_addr, size, uploaded_offset, + alignment, host_ptr); Register(entry); } diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h index c11acfb79..1de1f84ae 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.h +++ b/src/video_core/renderer_opengl/gl_buffer_cache.h @@ -17,22 +17,39 @@ namespace OpenGL { class RasterizerOpenGL; -struct CachedBufferEntry final : public RasterizerCacheObject { - VAddr GetAddr() const override { - return addr; +class CachedBufferEntry final : public RasterizerCacheObject { +public: + explicit CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset, + std::size_t alignment, u8* host_ptr); + + VAddr GetCpuAddr() const override { + return cpu_addr; } std::size_t GetSizeInBytes() const override { return size; } + std::size_t GetSize() const { + return size; + } + + GLintptr GetOffset() const { + return offset; + } + + std::size_t GetAlignment() const { + return alignment; + } + // We do not have to flush this cache as things in it are never modified by us. 
void Flush() override {} - VAddr addr; - std::size_t size; - GLintptr offset; - std::size_t alignment; +private: + VAddr cpu_addr{}; + std::size_t size{}; + GLintptr offset{}; + std::size_t alignment{}; }; class OGLBufferCache final : public RasterizerCache<std::shared_ptr<CachedBufferEntry>> { diff --git a/src/video_core/renderer_opengl/gl_global_cache.cpp b/src/video_core/renderer_opengl/gl_global_cache.cpp index c7f32feaa..a2c509c24 100644 --- a/src/video_core/renderer_opengl/gl_global_cache.cpp +++ b/src/video_core/renderer_opengl/gl_global_cache.cpp @@ -15,12 +15,13 @@ namespace OpenGL { -CachedGlobalRegion::CachedGlobalRegion(VAddr addr, u32 size) : addr{addr}, size{size} { +CachedGlobalRegion::CachedGlobalRegion(VAddr cpu_addr, u32 size, u8* host_ptr) + : cpu_addr{cpu_addr}, size{size}, RasterizerCacheObject{host_ptr} { buffer.Create(); // Bind and unbind the buffer so it gets allocated by the driver glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle); glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0); - LabelGLObject(GL_BUFFER, buffer.handle, addr, "GlobalMemory"); + LabelGLObject(GL_BUFFER, buffer.handle, cpu_addr, "GlobalMemory"); } void CachedGlobalRegion::Reload(u32 size_) { @@ -35,7 +36,7 @@ void CachedGlobalRegion::Reload(u32 size_) { // TODO(Rodrigo): Get rid of Memory::GetPointer with a staging buffer glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle); - glBufferData(GL_SHADER_STORAGE_BUFFER, size, Memory::GetPointer(addr), GL_DYNAMIC_DRAW); + glBufferData(GL_SHADER_STORAGE_BUFFER, size, GetHostPtr(), GL_DYNAMIC_DRAW); } GlobalRegion GlobalRegionCacheOpenGL::TryGetReservedGlobalRegion(VAddr addr, u32 size) const { @@ -46,19 +47,19 @@ GlobalRegion GlobalRegionCacheOpenGL::TryGetReservedGlobalRegion(VAddr addr, u32 return search->second; } -GlobalRegion GlobalRegionCacheOpenGL::GetUncachedGlobalRegion(VAddr addr, u32 size) { +GlobalRegion GlobalRegionCacheOpenGL::GetUncachedGlobalRegion(VAddr addr, u32 size, u8* host_ptr) { GlobalRegion region{TryGetReservedGlobalRegion(addr, size)}; if (!region) { // No reserved surface available, create a new one and reserve it - region = std::make_shared<CachedGlobalRegion>(addr, size); + region = std::make_shared<CachedGlobalRegion>(addr, size, host_ptr); ReserveGlobalRegion(region); } region->Reload(size); return region; } -void GlobalRegionCacheOpenGL::ReserveGlobalRegion(const GlobalRegion& region) { - reserve[region->GetAddr()] = region; +void GlobalRegionCacheOpenGL::ReserveGlobalRegion(GlobalRegion region) { + reserve.insert_or_assign(region->GetCpuAddr(), std::move(region)); } GlobalRegionCacheOpenGL::GlobalRegionCacheOpenGL(RasterizerOpenGL& rasterizer) @@ -80,11 +81,12 @@ GlobalRegion GlobalRegionCacheOpenGL::GetGlobalRegion( ASSERT(actual_addr); // Look up global region in the cache based on address - GlobalRegion region = TryGet(*actual_addr); + const auto& host_ptr{Memory::GetPointer(*actual_addr)}; + GlobalRegion region{TryGet(host_ptr)}; if (!region) { // No global region found - create a new one - region = GetUncachedGlobalRegion(*actual_addr, size); + region = GetUncachedGlobalRegion(*actual_addr, size, host_ptr); Register(region); } diff --git a/src/video_core/renderer_opengl/gl_global_cache.h b/src/video_core/renderer_opengl/gl_global_cache.h index 37830bb7c..e497a0619 100644 --- a/src/video_core/renderer_opengl/gl_global_cache.h +++ b/src/video_core/renderer_opengl/gl_global_cache.h @@ -27,15 +27,13 @@ using GlobalRegion = std::shared_ptr<CachedGlobalRegion>; class CachedGlobalRegion final : public RasterizerCacheObject { 
public: - explicit CachedGlobalRegion(VAddr addr, u32 size); + explicit CachedGlobalRegion(VAddr cpu_addr, u32 size, u8* host_ptr); - /// Gets the address of the shader in guest memory, required for cache management - VAddr GetAddr() const { - return addr; + VAddr GetCpuAddr() const override { + return cpu_addr; } - /// Gets the size of the shader in guest memory, required for cache management - std::size_t GetSizeInBytes() const { + std::size_t GetSizeInBytes() const override { return size; } @@ -53,9 +51,8 @@ public: } private: - VAddr addr{}; + VAddr cpu_addr{}; u32 size{}; - OGLBuffer buffer; }; @@ -69,8 +66,8 @@ public: private: GlobalRegion TryGetReservedGlobalRegion(VAddr addr, u32 size) const; - GlobalRegion GetUncachedGlobalRegion(VAddr addr, u32 size); - void ReserveGlobalRegion(const GlobalRegion& region); + GlobalRegion GetUncachedGlobalRegion(VAddr addr, u32 size, u8* host_ptr); + void ReserveGlobalRegion(GlobalRegion region); std::unordered_map<VAddr, GlobalRegion> reserve; }; diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 12d876120..bb6de5477 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -102,8 +102,9 @@ struct FramebufferCacheKey { RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::System& system, ScreenInfo& info) - : res_cache{*this}, shader_cache{*this, system}, emu_window{window}, screen_info{info}, - buffer_cache(*this, STREAM_BUFFER_SIZE), global_cache{*this} { + : res_cache{*this}, shader_cache{*this, system}, global_cache{*this}, + emu_window{window}, system{system}, screen_info{info}, + buffer_cache(*this, STREAM_BUFFER_SIZE) { // Create sampler objects for (std::size_t i = 0; i < texture_samplers.size(); ++i) { texture_samplers[i].Create(); @@ -118,7 +119,7 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::Syst glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &uniform_buffer_alignment); - LOG_CRITICAL(Render_OpenGL, "Sync fixed function OpenGL state here!"); + LOG_DEBUG(Render_OpenGL, "Sync fixed function OpenGL state here"); CheckExtensions(); } @@ -138,7 +139,7 @@ void RasterizerOpenGL::CheckExtensions() { } GLuint RasterizerOpenGL::SetupVertexFormat() { - auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); + auto& gpu = system.GPU().Maxwell3D(); const auto& regs = gpu.regs; if (!gpu.dirty_flags.vertex_attrib_format) { @@ -177,7 +178,7 @@ GLuint RasterizerOpenGL::SetupVertexFormat() { continue; const auto& buffer = regs.vertex_array[attrib.buffer]; - LOG_TRACE(HW_GPU, + LOG_TRACE(Render_OpenGL, "vertex attrib {}, count={}, size={}, type={}, offset={}, normalize={}", index, attrib.ComponentCount(), attrib.SizeString(), attrib.TypeString(), attrib.offset.Value(), attrib.IsNormalized()); @@ -200,24 +201,24 @@ GLuint RasterizerOpenGL::SetupVertexFormat() { } // Rebinding the VAO invalidates the vertex buffer bindings. 
- gpu.dirty_flags.vertex_array = 0xFFFFFFFF; + gpu.dirty_flags.vertex_array.set(); state.draw.vertex_array = vao_entry.handle; return vao_entry.handle; } void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) { - auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); + auto& gpu = system.GPU().Maxwell3D(); const auto& regs = gpu.regs; - if (!gpu.dirty_flags.vertex_array) + if (gpu.dirty_flags.vertex_array.none()) return; MICROPROFILE_SCOPE(OpenGL_VB); // Upload all guest vertex arrays sequentially to our buffer for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) { - if (~gpu.dirty_flags.vertex_array & (1u << index)) + if (!gpu.dirty_flags.vertex_array[index]) continue; const auto& vertex_array = regs.vertex_array[index]; @@ -244,11 +245,11 @@ void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) { } } - gpu.dirty_flags.vertex_array = 0; + gpu.dirty_flags.vertex_array.reset(); } DrawParameters RasterizerOpenGL::SetupDraw() { - const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); + const auto& gpu = system.GPU().Maxwell3D(); const auto& regs = gpu.regs; const bool is_indexed = accelerate_draw == AccelDraw::Indexed; @@ -297,7 +298,7 @@ DrawParameters RasterizerOpenGL::SetupDraw() { void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { MICROPROFILE_SCOPE(OpenGL_Shader); - auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); + auto& gpu = system.GPU().Maxwell3D(); BaseBindings base_bindings; std::array<bool, Maxwell::NumClipDistances> clip_distances{}; @@ -343,9 +344,8 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { shader_program_manager->UseProgrammableFragmentShader(program_handle); break; default: - LOG_CRITICAL(HW_GPU, "Unimplemented shader index={}, enable={}, offset=0x{:08X}", index, - shader_config.enable.Value(), shader_config.offset); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unimplemented shader index={}, enable={}, offset=0x{:08X}", index, + shader_config.enable.Value(), shader_config.offset); } const auto stage_enum = static_cast<Maxwell::ShaderStage>(stage); @@ -414,7 +414,7 @@ void RasterizerOpenGL::SetupCachedFramebuffer(const FramebufferCacheKey& fbkey, } std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; std::size_t size = 0; for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) { @@ -432,7 +432,7 @@ std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const { } std::size_t RasterizerOpenGL::CalculateIndexBufferSize() const { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; return static_cast<std::size_t>(regs.index_array.count) * static_cast<std::size_t>(regs.index_array.FormatSizeInBytes()); @@ -449,7 +449,7 @@ static constexpr auto RangeFromInterval(Map& map, const Interval& interval) { return boost::make_iterator_range(map.equal_range(interval)); } -void RasterizerOpenGL::UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) { +void RasterizerOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) { const u64 page_start{addr >> Memory::PAGE_BITS}; const u64 page_end{(addr + size + Memory::PAGE_SIZE - 1) >> Memory::PAGE_BITS}; @@ -488,13 +488,13 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers( OpenGLState& current_state, bool using_color_fb, bool using_depth_fb, bool preserve_contents, std::optional<std::size_t> single_color_target) { 
MICROPROFILE_SCOPE(OpenGL_Framebuffer); - const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); + auto& gpu = system.GPU().Maxwell3D(); const auto& regs = gpu.regs; const FramebufferConfigState fb_config_state{using_color_fb, using_depth_fb, preserve_contents, single_color_target}; - if (fb_config_state == current_framebuffer_config_state && gpu.dirty_flags.color_buffer == 0 && - !gpu.dirty_flags.zeta_buffer) { + if (fb_config_state == current_framebuffer_config_state && + gpu.dirty_flags.color_buffer.none() && !gpu.dirty_flags.zeta_buffer) { // Only skip if the previous ConfigureFramebuffers call was from the same kind (multiple or // single color targets). This is done because the guest registers may not change but the // host framebuffer may contain different attachments @@ -582,7 +582,7 @@ void RasterizerOpenGL::Clear() { const auto prev_state{state}; SCOPE_EXIT({ prev_state.Apply(); }); - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; bool use_color{}; bool use_depth{}; bool use_stencil{}; @@ -673,7 +673,7 @@ void RasterizerOpenGL::DrawArrays() { return; MICROPROFILE_SCOPE(OpenGL_Drawing); - auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); + auto& gpu = system.GPU().Maxwell3D(); const auto& regs = gpu.regs; ConfigureFramebuffers(state); @@ -721,10 +721,10 @@ void RasterizerOpenGL::DrawArrays() { // Add space for at least 18 constant buffers buffer_size += Maxwell::MaxConstBuffers * (MaxConstbufferSize + uniform_buffer_alignment); - bool invalidate = buffer_cache.Map(buffer_size); + const bool invalidate = buffer_cache.Map(buffer_size); if (invalidate) { // As all cached buffers are invalidated, we need to recheck their state. - gpu.dirty_flags.vertex_array = 0xFFFFFFFF; + gpu.dirty_flags.vertex_array.set(); } const GLuint vao = SetupVertexFormat(); @@ -738,33 +738,21 @@ void RasterizerOpenGL::DrawArrays() { shader_program_manager->ApplyTo(state); state.Apply(); - // Execute draw call + res_cache.SignalPreDrawCall(); params.DispatchDraw(); - - // Disable scissor test - state.viewports[0].scissor.enabled = false; + res_cache.SignalPostDrawCall(); accelerate_draw = AccelDraw::Disabled; - - // Unbind textures for potential future use as framebuffer attachments - for (auto& texture_unit : state.texture_units) { - texture_unit.Unbind(); - } - state.Apply(); } void RasterizerOpenGL::FlushAll() {} -void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) { +void RasterizerOpenGL::FlushRegion(CacheAddr addr, u64 size) { MICROPROFILE_SCOPE(OpenGL_CacheManagement); - - if (Settings::values.use_accurate_gpu_emulation) { - // Only flush if use_accurate_gpu_emulation is enabled, as it incurs a performance hit - res_cache.FlushRegion(addr, size); - } + res_cache.FlushRegion(addr, size); } -void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) { +void RasterizerOpenGL::InvalidateRegion(CacheAddr addr, u64 size) { MICROPROFILE_SCOPE(OpenGL_CacheManagement); res_cache.InvalidateRegion(addr, size); shader_cache.InvalidateRegion(addr, size); @@ -772,15 +760,15 @@ void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) { buffer_cache.InvalidateRegion(addr, size); } -void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) { +void RasterizerOpenGL::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { FlushRegion(addr, size); InvalidateRegion(addr, size); } bool RasterizerOpenGL::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, const 
Tegra::Engines::Fermi2D::Regs::Surface& dst, - const MathUtil::Rectangle<u32>& src_rect, - const MathUtil::Rectangle<u32>& dst_rect) { + const Common::Rectangle<u32>& src_rect, + const Common::Rectangle<u32>& dst_rect) { MICROPROFILE_SCOPE(OpenGL_Blits); res_cache.FermiCopySurface(src, dst, src_rect, dst_rect); return true; @@ -794,7 +782,7 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config, MICROPROFILE_SCOPE(OpenGL_CacheManagement); - const auto& surface{res_cache.TryFindFramebufferSurface(framebuffer_addr)}; + const auto& surface{res_cache.TryFindFramebufferSurface(Memory::GetPointer(framebuffer_addr))}; if (!surface) { return {}; } @@ -805,7 +793,10 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config, VideoCore::Surface::PixelFormatFromGPUPixelFormat(config.pixel_format)}; ASSERT_MSG(params.width == config.width, "Framebuffer width is different"); ASSERT_MSG(params.height == config.height, "Framebuffer height is different"); - ASSERT_MSG(params.pixel_format == pixel_format, "Framebuffer pixel_format is different"); + + if (params.pixel_format != pixel_format) { + LOG_WARNING(Render_OpenGL, "Framebuffer pixel_format is different"); + } screen_info.display_texture = surface->Texture().handle; @@ -814,104 +805,87 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config, void RasterizerOpenGL::SamplerInfo::Create() { sampler.Create(); - mag_filter = min_filter = Tegra::Texture::TextureFilter::Linear; - wrap_u = wrap_v = wrap_p = Tegra::Texture::WrapMode::Wrap; - uses_depth_compare = false; + mag_filter = Tegra::Texture::TextureFilter::Linear; + min_filter = Tegra::Texture::TextureFilter::Linear; + wrap_u = Tegra::Texture::WrapMode::Wrap; + wrap_v = Tegra::Texture::WrapMode::Wrap; + wrap_p = Tegra::Texture::WrapMode::Wrap; + use_depth_compare = false; depth_compare_func = Tegra::Texture::DepthCompareFunc::Never; - // default is GL_LINEAR_MIPMAP_LINEAR + // OpenGL's default is GL_LINEAR_MIPMAP_LINEAR glSamplerParameteri(sampler.handle, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - // Other attributes have correct defaults glSamplerParameteri(sampler.handle, GL_TEXTURE_COMPARE_FUNC, GL_NEVER); + + // Other attributes have correct defaults } void RasterizerOpenGL::SamplerInfo::SyncWithConfig(const Tegra::Texture::TSCEntry& config) { - const GLuint s = sampler.handle; + const GLuint sampler_id = sampler.handle; if (mag_filter != config.mag_filter) { mag_filter = config.mag_filter; glSamplerParameteri( - s, GL_TEXTURE_MAG_FILTER, + sampler_id, GL_TEXTURE_MAG_FILTER, MaxwellToGL::TextureFilterMode(mag_filter, Tegra::Texture::TextureMipmapFilter::None)); } - if (min_filter != config.min_filter || mip_filter != config.mip_filter) { + if (min_filter != config.min_filter || mipmap_filter != config.mipmap_filter) { min_filter = config.min_filter; - mip_filter = config.mip_filter; - glSamplerParameteri(s, GL_TEXTURE_MIN_FILTER, - MaxwellToGL::TextureFilterMode(min_filter, mip_filter)); + mipmap_filter = config.mipmap_filter; + glSamplerParameteri(sampler_id, GL_TEXTURE_MIN_FILTER, + MaxwellToGL::TextureFilterMode(min_filter, mipmap_filter)); } if (wrap_u != config.wrap_u) { wrap_u = config.wrap_u; - glSamplerParameteri(s, GL_TEXTURE_WRAP_S, MaxwellToGL::WrapMode(wrap_u)); + glSamplerParameteri(sampler_id, GL_TEXTURE_WRAP_S, MaxwellToGL::WrapMode(wrap_u)); } if (wrap_v != config.wrap_v) { wrap_v = config.wrap_v; - glSamplerParameteri(s, GL_TEXTURE_WRAP_T, MaxwellToGL::WrapMode(wrap_v)); + glSamplerParameteri(sampler_id, 
GL_TEXTURE_WRAP_T, MaxwellToGL::WrapMode(wrap_v)); } if (wrap_p != config.wrap_p) { wrap_p = config.wrap_p; - glSamplerParameteri(s, GL_TEXTURE_WRAP_R, MaxwellToGL::WrapMode(wrap_p)); + glSamplerParameteri(sampler_id, GL_TEXTURE_WRAP_R, MaxwellToGL::WrapMode(wrap_p)); } - if (uses_depth_compare != (config.depth_compare_enabled == 1)) { - uses_depth_compare = (config.depth_compare_enabled == 1); - if (uses_depth_compare) { - glSamplerParameteri(s, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE); - } else { - glSamplerParameteri(s, GL_TEXTURE_COMPARE_MODE, GL_NONE); - } + if (const bool enabled = config.depth_compare_enabled == 1; use_depth_compare != enabled) { + use_depth_compare = enabled; + glSamplerParameteri(sampler_id, GL_TEXTURE_COMPARE_MODE, + use_depth_compare ? GL_COMPARE_REF_TO_TEXTURE : GL_NONE); } if (depth_compare_func != config.depth_compare_func) { depth_compare_func = config.depth_compare_func; - glSamplerParameteri(s, GL_TEXTURE_COMPARE_FUNC, + glSamplerParameteri(sampler_id, GL_TEXTURE_COMPARE_FUNC, MaxwellToGL::DepthCompareFunc(depth_compare_func)); } - GLvec4 new_border_color; - if (config.srgb_conversion) { - new_border_color[0] = config.srgb_border_color_r / 255.0f; - new_border_color[1] = config.srgb_border_color_g / 255.0f; - new_border_color[2] = config.srgb_border_color_g / 255.0f; - } else { - new_border_color[0] = config.border_color_r; - new_border_color[1] = config.border_color_g; - new_border_color[2] = config.border_color_b; - } - new_border_color[3] = config.border_color_a; - - if (border_color != new_border_color) { + if (const auto new_border_color = config.GetBorderColor(); border_color != new_border_color) { border_color = new_border_color; - glSamplerParameterfv(s, GL_TEXTURE_BORDER_COLOR, border_color.data()); + glSamplerParameterfv(sampler_id, GL_TEXTURE_BORDER_COLOR, border_color.data()); } - const float anisotropic_max = static_cast<float>(1 << config.max_anisotropy.Value()); - if (anisotropic_max != max_anisotropic) { - max_anisotropic = anisotropic_max; + if (const float anisotropic = config.GetMaxAnisotropy(); max_anisotropic != anisotropic) { + max_anisotropic = anisotropic; if (GLAD_GL_ARB_texture_filter_anisotropic) { - glSamplerParameterf(s, GL_TEXTURE_MAX_ANISOTROPY, max_anisotropic); + glSamplerParameterf(sampler_id, GL_TEXTURE_MAX_ANISOTROPY, max_anisotropic); } else if (GLAD_GL_EXT_texture_filter_anisotropic) { - glSamplerParameterf(s, GL_TEXTURE_MAX_ANISOTROPY_EXT, max_anisotropic); + glSamplerParameterf(sampler_id, GL_TEXTURE_MAX_ANISOTROPY_EXT, max_anisotropic); } } - const float lod_min = static_cast<float>(config.min_lod_clamp.Value()) / 256.0f; - if (lod_min != min_lod) { - min_lod = lod_min; - glSamplerParameterf(s, GL_TEXTURE_MIN_LOD, min_lod); - } - const float lod_max = static_cast<float>(config.max_lod_clamp.Value()) / 256.0f; - if (lod_max != max_lod) { - max_lod = lod_max; - glSamplerParameterf(s, GL_TEXTURE_MAX_LOD, max_lod); + if (const float min = config.GetMinLod(); min_lod != min) { + min_lod = min; + glSamplerParameterf(sampler_id, GL_TEXTURE_MIN_LOD, min_lod); } - const u32 bias = config.mip_lod_bias.Value(); - // Sign extend the 13-bit value. 
- constexpr u32 mask = 1U << (13 - 1); - const float bias_lod = static_cast<s32>((bias ^ mask) - mask) / 256.f; - if (lod_bias != bias_lod) { - lod_bias = bias_lod; - glSamplerParameterf(s, GL_TEXTURE_LOD_BIAS, lod_bias); + if (const float max = config.GetMaxLod(); max_lod != max) { + max_lod = max; + glSamplerParameterf(sampler_id, GL_TEXTURE_MAX_LOD, max_lod); + } + + if (const float bias = config.GetLodBias(); lod_bias != bias) { + lod_bias = bias; + glSamplerParameterf(sampler_id, GL_TEXTURE_LOD_BIAS, lod_bias); } } @@ -919,7 +893,7 @@ void RasterizerOpenGL::SetupConstBuffers(Tegra::Engines::Maxwell3D::Regs::Shader const Shader& shader, GLuint program_handle, BaseBindings base_bindings) { MICROPROFILE_SCOPE(OpenGL_UBO); - const auto& gpu = Core::System::GetInstance().GPU(); + const auto& gpu = system.GPU(); const auto& maxwell3d = gpu.Maxwell3D(); const auto& shader_stage = maxwell3d.state.shader_stages[static_cast<std::size_t>(stage)]; const auto& entries = shader->GetShaderEntries().const_buffers; @@ -951,8 +925,8 @@ void RasterizerOpenGL::SetupConstBuffers(Tegra::Engines::Maxwell3D::Regs::Shader size = buffer.size; if (size > MaxConstbufferSize) { - LOG_CRITICAL(HW_GPU, "indirect constbuffer size {} exceeds maximum {}", size, - MaxConstbufferSize); + LOG_WARNING(Render_OpenGL, "Indirect constbuffer size {} exceeds maximum {}", size, + MaxConstbufferSize); size = MaxConstbufferSize; } } else { @@ -998,7 +972,7 @@ void RasterizerOpenGL::SetupGlobalRegions(Tegra::Engines::Maxwell3D::Regs::Shade void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& shader, GLuint program_handle, BaseBindings base_bindings) { MICROPROFILE_SCOPE(OpenGL_Texture); - const auto& gpu = Core::System::GetInstance().GPU(); + const auto& gpu = system.GPU(); const auto& maxwell3d = gpu.Maxwell3D(); const auto& entries = shader->GetShaderEntries().samplers; @@ -1012,10 +986,9 @@ void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& s texture_samplers[current_bindpoint].SyncWithConfig(texture.tsc); - Surface surface = res_cache.GetTextureSurface(texture, entry); - if (surface != nullptr) { + if (Surface surface = res_cache.GetTextureSurface(texture, entry); surface) { state.texture_units[current_bindpoint].texture = - entry.IsArray() ? 
surface->TextureLayer().handle : surface->Texture().handle; + surface->Texture(entry.IsArray()).handle; surface->UpdateSwizzle(texture.tic.x_source, texture.tic.y_source, texture.tic.z_source, texture.tic.w_source); } else { @@ -1026,7 +999,7 @@ void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& s } void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; const bool geometry_shaders_enabled = regs.IsShaderConfigEnabled(static_cast<size_t>(Maxwell::ShaderProgram::Geometry)); const std::size_t viewport_count = @@ -1034,7 +1007,7 @@ void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) { for (std::size_t i = 0; i < viewport_count; i++) { auto& viewport = current_state.viewports[i]; const auto& src = regs.viewports[i]; - const MathUtil::Rectangle<s32> viewport_rect{regs.viewport_transform[i].GetRect()}; + const Common::Rectangle<s32> viewport_rect{regs.viewport_transform[i].GetRect()}; viewport.x = viewport_rect.left; viewport.y = viewport_rect.bottom; viewport.width = viewport_rect.GetWidth(); @@ -1049,7 +1022,7 @@ void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) { void RasterizerOpenGL::SyncClipEnabled( const std::array<bool, Maxwell::Regs::NumClipDistances>& clip_mask) { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; const std::array<bool, Maxwell::Regs::NumClipDistances> reg_state{ regs.clip_distance_enabled.c0 != 0, regs.clip_distance_enabled.c1 != 0, regs.clip_distance_enabled.c2 != 0, regs.clip_distance_enabled.c3 != 0, @@ -1066,7 +1039,7 @@ void RasterizerOpenGL::SyncClipCoef() { } void RasterizerOpenGL::SyncCullMode() { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; state.cull.enabled = regs.cull.enabled != 0; @@ -1090,14 +1063,14 @@ void RasterizerOpenGL::SyncCullMode() { } void RasterizerOpenGL::SyncPrimitiveRestart() { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; state.primitive_restart.enabled = regs.primitive_restart.enabled; state.primitive_restart.index = regs.primitive_restart.index; } void RasterizerOpenGL::SyncDepthTestState() { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; state.depth.test_enabled = regs.depth_test_enable != 0; state.depth.write_mask = regs.depth_write_enabled ? GL_TRUE : GL_FALSE; @@ -1109,7 +1082,7 @@ void RasterizerOpenGL::SyncDepthTestState() { } void RasterizerOpenGL::SyncStencilTestState() { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; state.stencil.test_enabled = regs.stencil_enable != 0; if (!regs.stencil_enable) { @@ -1143,7 +1116,7 @@ void RasterizerOpenGL::SyncStencilTestState() { } void RasterizerOpenGL::SyncColorMask() { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; const std::size_t count = regs.independent_blend_enable ? 
Tegra::Engines::Maxwell3D::Regs::NumRenderTargets : 1; for (std::size_t i = 0; i < count; i++) { @@ -1157,18 +1130,18 @@ void RasterizerOpenGL::SyncColorMask() { } void RasterizerOpenGL::SyncMultiSampleState() { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; state.multisample_control.alpha_to_coverage = regs.multisample_control.alpha_to_coverage != 0; state.multisample_control.alpha_to_one = regs.multisample_control.alpha_to_one != 0; } void RasterizerOpenGL::SyncFragmentColorClampState() { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; state.fragment_color_clamp.enabled = regs.frag_color_clamp != 0; } void RasterizerOpenGL::SyncBlendState() { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; state.blend_color.red = regs.blend_color.r; state.blend_color.green = regs.blend_color.g; @@ -1210,7 +1183,7 @@ void RasterizerOpenGL::SyncBlendState() { } void RasterizerOpenGL::SyncLogicOpState() { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; state.logic_op.enabled = regs.logic_op.enable != 0; @@ -1224,7 +1197,7 @@ void RasterizerOpenGL::SyncLogicOpState() { } void RasterizerOpenGL::SyncScissorTest(OpenGLState& current_state) { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; const bool geometry_shaders_enabled = regs.IsShaderConfigEnabled(static_cast<size_t>(Maxwell::ShaderProgram::Geometry)); const std::size_t viewport_count = @@ -1246,21 +1219,17 @@ void RasterizerOpenGL::SyncScissorTest(OpenGLState& current_state) { } void RasterizerOpenGL::SyncTransformFeedback() { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; - - if (regs.tfb_enabled != 0) { - LOG_CRITICAL(Render_OpenGL, "Transform feedbacks are not implemented"); - UNREACHABLE(); - } + const auto& regs = system.GPU().Maxwell3D().regs; + UNIMPLEMENTED_IF_MSG(regs.tfb_enabled != 0, "Transform feedbacks are not implemented"); } void RasterizerOpenGL::SyncPointState() { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; state.point.size = regs.point_size; } void RasterizerOpenGL::SyncPolygonOffset() { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; + const auto& regs = system.GPU().Maxwell3D().regs; state.polygon_offset.fill_enable = regs.polygon_offset_fill_enable != 0; state.polygon_offset.line_enable = regs.polygon_offset_line_enable != 0; state.polygon_offset.point_enable = regs.polygon_offset_point_enable != 0; @@ -1270,13 +1239,9 @@ void RasterizerOpenGL::SyncPolygonOffset() { } void RasterizerOpenGL::CheckAlphaTests() { - const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; - - if (regs.alpha_test_enabled != 0 && regs.rt_control.count > 1) { - LOG_CRITICAL(Render_OpenGL, "Alpha Testing is enabled with Multiple Render Targets, " - "this behavior is undefined."); - UNREACHABLE(); - } + const auto& regs = system.GPU().Maxwell3D().regs; + UNIMPLEMENTED_IF_MSG(regs.alpha_test_enabled != 0 && regs.rt_control.count > 1, + "Alpha Testing is enabled with more than one rendertarget"); } } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 
258d62259..30f3e8acb 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -57,17 +57,17 @@ public: void DrawArrays() override; void Clear() override; void FlushAll() override; - void FlushRegion(VAddr addr, u64 size) override; - void InvalidateRegion(VAddr addr, u64 size) override; - void FlushAndInvalidateRegion(VAddr addr, u64 size) override; + void FlushRegion(CacheAddr addr, u64 size) override; + void InvalidateRegion(CacheAddr addr, u64 size) override; + void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override; bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, const Tegra::Engines::Fermi2D::Regs::Surface& dst, - const MathUtil::Rectangle<u32>& src_rect, - const MathUtil::Rectangle<u32>& dst_rect) override; + const Common::Rectangle<u32>& src_rect, + const Common::Rectangle<u32>& dst_rect) override; bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, u32 pixel_stride) override; bool AccelerateDrawBatch(bool is_indexed) override; - void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) override; + void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override; void LoadDiskResources(const std::atomic_bool& stop_loading, const VideoCore::DiskResourceLoadCallback& callback) override; @@ -94,11 +94,12 @@ private: private: Tegra::Texture::TextureFilter mag_filter = Tegra::Texture::TextureFilter::Nearest; Tegra::Texture::TextureFilter min_filter = Tegra::Texture::TextureFilter::Nearest; - Tegra::Texture::TextureMipmapFilter mip_filter = Tegra::Texture::TextureMipmapFilter::None; + Tegra::Texture::TextureMipmapFilter mipmap_filter = + Tegra::Texture::TextureMipmapFilter::None; Tegra::Texture::WrapMode wrap_u = Tegra::Texture::WrapMode::ClampToEdge; Tegra::Texture::WrapMode wrap_v = Tegra::Texture::WrapMode::ClampToEdge; Tegra::Texture::WrapMode wrap_p = Tegra::Texture::WrapMode::ClampToEdge; - bool uses_depth_compare = false; + bool use_depth_compare = false; Tegra::Texture::DepthCompareFunc depth_compare_func = Tegra::Texture::DepthCompareFunc::Always; GLvec4 border_color = {}; @@ -214,6 +215,7 @@ private: GlobalRegionCacheOpenGL global_cache; Core::Frontend::EmuWindow& emu_window; + Core::System& system; ScreenInfo& screen_info; diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 59f671048..451de00e8 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -3,6 +3,7 @@ // Refer to the license.txt file included. #include <algorithm> +#include <optional> #include <glad/glad.h> #include "common/alignment.h" @@ -20,7 +21,7 @@ #include "video_core/renderer_opengl/gl_rasterizer_cache.h" #include "video_core/renderer_opengl/utils.h" #include "video_core/surface.h" -#include "video_core/textures/astc.h" +#include "video_core/textures/convert.h" #include "video_core/textures/decoders.h" namespace OpenGL { @@ -60,6 +61,7 @@ void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr_) { addr = cpu_addr ? 
*cpu_addr : 0; gpu_addr = gpu_addr_; + host_ptr = Memory::GetPointer(addr); size_in_bytes = SizeInBytesRaw(); if (IsPixelFormatASTC(pixel_format)) { @@ -399,7 +401,28 @@ static const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType return format; } -MathUtil::Rectangle<u32> SurfaceParams::GetRect(u32 mip_level) const { +/// Returns the discrepant array target +constexpr GLenum GetArrayDiscrepantTarget(SurfaceTarget target) { + switch (target) { + case SurfaceTarget::Texture1D: + return GL_TEXTURE_1D_ARRAY; + case SurfaceTarget::Texture2D: + return GL_TEXTURE_2D_ARRAY; + case SurfaceTarget::Texture3D: + return GL_NONE; + case SurfaceTarget::Texture1DArray: + return GL_TEXTURE_1D; + case SurfaceTarget::Texture2DArray: + return GL_TEXTURE_2D; + case SurfaceTarget::TextureCubemap: + return GL_TEXTURE_CUBE_MAP_ARRAY; + case SurfaceTarget::TextureCubeArray: + return GL_TEXTURE_CUBE_MAP; + } + return GL_NONE; +} + +Common::Rectangle<u32> SurfaceParams::GetRect(u32 mip_level) const { u32 actual_height{std::max(1U, unaligned_height >> mip_level)}; if (IsPixelFormatASTC(pixel_format)) { // ASTC formats must stop at the ATSC block size boundary @@ -423,8 +446,8 @@ void SwizzleFunc(const MortonSwizzleMode& mode, const SurfaceParams& params, for (u32 i = 0; i < params.depth; i++) { MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level), params.MipBlockHeight(mip_level), params.MipHeight(mip_level), - params.MipBlockDepth(mip_level), params.tile_width_spacing, 1, - gl_buffer.data() + offset_gl, gl_size, params.addr + offset); + params.MipBlockDepth(mip_level), 1, params.tile_width_spacing, + gl_buffer.data() + offset_gl, params.addr + offset); offset += layer_size; offset_gl += gl_size; } @@ -433,7 +456,7 @@ void SwizzleFunc(const MortonSwizzleMode& mode, const SurfaceParams& params, MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level), params.MipBlockHeight(mip_level), params.MipHeight(mip_level), params.MipBlockDepth(mip_level), depth, params.tile_width_spacing, - gl_buffer.data(), gl_buffer.size(), params.addr + offset); + gl_buffer.data(), params.addr + offset); } } @@ -541,14 +564,16 @@ void RasterizerCacheOpenGL::CopySurface(const Surface& src_surface, const Surfac } CachedSurface::CachedSurface(const SurfaceParams& params) - : params(params), gl_target(SurfaceTargetToGL(params.target)), - cached_size_in_bytes(params.size_in_bytes) { + : params{params}, gl_target{SurfaceTargetToGL(params.target)}, + cached_size_in_bytes{params.size_in_bytes}, RasterizerCacheObject{params.host_ptr} { texture.Create(gl_target); // TODO(Rodrigo): Using params.GetRect() returns a different size than using its Mip*(0) // alternatives. This signals a bug on those functions. 
const auto width = static_cast<GLsizei>(params.MipWidth(0)); const auto height = static_cast<GLsizei>(params.MipHeight(0)); + memory_size = params.MemorySize(); + reinterpreted = false; const auto& format_tuple = GetFormatTuple(params.pixel_format, params.component_type); gl_internal_format = format_tuple.internal_format; @@ -594,103 +619,6 @@ CachedSurface::CachedSurface(const SurfaceParams& params) } } -static void ConvertS8Z24ToZ24S8(std::vector<u8>& data, u32 width, u32 height, bool reverse) { - union S8Z24 { - BitField<0, 24, u32> z24; - BitField<24, 8, u32> s8; - }; - static_assert(sizeof(S8Z24) == 4, "S8Z24 is incorrect size"); - - union Z24S8 { - BitField<0, 8, u32> s8; - BitField<8, 24, u32> z24; - }; - static_assert(sizeof(Z24S8) == 4, "Z24S8 is incorrect size"); - - S8Z24 s8z24_pixel{}; - Z24S8 z24s8_pixel{}; - constexpr auto bpp{GetBytesPerPixel(PixelFormat::S8Z24)}; - for (std::size_t y = 0; y < height; ++y) { - for (std::size_t x = 0; x < width; ++x) { - const std::size_t offset{bpp * (y * width + x)}; - if (reverse) { - std::memcpy(&z24s8_pixel, &data[offset], sizeof(Z24S8)); - s8z24_pixel.s8.Assign(z24s8_pixel.s8); - s8z24_pixel.z24.Assign(z24s8_pixel.z24); - std::memcpy(&data[offset], &s8z24_pixel, sizeof(S8Z24)); - } else { - std::memcpy(&s8z24_pixel, &data[offset], sizeof(S8Z24)); - z24s8_pixel.s8.Assign(s8z24_pixel.s8); - z24s8_pixel.z24.Assign(s8z24_pixel.z24); - std::memcpy(&data[offset], &z24s8_pixel, sizeof(Z24S8)); - } - } - } -} - -/** - * Helper function to perform software conversion (as needed) when loading a buffer from Switch - * memory. This is for Maxwell pixel formats that cannot be represented as-is in OpenGL or with - * typical desktop GPUs. - */ -static void ConvertFormatAsNeeded_LoadGLBuffer(std::vector<u8>& data, PixelFormat pixel_format, - u32 width, u32 height, u32 depth) { - switch (pixel_format) { - case PixelFormat::ASTC_2D_4X4: - case PixelFormat::ASTC_2D_8X8: - case PixelFormat::ASTC_2D_8X5: - case PixelFormat::ASTC_2D_5X4: - case PixelFormat::ASTC_2D_5X5: - case PixelFormat::ASTC_2D_4X4_SRGB: - case PixelFormat::ASTC_2D_8X8_SRGB: - case PixelFormat::ASTC_2D_8X5_SRGB: - case PixelFormat::ASTC_2D_5X4_SRGB: - case PixelFormat::ASTC_2D_5X5_SRGB: - case PixelFormat::ASTC_2D_10X8: - case PixelFormat::ASTC_2D_10X8_SRGB: { - // Convert ASTC pixel formats to RGBA8, as most desktop GPUs do not support ASTC. - u32 block_width{}; - u32 block_height{}; - std::tie(block_width, block_height) = GetASTCBlockSize(pixel_format); - data = - Tegra::Texture::ASTC::Decompress(data, width, height, depth, block_width, block_height); - break; - } - case PixelFormat::S8Z24: - // Convert the S8Z24 depth format to Z24S8, as OpenGL does not support S8Z24. - ConvertS8Z24ToZ24S8(data, width, height, false); - break; - } -} - -/** - * Helper function to perform software conversion (as needed) when flushing a buffer from OpenGL to - * Switch memory. This is for Maxwell pixel formats that cannot be represented as-is in OpenGL or - * with typical desktop GPUs. 
- */ -static void ConvertFormatAsNeeded_FlushGLBuffer(std::vector<u8>& data, PixelFormat pixel_format, - u32 width, u32 height) { - switch (pixel_format) { - case PixelFormat::ASTC_2D_4X4: - case PixelFormat::ASTC_2D_8X8: - case PixelFormat::ASTC_2D_4X4_SRGB: - case PixelFormat::ASTC_2D_8X8_SRGB: - case PixelFormat::ASTC_2D_5X5: - case PixelFormat::ASTC_2D_5X5_SRGB: - case PixelFormat::ASTC_2D_10X8: - case PixelFormat::ASTC_2D_10X8_SRGB: { - LOG_CRITICAL(HW_GPU, "Conversion of format {} after texture flushing is not implemented", - static_cast<u32>(pixel_format)); - UNREACHABLE(); - break; - } - case PixelFormat::S8Z24: - // Convert the Z24S8 depth format to S8Z24, as OpenGL does not support S8Z24. - ConvertS8Z24ToZ24S8(data, width, height, true); - break; - } -} - MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 192, 64)); void CachedSurface::LoadGLBuffer() { MICROPROFILE_SCOPE(OpenGL_SurfaceLoad); @@ -706,10 +634,9 @@ void CachedSurface::LoadGLBuffer() { const u32 bpp = params.GetFormatBpp() / 8; const u32 copy_size = params.width * bpp; if (params.pitch == copy_size) { - std::memcpy(gl_buffer[0].data(), Memory::GetPointer(params.addr), - params.size_in_bytes_gl); + std::memcpy(gl_buffer[0].data(), params.host_ptr, params.size_in_bytes_gl); } else { - const u8* start = Memory::GetPointer(params.addr); + const u8* start{params.host_ptr}; u8* write_to = gl_buffer[0].data(); for (u32 h = params.height; h > 0; h--) { std::memcpy(write_to, start, copy_size); @@ -719,8 +646,16 @@ void CachedSurface::LoadGLBuffer() { } } for (u32 i = 0; i < params.max_mip_level; i++) { - ConvertFormatAsNeeded_LoadGLBuffer(gl_buffer[i], params.pixel_format, params.MipWidth(i), - params.MipHeight(i), params.MipDepth(i)); + const u32 width = params.MipWidth(i); + const u32 height = params.MipHeight(i); + const u32 depth = params.MipDepth(i); + if (VideoCore::Surface::IsPixelFormatASTC(params.pixel_format)) { + // Reserve size for RGBA8 conversion + constexpr std::size_t rgba_bpp = 4; + gl_buffer[i].resize(std::max(gl_buffer[i].size(), width * height * depth * rgba_bpp)); + } + Tegra::Texture::ConvertFromGuestToHost(gl_buffer[i].data(), params.pixel_format, width, + height, depth, true, true); } } @@ -743,10 +678,8 @@ void CachedSurface::FlushGLBuffer() { glGetTextureImage(texture.handle, 0, tuple.format, tuple.type, static_cast<GLsizei>(gl_buffer[0].size()), gl_buffer[0].data()); glPixelStorei(GL_PACK_ROW_LENGTH, 0); - ConvertFormatAsNeeded_FlushGLBuffer(gl_buffer[0], params.pixel_format, params.width, - params.height); - const u8* const texture_src_data = Memory::GetPointer(params.addr); - ASSERT(texture_src_data); + Tegra::Texture::ConvertFromHostToGuest(gl_buffer[0].data(), params.pixel_format, params.width, + params.height, params.depth, true, true); if (params.is_tiled) { ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}", params.block_width, static_cast<u32>(params.target)); @@ -756,9 +689,9 @@ void CachedSurface::FlushGLBuffer() { const u32 bpp = params.GetFormatBpp() / 8; const u32 copy_size = params.width * bpp; if (params.pitch == copy_size) { - std::memcpy(Memory::GetPointer(params.addr), gl_buffer[0].data(), GetSizeInBytes()); + std::memcpy(params.host_ptr, gl_buffer[0].data(), GetSizeInBytes()); } else { - u8* start = Memory::GetPointer(params.addr); + u8* start{params.host_ptr}; const u8* read_to = gl_buffer[0].data(); for (u32 h = params.height; h > 0; h--) { std::memcpy(start, read_to, copy_size); @@ -881,20 +814,22 @@ void 
CachedSurface::UploadGLMipmapTexture(u32 mip_map, GLuint read_fb_handle, glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); } -void CachedSurface::EnsureTextureView() { - if (texture_view.handle != 0) +void CachedSurface::EnsureTextureDiscrepantView() { + if (discrepant_view.handle != 0) return; - const GLenum target{TargetLayer()}; + const GLenum target{GetArrayDiscrepantTarget(params.target)}; + ASSERT(target != GL_NONE); + const GLuint num_layers{target == GL_TEXTURE_CUBE_MAP_ARRAY ? 6u : 1u}; constexpr GLuint min_layer = 0; constexpr GLuint min_level = 0; - glGenTextures(1, &texture_view.handle); - glTextureView(texture_view.handle, target, texture.handle, gl_internal_format, min_level, + glGenTextures(1, &discrepant_view.handle); + glTextureView(discrepant_view.handle, target, texture.handle, gl_internal_format, min_level, params.max_mip_level, min_layer, num_layers); - ApplyTextureDefaults(texture_view.handle, params.max_mip_level); - glTextureParameteriv(texture_view.handle, GL_TEXTURE_SWIZZLE_RGBA, + ApplyTextureDefaults(discrepant_view.handle, params.max_mip_level); + glTextureParameteriv(discrepant_view.handle, GL_TEXTURE_SWIZZLE_RGBA, reinterpret_cast<const GLint*>(swizzle.data())); } @@ -920,8 +855,8 @@ void CachedSurface::UpdateSwizzle(Tegra::Texture::SwizzleSource swizzle_x, swizzle = {new_x, new_y, new_z, new_w}; const auto swizzle_data = reinterpret_cast<const GLint*>(swizzle.data()); glTextureParameteriv(texture.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data); - if (texture_view.handle != 0) { - glTextureParameteriv(texture_view.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data); + if (discrepant_view.handle != 0) { + glTextureParameteriv(discrepant_view.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data); } } @@ -962,30 +897,31 @@ Surface RasterizerCacheOpenGL::GetColorBufferSurface(std::size_t index, bool pre auto& gpu{Core::System::GetInstance().GPU().Maxwell3D()}; const auto& regs{gpu.regs}; - if ((gpu.dirty_flags.color_buffer & (1u << static_cast<u32>(index))) == 0) { - return last_color_buffers[index]; + if (!gpu.dirty_flags.color_buffer[index]) { + return current_color_buffers[index]; } - gpu.dirty_flags.color_buffer &= ~(1u << static_cast<u32>(index)); + gpu.dirty_flags.color_buffer.reset(index); ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets); if (index >= regs.rt_control.count) { - return last_color_buffers[index] = {}; + return current_color_buffers[index] = {}; } if (regs.rt[index].Address() == 0 || regs.rt[index].format == Tegra::RenderTargetFormat::NONE) { - return last_color_buffers[index] = {}; + return current_color_buffers[index] = {}; } const SurfaceParams color_params{SurfaceParams::CreateForFramebuffer(index)}; - return last_color_buffers[index] = GetSurface(color_params, preserve_contents); + return current_color_buffers[index] = GetSurface(color_params, preserve_contents); } void RasterizerCacheOpenGL::LoadSurface(const Surface& surface) { surface->LoadGLBuffer(); surface->UploadGLTexture(read_framebuffer.handle, draw_framebuffer.handle); surface->MarkAsModified(false, *this); + surface->MarkForReload(false); } Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool preserve_contents) { @@ -994,21 +930,26 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool pres } // Look up surface in the cache based on address - Surface surface{TryGet(params.addr)}; + Surface surface{TryGet(params.host_ptr)}; if (surface) { if (surface->GetSurfaceParams().IsCompatibleSurface(params)) { - // Use the cached surface as-is + 
// Use the cached surface as-is unless it's not synced with memory + if (surface->MustReload()) + LoadSurface(surface); return surface; } else if (preserve_contents) { // If surface parameters changed and we care about keeping the previous data, recreate // the surface from the old one Surface new_surface{RecreateSurface(surface, params)}; - Unregister(surface); + UnregisterSurface(surface); Register(new_surface); + if (new_surface->IsUploaded()) { + RegisterReinterpretSurface(new_surface); + } return new_surface; } else { // Delete the old surface before creating a new one to prevent collisions. - Unregister(surface); + UnregisterSurface(surface); } } @@ -1043,7 +984,7 @@ void RasterizerCacheOpenGL::FastLayeredCopySurface(const Surface& src_surface, for (u32 layer = 0; layer < dst_params.depth; layer++) { for (u32 mipmap = 0; mipmap < dst_params.max_mip_level; mipmap++) { const VAddr sub_address = address + dst_params.GetMipmapLevelOffset(mipmap); - const Surface& copy = TryGet(sub_address); + const Surface& copy = TryGet(Memory::GetPointer(sub_address)); if (!copy) continue; const auto& src_params{copy->GetSurfaceParams()}; @@ -1062,8 +1003,8 @@ void RasterizerCacheOpenGL::FastLayeredCopySurface(const Surface& src_surface, } static bool BlitSurface(const Surface& src_surface, const Surface& dst_surface, - const MathUtil::Rectangle<u32>& src_rect, - const MathUtil::Rectangle<u32>& dst_rect, GLuint read_fb_handle, + const Common::Rectangle<u32>& src_rect, + const Common::Rectangle<u32>& dst_rect, GLuint read_fb_handle, GLuint draw_fb_handle, GLenum src_attachment = 0, GLenum dst_attachment = 0, std::size_t cubemap_face = 0) { @@ -1193,7 +1134,7 @@ static bool BlitSurface(const Surface& src_surface, const Surface& dst_surface, void RasterizerCacheOpenGL::FermiCopySurface( const Tegra::Engines::Fermi2D::Regs::Surface& src_config, const Tegra::Engines::Fermi2D::Regs::Surface& dst_config, - const MathUtil::Rectangle<u32>& src_rect, const MathUtil::Rectangle<u32>& dst_rect) { + const Common::Rectangle<u32>& src_rect, const Common::Rectangle<u32>& dst_rect) { const auto& src_params = SurfaceParams::CreateForFermiCopySurface(src_config); const auto& dst_params = SurfaceParams::CreateForFermiCopySurface(dst_config); @@ -1220,7 +1161,8 @@ void RasterizerCacheOpenGL::AccurateCopySurface(const Surface& src_surface, const auto& dst_params{dst_surface->GetSurfaceParams()}; // Flush enough memory for both the source and destination surface - FlushRegion(src_params.addr, std::max(src_params.MemorySize(), dst_params.MemorySize())); + FlushRegion(ToCacheAddr(src_params.host_ptr), + std::max(src_params.MemorySize(), dst_params.MemorySize())); LoadSurface(dst_surface); } @@ -1257,7 +1199,11 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface, case SurfaceTarget::TextureCubemap: case SurfaceTarget::Texture2DArray: case SurfaceTarget::TextureCubeArray: - FastLayeredCopySurface(old_surface, new_surface); + if (old_params.pixel_format == new_params.pixel_format) + FastLayeredCopySurface(old_surface, new_surface); + else { + AccurateCopySurface(old_surface, new_surface); + } break; default: LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}", @@ -1268,8 +1214,8 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface, return new_surface; } -Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(VAddr addr) const { - return TryGet(addr); +Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(const u8* host_ptr) const { + return TryGet(host_ptr); } void 
RasterizerCacheOpenGL::ReserveSurface(const Surface& surface) { @@ -1286,4 +1232,108 @@ Surface RasterizerCacheOpenGL::TryGetReservedSurface(const SurfaceParams& params return {}; } +static std::optional<u32> TryFindBestMipMap(std::size_t memory, const SurfaceParams params, + u32 height) { + for (u32 i = 0; i < params.max_mip_level; i++) { + if (memory == params.GetMipmapSingleSize(i) && params.MipHeight(i) == height) { + return {i}; + } + } + return {}; +} + +static std::optional<u32> TryFindBestLayer(VAddr addr, const SurfaceParams params, u32 mipmap) { + const std::size_t size = params.LayerMemorySize(); + VAddr start = params.addr + params.GetMipmapLevelOffset(mipmap); + for (u32 i = 0; i < params.depth; i++) { + if (start == addr) { + return {i}; + } + start += size; + } + return {}; +} + +static bool LayerFitReinterpretSurface(RasterizerCacheOpenGL& cache, const Surface render_surface, + const Surface blitted_surface) { + const auto& dst_params = blitted_surface->GetSurfaceParams(); + const auto& src_params = render_surface->GetSurfaceParams(); + const std::size_t src_memory_size = src_params.size_in_bytes; + const std::optional<u32> level = + TryFindBestMipMap(src_memory_size, dst_params, src_params.height); + if (level.has_value()) { + if (src_params.width == dst_params.MipWidthGobAligned(*level) && + src_params.height == dst_params.MipHeight(*level) && + src_params.block_height >= dst_params.MipBlockHeight(*level)) { + const std::optional<u32> slot = + TryFindBestLayer(render_surface->GetCpuAddr(), dst_params, *level); + if (slot.has_value()) { + glCopyImageSubData(render_surface->Texture().handle, + SurfaceTargetToGL(src_params.target), 0, 0, 0, 0, + blitted_surface->Texture().handle, + SurfaceTargetToGL(dst_params.target), *level, 0, 0, *slot, + dst_params.MipWidth(*level), dst_params.MipHeight(*level), 1); + blitted_surface->MarkAsModified(true, cache); + return true; + } + } + } + return false; +} + +static bool IsReinterpretInvalid(const Surface render_surface, const Surface blitted_surface) { + const VAddr bound1 = blitted_surface->GetCpuAddr() + blitted_surface->GetMemorySize(); + const VAddr bound2 = render_surface->GetCpuAddr() + render_surface->GetMemorySize(); + if (bound2 > bound1) + return true; + const auto& dst_params = blitted_surface->GetSurfaceParams(); + const auto& src_params = render_surface->GetSurfaceParams(); + return (dst_params.component_type != src_params.component_type); +} + +static bool IsReinterpretInvalidSecond(const Surface render_surface, + const Surface blitted_surface) { + const auto& dst_params = blitted_surface->GetSurfaceParams(); + const auto& src_params = render_surface->GetSurfaceParams(); + return (dst_params.height > src_params.height && dst_params.width > src_params.width); +} + +bool RasterizerCacheOpenGL::PartialReinterpretSurface(Surface triggering_surface, + Surface intersect) { + if (IsReinterpretInvalid(triggering_surface, intersect)) { + UnregisterSurface(intersect); + return false; + } + if (!LayerFitReinterpretSurface(*this, triggering_surface, intersect)) { + if (IsReinterpretInvalidSecond(triggering_surface, intersect)) { + UnregisterSurface(intersect); + return false; + } + FlushObject(intersect); + FlushObject(triggering_surface); + intersect->MarkForReload(true); + } + return true; +} + +void RasterizerCacheOpenGL::SignalPreDrawCall() { + if (texception && GLAD_GL_ARB_texture_barrier) { + glTextureBarrier(); + } + texception = false; +} + +void RasterizerCacheOpenGL::SignalPostDrawCall() { + for (u32 i = 0; i < 
Maxwell::NumRenderTargets; i++) { + if (current_color_buffers[i] != nullptr) { + Surface intersect = + CollideOnReinterpretedSurface(current_color_buffers[i]->GetCacheAddr()); + if (intersect != nullptr) { + PartialReinterpretSurface(current_color_buffers[i], intersect); + texception = true; + } + } + } +} + } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index 89d733c50..b3afad139 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h @@ -28,12 +28,13 @@ namespace OpenGL { class CachedSurface; using Surface = std::shared_ptr<CachedSurface>; -using SurfaceSurfaceRect_Tuple = std::tuple<Surface, Surface, MathUtil::Rectangle<u32>>; +using SurfaceSurfaceRect_Tuple = std::tuple<Surface, Surface, Common::Rectangle<u32>>; using SurfaceTarget = VideoCore::Surface::SurfaceTarget; using SurfaceType = VideoCore::Surface::SurfaceType; using PixelFormat = VideoCore::Surface::PixelFormat; using ComponentType = VideoCore::Surface::ComponentType; +using Maxwell = Tegra::Engines::Maxwell3D::Regs; struct SurfaceParams { enum class SurfaceClass { @@ -71,7 +72,7 @@ struct SurfaceParams { } /// Returns the rectangle corresponding to this surface - MathUtil::Rectangle<u32> GetRect(u32 mip_level = 0) const; + Common::Rectangle<u32> GetRect(u32 mip_level = 0) const; /// Returns the total size of this surface in bytes, adjusted for compression std::size_t SizeInBytesRaw(bool ignore_tiled = false) const { @@ -140,10 +141,18 @@ struct SurfaceParams { return offset; } + std::size_t GetMipmapSingleSize(u32 mip_level) const { + return InnerMipmapMemorySize(mip_level, false, is_layered); + } + u32 MipWidth(u32 mip_level) const { return std::max(1U, width >> mip_level); } + u32 MipWidthGobAligned(u32 mip_level) const { + return Common::AlignUp(std::max(1U, width >> mip_level), 64U * 8U / GetFormatBpp()); + } + u32 MipHeight(u32 mip_level) const { return std::max(1U, height >> mip_level); } @@ -288,6 +297,7 @@ struct SurfaceParams { bool srgb_conversion; // Parameters used for caching VAddr addr; + u8* host_ptr; Tegra::GPUVAddr gpu_addr; std::size_t size_in_bytes; std::size_t size_in_bytes_gl; @@ -336,9 +346,9 @@ class RasterizerOpenGL; class CachedSurface final : public RasterizerCacheObject { public: - CachedSurface(const SurfaceParams& params); + explicit CachedSurface(const SurfaceParams& params); - VAddr GetAddr() const override { + VAddr GetCpuAddr() const override { return params.addr; } @@ -346,6 +356,10 @@ public: return cached_size_in_bytes; } + std::size_t GetMemorySize() const { + return memory_size; + } + void Flush() override { FlushGLBuffer(); } @@ -354,31 +368,19 @@ public: return texture; } - const OGLTexture& TextureLayer() { - if (params.is_array) { - return Texture(); + const OGLTexture& Texture(bool as_array) { + if (params.is_array == as_array) { + return texture; + } else { + EnsureTextureDiscrepantView(); + return discrepant_view; } - EnsureTextureView(); - return texture_view; } GLenum Target() const { return gl_target; } - GLenum TargetLayer() const { - using VideoCore::Surface::SurfaceTarget; - switch (params.target) { - case SurfaceTarget::Texture1D: - return GL_TEXTURE_1D_ARRAY; - case SurfaceTarget::Texture2D: - return GL_TEXTURE_2D_ARRAY; - case SurfaceTarget::TextureCubemap: - return GL_TEXTURE_CUBE_MAP_ARRAY; - } - return Target(); - } - const SurfaceParams& GetSurfaceParams() const { return params; } @@ -395,19 +397,42 @@ 
public: Tegra::Texture::SwizzleSource swizzle_z, Tegra::Texture::SwizzleSource swizzle_w); + void MarkReinterpreted() { + reinterpreted = true; + } + + bool IsReinterpreted() const { + return reinterpreted; + } + + void MarkForReload(bool reload) { + must_reload = reload; + } + + bool MustReload() const { + return must_reload; + } + + bool IsUploaded() const { + return params.identity == SurfaceParams::SurfaceClass::Uploaded; + } + private: void UploadGLMipmapTexture(u32 mip_map, GLuint read_fb_handle, GLuint draw_fb_handle); - void EnsureTextureView(); + void EnsureTextureDiscrepantView(); OGLTexture texture; - OGLTexture texture_view; + OGLTexture discrepant_view; std::vector<std::vector<u8>> gl_buffer; SurfaceParams params{}; GLenum gl_target{}; GLenum gl_internal_format{}; std::size_t cached_size_in_bytes{}; std::array<GLenum, 4> swizzle{GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA}; + std::size_t memory_size; + bool reinterpreted = false; + bool must_reload = false; }; class RasterizerCacheOpenGL final : public RasterizerCache<Surface> { @@ -425,13 +450,16 @@ public: Surface GetColorBufferSurface(std::size_t index, bool preserve_contents); /// Tries to find a framebuffer using on the provided CPU address - Surface TryFindFramebufferSurface(VAddr addr) const; + Surface TryFindFramebufferSurface(const u8* host_ptr) const; /// Copies the contents of one surface to another void FermiCopySurface(const Tegra::Engines::Fermi2D::Regs::Surface& src_config, const Tegra::Engines::Fermi2D::Regs::Surface& dst_config, - const MathUtil::Rectangle<u32>& src_rect, - const MathUtil::Rectangle<u32>& dst_rect); + const Common::Rectangle<u32>& src_rect, + const Common::Rectangle<u32>& dst_rect); + + void SignalPreDrawCall(); + void SignalPostDrawCall(); private: void LoadSurface(const Surface& surface); @@ -449,6 +477,10 @@ private: /// Tries to get a reserved surface for the specified parameters Surface TryGetReservedSurface(const SurfaceParams& params); + + // Partially reinterpret a surface based on a triggering_surface that collides with it. + // Returns true if the reinterpret was successful, false otherwise. + bool PartialReinterpretSurface(Surface triggering_surface, Surface intersect); + /// Performs a slow but accurate surface copy, flushing to RAM and reinterpreting the data void AccurateCopySurface(const Surface& src_surface, const Surface& dst_surface); void FastLayeredCopySurface(const Surface& src_surface, const Surface& dst_surface); @@ -465,12 +497,50 @@ private: OGLFramebuffer read_framebuffer; OGLFramebuffer draw_framebuffer; + bool texception = false; + /// Use a Pixel Buffer Object to download the previous texture and then upload it to the new one /// using the new format. OGLBuffer copy_pbo; - std::array<Surface, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> last_color_buffers; + std::array<Surface, Maxwell::NumRenderTargets> last_color_buffers; + std::array<Surface, Maxwell::NumRenderTargets> current_color_buffers; Surface last_depth_buffer; + + using SurfaceIntervalCache = boost::icl::interval_map<CacheAddr, Surface>; + using SurfaceInterval = typename SurfaceIntervalCache::interval_type; + + static auto GetReinterpretInterval(const Surface& object) { + return SurfaceInterval::right_open(object->GetCacheAddr() + 1, + object->GetCacheAddr() + object->GetMemorySize() - 1); + } + + // Reinterpreted surfaces are very fragile, as the game may keep rendering into them.
+ SurfaceIntervalCache reinterpreted_surfaces; + + void RegisterReinterpretSurface(Surface reinterpret_surface) { + auto interval = GetReinterpretInterval(reinterpret_surface); + reinterpreted_surfaces.insert({interval, reinterpret_surface}); + reinterpret_surface->MarkReinterpreted(); + } + + Surface CollideOnReinterpretedSurface(CacheAddr addr) const { + const SurfaceInterval interval{addr}; + for (auto& pair : + boost::make_iterator_range(reinterpreted_surfaces.equal_range(interval))) { + return pair.second; + } + return nullptr; + } + + /// Unregisters an object from the cache + void UnregisterSurface(const Surface& object) { + if (object->IsReinterpreted()) { + auto interval = GetReinterpretInterval(object); + reinterpreted_surfaces.erase(interval); + } + Unregister(object); + } }; } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index 4883e4f62..60a04e146 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -42,9 +42,9 @@ VAddr GetShaderAddress(Maxwell::ShaderProgram program) { } /// Gets the shader program code from memory for the specified address -ProgramCode GetShaderCode(VAddr addr) { +ProgramCode GetShaderCode(const u8* host_ptr) { ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH); - Memory::ReadBlock(addr, program_code.data(), program_code.size() * sizeof(u64)); + std::memcpy(program_code.data(), host_ptr, program_code.size() * sizeof(u64)); return program_code; } @@ -214,12 +214,13 @@ std::set<GLenum> GetSupportedFormats() { } // namespace -CachedShader::CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderProgram program_type, - ShaderDiskCacheOpenGL& disk_cache, +CachedShader::CachedShader(VAddr guest_addr, u64 unique_identifier, + Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, const PrecompiledPrograms& precompiled_programs, - ProgramCode&& program_code, ProgramCode&& program_code_b) - : addr{addr}, unique_identifier{unique_identifier}, program_type{program_type}, - disk_cache{disk_cache}, precompiled_programs{precompiled_programs} { + ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr) + : host_ptr{host_ptr}, guest_addr{guest_addr}, unique_identifier{unique_identifier}, + program_type{program_type}, disk_cache{disk_cache}, + precompiled_programs{precompiled_programs}, RasterizerCacheObject{host_ptr} { const std::size_t code_size = CalculateProgramSize(program_code); const std::size_t code_size_b = @@ -243,12 +244,13 @@ CachedShader::CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderPro disk_cache.SaveRaw(raw); } -CachedShader::CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderProgram program_type, - ShaderDiskCacheOpenGL& disk_cache, +CachedShader::CachedShader(VAddr guest_addr, u64 unique_identifier, + Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, const PrecompiledPrograms& precompiled_programs, - GLShader::ProgramResult result) - : addr{addr}, unique_identifier{unique_identifier}, program_type{program_type}, - disk_cache{disk_cache}, precompiled_programs{precompiled_programs} { + GLShader::ProgramResult result, u8* host_ptr) + : guest_addr{guest_addr}, unique_identifier{unique_identifier}, program_type{program_type}, + disk_cache{disk_cache}, precompiled_programs{precompiled_programs}, RasterizerCacheObject{ + host_ptr} { code = std::move(result.first); entries = result.second; @@ -271,7 
+273,7 @@ std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(GLenum primitive disk_cache.SaveUsage(GetUsage(primitive_mode, base_bindings)); } - LabelGLObject(GL_PROGRAM, program->handle, addr); + LabelGLObject(GL_PROGRAM, program->handle, guest_addr); } handle = program->handle; @@ -323,7 +325,7 @@ GLuint CachedShader::LazyGeometryProgram(CachedProgram& target_program, BaseBind disk_cache.SaveUsage(GetUsage(primitive_mode, base_bindings)); } - LabelGLObject(GL_PROGRAM, target_program->handle, addr, debug_name); + LabelGLObject(GL_PROGRAM, target_program->handle, guest_addr, debug_name); return target_program->handle; }; @@ -489,14 +491,17 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) { const VAddr program_addr{GetShaderAddress(program)}; // Look up shader in the cache based on address - Shader shader{TryGet(program_addr)}; + const auto& host_ptr{Memory::GetPointer(program_addr)}; + Shader shader{TryGet(host_ptr)}; if (!shader) { // No shader found - create a new one - ProgramCode program_code = GetShaderCode(program_addr); + const auto& host_ptr{Memory::GetPointer(program_addr)}; + ProgramCode program_code{GetShaderCode(host_ptr)}; ProgramCode program_code_b; if (program == Maxwell::ShaderProgram::VertexA) { - program_code_b = GetShaderCode(GetShaderAddress(Maxwell::ShaderProgram::VertexB)); + program_code_b = GetShaderCode( + Memory::GetPointer(GetShaderAddress(Maxwell::ShaderProgram::VertexB))); } const u64 unique_identifier = GetUniqueIdentifier(program, program_code, program_code_b); @@ -504,11 +509,11 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) { if (found != precompiled_shaders.end()) { shader = std::make_shared<CachedShader>(program_addr, unique_identifier, program, disk_cache, - precompiled_programs, found->second); + precompiled_programs, found->second, host_ptr); } else { shader = std::make_shared<CachedShader>( program_addr, unique_identifier, program, disk_cache, precompiled_programs, - std::move(program_code), std::move(program_code_b)); + std::move(program_code), std::move(program_code_b), host_ptr); } Register(shader); } diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index 97eed192f..81fe716b4 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h @@ -39,18 +39,18 @@ using PrecompiledShaders = std::unordered_map<u64, GLShader::ProgramResult>; class CachedShader final : public RasterizerCacheObject { public: - explicit CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderProgram program_type, - ShaderDiskCacheOpenGL& disk_cache, + explicit CachedShader(VAddr guest_addr, u64 unique_identifier, + Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, const PrecompiledPrograms& precompiled_programs, - ProgramCode&& program_code, ProgramCode&& program_code_b); + ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr); - explicit CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderProgram program_type, - ShaderDiskCacheOpenGL& disk_cache, + explicit CachedShader(VAddr guest_addr, u64 unique_identifier, + Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, const PrecompiledPrograms& precompiled_programs, - GLShader::ProgramResult result); + GLShader::ProgramResult result, u8* host_ptr); - VAddr GetAddr() const override { - return addr; + VAddr GetCpuAddr() const override { + return guest_addr; } std::size_t 
GetSizeInBytes() const override { @@ -91,7 +91,8 @@ private: ShaderDiskCacheUsage GetUsage(GLenum primitive_mode, BaseBindings base_bindings) const; - VAddr addr{}; + u8* host_ptr{}; + VAddr guest_addr{}; u64 unique_identifier{}; Maxwell::ShaderProgram program_type{}; ShaderDiskCacheOpenGL& disk_cache; diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index db18f4dbe..11d1169f0 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp @@ -5,7 +5,9 @@ #include <array> #include <string> #include <string_view> +#include <utility> #include <variant> +#include <vector> #include <fmt/format.h> @@ -20,6 +22,7 @@ namespace OpenGL::GLShader { using Tegra::Shader::Attribute; +using Tegra::Shader::AttributeUse; using Tegra::Shader::Header; using Tegra::Shader::IpaInterpMode; using Tegra::Shader::IpaMode; @@ -288,34 +291,22 @@ private: code.AddNewLine(); } - std::string GetInputFlags(const IpaMode& input_mode) { - const IpaSampleMode sample_mode = input_mode.sampling_mode; - const IpaInterpMode interp_mode = input_mode.interpolation_mode; + std::string GetInputFlags(AttributeUse attribute) { std::string out; - switch (interp_mode) { - case IpaInterpMode::Flat: + switch (attribute) { + case AttributeUse::Constant: out += "flat "; break; - case IpaInterpMode::Linear: + case AttributeUse::ScreenLinear: out += "noperspective "; break; - case IpaInterpMode::Perspective: + case AttributeUse::Perspective: // Default, Smooth break; default: - UNIMPLEMENTED_MSG("Unhandled IPA interp mode: {}", static_cast<u32>(interp_mode)); - } - switch (sample_mode) { - case IpaSampleMode::Centroid: - // It can be implemented with the "centroid " keyword in GLSL - UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode centroid"); - break; - case IpaSampleMode::Default: - // Default, n/a - break; - default: - UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode: {}", static_cast<u32>(sample_mode)); + LOG_CRITICAL(HW_GPU, "Unused attribute being fetched"); + UNREACHABLE(); } return out; } @@ -324,16 +315,11 @@ private: const auto& attributes = ir.GetInputAttributes(); for (const auto element : attributes) { const Attribute::Index index = element.first; - const IpaMode& input_mode = *element.second.begin(); if (index < Attribute::Index::Attribute_0 || index > Attribute::Index::Attribute_31) { // Skip when it's not a generic attribute continue; } - ASSERT(element.second.size() > 0); - UNIMPLEMENTED_IF_MSG(element.second.size() > 1, - "Multiple input flag modes are not supported in GLSL"); - // TODO(bunnei): Use proper number of elements for these u32 idx = static_cast<u32>(index) - static_cast<u32>(Attribute::Index::Attribute_0); if (stage != ShaderStage::Vertex) { @@ -345,8 +331,14 @@ private: if (stage == ShaderStage::Geometry) { attr = "gs_" + attr + "[]"; } - code.AddLine("layout (location = " + std::to_string(idx) + ") " + - GetInputFlags(input_mode) + "in vec4 " + attr + ';'); + std::string suffix; + if (stage == ShaderStage::Fragment) { + const auto input_mode = + header.ps.GetAttributeUse(idx - GENERIC_VARYING_START_LOCATION); + suffix = GetInputFlags(input_mode); + } + code.AddLine("layout (location = " + std::to_string(idx) + ") " + suffix + "in vec4 " + + attr + ';'); } if (!attributes.empty()) code.AddNewLine(); @@ -727,7 +719,7 @@ private: } std::string GenerateTexture(Operation operation, const std::string& func, - bool is_extra_int = false) { + const 
std::vector<std::pair<Type, Node>>& extras) { constexpr std::array<const char*, 4> coord_constructors = {"float", "vec2", "vec3", "vec4"}; const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); @@ -748,36 +740,47 @@ private: expr += Visit(operation[i]); const std::size_t next = i + 1; - if (next < count || has_array || has_shadow) + if (next < count) expr += ", "; } if (has_array) { - expr += "float(ftoi(" + Visit(meta->array) + "))"; + expr += ", float(ftoi(" + Visit(meta->array) + "))"; } if (has_shadow) { - if (has_array) - expr += ", "; - expr += Visit(meta->depth_compare); + expr += ", " + Visit(meta->depth_compare); } expr += ')'; - for (const Node extra : meta->extras) { + for (const auto& extra_pair : extras) { + const auto [type, operand] = extra_pair; + if (operand == nullptr) { + continue; + } expr += ", "; - if (is_extra_int) { - if (const auto immediate = std::get_if<ImmediateNode>(extra)) { + + switch (type) { + case Type::Int: + if (const auto immediate = std::get_if<ImmediateNode>(operand)) { // Inline the string as an immediate integer in GLSL (some extra arguments are // required to be constant) expr += std::to_string(static_cast<s32>(immediate->GetValue())); } else { - expr += "ftoi(" + Visit(extra) + ')'; + expr += "ftoi(" + Visit(operand) + ')'; } - } else { - expr += Visit(extra); + break; + case Type::Float: + expr += Visit(operand); + break; + default: { + const auto type_int = static_cast<u32>(type); + UNIMPLEMENTED_MSG("Unimplemented extra type={}", type_int); + expr += '0'; + break; + } } } - expr += ')'; - return expr; + return expr + ')'; } std::string Assign(Operation operation) { @@ -1156,7 +1159,7 @@ private: const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); ASSERT(meta); - std::string expr = GenerateTexture(operation, "texture"); + std::string expr = GenerateTexture(operation, "texture", {{Type::Float, meta->bias}}); if (meta->sampler.IsShadow()) { expr = "vec4(" + expr + ')'; } @@ -1167,7 +1170,7 @@ private: const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); ASSERT(meta); - std::string expr = GenerateTexture(operation, "textureLod"); + std::string expr = GenerateTexture(operation, "textureLod", {{Type::Float, meta->lod}}); if (meta->sampler.IsShadow()) { expr = "vec4(" + expr + ')'; } @@ -1178,7 +1181,8 @@ private: const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); ASSERT(meta); - return GenerateTexture(operation, "textureGather", !meta->sampler.IsShadow()) + + const auto type = meta->sampler.IsShadow() ? Type::Float : Type::Int; + return GenerateTexture(operation, "textureGather", {{type, meta->component}}) + GetSwizzle(meta->element); } @@ -1207,8 +1211,8 @@ private: ASSERT(meta); if (meta->element < 2) { - return "itof(int((" + GenerateTexture(operation, "textureQueryLod") + " * vec2(256))" + - GetSwizzle(meta->element) + "))"; + return "itof(int((" + GenerateTexture(operation, "textureQueryLod", {}) + + " * vec2(256))" + GetSwizzle(meta->element) + "))"; } return "0"; } @@ -1234,9 +1238,9 @@ private: else if (next < count) expr += ", "; } - for (std::size_t i = 0; i < meta->extras.size(); ++i) { + if (meta->lod) { expr += ", "; - expr += CastOperand(Visit(meta->extras.at(i)), Type::Int); + expr += CastOperand(Visit(meta->lod), Type::Int); } expr += ')'; @@ -1584,4 +1588,4 @@ ProgramResult Decompile(const ShaderIR& ir, Maxwell::ShaderStage stage, const st return {decompiler.GetResult(), decompiler.GetShaderEntries()}; } -} // namespace OpenGL::GLShader
\ No newline at end of file +} // namespace OpenGL::GLShader diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp index 81882822b..82fc4d44b 100644 --- a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp @@ -2,8 +2,6 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -#pragma once - #include <cstring> #include <fmt/format.h> #include <lz4.h> diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp index 04e1db911..7d96649af 100644 --- a/src/video_core/renderer_opengl/gl_shader_gen.cpp +++ b/src/video_core/renderer_opengl/gl_shader_gen.cpp @@ -124,7 +124,7 @@ layout (location = 5) out vec4 FragColor5; layout (location = 6) out vec4 FragColor6; layout (location = 7) out vec4 FragColor7; -layout (location = 0) in vec4 position; +layout (location = 0) in noperspective vec4 position; layout (std140, binding = EMULATION_UBO_BINDING) uniform fs_config { vec4 viewport_flip; @@ -172,4 +172,4 @@ void main() { return {out, program.second}; } -} // namespace OpenGL::GLShader
\ No newline at end of file +} // namespace OpenGL::GLShader diff --git a/src/video_core/renderer_opengl/gl_state.cpp b/src/video_core/renderer_opengl/gl_state.cpp index 81af803bc..9419326a3 100644 --- a/src/video_core/renderer_opengl/gl_state.cpp +++ b/src/video_core/renderer_opengl/gl_state.cpp @@ -11,7 +11,9 @@ namespace OpenGL { OpenGLState OpenGLState::cur_state; + bool OpenGLState::s_rgb_used; + OpenGLState::OpenGLState() { // These all match default OpenGL values geometry_shaders.enabled = false; @@ -112,7 +114,6 @@ void OpenGLState::ApplyDefaultState() { } void OpenGLState::ApplySRgb() const { - // sRGB if (framebuffer_srgb.enabled != cur_state.framebuffer_srgb.enabled) { if (framebuffer_srgb.enabled) { // Track if sRGB is used @@ -125,23 +126,20 @@ void OpenGLState::ApplySRgb() const { } void OpenGLState::ApplyCulling() const { - // Culling - const bool cull_changed = cull.enabled != cur_state.cull.enabled; - if (cull_changed) { + if (cull.enabled != cur_state.cull.enabled) { if (cull.enabled) { glEnable(GL_CULL_FACE); } else { glDisable(GL_CULL_FACE); } } - if (cull.enabled) { - if (cull_changed || cull.mode != cur_state.cull.mode) { - glCullFace(cull.mode); - } - if (cull_changed || cull.front_face != cur_state.cull.front_face) { - glFrontFace(cull.front_face); - } + if (cull.mode != cur_state.cull.mode) { + glCullFace(cull.mode); + } + + if (cull.front_face != cur_state.cull.front_face) { + glFrontFace(cull.front_face); } } @@ -172,72 +170,63 @@ void OpenGLState::ApplyColorMask() const { } void OpenGLState::ApplyDepth() const { - // Depth test - const bool depth_test_changed = depth.test_enabled != cur_state.depth.test_enabled; - if (depth_test_changed) { + if (depth.test_enabled != cur_state.depth.test_enabled) { if (depth.test_enabled) { glEnable(GL_DEPTH_TEST); } else { glDisable(GL_DEPTH_TEST); } } - if (depth.test_enabled && - (depth_test_changed || depth.test_func != cur_state.depth.test_func)) { + + if (depth.test_func != cur_state.depth.test_func) { glDepthFunc(depth.test_func); } - // Depth mask + if (depth.write_mask != cur_state.depth.write_mask) { glDepthMask(depth.write_mask); } } void OpenGLState::ApplyPrimitiveRestart() const { - const bool primitive_restart_changed = - primitive_restart.enabled != cur_state.primitive_restart.enabled; - if (primitive_restart_changed) { + if (primitive_restart.enabled != cur_state.primitive_restart.enabled) { if (primitive_restart.enabled) { glEnable(GL_PRIMITIVE_RESTART); } else { glDisable(GL_PRIMITIVE_RESTART); } } - if (primitive_restart_changed || - (primitive_restart.enabled && - primitive_restart.index != cur_state.primitive_restart.index)) { + + if (primitive_restart.index != cur_state.primitive_restart.index) { glPrimitiveRestartIndex(primitive_restart.index); } } void OpenGLState::ApplyStencilTest() const { - const bool stencil_test_changed = stencil.test_enabled != cur_state.stencil.test_enabled; - if (stencil_test_changed) { + if (stencil.test_enabled != cur_state.stencil.test_enabled) { if (stencil.test_enabled) { glEnable(GL_STENCIL_TEST); } else { glDisable(GL_STENCIL_TEST); } } - if (stencil.test_enabled) { - auto config_stencil = [stencil_test_changed](GLenum face, const auto& config, - const auto& prev_config) { - if (stencil_test_changed || config.test_func != prev_config.test_func || - config.test_ref != prev_config.test_ref || - config.test_mask != prev_config.test_mask) { - glStencilFuncSeparate(face, config.test_func, config.test_ref, config.test_mask); - } - if (stencil_test_changed || 
config.action_depth_fail != prev_config.action_depth_fail || - config.action_depth_pass != prev_config.action_depth_pass || - config.action_stencil_fail != prev_config.action_stencil_fail) { - glStencilOpSeparate(face, config.action_stencil_fail, config.action_depth_fail, - config.action_depth_pass); - } - if (config.write_mask != prev_config.write_mask) { - glStencilMaskSeparate(face, config.write_mask); - } - }; - config_stencil(GL_FRONT, stencil.front, cur_state.stencil.front); - config_stencil(GL_BACK, stencil.back, cur_state.stencil.back); - } + + const auto ConfigStencil = [](GLenum face, const auto& config, const auto& prev_config) { + if (config.test_func != prev_config.test_func || config.test_ref != prev_config.test_ref || + config.test_mask != prev_config.test_mask) { + glStencilFuncSeparate(face, config.test_func, config.test_ref, config.test_mask); + } + if (config.action_depth_fail != prev_config.action_depth_fail || + config.action_depth_pass != prev_config.action_depth_pass || + config.action_stencil_fail != prev_config.action_stencil_fail) { + glStencilOpSeparate(face, config.action_stencil_fail, config.action_depth_fail, + config.action_depth_pass); + } + if (config.write_mask != prev_config.write_mask) { + glStencilMaskSeparate(face, config.write_mask); + } + }; + ConfigStencil(GL_FRONT, stencil.front, cur_state.stencil.front); + ConfigStencil(GL_BACK, stencil.back, cur_state.stencil.back); } // Viewport does not affects glClearBuffer so emulate viewport using scissor test void OpenGLState::EmulateViewportWithScissor() { @@ -278,19 +267,18 @@ void OpenGLState::ApplyViewport() const { updated.depth_range_far != current.depth_range_far) { glDepthRangeIndexed(i, updated.depth_range_near, updated.depth_range_far); } - const bool scissor_changed = updated.scissor.enabled != current.scissor.enabled; - if (scissor_changed) { + + if (updated.scissor.enabled != current.scissor.enabled) { if (updated.scissor.enabled) { glEnablei(GL_SCISSOR_TEST, i); } else { glDisablei(GL_SCISSOR_TEST, i); } } - if (updated.scissor.enabled && - (scissor_changed || updated.scissor.x != current.scissor.x || - updated.scissor.y != current.scissor.y || - updated.scissor.width != current.scissor.width || - updated.scissor.height != current.scissor.height)) { + + if (updated.scissor.x != current.scissor.x || updated.scissor.y != current.scissor.y || + updated.scissor.width != current.scissor.width || + updated.scissor.height != current.scissor.height) { glScissorIndexed(i, updated.scissor.x, updated.scissor.y, updated.scissor.width, updated.scissor.height); } @@ -302,22 +290,23 @@ void OpenGLState::ApplyViewport() const { updated.height != current.height) { glViewport(updated.x, updated.y, updated.width, updated.height); } + if (updated.depth_range_near != current.depth_range_near || updated.depth_range_far != current.depth_range_far) { glDepthRange(updated.depth_range_near, updated.depth_range_far); } - const bool scissor_changed = updated.scissor.enabled != current.scissor.enabled; - if (scissor_changed) { + + if (updated.scissor.enabled != current.scissor.enabled) { if (updated.scissor.enabled) { glEnable(GL_SCISSOR_TEST); } else { glDisable(GL_SCISSOR_TEST); } } - if (updated.scissor.enabled && (scissor_changed || updated.scissor.x != current.scissor.x || - updated.scissor.y != current.scissor.y || - updated.scissor.width != current.scissor.width || - updated.scissor.height != current.scissor.height)) { + + if (updated.scissor.x != current.scissor.x || updated.scissor.y != current.scissor.y || + 
updated.scissor.width != current.scissor.width || + updated.scissor.height != current.scissor.height) { glScissor(updated.scissor.x, updated.scissor.y, updated.scissor.width, updated.scissor.height); } @@ -327,8 +316,7 @@ void OpenGLState::ApplyViewport() const { void OpenGLState::ApplyGlobalBlending() const { const Blend& current = cur_state.blend[0]; const Blend& updated = blend[0]; - const bool blend_changed = updated.enabled != current.enabled; - if (blend_changed) { + if (updated.enabled != current.enabled) { if (updated.enabled) { glEnable(GL_BLEND); } else { @@ -338,15 +326,14 @@ void OpenGLState::ApplyGlobalBlending() const { if (!updated.enabled) { return; } - if (blend_changed || updated.src_rgb_func != current.src_rgb_func || + if (updated.src_rgb_func != current.src_rgb_func || updated.dst_rgb_func != current.dst_rgb_func || updated.src_a_func != current.src_a_func || updated.dst_a_func != current.dst_a_func) { glBlendFuncSeparate(updated.src_rgb_func, updated.dst_rgb_func, updated.src_a_func, updated.dst_a_func); } - if (blend_changed || updated.rgb_equation != current.rgb_equation || - updated.a_equation != current.a_equation) { + if (updated.rgb_equation != current.rgb_equation || updated.a_equation != current.a_equation) { glBlendEquationSeparate(updated.rgb_equation, updated.a_equation); } } @@ -354,26 +341,22 @@ void OpenGLState::ApplyGlobalBlending() const { void OpenGLState::ApplyTargetBlending(std::size_t target, bool force) const { const Blend& updated = blend[target]; const Blend& current = cur_state.blend[target]; - const bool blend_changed = updated.enabled != current.enabled || force; - if (blend_changed) { + if (updated.enabled != current.enabled || force) { if (updated.enabled) { glEnablei(GL_BLEND, static_cast<GLuint>(target)); } else { glDisablei(GL_BLEND, static_cast<GLuint>(target)); } } - if (!updated.enabled) { - return; - } - if (blend_changed || updated.src_rgb_func != current.src_rgb_func || + + if (updated.src_rgb_func != current.src_rgb_func || updated.dst_rgb_func != current.dst_rgb_func || updated.src_a_func != current.src_a_func || updated.dst_a_func != current.dst_a_func) { glBlendFuncSeparatei(static_cast<GLuint>(target), updated.src_rgb_func, updated.dst_rgb_func, updated.src_a_func, updated.dst_a_func); } - if (blend_changed || updated.rgb_equation != current.rgb_equation || - updated.a_equation != current.a_equation) { + if (updated.rgb_equation != current.rgb_equation || updated.a_equation != current.a_equation) { glBlendEquationSeparatei(static_cast<GLuint>(target), updated.rgb_equation, updated.a_equation); } @@ -397,8 +380,7 @@ void OpenGLState::ApplyBlending() const { } void OpenGLState::ApplyLogicOp() const { - const bool logic_op_changed = logic_op.enabled != cur_state.logic_op.enabled; - if (logic_op_changed) { + if (logic_op.enabled != cur_state.logic_op.enabled) { if (logic_op.enabled) { glEnable(GL_COLOR_LOGIC_OP); } else { @@ -406,14 +388,12 @@ void OpenGLState::ApplyLogicOp() const { } } - if (logic_op.enabled && - (logic_op_changed || logic_op.operation != cur_state.logic_op.operation)) { + if (logic_op.operation != cur_state.logic_op.operation) { glLogicOp(logic_op.operation); } } void OpenGLState::ApplyPolygonOffset() const { - const bool fill_enable_changed = polygon_offset.fill_enable != cur_state.polygon_offset.fill_enable; const bool line_enable_changed = @@ -448,9 +428,7 @@ void OpenGLState::ApplyPolygonOffset() const { } } - if ((polygon_offset.fill_enable || polygon_offset.line_enable || polygon_offset.point_enable) && - 
(factor_changed || units_changed || clamp_changed)) { - + if (factor_changed || units_changed || clamp_changed) { if (GLAD_GL_EXT_polygon_offset_clamp && polygon_offset.clamp != 0) { glPolygonOffsetClamp(polygon_offset.factor, polygon_offset.units, polygon_offset.clamp); } else { @@ -483,7 +461,7 @@ void OpenGLState::ApplyTextures() const { if (has_delta) { glBindTextures(static_cast<GLuint>(first), static_cast<GLsizei>(last - first + 1), - textures.data()); + textures.data() + first); } } @@ -504,7 +482,7 @@ void OpenGLState::ApplySamplers() const { } if (has_delta) { glBindSamplers(static_cast<GLuint>(first), static_cast<GLsizei>(last - first + 1), - samplers.data()); + samplers.data() + first); } } @@ -528,9 +506,9 @@ void OpenGLState::ApplyDepthClamp() const { depth_clamp.near_plane == cur_state.depth_clamp.near_plane) { return; } - if (depth_clamp.far_plane != depth_clamp.near_plane) { - UNIMPLEMENTED_MSG("Unimplemented Depth Clamp Separation!"); - } + UNIMPLEMENTED_IF_MSG(depth_clamp.far_plane != depth_clamp.near_plane, + "Unimplemented Depth Clamp Separation!"); + if (depth_clamp.far_plane || depth_clamp.near_plane) { glEnable(GL_DEPTH_CLAMP); } else { diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index d40666ac6..b97576309 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp @@ -167,9 +167,11 @@ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuf Memory::RasterizerFlushVirtualRegion(framebuffer_addr, size_in_bytes, Memory::FlushMode::Flush); - VideoCore::MortonCopyPixels128(framebuffer.width, framebuffer.height, bytes_per_pixel, 4, - Memory::GetPointer(framebuffer_addr), - gl_framebuffer_data.data(), true); + constexpr u32 linear_bpp = 4; + VideoCore::MortonCopyPixels128(VideoCore::MortonSwizzleMode::MortonToLinear, + framebuffer.width, framebuffer.height, bytes_per_pixel, + linear_bpp, Memory::GetPointer(framebuffer_addr), + gl_framebuffer_data.data()); glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(framebuffer.stride)); @@ -244,6 +246,21 @@ void RendererOpenGL::InitOpenGLObjects() { LoadColorToActiveGLTexture(0, 0, 0, 0, screen_info.texture); } +void RendererOpenGL::AddTelemetryFields() { + const char* const gl_version{reinterpret_cast<char const*>(glGetString(GL_VERSION))}; + const char* const gpu_vendor{reinterpret_cast<char const*>(glGetString(GL_VENDOR))}; + const char* const gpu_model{reinterpret_cast<char const*>(glGetString(GL_RENDERER))}; + + LOG_INFO(Render_OpenGL, "GL_VERSION: {}", gl_version); + LOG_INFO(Render_OpenGL, "GL_VENDOR: {}", gpu_vendor); + LOG_INFO(Render_OpenGL, "GL_RENDERER: {}", gpu_model); + + auto& telemetry_session = system.TelemetrySession(); + telemetry_session.AddField(Telemetry::FieldType::UserSystem, "GPU_Vendor", gpu_vendor); + telemetry_session.AddField(Telemetry::FieldType::UserSystem, "GPU_Model", gpu_model); + telemetry_session.AddField(Telemetry::FieldType::UserSystem, "GPU_OpenGL_Version", gl_version); +} + void RendererOpenGL::CreateRasterizer() { if (rasterizer) { return; @@ -257,6 +274,7 @@ void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture, const Tegra::FramebufferConfig& framebuffer) { texture.width = framebuffer.width; texture.height = framebuffer.height; + texture.pixel_format = framebuffer.pixel_format; GLint internal_format; switch (framebuffer.pixel_format) { @@ -380,7 +398,8 @@ void RendererOpenGL::CaptureScreenshot() { GLuint 
renderbuffer; glGenRenderbuffers(1, &renderbuffer); glBindRenderbuffer(GL_RENDERBUFFER, renderbuffer); - glRenderbufferStorage(GL_RENDERBUFFER, GL_RGB8, layout.width, layout.height); + glRenderbufferStorage(GL_RENDERBUFFER, state.GetsRGBUsed() ? GL_SRGB8 : GL_RGB8, layout.width, + layout.height); glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderbuffer); DrawScreen(layout); @@ -464,17 +483,7 @@ bool RendererOpenGL::Init() { glDebugMessageCallback(DebugHandler, nullptr); } - const char* gl_version{reinterpret_cast<char const*>(glGetString(GL_VERSION))}; - const char* gpu_vendor{reinterpret_cast<char const*>(glGetString(GL_VENDOR))}; - const char* gpu_model{reinterpret_cast<char const*>(glGetString(GL_RENDERER))}; - - LOG_INFO(Render_OpenGL, "GL_VERSION: {}", gl_version); - LOG_INFO(Render_OpenGL, "GL_VENDOR: {}", gpu_vendor); - LOG_INFO(Render_OpenGL, "GL_RENDERER: {}", gpu_model); - - Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Vendor", gpu_vendor); - Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Model", gpu_model); - Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_OpenGL_Version", gl_version); + AddTelemetryFields(); if (!GLAD_GL_VERSION_4_3) { return false; diff --git a/src/video_core/renderer_opengl/renderer_opengl.h b/src/video_core/renderer_opengl/renderer_opengl.h index 7e13e566b..6cbf9d2cb 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.h +++ b/src/video_core/renderer_opengl/renderer_opengl.h @@ -39,7 +39,7 @@ struct TextureInfo { /// Structure used for storing information about the display target for the Switch screen struct ScreenInfo { GLuint display_texture; - const MathUtil::Rectangle<float> display_texcoords{0.0f, 0.0f, 1.0f, 1.0f}; + const Common::Rectangle<float> display_texcoords{0.0f, 0.0f, 1.0f, 1.0f}; TextureInfo texture; }; @@ -60,6 +60,7 @@ public: private: void InitOpenGLObjects(); + void AddTelemetryFields(); void CreateRasterizer(); void ConfigureFramebufferTexture(TextureInfo& texture, @@ -102,7 +103,7 @@ private: /// Used for transforming the framebuffer orientation Tegra::FramebufferConfig::TransformFlags framebuffer_transform_flags; - MathUtil::Rectangle<int> framebuffer_crop_rect; + Common::Rectangle<int> framebuffer_crop_rect; }; } // namespace OpenGL diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp new file mode 100644 index 000000000..34bf26ff2 --- /dev/null +++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp @@ -0,0 +1,483 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "common/logging/log.h" +#include "video_core/engines/maxwell_3d.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/maxwell_to_vk.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/surface.h" + +namespace Vulkan::MaxwellToVK { + +namespace Sampler { + +vk::Filter Filter(Tegra::Texture::TextureFilter filter) { + switch (filter) { + case Tegra::Texture::TextureFilter::Linear: + return vk::Filter::eLinear; + case Tegra::Texture::TextureFilter::Nearest: + return vk::Filter::eNearest; + } + UNIMPLEMENTED_MSG("Unimplemented sampler filter={}", static_cast<u32>(filter)); + return {}; +} + +vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter) { + switch (mipmap_filter) { + case Tegra::Texture::TextureMipmapFilter::None: + // TODO(Rodrigo): None seems to be mapped to OpenGL's mag and min filters without mipmapping + // (e.g. GL_NEAREST and GL_LINEAR). Vulkan doesn't have such a thing, find out if we have to + // use an image view with a single mipmap level to emulate this. + return vk::SamplerMipmapMode::eLinear; + case Tegra::Texture::TextureMipmapFilter::Linear: + return vk::SamplerMipmapMode::eLinear; + case Tegra::Texture::TextureMipmapFilter::Nearest: + return vk::SamplerMipmapMode::eNearest; + } + UNIMPLEMENTED_MSG("Unimplemented sampler mipmap mode={}", static_cast<u32>(mipmap_filter)); + return {}; +} + +vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode) { + switch (wrap_mode) { + case Tegra::Texture::WrapMode::Wrap: + return vk::SamplerAddressMode::eRepeat; + case Tegra::Texture::WrapMode::Mirror: + return vk::SamplerAddressMode::eMirroredRepeat; + case Tegra::Texture::WrapMode::ClampToEdge: + return vk::SamplerAddressMode::eClampToEdge; + case Tegra::Texture::WrapMode::Border: + return vk::SamplerAddressMode::eClampToBorder; + case Tegra::Texture::WrapMode::ClampOGL: + // TODO(Rodrigo): GL_CLAMP was removed as of OpenGL 3.1, to implement GL_CLAMP, we can use + // eClampToBorder to get the border color of the texture, and then sample the edge to + // manually mix them. However the shader part of this is not yet implemented. 
+ return vk::SamplerAddressMode::eClampToBorder; + case Tegra::Texture::WrapMode::MirrorOnceClampToEdge: + return vk::SamplerAddressMode::eMirrorClampToEdge; + case Tegra::Texture::WrapMode::MirrorOnceBorder: + UNIMPLEMENTED(); + return vk::SamplerAddressMode::eMirrorClampToEdge; + } + UNIMPLEMENTED_MSG("Unimplemented wrap mode={}", static_cast<u32>(wrap_mode)); + return {}; +} + +vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func) { + switch (depth_compare_func) { + case Tegra::Texture::DepthCompareFunc::Never: + return vk::CompareOp::eNever; + case Tegra::Texture::DepthCompareFunc::Less: + return vk::CompareOp::eLess; + case Tegra::Texture::DepthCompareFunc::LessEqual: + return vk::CompareOp::eLessOrEqual; + case Tegra::Texture::DepthCompareFunc::Equal: + return vk::CompareOp::eEqual; + case Tegra::Texture::DepthCompareFunc::NotEqual: + return vk::CompareOp::eNotEqual; + case Tegra::Texture::DepthCompareFunc::Greater: + return vk::CompareOp::eGreater; + case Tegra::Texture::DepthCompareFunc::GreaterEqual: + return vk::CompareOp::eGreaterOrEqual; + case Tegra::Texture::DepthCompareFunc::Always: + return vk::CompareOp::eAlways; + } + UNIMPLEMENTED_MSG("Unimplemented sampler depth compare function={}", + static_cast<u32>(depth_compare_func)); + return {}; +} + +} // namespace Sampler + +struct FormatTuple { + vk::Format format; ///< Vulkan format + ComponentType component_type; ///< Abstracted component type + bool attachable; ///< True when this format can be used as an attachment +}; + +static constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format_tuples = {{ + {vk::Format::eA8B8G8R8UnormPack32, ComponentType::UNorm, true}, // ABGR8U + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ABGR8S + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ABGR8UI + {vk::Format::eB5G6R5UnormPack16, ComponentType::UNorm, false}, // B5G6R5U + {vk::Format::eA2B10G10R10UnormPack32, ComponentType::UNorm, true}, // A2B10G10R10U + {vk::Format::eUndefined, ComponentType::Invalid, false}, // A1B5G5R5U + {vk::Format::eR8Unorm, ComponentType::UNorm, true}, // R8U + {vk::Format::eUndefined, ComponentType::Invalid, false}, // R8UI + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA16F + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA16U + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA16UI + {vk::Format::eUndefined, ComponentType::Invalid, false}, // R11FG11FB10F + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA32UI + {vk::Format::eBc1RgbaUnormBlock, ComponentType::UNorm, false}, // DXT1 + {vk::Format::eBc2UnormBlock, ComponentType::UNorm, false}, // DXT23 + {vk::Format::eBc3UnormBlock, ComponentType::UNorm, false}, // DXT45 + {vk::Format::eBc4UnormBlock, ComponentType::UNorm, false}, // DXN1 + {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXN2UNORM + {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXN2SNORM + {vk::Format::eUndefined, ComponentType::Invalid, false}, // BC7U + {vk::Format::eUndefined, ComponentType::Invalid, false}, // BC6H_UF16 + {vk::Format::eUndefined, ComponentType::Invalid, false}, // BC6H_SF16 + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_4X4 + {vk::Format::eUndefined, ComponentType::Invalid, false}, // BGRA8 + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA32F + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG32F + {vk::Format::eUndefined, ComponentType::Invalid, 
false}, // R32F + {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16F + {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16U + {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16S + {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16UI + {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16I + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16 + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16F + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16UI + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16I + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16S + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGB32F + {vk::Format::eA8B8G8R8SrgbPack32, ComponentType::UNorm, true}, // RGBA8_SRGB + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG8U + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG8S + {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG32UI + {vk::Format::eUndefined, ComponentType::Invalid, false}, // R32UI + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_8X8 + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_8X5 + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_5X4 + + // Compressed sRGB formats + {vk::Format::eUndefined, ComponentType::Invalid, false}, // BGRA8_SRGB + {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXT1_SRGB + {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXT23_SRGB + {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXT45_SRGB + {vk::Format::eUndefined, ComponentType::Invalid, false}, // BC7U_SRGB + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_4X4_SRGB + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_8X8_SRGB + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_8X5_SRGB + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_5X4_SRGB + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_5X5 + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_5X5_SRGB + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_10X8 + {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_10X8_SRGB + + // Depth formats + {vk::Format::eD32Sfloat, ComponentType::Float, true}, // Z32F + {vk::Format::eD16Unorm, ComponentType::UNorm, true}, // Z16 + + // DepthStencil formats + {vk::Format::eD24UnormS8Uint, ComponentType::UNorm, true}, // Z24S8 + {vk::Format::eD24UnormS8Uint, ComponentType::UNorm, true}, // S8Z24 (emulated) + {vk::Format::eUndefined, ComponentType::Invalid, false}, // Z32FS8 +}}; + +static constexpr bool IsZetaFormat(PixelFormat pixel_format) { + return pixel_format >= PixelFormat::MaxColorFormat && + pixel_format < PixelFormat::MaxDepthStencilFormat; +} + +std::pair<vk::Format, bool> SurfaceFormat(const VKDevice& device, FormatType format_type, + PixelFormat pixel_format, ComponentType component_type) { + ASSERT(static_cast<std::size_t>(pixel_format) < tex_format_tuples.size()); + + const auto tuple = tex_format_tuples[static_cast<u32>(pixel_format)]; + UNIMPLEMENTED_IF_MSG(tuple.format == vk::Format::eUndefined, + "Unimplemented texture format with pixel format={} and component type={}", + static_cast<u32>(pixel_format), static_cast<u32>(component_type)); + ASSERT_MSG(component_type == tuple.component_type, "Component type mismatch"); + + auto usage = 
vk::FormatFeatureFlagBits::eSampledImage | + vk::FormatFeatureFlagBits::eTransferDst | vk::FormatFeatureFlagBits::eTransferSrc; + if (tuple.attachable) { + usage |= IsZetaFormat(pixel_format) ? vk::FormatFeatureFlagBits::eDepthStencilAttachment + : vk::FormatFeatureFlagBits::eColorAttachment; + } + return {device.GetSupportedFormat(tuple.format, usage, format_type), tuple.attachable}; +} + +vk::ShaderStageFlagBits ShaderStage(Maxwell::ShaderStage stage) { + switch (stage) { + case Maxwell::ShaderStage::Vertex: + return vk::ShaderStageFlagBits::eVertex; + case Maxwell::ShaderStage::TesselationControl: + return vk::ShaderStageFlagBits::eTessellationControl; + case Maxwell::ShaderStage::TesselationEval: + return vk::ShaderStageFlagBits::eTessellationEvaluation; + case Maxwell::ShaderStage::Geometry: + return vk::ShaderStageFlagBits::eGeometry; + case Maxwell::ShaderStage::Fragment: + return vk::ShaderStageFlagBits::eFragment; + } + UNIMPLEMENTED_MSG("Unimplemented shader stage={}", static_cast<u32>(stage)); + return {}; +} + +vk::PrimitiveTopology PrimitiveTopology(Maxwell::PrimitiveTopology topology) { + switch (topology) { + case Maxwell::PrimitiveTopology::Points: + return vk::PrimitiveTopology::ePointList; + case Maxwell::PrimitiveTopology::Lines: + return vk::PrimitiveTopology::eLineList; + case Maxwell::PrimitiveTopology::LineStrip: + return vk::PrimitiveTopology::eLineStrip; + case Maxwell::PrimitiveTopology::Triangles: + return vk::PrimitiveTopology::eTriangleList; + case Maxwell::PrimitiveTopology::TriangleStrip: + return vk::PrimitiveTopology::eTriangleStrip; + } + UNIMPLEMENTED_MSG("Unimplemented topology={}", static_cast<u32>(topology)); + return {}; +} + +vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) { + switch (type) { + case Maxwell::VertexAttribute::Type::SignedNorm: + break; + case Maxwell::VertexAttribute::Type::UnsignedNorm: + switch (size) { + case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + return vk::Format::eR8G8B8A8Unorm; + default: + break; + } + break; + case Maxwell::VertexAttribute::Type::SignedInt: + break; + case Maxwell::VertexAttribute::Type::UnsignedInt: + switch (size) { + case Maxwell::VertexAttribute::Size::Size_32: + return vk::Format::eR32Uint; + default: + break; + } + case Maxwell::VertexAttribute::Type::UnsignedScaled: + case Maxwell::VertexAttribute::Type::SignedScaled: + break; + case Maxwell::VertexAttribute::Type::Float: + switch (size) { + case Maxwell::VertexAttribute::Size::Size_32_32_32_32: + return vk::Format::eR32G32B32A32Sfloat; + case Maxwell::VertexAttribute::Size::Size_32_32_32: + return vk::Format::eR32G32B32Sfloat; + case Maxwell::VertexAttribute::Size::Size_32_32: + return vk::Format::eR32G32Sfloat; + case Maxwell::VertexAttribute::Size::Size_32: + return vk::Format::eR32Sfloat; + default: + break; + } + break; + } + UNIMPLEMENTED_MSG("Unimplemented vertex format of type={} and size={}", static_cast<u32>(type), + static_cast<u32>(size)); + return {}; +} + +vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison) { + switch (comparison) { + case Maxwell::ComparisonOp::Never: + case Maxwell::ComparisonOp::NeverOld: + return vk::CompareOp::eNever; + case Maxwell::ComparisonOp::Less: + case Maxwell::ComparisonOp::LessOld: + return vk::CompareOp::eLess; + case Maxwell::ComparisonOp::Equal: + case Maxwell::ComparisonOp::EqualOld: + return vk::CompareOp::eEqual; + case Maxwell::ComparisonOp::LessEqual: + case Maxwell::ComparisonOp::LessEqualOld: + return vk::CompareOp::eLessOrEqual; 
+ case Maxwell::ComparisonOp::Greater: + case Maxwell::ComparisonOp::GreaterOld: + return vk::CompareOp::eGreater; + case Maxwell::ComparisonOp::NotEqual: + case Maxwell::ComparisonOp::NotEqualOld: + return vk::CompareOp::eNotEqual; + case Maxwell::ComparisonOp::GreaterEqual: + case Maxwell::ComparisonOp::GreaterEqualOld: + return vk::CompareOp::eGreaterOrEqual; + case Maxwell::ComparisonOp::Always: + case Maxwell::ComparisonOp::AlwaysOld: + return vk::CompareOp::eAlways; + } + UNIMPLEMENTED_MSG("Unimplemented comparison op={}", static_cast<u32>(comparison)); + return {}; +} + +vk::IndexType IndexFormat(Maxwell::IndexFormat index_format) { + switch (index_format) { + case Maxwell::IndexFormat::UnsignedByte: + UNIMPLEMENTED_MSG("Vulkan does not support native u8 index format"); + return vk::IndexType::eUint16; + case Maxwell::IndexFormat::UnsignedShort: + return vk::IndexType::eUint16; + case Maxwell::IndexFormat::UnsignedInt: + return vk::IndexType::eUint32; + } + UNIMPLEMENTED_MSG("Unimplemented index_format={}", static_cast<u32>(index_format)); + return {}; +} + +vk::StencilOp StencilOp(Maxwell::StencilOp stencil_op) { + switch (stencil_op) { + case Maxwell::StencilOp::Keep: + case Maxwell::StencilOp::KeepOGL: + return vk::StencilOp::eKeep; + case Maxwell::StencilOp::Zero: + case Maxwell::StencilOp::ZeroOGL: + return vk::StencilOp::eZero; + case Maxwell::StencilOp::Replace: + case Maxwell::StencilOp::ReplaceOGL: + return vk::StencilOp::eReplace; + case Maxwell::StencilOp::Incr: + case Maxwell::StencilOp::IncrOGL: + return vk::StencilOp::eIncrementAndClamp; + case Maxwell::StencilOp::Decr: + case Maxwell::StencilOp::DecrOGL: + return vk::StencilOp::eDecrementAndClamp; + case Maxwell::StencilOp::Invert: + case Maxwell::StencilOp::InvertOGL: + return vk::StencilOp::eInvert; + case Maxwell::StencilOp::IncrWrap: + case Maxwell::StencilOp::IncrWrapOGL: + return vk::StencilOp::eIncrementAndWrap; + case Maxwell::StencilOp::DecrWrap: + case Maxwell::StencilOp::DecrWrapOGL: + return vk::StencilOp::eDecrementAndWrap; + } + UNIMPLEMENTED_MSG("Unimplemented stencil op={}", static_cast<u32>(stencil_op)); + return {}; +} + +vk::BlendOp BlendEquation(Maxwell::Blend::Equation equation) { + switch (equation) { + case Maxwell::Blend::Equation::Add: + case Maxwell::Blend::Equation::AddGL: + return vk::BlendOp::eAdd; + case Maxwell::Blend::Equation::Subtract: + case Maxwell::Blend::Equation::SubtractGL: + return vk::BlendOp::eSubtract; + case Maxwell::Blend::Equation::ReverseSubtract: + case Maxwell::Blend::Equation::ReverseSubtractGL: + return vk::BlendOp::eReverseSubtract; + case Maxwell::Blend::Equation::Min: + case Maxwell::Blend::Equation::MinGL: + return vk::BlendOp::eMin; + case Maxwell::Blend::Equation::Max: + case Maxwell::Blend::Equation::MaxGL: + return vk::BlendOp::eMax; + } + UNIMPLEMENTED_MSG("Unimplemented blend equation={}", static_cast<u32>(equation)); + return {}; +} + +vk::BlendFactor BlendFactor(Maxwell::Blend::Factor factor) { + switch (factor) { + case Maxwell::Blend::Factor::Zero: + case Maxwell::Blend::Factor::ZeroGL: + return vk::BlendFactor::eZero; + case Maxwell::Blend::Factor::One: + case Maxwell::Blend::Factor::OneGL: + return vk::BlendFactor::eOne; + case Maxwell::Blend::Factor::SourceColor: + case Maxwell::Blend::Factor::SourceColorGL: + return vk::BlendFactor::eSrcColor; + case Maxwell::Blend::Factor::OneMinusSourceColor: + case Maxwell::Blend::Factor::OneMinusSourceColorGL: + return vk::BlendFactor::eOneMinusSrcColor; + case Maxwell::Blend::Factor::SourceAlpha: + case 
Maxwell::Blend::Factor::SourceAlphaGL: + return vk::BlendFactor::eSrcAlpha; + case Maxwell::Blend::Factor::OneMinusSourceAlpha: + case Maxwell::Blend::Factor::OneMinusSourceAlphaGL: + return vk::BlendFactor::eOneMinusSrcAlpha; + case Maxwell::Blend::Factor::DestAlpha: + case Maxwell::Blend::Factor::DestAlphaGL: + return vk::BlendFactor::eDstAlpha; + case Maxwell::Blend::Factor::OneMinusDestAlpha: + case Maxwell::Blend::Factor::OneMinusDestAlphaGL: + return vk::BlendFactor::eOneMinusDstAlpha; + case Maxwell::Blend::Factor::DestColor: + case Maxwell::Blend::Factor::DestColorGL: + return vk::BlendFactor::eDstColor; + case Maxwell::Blend::Factor::OneMinusDestColor: + case Maxwell::Blend::Factor::OneMinusDestColorGL: + return vk::BlendFactor::eOneMinusDstColor; + case Maxwell::Blend::Factor::SourceAlphaSaturate: + case Maxwell::Blend::Factor::SourceAlphaSaturateGL: + return vk::BlendFactor::eSrcAlphaSaturate; + case Maxwell::Blend::Factor::Source1Color: + case Maxwell::Blend::Factor::Source1ColorGL: + return vk::BlendFactor::eSrc1Color; + case Maxwell::Blend::Factor::OneMinusSource1Color: + case Maxwell::Blend::Factor::OneMinusSource1ColorGL: + return vk::BlendFactor::eOneMinusSrc1Color; + case Maxwell::Blend::Factor::Source1Alpha: + case Maxwell::Blend::Factor::Source1AlphaGL: + return vk::BlendFactor::eSrc1Alpha; + case Maxwell::Blend::Factor::OneMinusSource1Alpha: + case Maxwell::Blend::Factor::OneMinusSource1AlphaGL: + return vk::BlendFactor::eOneMinusSrc1Alpha; + case Maxwell::Blend::Factor::ConstantColor: + case Maxwell::Blend::Factor::ConstantColorGL: + return vk::BlendFactor::eConstantColor; + case Maxwell::Blend::Factor::OneMinusConstantColor: + case Maxwell::Blend::Factor::OneMinusConstantColorGL: + return vk::BlendFactor::eOneMinusConstantColor; + case Maxwell::Blend::Factor::ConstantAlpha: + case Maxwell::Blend::Factor::ConstantAlphaGL: + return vk::BlendFactor::eConstantAlpha; + case Maxwell::Blend::Factor::OneMinusConstantAlpha: + case Maxwell::Blend::Factor::OneMinusConstantAlphaGL: + return vk::BlendFactor::eOneMinusConstantAlpha; + } + UNIMPLEMENTED_MSG("Unimplemented blend factor={}", static_cast<u32>(factor)); + return {}; +} + +vk::FrontFace FrontFace(Maxwell::Cull::FrontFace front_face) { + switch (front_face) { + case Maxwell::Cull::FrontFace::ClockWise: + return vk::FrontFace::eClockwise; + case Maxwell::Cull::FrontFace::CounterClockWise: + return vk::FrontFace::eCounterClockwise; + } + UNIMPLEMENTED_MSG("Unimplemented front face={}", static_cast<u32>(front_face)); + return {}; +} + +vk::CullModeFlags CullFace(Maxwell::Cull::CullFace cull_face) { + switch (cull_face) { + case Maxwell::Cull::CullFace::Front: + return vk::CullModeFlagBits::eFront; + case Maxwell::Cull::CullFace::Back: + return vk::CullModeFlagBits::eBack; + case Maxwell::Cull::CullFace::FrontAndBack: + return vk::CullModeFlagBits::eFrontAndBack; + } + UNIMPLEMENTED_MSG("Unimplemented cull face={}", static_cast<u32>(cull_face)); + return {}; +} + +vk::ComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle) { + switch (swizzle) { + case Tegra::Texture::SwizzleSource::Zero: + return vk::ComponentSwizzle::eZero; + case Tegra::Texture::SwizzleSource::R: + return vk::ComponentSwizzle::eR; + case Tegra::Texture::SwizzleSource::G: + return vk::ComponentSwizzle::eG; + case Tegra::Texture::SwizzleSource::B: + return vk::ComponentSwizzle::eB; + case Tegra::Texture::SwizzleSource::A: + return vk::ComponentSwizzle::eA; + case Tegra::Texture::SwizzleSource::OneInt: + case 
Tegra::Texture::SwizzleSource::OneFloat: + return vk::ComponentSwizzle::eOne; + } + UNIMPLEMENTED_MSG("Unimplemented swizzle source={}", static_cast<u32>(swizzle)); + return {}; +} + +} // namespace Vulkan::MaxwellToVK diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.h b/src/video_core/renderer_vulkan/maxwell_to_vk.h new file mode 100644 index 000000000..4cadc0721 --- /dev/null +++ b/src/video_core/renderer_vulkan/maxwell_to_vk.h @@ -0,0 +1,58 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <utility> +#include "common/common_types.h" +#include "video_core/engines/maxwell_3d.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/surface.h" +#include "video_core/textures/texture.h" + +namespace Vulkan::MaxwellToVK { + +using Maxwell = Tegra::Engines::Maxwell3D::Regs; +using PixelFormat = VideoCore::Surface::PixelFormat; +using ComponentType = VideoCore::Surface::ComponentType; + +namespace Sampler { + +vk::Filter Filter(Tegra::Texture::TextureFilter filter); + +vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter); + +vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode); + +vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func); + +} // namespace Sampler + +std::pair<vk::Format, bool> SurfaceFormat(const VKDevice& device, FormatType format_type, + PixelFormat pixel_format, ComponentType component_type); + +vk::ShaderStageFlagBits ShaderStage(Maxwell::ShaderStage stage); + +vk::PrimitiveTopology PrimitiveTopology(Maxwell::PrimitiveTopology topology); + +vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size); + +vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison); + +vk::IndexType IndexFormat(Maxwell::IndexFormat index_format); + +vk::StencilOp StencilOp(Maxwell::StencilOp stencil_op); + +vk::BlendOp BlendEquation(Maxwell::Blend::Equation equation); + +vk::BlendFactor BlendFactor(Maxwell::Blend::Factor factor); + +vk::FrontFace FrontFace(Maxwell::Cull::FrontFace front_face); + +vk::CullModeFlags CullFace(Maxwell::Cull::CullFace cull_face); + +vk::ComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle); + +} // namespace Vulkan::MaxwellToVK diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp new file mode 100644 index 000000000..95eab3fec --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp @@ -0,0 +1,123 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include <cstring> +#include <memory> +#include <optional> +#include <tuple> + +#include "common/alignment.h" +#include "common/assert.h" +#include "core/memory.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_buffer_cache.h" +#include "video_core/renderer_vulkan/vk_scheduler.h" +#include "video_core/renderer_vulkan/vk_stream_buffer.h" + +namespace Vulkan { + +CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offset, + std::size_t alignment, u8* host_ptr) + : cpu_addr{cpu_addr}, size{size}, offset{offset}, alignment{alignment}, RasterizerCacheObject{ + host_ptr} {} + +VKBufferCache::VKBufferCache(Tegra::MemoryManager& tegra_memory_manager, + VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, + VKMemoryManager& memory_manager, VKScheduler& scheduler, u64 size) + : RasterizerCache{rasterizer}, tegra_memory_manager{tegra_memory_manager} { + const auto usage = vk::BufferUsageFlagBits::eVertexBuffer | + vk::BufferUsageFlagBits::eIndexBuffer | + vk::BufferUsageFlagBits::eUniformBuffer; + const auto access = vk::AccessFlagBits::eVertexAttributeRead | vk::AccessFlagBits::eIndexRead | + vk::AccessFlagBits::eUniformRead; + stream_buffer = + std::make_unique<VKStreamBuffer>(device, memory_manager, scheduler, size, usage, access, + vk::PipelineStageFlagBits::eAllCommands); + buffer_handle = stream_buffer->GetBuffer(); +} + +VKBufferCache::~VKBufferCache() = default; + +u64 VKBufferCache::UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size, u64 alignment, + bool cache) { + const auto cpu_addr{tegra_memory_manager.GpuToCpuAddress(gpu_addr)}; + ASSERT_MSG(cpu_addr, "Invalid GPU address"); + + // Cache management is a big overhead, so only cache entries with a given size. + // TODO: Figure out which size is the best for given games. 
+ cache &= size >= 2048; + + const auto& host_ptr{Memory::GetPointer(*cpu_addr)}; + if (cache) { + auto entry = TryGet(host_ptr); + if (entry) { + if (entry->GetSize() >= size && entry->GetAlignment() == alignment) { + return entry->GetOffset(); + } + Unregister(entry); + } + } + + AlignBuffer(alignment); + const u64 uploaded_offset = buffer_offset; + + if (!host_ptr) { + return uploaded_offset; + } + + std::memcpy(buffer_ptr, host_ptr, size); + buffer_ptr += size; + buffer_offset += size; + + if (cache) { + auto entry = std::make_shared<CachedBufferEntry>(*cpu_addr, size, uploaded_offset, + alignment, host_ptr); + Register(entry); + } + + return uploaded_offset; +} + +u64 VKBufferCache::UploadHostMemory(const u8* raw_pointer, std::size_t size, u64 alignment) { + AlignBuffer(alignment); + std::memcpy(buffer_ptr, raw_pointer, size); + const u64 uploaded_offset = buffer_offset; + + buffer_ptr += size; + buffer_offset += size; + return uploaded_offset; +} + +std::tuple<u8*, u64> VKBufferCache::ReserveMemory(std::size_t size, u64 alignment) { + AlignBuffer(alignment); + u8* const uploaded_ptr = buffer_ptr; + const u64 uploaded_offset = buffer_offset; + + buffer_ptr += size; + buffer_offset += size; + return {uploaded_ptr, uploaded_offset}; +} + +void VKBufferCache::Reserve(std::size_t max_size) { + bool invalidate; + std::tie(buffer_ptr, buffer_offset_base, invalidate) = stream_buffer->Reserve(max_size); + buffer_offset = buffer_offset_base; + + if (invalidate) { + InvalidateAll(); + } +} + +VKExecutionContext VKBufferCache::Send(VKExecutionContext exctx) { + return stream_buffer->Send(exctx, buffer_offset - buffer_offset_base); +} + +void VKBufferCache::AlignBuffer(std::size_t alignment) { + // Align the offset, not the mapped pointer + const u64 offset_aligned = Common::AlignUp(buffer_offset, alignment); + buffer_ptr += offset_aligned - buffer_offset; + buffer_offset = offset_aligned; +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h new file mode 100644 index 000000000..8b415744b --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h @@ -0,0 +1,104 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <memory> +#include <tuple> + +#include "common/common_types.h" +#include "video_core/gpu.h" +#include "video_core/rasterizer_cache.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_scheduler.h" + +namespace Tegra { +class MemoryManager; +} + +namespace Vulkan { + +class VKDevice; +class VKFence; +class VKMemoryManager; +class VKStreamBuffer; + +class CachedBufferEntry final : public RasterizerCacheObject { +public: + explicit CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offset, std::size_t alignment, + u8* host_ptr); + + VAddr GetCpuAddr() const override { + return cpu_addr; + } + + std::size_t GetSizeInBytes() const override { + return size; + } + + std::size_t GetSize() const { + return size; + } + + u64 GetOffset() const { + return offset; + } + + std::size_t GetAlignment() const { + return alignment; + } + + // We do not have to flush this cache as things in it are never modified by us. 
+ void Flush() override {} + +private: + VAddr cpu_addr{}; + std::size_t size{}; + u64 offset{}; + std::size_t alignment{}; +}; + +class VKBufferCache final : public RasterizerCache<std::shared_ptr<CachedBufferEntry>> { +public: + explicit VKBufferCache(Tegra::MemoryManager& tegra_memory_manager, + VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, + VKMemoryManager& memory_manager, VKScheduler& scheduler, u64 size); + ~VKBufferCache(); + + /// Uploads data from a guest GPU address. Returns host's buffer offset where it's been + /// allocated. + u64 UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size, u64 alignment = 4, + bool cache = true); + + /// Uploads from a host memory. Returns host's buffer offset where it's been allocated. + u64 UploadHostMemory(const u8* raw_pointer, std::size_t size, u64 alignment = 4); + + /// Reserves memory to be used by host's CPU. Returns mapped address and offset. + std::tuple<u8*, u64> ReserveMemory(std::size_t size, u64 alignment = 4); + + /// Reserves a region of memory to be used in subsequent upload/reserve operations. + void Reserve(std::size_t max_size); + + /// Ensures that the set data is sent to the device. + [[nodiscard]] VKExecutionContext Send(VKExecutionContext exctx); + + /// Returns the buffer cache handle. + vk::Buffer GetBuffer() const { + return buffer_handle; + } + +private: + void AlignBuffer(std::size_t alignment); + + Tegra::MemoryManager& tegra_memory_manager; + + std::unique_ptr<VKStreamBuffer> stream_buffer; + vk::Buffer buffer_handle; + + u8* buffer_ptr = nullptr; + u64 buffer_offset = 0; + u64 buffer_offset_base = 0; +}; + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp index 78a4e5f0e..00242ecbe 100644 --- a/src/video_core/renderer_vulkan/vk_device.cpp +++ b/src/video_core/renderer_vulkan/vk_device.cpp @@ -122,8 +122,7 @@ bool VKDevice::IsFormatSupported(vk::Format wanted_format, vk::FormatFeatureFlag FormatType format_type) const { const auto it = format_properties.find(wanted_format); if (it == format_properties.end()) { - LOG_CRITICAL(Render_Vulkan, "Unimplemented format query={}", - static_cast<u32>(wanted_format)); + LOG_CRITICAL(Render_Vulkan, "Unimplemented format query={}", vk::to_string(wanted_format)); UNREACHABLE(); return true; } @@ -219,11 +218,19 @@ std::map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperties( format_properties.emplace(format, physical.getFormatProperties(format, dldi)); }; AddFormatQuery(vk::Format::eA8B8G8R8UnormPack32); - AddFormatQuery(vk::Format::eR5G6B5UnormPack16); + AddFormatQuery(vk::Format::eB5G6R5UnormPack16); + AddFormatQuery(vk::Format::eA2B10G10R10UnormPack32); + AddFormatQuery(vk::Format::eR8G8B8A8Srgb); + AddFormatQuery(vk::Format::eR8Unorm); AddFormatQuery(vk::Format::eD32Sfloat); + AddFormatQuery(vk::Format::eD16Unorm); AddFormatQuery(vk::Format::eD16UnormS8Uint); AddFormatQuery(vk::Format::eD24UnormS8Uint); AddFormatQuery(vk::Format::eD32SfloatS8Uint); + AddFormatQuery(vk::Format::eBc1RgbaUnormBlock); + AddFormatQuery(vk::Format::eBc2UnormBlock); + AddFormatQuery(vk::Format::eBc3UnormBlock); + AddFormatQuery(vk::Format::eBc4UnormBlock); return format_properties; } diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp new file mode 100644 index 000000000..0451babbf --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp @@ -0,0 +1,252 @@ +// Copyright 2018 yuzu Emulator Project +// 
Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <algorithm> +#include <optional> +#include <tuple> +#include <vector> +#include "common/alignment.h" +#include "common/assert.h" +#include "common/common_types.h" +#include "common/logging/log.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/renderer_vulkan/vk_memory_manager.h" + +namespace Vulkan { + +// TODO(Rodrigo): Fine tune this number +constexpr u64 ALLOC_CHUNK_SIZE = 64 * 1024 * 1024; + +class VKMemoryAllocation final { +public: + explicit VKMemoryAllocation(const VKDevice& device, vk::DeviceMemory memory, + vk::MemoryPropertyFlags properties, u64 alloc_size, u32 type) + : device{device}, memory{memory}, properties{properties}, alloc_size{alloc_size}, + shifted_type{ShiftType(type)}, is_mappable{properties & + vk::MemoryPropertyFlagBits::eHostVisible} { + if (is_mappable) { + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + base_address = static_cast<u8*>(dev.mapMemory(memory, 0, alloc_size, {}, dld)); + } + } + + ~VKMemoryAllocation() { + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + if (is_mappable) + dev.unmapMemory(memory, dld); + dev.free(memory, nullptr, dld); + } + + VKMemoryCommit Commit(vk::DeviceSize commit_size, vk::DeviceSize alignment) { + auto found = TryFindFreeSection(free_iterator, alloc_size, static_cast<u64>(commit_size), + static_cast<u64>(alignment)); + if (!found) { + found = TryFindFreeSection(0, free_iterator, static_cast<u64>(commit_size), + static_cast<u64>(alignment)); + if (!found) { + // Signal out of memory, it'll try to do more allocations. + return nullptr; + } + } + u8* address = is_mappable ? base_address + *found : nullptr; + auto commit = std::make_unique<VKMemoryCommitImpl>(this, memory, address, *found, + *found + commit_size); + commits.push_back(commit.get()); + + // Last commit's address is highly probable to be free. + free_iterator = *found + commit_size; + + return commit; + } + + void Free(const VKMemoryCommitImpl* commit) { + ASSERT(commit); + const auto it = + std::find_if(commits.begin(), commits.end(), + [&](const auto& stored_commit) { return stored_commit == commit; }); + if (it == commits.end()) { + LOG_CRITICAL(Render_Vulkan, "Freeing unallocated commit!"); + UNREACHABLE(); + return; + } + commits.erase(it); + } + + /// Returns whether this allocation is compatible with the arguments. + bool IsCompatible(vk::MemoryPropertyFlags wanted_properties, u32 type_mask) const { + return (wanted_properties & properties) != vk::MemoryPropertyFlagBits(0) && + (type_mask & shifted_type) != 0; + } + +private: + static constexpr u32 ShiftType(u32 type) { + return 1U << type; + } + + /// A memory allocator, it may return a free region between "start" and "end" with the solicited + /// requeriments. + std::optional<u64> TryFindFreeSection(u64 start, u64 end, u64 size, u64 alignment) const { + u64 iterator = start; + while (iterator + size < end) { + const u64 try_left = Common::AlignUp(iterator, alignment); + const u64 try_right = try_left + size; + + bool overlap = false; + for (const auto& commit : commits) { + const auto [commit_left, commit_right] = commit->interval; + if (try_left < commit_right && commit_left < try_right) { + // There's an overlap, continue the search where the overlapping commit ends. 
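+                    // e.g. a candidate interval [0x40, 0x60) overlapping a commit that spans
+                    // [0x00, 0x80) moves the iterator to 0x80; the next iteration re-aligns the
+                    // candidate from there.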
+ iterator = commit_right; + overlap = true; + break; + } + } + if (!overlap) { + // A free address has been found. + return try_left; + } + } + // No free regions where found, return an empty optional. + return std::nullopt; + } + + const VKDevice& device; ///< Vulkan device. + const vk::DeviceMemory memory; ///< Vulkan memory allocation handler. + const vk::MemoryPropertyFlags properties; ///< Vulkan properties. + const u64 alloc_size; ///< Size of this allocation. + const u32 shifted_type; ///< Stored Vulkan type of this allocation, shifted. + const bool is_mappable; ///< Whether the allocation is mappable. + + /// Base address of the mapped pointer. + u8* base_address{}; + + /// Hints where the next free region is likely going to be. + u64 free_iterator{}; + + /// Stores all commits done from this allocation. + std::vector<const VKMemoryCommitImpl*> commits; +}; + +VKMemoryManager::VKMemoryManager(const VKDevice& device) + : device{device}, props{device.GetPhysical().getMemoryProperties(device.GetDispatchLoader())}, + is_memory_unified{GetMemoryUnified(props)} {} + +VKMemoryManager::~VKMemoryManager() = default; + +VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& reqs, bool host_visible) { + ASSERT(reqs.size < ALLOC_CHUNK_SIZE); + + // When a host visible commit is asked, search for host visible and coherent, otherwise search + // for a fast device local type. + const vk::MemoryPropertyFlags wanted_properties = + host_visible + ? vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent + : vk::MemoryPropertyFlagBits::eDeviceLocal; + + const auto TryCommit = [&]() -> VKMemoryCommit { + for (auto& alloc : allocs) { + if (!alloc->IsCompatible(wanted_properties, reqs.memoryTypeBits)) + continue; + + if (auto commit = alloc->Commit(reqs.size, reqs.alignment); commit) { + return commit; + } + } + return {}; + }; + + if (auto commit = TryCommit(); commit) { + return commit; + } + + // Commit has failed, allocate more memory. + if (!AllocMemory(wanted_properties, reqs.memoryTypeBits, ALLOC_CHUNK_SIZE)) { + // TODO(Rodrigo): Try to use host memory. + LOG_CRITICAL(Render_Vulkan, "Ran out of memory!"); + UNREACHABLE(); + } + + // Commit again, this time it won't fail since there's a fresh allocation above. If it does, + // there's a bug. 
+ auto commit = TryCommit(); + ASSERT(commit); + return commit; +} + +VKMemoryCommit VKMemoryManager::Commit(vk::Buffer buffer, bool host_visible) { + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + const auto requeriments = dev.getBufferMemoryRequirements(buffer, dld); + auto commit = Commit(requeriments, host_visible); + dev.bindBufferMemory(buffer, commit->GetMemory(), commit->GetOffset(), dld); + return commit; +} + +VKMemoryCommit VKMemoryManager::Commit(vk::Image image, bool host_visible) { + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + const auto requeriments = dev.getImageMemoryRequirements(image, dld); + auto commit = Commit(requeriments, host_visible); + dev.bindImageMemory(image, commit->GetMemory(), commit->GetOffset(), dld); + return commit; +} + +bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask, + u64 size) { + const u32 type = [&]() { + for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) { + const auto flags = props.memoryTypes[type_index].propertyFlags; + if ((type_mask & (1U << type_index)) && (flags & wanted_properties)) { + // The type matches in type and in the wanted properties. + return type_index; + } + } + LOG_CRITICAL(Render_Vulkan, "Couldn't find a compatible memory type!"); + UNREACHABLE(); + return 0u; + }(); + + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + + // Try to allocate found type. + const vk::MemoryAllocateInfo memory_ai(size, type); + vk::DeviceMemory memory; + if (const vk::Result res = dev.allocateMemory(&memory_ai, nullptr, &memory, dld); + res != vk::Result::eSuccess) { + LOG_CRITICAL(Render_Vulkan, "Device allocation failed with code {}!", vk::to_string(res)); + return false; + } + allocs.push_back( + std::make_unique<VKMemoryAllocation>(device, memory, wanted_properties, size, type)); + return true; +} + +/*static*/ bool VKMemoryManager::GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& props) { + for (u32 heap_index = 0; heap_index < props.memoryHeapCount; ++heap_index) { + if (!(props.memoryHeaps[heap_index].flags & vk::MemoryHeapFlagBits::eDeviceLocal)) { + // Memory is considered unified when heaps are device local only. + return false; + } + } + return true; +} + +VKMemoryCommitImpl::VKMemoryCommitImpl(VKMemoryAllocation* allocation, vk::DeviceMemory memory, + u8* data, u64 begin, u64 end) + : interval(std::make_pair(begin, end)), memory{memory}, allocation{allocation}, data{data} {} + +VKMemoryCommitImpl::~VKMemoryCommitImpl() { + allocation->Free(this); +} + +u8* VKMemoryCommitImpl::GetData() const { + ASSERT_MSG(data != nullptr, "Trying to access an unmapped commit."); + return data; +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h new file mode 100644 index 000000000..073597b35 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_memory_manager.h @@ -0,0 +1,87 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
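+
+// Rough usage sketch of the interfaces declared below (illustrative only; "device", "buffer",
+// "data" and "size" are assumed to come from the caller):
+//
+//     VKMemoryManager memory_manager(device);
+//     VKMemoryCommit commit = memory_manager.Commit(buffer, /*host_visible=*/true);
+//     std::memcpy(commit->GetData(), data, size); // GetData() is only valid for mappable commits
+//
+// A commit is a std::unique_ptr, so destroying it returns the region to its parent allocation.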
+ +#pragma once + +#include <memory> +#include <utility> +#include <vector> +#include "common/common_types.h" +#include "video_core/renderer_vulkan/declarations.h" + +namespace Vulkan { + +class VKDevice; +class VKMemoryAllocation; +class VKMemoryCommitImpl; + +using VKMemoryCommit = std::unique_ptr<VKMemoryCommitImpl>; + +class VKMemoryManager final { +public: + explicit VKMemoryManager(const VKDevice& device); + ~VKMemoryManager(); + + /** + * Commits a memory with the specified requeriments. + * @param reqs Requeriments returned from a Vulkan call. + * @param host_visible Signals the allocator that it *must* use host visible and coherent + * memory. When passing false, it will try to allocate device local memory. + * @returns A memory commit. + */ + VKMemoryCommit Commit(const vk::MemoryRequirements& reqs, bool host_visible); + + /// Commits memory required by the buffer and binds it. + VKMemoryCommit Commit(vk::Buffer buffer, bool host_visible); + + /// Commits memory required by the image and binds it. + VKMemoryCommit Commit(vk::Image image, bool host_visible); + + /// Returns true if the memory allocations are done always in host visible and coherent memory. + bool IsMemoryUnified() const { + return is_memory_unified; + } + +private: + /// Allocates a chunk of memory. + bool AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask, u64 size); + + /// Returns true if the device uses an unified memory model. + static bool GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& props); + + const VKDevice& device; ///< Device handler. + const vk::PhysicalDeviceMemoryProperties props; ///< Physical device properties. + const bool is_memory_unified; ///< True if memory model is unified. + std::vector<std::unique_ptr<VKMemoryAllocation>> allocs; ///< Current allocations. +}; + +class VKMemoryCommitImpl final { + friend VKMemoryAllocation; + +public: + explicit VKMemoryCommitImpl(VKMemoryAllocation* allocation, vk::DeviceMemory memory, u8* data, + u64 begin, u64 end); + ~VKMemoryCommitImpl(); + + /// Returns the writeable memory map. The commit has to be mappable. + u8* GetData() const; + + /// Returns the Vulkan memory handler. + vk::DeviceMemory GetMemory() const { + return memory; + } + + /// Returns the start position of the commit relative to the allocation. + vk::DeviceSize GetOffset() const { + return static_cast<vk::DeviceSize>(interval.first); + } + +private: + std::pair<u64, u64> interval{}; ///< Interval where the commit exists. + vk::DeviceMemory memory; ///< Vulkan device memory handler. + VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation. + u8* data{}; ///< Pointer to the host mapped memory, it has the commit offset included. +}; + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp new file mode 100644 index 000000000..a1e117443 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_resource_manager.cpp @@ -0,0 +1,285 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <algorithm> +#include <optional> +#include "common/assert.h" +#include "common/logging/log.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/renderer_vulkan/vk_resource_manager.h" + +namespace Vulkan { + +// TODO(Rodrigo): Fine tune these numbers. 
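+// COMMAND_BUFFER_POOL_SIZE is the number of command buffers allocated per pool growth (0x1000 =
+// 4096); FENCES_GROW_STEP is how many fences are added whenever the resource manager runs out of
+// free fences (0x40 = 64).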
+constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000; +constexpr std::size_t FENCES_GROW_STEP = 0x40; + +class CommandBufferPool final : public VKFencedPool { +public: + CommandBufferPool(const VKDevice& device) + : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {} + + void Allocate(std::size_t begin, std::size_t end) { + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + const u32 graphics_family = device.GetGraphicsFamily(); + + auto pool = std::make_unique<Pool>(); + + // Command buffers are going to be commited, recorded, executed every single usage cycle. + // They are also going to be reseted when commited. + const auto pool_flags = vk::CommandPoolCreateFlagBits::eTransient | + vk::CommandPoolCreateFlagBits::eResetCommandBuffer; + const vk::CommandPoolCreateInfo cmdbuf_pool_ci(pool_flags, graphics_family); + pool->handle = dev.createCommandPoolUnique(cmdbuf_pool_ci, nullptr, dld); + + const vk::CommandBufferAllocateInfo cmdbuf_ai(*pool->handle, + vk::CommandBufferLevel::ePrimary, + static_cast<u32>(COMMAND_BUFFER_POOL_SIZE)); + pool->cmdbufs = + dev.allocateCommandBuffersUnique<std::allocator<UniqueCommandBuffer>>(cmdbuf_ai, dld); + + pools.push_back(std::move(pool)); + } + + vk::CommandBuffer Commit(VKFence& fence) { + const std::size_t index = CommitResource(fence); + const auto pool_index = index / COMMAND_BUFFER_POOL_SIZE; + const auto sub_index = index % COMMAND_BUFFER_POOL_SIZE; + return *pools[pool_index]->cmdbufs[sub_index]; + } + +private: + struct Pool { + UniqueCommandPool handle; + std::vector<UniqueCommandBuffer> cmdbufs; + }; + + const VKDevice& device; + + std::vector<std::unique_ptr<Pool>> pools; +}; + +VKResource::VKResource() = default; + +VKResource::~VKResource() = default; + +VKFence::VKFence(const VKDevice& device, UniqueFence handle) + : device{device}, handle{std::move(handle)} {} + +VKFence::~VKFence() = default; + +void VKFence::Wait() { + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld); +} + +void VKFence::Release() { + is_owned = false; +} + +void VKFence::Commit() { + is_owned = true; + is_used = true; +} + +bool VKFence::Tick(bool gpu_wait, bool owner_wait) { + if (!is_used) { + // If a fence is not used it's always free. + return true; + } + if (is_owned && !owner_wait) { + // The fence is still being owned (Release has not been called) and ownership wait has + // not been asked. + return false; + } + + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + if (gpu_wait) { + // Wait for the fence if it has been requested. + dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld); + } else { + if (dev.getFenceStatus(*handle, dld) != vk::Result::eSuccess) { + // Vulkan fence is not ready, not much it can do here + return false; + } + } + + // Broadcast resources their free state. + for (auto* resource : protected_resources) { + resource->OnFenceRemoval(this); + } + protected_resources.clear(); + + // Prepare fence for reusage. 
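+    // (Vulkan fences stay signaled until they are explicitly reset, so reset it here before it
+    // can be committed again.)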
+ dev.resetFences({*handle}, dld); + is_used = false; + return true; +} + +void VKFence::Protect(VKResource* resource) { + protected_resources.push_back(resource); +} + +void VKFence::Unprotect(VKResource* resource) { + const auto it = std::find(protected_resources.begin(), protected_resources.end(), resource); + ASSERT(it != protected_resources.end()); + + resource->OnFenceRemoval(this); + protected_resources.erase(it); +} + +VKFenceWatch::VKFenceWatch() = default; + +VKFenceWatch::~VKFenceWatch() { + if (fence) { + fence->Unprotect(this); + } +} + +void VKFenceWatch::Wait() { + if (fence == nullptr) { + return; + } + fence->Wait(); + fence->Unprotect(this); +} + +void VKFenceWatch::Watch(VKFence& new_fence) { + Wait(); + fence = &new_fence; + fence->Protect(this); +} + +bool VKFenceWatch::TryWatch(VKFence& new_fence) { + if (fence) { + return false; + } + fence = &new_fence; + fence->Protect(this); + return true; +} + +void VKFenceWatch::OnFenceRemoval(VKFence* signaling_fence) { + ASSERT_MSG(signaling_fence == fence, "Removing the wrong fence"); + fence = nullptr; +} + +VKFencedPool::VKFencedPool(std::size_t grow_step) : grow_step{grow_step} {} + +VKFencedPool::~VKFencedPool() = default; + +std::size_t VKFencedPool::CommitResource(VKFence& fence) { + const auto Search = [&](std::size_t begin, std::size_t end) -> std::optional<std::size_t> { + for (std::size_t iterator = begin; iterator < end; ++iterator) { + if (watches[iterator]->TryWatch(fence)) { + // The resource is now being watched, a free resource was successfully found. + return iterator; + } + } + return {}; + }; + // Try to find a free resource from the hinted position to the end. + auto found = Search(free_iterator, watches.size()); + if (!found) { + // Search from beginning to the hinted position. + found = Search(0, free_iterator); + if (!found) { + // Both searches failed, the pool is full; handle it. + const std::size_t free_resource = ManageOverflow(); + + // Watch will wait for the resource to be free. + watches[free_resource]->Watch(fence); + found = free_resource; + } + } + // Free iterator is hinted to the resource after the one that's been commited. + free_iterator = (*found + 1) % watches.size(); + return *found; +} + +std::size_t VKFencedPool::ManageOverflow() { + const std::size_t old_capacity = watches.size(); + Grow(); + + // The last entry is guaranted to be free, since it's the first element of the freshly + // allocated resources. 
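+    // e.g. a full pool of N watches grown by grow_step hands out index N, which Grow() has just
+    // default-constructed and which is therefore not watching any fence yet.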
+ return old_capacity; +} + +void VKFencedPool::Grow() { + const std::size_t old_capacity = watches.size(); + watches.resize(old_capacity + grow_step); + std::generate(watches.begin() + old_capacity, watches.end(), + []() { return std::make_unique<VKFenceWatch>(); }); + Allocate(old_capacity, old_capacity + grow_step); +} + +VKResourceManager::VKResourceManager(const VKDevice& device) : device{device} { + GrowFences(FENCES_GROW_STEP); + command_buffer_pool = std::make_unique<CommandBufferPool>(device); +} + +VKResourceManager::~VKResourceManager() = default; + +VKFence& VKResourceManager::CommitFence() { + const auto StepFences = [&](bool gpu_wait, bool owner_wait) -> VKFence* { + const auto Tick = [=](auto& fence) { return fence->Tick(gpu_wait, owner_wait); }; + const auto hinted = fences.begin() + fences_iterator; + + auto it = std::find_if(hinted, fences.end(), Tick); + if (it == fences.end()) { + it = std::find_if(fences.begin(), hinted, Tick); + if (it == hinted) { + return nullptr; + } + } + fences_iterator = std::distance(fences.begin(), it) + 1; + if (fences_iterator >= fences.size()) + fences_iterator = 0; + + auto& fence = *it; + fence->Commit(); + return fence.get(); + }; + + VKFence* found_fence = StepFences(false, false); + if (!found_fence) { + // Try again, this time waiting. + found_fence = StepFences(true, false); + + if (!found_fence) { + // Allocate new fences and try again. + LOG_INFO(Render_Vulkan, "Allocating new fences {} -> {}", fences.size(), + fences.size() + FENCES_GROW_STEP); + + GrowFences(FENCES_GROW_STEP); + found_fence = StepFences(true, false); + ASSERT(found_fence != nullptr); + } + } + return *found_fence; +} + +vk::CommandBuffer VKResourceManager::CommitCommandBuffer(VKFence& fence) { + return command_buffer_pool->Commit(fence); +} + +void VKResourceManager::GrowFences(std::size_t new_fences_count) { + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + const vk::FenceCreateInfo fence_ci; + + const std::size_t previous_size = fences.size(); + fences.resize(previous_size + new_fences_count); + + std::generate(fences.begin() + previous_size, fences.end(), [&]() { + return std::make_unique<VKFence>(device, dev.createFenceUnique(fence_ci, nullptr, dld)); + }); +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.h b/src/video_core/renderer_vulkan/vk_resource_manager.h new file mode 100644 index 000000000..5bfe4cead --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_resource_manager.h @@ -0,0 +1,180 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <cstddef> +#include <memory> +#include <vector> +#include "video_core/renderer_vulkan/declarations.h" + +namespace Vulkan { + +class VKDevice; +class VKFence; +class VKResourceManager; + +class CommandBufferPool; + +/// Interface for a Vulkan resource +class VKResource { +public: + explicit VKResource(); + virtual ~VKResource(); + + /** + * Signals the object that an owning fence has been signaled. + * @param signaling_fence Fence that signals its usage end. + */ + virtual void OnFenceRemoval(VKFence* signaling_fence) = 0; +}; + +/** + * Fences take ownership of objects, protecting them from GPU-side or driver-side concurrent access. + * They must be commited from the resource manager. 
Their usage flow is: commit the fence from the + * resource manager, protect resources with it and use them, send the fence to an execution queue + * and Wait for it if needed and then call Release. Used resources will automatically be signaled + * when they are free to be reused. + * @brief Protects resources for concurrent usage and signals its release. + */ +class VKFence { + friend class VKResourceManager; + +public: + explicit VKFence(const VKDevice& device, UniqueFence handle); + ~VKFence(); + + /** + * Waits for the fence to be signaled. + * @warning You must have ownership of the fence and it has to be previously sent to a queue to + * call this function. + */ + void Wait(); + + /** + * Releases ownership of the fence. Pass after it has been sent to an execution queue. + * Unmanaged usage of the fence after the call will result in undefined behavior because it may + * be being used for something else. + */ + void Release(); + + /// Protects a resource with this fence. + void Protect(VKResource* resource); + + /// Removes protection for a resource. + void Unprotect(VKResource* resource); + + /// Retreives the fence. + operator vk::Fence() const { + return *handle; + } + +private: + /// Take ownership of the fence. + void Commit(); + + /** + * Updates the fence status. + * @warning Waiting for the owner might soft lock the execution. + * @param gpu_wait Wait for the fence to be signaled by the driver. + * @param owner_wait Wait for the owner to signal its freedom. + * @returns True if the fence is free. Waiting for gpu and owner will always return true. + */ + bool Tick(bool gpu_wait, bool owner_wait); + + const VKDevice& device; ///< Device handler + UniqueFence handle; ///< Vulkan fence + std::vector<VKResource*> protected_resources; ///< List of resources protected by this fence + bool is_owned = false; ///< The fence has been commited but not released yet. + bool is_used = false; ///< The fence has been commited but it has not been checked to be free. +}; + +/** + * A fence watch is used to keep track of the usage of a fence and protect a resource or set of + * resources without having to inherit VKResource from their handlers. + */ +class VKFenceWatch final : public VKResource { +public: + explicit VKFenceWatch(); + ~VKFenceWatch(); + + /// Waits for the fence to be released. + void Wait(); + + /** + * Waits for a previous fence and watches a new one. + * @param new_fence New fence to wait to. + */ + void Watch(VKFence& new_fence); + + /** + * Checks if it's currently being watched and starts watching it if it's available. + * @returns True if a watch has started, false if it's being watched. + */ + bool TryWatch(VKFence& new_fence); + + void OnFenceRemoval(VKFence* signaling_fence) override; + +private: + VKFence* fence{}; ///< Fence watching this resource. nullptr when the watch is free. +}; + +/** + * Handles a pool of resources protected by fences. Manages resource overflow allocating more + * resources. + */ +class VKFencedPool { +public: + explicit VKFencedPool(std::size_t grow_step); + virtual ~VKFencedPool(); + +protected: + /** + * Commits a free resource and protects it with a fence. It may allocate new resources. + * @param fence Fence that protects the commited resource. + * @returns Index of the resource commited. + */ + std::size_t CommitResource(VKFence& fence); + + /// Called when a chunk of resources have to be allocated. + virtual void Allocate(std::size_t begin, std::size_t end) = 0; + +private: + /// Manages pool overflow allocating new resources. 
+ std::size_t ManageOverflow(); + + /// Allocates a new page of resources. + void Grow(); + + std::size_t grow_step = 0; ///< Number of new resources created after an overflow + std::size_t free_iterator = 0; ///< Hint to where the next free resources is likely to be found + std::vector<std::unique_ptr<VKFenceWatch>> watches; ///< Set of watched resources +}; + +/** + * The resource manager handles all resources that can be protected with a fence avoiding + * driver-side or GPU-side concurrent usage. Usage is documented in VKFence. + */ +class VKResourceManager final { +public: + explicit VKResourceManager(const VKDevice& device); + ~VKResourceManager(); + + /// Commits a fence. It has to be sent to a queue and released. + VKFence& CommitFence(); + + /// Commits an unused command buffer and protects it with a fence. + vk::CommandBuffer CommitCommandBuffer(VKFence& fence); + +private: + /// Allocates new fences. + void GrowFences(std::size_t new_fences_count); + + const VKDevice& device; ///< Device handler. + std::size_t fences_iterator = 0; ///< Index where a free fence is likely to be found. + std::vector<std::unique_ptr<VKFence>> fences; ///< Pool of fences. + std::unique_ptr<CommandBufferPool> command_buffer_pool; ///< Pool of command buffers. +}; + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp new file mode 100644 index 000000000..ed3178f09 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp @@ -0,0 +1,81 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <cstring> +#include <optional> +#include <unordered_map> + +#include "common/assert.h" +#include "common/cityhash.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/maxwell_to_vk.h" +#include "video_core/renderer_vulkan/vk_sampler_cache.h" +#include "video_core/textures/texture.h" + +namespace Vulkan { + +static std::optional<vk::BorderColor> TryConvertBorderColor(std::array<float, 4> color) { + // TODO(Rodrigo): Manage integer border colors + if (color == std::array<float, 4>{0, 0, 0, 0}) { + return vk::BorderColor::eFloatTransparentBlack; + } else if (color == std::array<float, 4>{0, 0, 0, 1}) { + return vk::BorderColor::eFloatOpaqueBlack; + } else if (color == std::array<float, 4>{1, 1, 1, 1}) { + return vk::BorderColor::eFloatOpaqueWhite; + } else { + return {}; + } +} + +std::size_t SamplerCacheKey::Hash() const { + static_assert(sizeof(raw) % sizeof(u64) == 0); + return static_cast<std::size_t>( + Common::CityHash64(reinterpret_cast<const char*>(raw.data()), sizeof(raw) / sizeof(u64))); +} + +bool SamplerCacheKey::operator==(const SamplerCacheKey& rhs) const { + return raw == rhs.raw; +} + +VKSamplerCache::VKSamplerCache(const VKDevice& device) : device{device} {} + +VKSamplerCache::~VKSamplerCache() = default; + +vk::Sampler VKSamplerCache::GetSampler(const Tegra::Texture::TSCEntry& tsc) { + const auto [entry, is_cache_miss] = cache.try_emplace(SamplerCacheKey{tsc}); + auto& sampler = entry->second; + if (is_cache_miss) { + sampler = CreateSampler(tsc); + } + return *sampler; +} + +UniqueSampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) { + const float max_anisotropy = tsc.GetMaxAnisotropy(); + const bool has_anisotropy = max_anisotropy > 1.0f; + + const auto border_color = tsc.GetBorderColor(); + const auto vk_border_color = 
TryConvertBorderColor(border_color); + UNIMPLEMENTED_IF_MSG(!vk_border_color, "Unimplemented border color {} {} {} {}", + border_color[0], border_color[1], border_color[2], border_color[3]); + + constexpr bool unnormalized_coords = false; + + const vk::SamplerCreateInfo sampler_ci( + {}, MaxwellToVK::Sampler::Filter(tsc.mag_filter), + MaxwellToVK::Sampler::Filter(tsc.min_filter), + MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter), + MaxwellToVK::Sampler::WrapMode(tsc.wrap_u), MaxwellToVK::Sampler::WrapMode(tsc.wrap_v), + MaxwellToVK::Sampler::WrapMode(tsc.wrap_p), tsc.GetLodBias(), has_anisotropy, + max_anisotropy, tsc.depth_compare_enabled, + MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func), tsc.GetMinLod(), + tsc.GetMaxLod(), vk_border_color.value_or(vk::BorderColor::eFloatTransparentBlack), + unnormalized_coords); + + const auto& dld = device.GetDispatchLoader(); + const auto dev = device.GetLogical(); + return dev.createSamplerUnique(sampler_ci, nullptr, dld); +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.h b/src/video_core/renderer_vulkan/vk_sampler_cache.h new file mode 100644 index 000000000..c6394dc87 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_sampler_cache.h @@ -0,0 +1,56 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <unordered_map> + +#include "common/common_types.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/textures/texture.h" + +namespace Vulkan { + +class VKDevice; + +struct SamplerCacheKey final : public Tegra::Texture::TSCEntry { + std::size_t Hash() const; + + bool operator==(const SamplerCacheKey& rhs) const; + + bool operator!=(const SamplerCacheKey& rhs) const { + return !operator==(rhs); + } +}; + +} // namespace Vulkan + +namespace std { + +template <> +struct hash<Vulkan::SamplerCacheKey> { + std::size_t operator()(const Vulkan::SamplerCacheKey& k) const noexcept { + return k.Hash(); + } +}; + +} // namespace std + +namespace Vulkan { + +class VKSamplerCache { +public: + explicit VKSamplerCache(const VKDevice& device); + ~VKSamplerCache(); + + vk::Sampler GetSampler(const Tegra::Texture::TSCEntry& tsc); + +private: + UniqueSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc); + + const VKDevice& device; + std::unordered_map<SamplerCacheKey, UniqueSampler> cache; +}; + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp new file mode 100644 index 000000000..f1fea1871 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp @@ -0,0 +1,60 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
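+
+// Rough usage sketch of the scheduler (illustrative only; the recorded commands are placeholders):
+//
+//     VKExecutionContext exctx = scheduler.GetExecutionContext();
+//     exctx.GetCommandBuffer().bindPipeline(...); // record work into the current context
+//     exctx = scheduler.Flush();                  // submit it and receive a fresh context
+//
+// Finish() behaves like Flush() but also waits on the submitted fence before returning.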
+ +#include "common/assert.h" +#include "common/logging/log.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/renderer_vulkan/vk_resource_manager.h" +#include "video_core/renderer_vulkan/vk_scheduler.h" + +namespace Vulkan { + +VKScheduler::VKScheduler(const VKDevice& device, VKResourceManager& resource_manager) + : device{device}, resource_manager{resource_manager} { + next_fence = &resource_manager.CommitFence(); + AllocateNewContext(); +} + +VKScheduler::~VKScheduler() = default; + +VKExecutionContext VKScheduler::GetExecutionContext() const { + return VKExecutionContext(current_fence, current_cmdbuf); +} + +VKExecutionContext VKScheduler::Flush(vk::Semaphore semaphore) { + SubmitExecution(semaphore); + current_fence->Release(); + AllocateNewContext(); + return GetExecutionContext(); +} + +VKExecutionContext VKScheduler::Finish(vk::Semaphore semaphore) { + SubmitExecution(semaphore); + current_fence->Wait(); + current_fence->Release(); + AllocateNewContext(); + return GetExecutionContext(); +} + +void VKScheduler::SubmitExecution(vk::Semaphore semaphore) { + const auto& dld = device.GetDispatchLoader(); + current_cmdbuf.end(dld); + + const auto queue = device.GetGraphicsQueue(); + const vk::SubmitInfo submit_info(0, nullptr, nullptr, 1, ¤t_cmdbuf, semaphore ? 1u : 0u, + &semaphore); + queue.submit({submit_info}, *current_fence, dld); +} + +void VKScheduler::AllocateNewContext() { + current_fence = next_fence; + current_cmdbuf = resource_manager.CommitCommandBuffer(*current_fence); + next_fence = &resource_manager.CommitFence(); + + const auto& dld = device.GetDispatchLoader(); + current_cmdbuf.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit}, dld); +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h new file mode 100644 index 000000000..cfaf5376f --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_scheduler.h @@ -0,0 +1,69 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_types.h" +#include "video_core/renderer_vulkan/declarations.h" + +namespace Vulkan { + +class VKDevice; +class VKExecutionContext; +class VKFence; +class VKResourceManager; + +/// The scheduler abstracts command buffer and fence management with an interface that's able to do +/// OpenGL-like operations on Vulkan command buffers. +class VKScheduler { +public: + explicit VKScheduler(const VKDevice& device, VKResourceManager& resource_manager); + ~VKScheduler(); + + /// Gets the current execution context. + [[nodiscard]] VKExecutionContext GetExecutionContext() const; + + /// Sends the current execution context to the GPU. It invalidates the current execution context + /// and returns a new one. + VKExecutionContext Flush(vk::Semaphore semaphore = nullptr); + + /// Sends the current execution context to the GPU and waits for it to complete. It invalidates + /// the current execution context and returns a new one. 
+ VKExecutionContext Finish(vk::Semaphore semaphore = nullptr); + +private: + void SubmitExecution(vk::Semaphore semaphore); + + void AllocateNewContext(); + + const VKDevice& device; + VKResourceManager& resource_manager; + vk::CommandBuffer current_cmdbuf; + VKFence* current_fence = nullptr; + VKFence* next_fence = nullptr; +}; + +class VKExecutionContext { + friend class VKScheduler; + +public: + VKExecutionContext() = default; + + VKFence& GetFence() const { + return *fence; + } + + vk::CommandBuffer GetCommandBuffer() const { + return cmdbuf; + } + +private: + explicit VKExecutionContext(VKFence* fence, vk::CommandBuffer cmdbuf) + : fence{fence}, cmdbuf{cmdbuf} {} + + VKFence* fence{}; + vk::CommandBuffer cmdbuf; +}; + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp new file mode 100644 index 000000000..58ffa42f2 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp @@ -0,0 +1,90 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <algorithm> +#include <memory> +#include <optional> +#include <vector> + +#include "common/assert.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/renderer_vulkan/vk_memory_manager.h" +#include "video_core/renderer_vulkan/vk_resource_manager.h" +#include "video_core/renderer_vulkan/vk_scheduler.h" +#include "video_core/renderer_vulkan/vk_stream_buffer.h" + +namespace Vulkan { + +constexpr u64 WATCHES_INITIAL_RESERVE = 0x4000; +constexpr u64 WATCHES_RESERVE_CHUNK = 0x1000; + +VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKMemoryManager& memory_manager, + VKScheduler& scheduler, u64 size, vk::BufferUsageFlags usage, + vk::AccessFlags access, vk::PipelineStageFlags pipeline_stage) + : device{device}, scheduler{scheduler}, buffer_size{size}, access{access}, pipeline_stage{ + pipeline_stage} { + CreateBuffers(memory_manager, usage); + ReserveWatches(WATCHES_INITIAL_RESERVE); +} + +VKStreamBuffer::~VKStreamBuffer() = default; + +std::tuple<u8*, u64, bool> VKStreamBuffer::Reserve(u64 size) { + ASSERT(size <= buffer_size); + mapped_size = size; + + if (offset + size > buffer_size) { + // The buffer would overflow, save the amount of used buffers, signal an invalidation and + // reset the state. + invalidation_mark = used_watches; + used_watches = 0; + offset = 0; + } + + return {mapped_pointer + offset, offset, invalidation_mark.has_value()}; +} + +VKExecutionContext VKStreamBuffer::Send(VKExecutionContext exctx, u64 size) { + ASSERT_MSG(size <= mapped_size, "Reserved size is too small"); + + if (invalidation_mark) { + // TODO(Rodrigo): Find a better way to invalidate than waiting for all watches to finish. + exctx = scheduler.Flush(); + std::for_each(watches.begin(), watches.begin() + *invalidation_mark, + [&](auto& resource) { resource->Wait(); }); + invalidation_mark = std::nullopt; + } + + if (used_watches + 1 >= watches.size()) { + // Ensure that there are enough watches. + ReserveWatches(WATCHES_RESERVE_CHUNK); + } + // Add a watch for this allocation. 
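+    // The watch ties this chunk of the stream buffer to the fence of the current execution
+    // context; once the buffer wraps, Send() waits on the previously recorded watches before
+    // clearing the invalidation mark.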
+ watches[used_watches++]->Watch(exctx.GetFence()); + + offset += size; + + return exctx; +} + +void VKStreamBuffer::CreateBuffers(VKMemoryManager& memory_manager, vk::BufferUsageFlags usage) { + const vk::BufferCreateInfo buffer_ci({}, buffer_size, usage, vk::SharingMode::eExclusive, 0, + nullptr); + + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + buffer = dev.createBufferUnique(buffer_ci, nullptr, dld); + commit = memory_manager.Commit(*buffer, true); + mapped_pointer = commit->GetData(); +} + +void VKStreamBuffer::ReserveWatches(std::size_t grow_size) { + const std::size_t previous_size = watches.size(); + watches.resize(previous_size + grow_size); + std::generate(watches.begin() + previous_size, watches.end(), + []() { return std::make_unique<VKFenceWatch>(); }); +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h new file mode 100644 index 000000000..69d036ccd --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h @@ -0,0 +1,72 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <memory> +#include <optional> +#include <tuple> +#include <vector> + +#include "common/common_types.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_memory_manager.h" + +namespace Vulkan { + +class VKDevice; +class VKFence; +class VKFenceWatch; +class VKResourceManager; +class VKScheduler; + +class VKStreamBuffer { +public: + explicit VKStreamBuffer(const VKDevice& device, VKMemoryManager& memory_manager, + VKScheduler& scheduler, u64 size, vk::BufferUsageFlags usage, + vk::AccessFlags access, vk::PipelineStageFlags pipeline_stage); + ~VKStreamBuffer(); + + /** + * Reserves a region of memory from the stream buffer. + * @param size Size to reserve. + * @returns A tuple in the following order: Raw memory pointer (with offset added), buffer + * offset and a boolean that's true when buffer has been invalidated. + */ + std::tuple<u8*, u64, bool> Reserve(u64 size); + + /// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy. + [[nodiscard]] VKExecutionContext Send(VKExecutionContext exctx, u64 size); + + vk::Buffer GetBuffer() const { + return *buffer; + } + +private: + /// Creates Vulkan buffer handles committing the required the required memory. + void CreateBuffers(VKMemoryManager& memory_manager, vk::BufferUsageFlags usage); + + /// Increases the amount of watches available. + void ReserveWatches(std::size_t grow_size); + + const VKDevice& device; ///< Vulkan device manager. + VKScheduler& scheduler; ///< Command scheduler. + const u64 buffer_size; ///< Total size of the stream buffer. + const vk::AccessFlags access; ///< Access usage of this stream buffer. + const vk::PipelineStageFlags pipeline_stage; ///< Pipeline usage of this stream buffer. + + UniqueBuffer buffer; ///< Mapped buffer. + VKMemoryCommit commit; ///< Memory commit. + u8* mapped_pointer{}; ///< Pointer to the host visible commit + + u64 offset{}; ///< Buffer iterator. + u64 mapped_size{}; ///< Size reserved for the current copy. + + std::vector<std::unique_ptr<VKFenceWatch>> watches; ///< Total watches + std::size_t used_watches{}; ///< Count of watches, reset on invalidation. + std::optional<std::size_t> + invalidation_mark{}; ///< Number of watches used in the current invalidation. 
+}; + +} // namespace Vulkan diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp index 740ac3118..e4c438792 100644 --- a/src/video_core/shader/decode.cpp +++ b/src/video_core/shader/decode.cpp @@ -165,6 +165,7 @@ u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) { {OpCode::Type::Hfma2, &ShaderIR::DecodeHfma2}, {OpCode::Type::Conversion, &ShaderIR::DecodeConversion}, {OpCode::Type::Memory, &ShaderIR::DecodeMemory}, + {OpCode::Type::Texture, &ShaderIR::DecodeTexture}, {OpCode::Type::FloatSetPredicate, &ShaderIR::DecodeFloatSetPredicate}, {OpCode::Type::IntegerSetPredicate, &ShaderIR::DecodeIntegerSetPredicate}, {OpCode::Type::HalfSetPredicate, &ShaderIR::DecodeHalfSetPredicate}, diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp index 55ec601ff..ea3c71eed 100644 --- a/src/video_core/shader/decode/memory.cpp +++ b/src/video_core/shader/decode/memory.cpp @@ -17,24 +17,6 @@ using Tegra::Shader::Attribute; using Tegra::Shader::Instruction; using Tegra::Shader::OpCode; using Tegra::Shader::Register; -using Tegra::Shader::TextureMiscMode; -using Tegra::Shader::TextureProcessMode; -using Tegra::Shader::TextureType; - -static std::size_t GetCoordCount(TextureType texture_type) { - switch (texture_type) { - case TextureType::Texture1D: - return 1; - case TextureType::Texture2D: - return 2; - case TextureType::Texture3D: - case TextureType::TextureCube: - return 3; - default: - UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast<u32>(texture_type)); - return 0; - } -} u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; @@ -48,7 +30,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0, "Unaligned attribute loads are not supported"); - Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Perspective, + Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Pass, Tegra::Shader::IpaSampleMode::Default}; u64 next_element = instr.attribute.fmt20.element; @@ -247,194 +229,6 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { } break; } - case OpCode::Id::TEX: { - UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - - if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TEX.NODEP implementation is incomplete"); - } - - const TextureType texture_type{instr.tex.texture_type}; - const bool is_array = instr.tex.array != 0; - const bool depth_compare = instr.tex.UsesMiscMode(TextureMiscMode::DC); - const auto process_mode = instr.tex.GetTextureProcessMode(); - WriteTexInstructionFloat( - bb, instr, GetTexCode(instr, texture_type, process_mode, depth_compare, is_array)); - break; - } - case OpCode::Id::TEXS: { - const TextureType texture_type{instr.texs.GetTextureType()}; - const bool is_array{instr.texs.IsArrayTexture()}; - const bool depth_compare = instr.texs.UsesMiscMode(TextureMiscMode::DC); - const auto process_mode = instr.texs.GetTextureProcessMode(); - - if (instr.texs.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TEXS.NODEP implementation is incomplete"); - } - - const Node4 components = - GetTexsCode(instr, texture_type, process_mode, depth_compare, is_array); - - if (instr.texs.fp32_flag) { - WriteTexsInstructionFloat(bb, instr, components); - } else { - WriteTexsInstructionHalfFloat(bb, instr, components); - } - break; - } - case OpCode::Id::TLD4: { - ASSERT(instr.tld4.array == 0); - 
UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV), - "NDV is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::PTP), - "PTP is not implemented"); - - if (instr.tld4.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TLD4.NODEP implementation is incomplete"); - } - - const auto texture_type = instr.tld4.texture_type.Value(); - const bool depth_compare = instr.tld4.UsesMiscMode(TextureMiscMode::DC); - const bool is_array = instr.tld4.array != 0; - WriteTexInstructionFloat(bb, instr, - GetTld4Code(instr, texture_type, depth_compare, is_array)); - break; - } - case OpCode::Id::TLD4S: { - UNIMPLEMENTED_IF_MSG(instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - if (instr.tld4s.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TLD4S.NODEP implementation is incomplete"); - } - - const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC); - const Node op_a = GetRegister(instr.gpr8); - const Node op_b = GetRegister(instr.gpr20); - - // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction. - std::vector<Node> coords; - if (depth_compare) { - // Note: TLD4S coordinate encoding works just like TEXS's - const Node op_y = GetRegister(instr.gpr8.Value() + 1); - coords.push_back(op_a); - coords.push_back(op_y); - coords.push_back(op_b); - } else { - coords.push_back(op_a); - coords.push_back(op_b); - } - std::vector<Node> extras; - extras.push_back(Immediate(static_cast<u32>(instr.tld4s.component))); - - const auto& sampler = - GetSampler(instr.sampler, TextureType::Texture2D, false, depth_compare); - - Node4 values; - for (u32 element = 0; element < values.size(); ++element) { - auto coords_copy = coords; - MetaTexture meta{sampler, {}, {}, extras, element}; - values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); - } - - WriteTexsInstructionFloat(bb, instr, values); - break; - } - case OpCode::Id::TXQ: { - if (instr.txq.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TXQ.NODEP implementation is incomplete"); - } - - // TODO: The new commits on the texture refactor, change the way samplers work. - // Sadly, not all texture instructions specify the type of texture their sampler - // uses. This must be fixed at a later instance. 
- const auto& sampler = - GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false); - - u32 indexer = 0; - switch (instr.txq.query_type) { - case Tegra::Shader::TextureQueryType::Dimension: { - for (u32 element = 0; element < 4; ++element) { - if (!instr.txq.IsComponentEnabled(element)) { - continue; - } - MetaTexture meta{sampler, {}, {}, {}, element}; - const Node value = - Operation(OperationCode::TextureQueryDimensions, meta, GetRegister(instr.gpr8)); - SetTemporal(bb, indexer++, value); - } - for (u32 i = 0; i < indexer; ++i) { - SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); - } - break; - } - default: - UNIMPLEMENTED_MSG("Unhandled texture query type: {}", - static_cast<u32>(instr.txq.query_type.Value())); - } - break; - } - case OpCode::Id::TMML: { - UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), - "NDV is not implemented"); - - if (instr.tmml.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete"); - } - - auto texture_type = instr.tmml.texture_type.Value(); - const bool is_array = instr.tmml.array != 0; - const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false); - - std::vector<Node> coords; - - // TODO: Add coordinates for different samplers once other texture types are implemented. - switch (texture_type) { - case TextureType::Texture1D: - coords.push_back(GetRegister(instr.gpr8)); - break; - case TextureType::Texture2D: - coords.push_back(GetRegister(instr.gpr8.Value() + 0)); - coords.push_back(GetRegister(instr.gpr8.Value() + 1)); - break; - default: - UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type)); - - // Fallback to interpreting as a 2D texture for now - coords.push_back(GetRegister(instr.gpr8.Value() + 0)); - coords.push_back(GetRegister(instr.gpr8.Value() + 1)); - texture_type = TextureType::Texture2D; - } - - for (u32 element = 0; element < 2; ++element) { - auto params = coords; - MetaTexture meta{sampler, {}, {}, {}, element}; - const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params)); - SetTemporal(bb, element, value); - } - for (u32 element = 0; element < 2; ++element) { - SetRegister(bb, instr.gpr0.Value() + element, GetTemporal(element)); - } - - break; - } - case OpCode::Id::TLDS: { - const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()}; - const bool is_array{instr.tlds.IsArrayTexture()}; - - UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::MZ), "MZ is not implemented"); - - if (instr.tlds.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TLDS.NODEP implementation is incomplete"); - } - - WriteTexsInstructionFloat(bb, instr, GetTldsCode(instr, texture_type, is_array)); - break; - } default: UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName()); } @@ -442,291 +236,4 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { return pc; } -const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, TextureType type, - bool is_array, bool is_shadow) { - const auto offset = static_cast<std::size_t>(sampler.index.Value()); - - // If this sampler has already been used, return the existing mapping. 
- const auto itr = - std::find_if(used_samplers.begin(), used_samplers.end(), - [&](const Sampler& entry) { return entry.GetOffset() == offset; }); - if (itr != used_samplers.end()) { - ASSERT(itr->GetType() == type && itr->IsArray() == is_array && - itr->IsShadow() == is_shadow); - return *itr; - } - - // Otherwise create a new mapping for this sampler - const std::size_t next_index = used_samplers.size(); - const Sampler entry{offset, next_index, type, is_array, is_shadow}; - return *used_samplers.emplace(entry).first; -} - -void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) { - u32 dest_elem = 0; - for (u32 elem = 0; elem < 4; ++elem) { - if (!instr.tex.IsComponentEnabled(elem)) { - // Skip disabled components - continue; - } - SetTemporal(bb, dest_elem++, components[elem]); - } - // After writing values in temporals, move them to the real registers - for (u32 i = 0; i < dest_elem; ++i) { - SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); - } -} - -void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr, - const Node4& components) { - // TEXS has two destination registers and a swizzle. The first two elements in the swizzle - // go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1 - - u32 dest_elem = 0; - for (u32 component = 0; component < 4; ++component) { - if (!instr.texs.IsComponentEnabled(component)) - continue; - SetTemporal(bb, dest_elem++, components[component]); - } - - for (u32 i = 0; i < dest_elem; ++i) { - if (i < 2) { - // Write the first two swizzle components to gpr0 and gpr0+1 - SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporal(i)); - } else { - ASSERT(instr.texs.HasTwoDestinations()); - // Write the rest of the swizzle components to gpr28 and gpr28+1 - SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporal(i)); - } - } -} - -void ShaderIR::WriteTexsInstructionHalfFloat(NodeBlock& bb, Instruction instr, - const Node4& components) { - // TEXS.F16 destionation registers are packed in two registers in pairs (just like any half - // float instruction). 
- - Node4 values; - u32 dest_elem = 0; - for (u32 component = 0; component < 4; ++component) { - if (!instr.texs.IsComponentEnabled(component)) - continue; - values[dest_elem++] = components[component]; - } - if (dest_elem == 0) - return; - - std::generate(values.begin() + dest_elem, values.end(), [&]() { return Immediate(0); }); - - const Node first_value = Operation(OperationCode::HPack2, values[0], values[1]); - if (dest_elem <= 2) { - SetRegister(bb, instr.gpr0, first_value); - return; - } - - SetTemporal(bb, 0, first_value); - SetTemporal(bb, 1, Operation(OperationCode::HPack2, values[2], values[3])); - - SetRegister(bb, instr.gpr0, GetTemporal(0)); - SetRegister(bb, instr.gpr28, GetTemporal(1)); -} - -Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type, - TextureProcessMode process_mode, std::vector<Node> coords, - Node array, Node depth_compare, u32 bias_offset) { - const bool is_array = array; - const bool is_shadow = depth_compare; - - UNIMPLEMENTED_IF_MSG((texture_type == TextureType::Texture3D && (is_array || is_shadow)) || - (texture_type == TextureType::TextureCube && is_array && is_shadow), - "This method is not supported."); - - const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, is_shadow); - - const bool lod_needed = process_mode == TextureProcessMode::LZ || - process_mode == TextureProcessMode::LL || - process_mode == TextureProcessMode::LLA; - - // LOD selection (either via bias or explicit textureLod) not supported in GL for - // sampler2DArrayShadow and samplerCubeArrayShadow. - const bool gl_lod_supported = - !((texture_type == Tegra::Shader::TextureType::Texture2D && is_array && is_shadow) || - (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && is_shadow)); - - const OperationCode read_method = - lod_needed && gl_lod_supported ? OperationCode::TextureLod : OperationCode::Texture; - - UNIMPLEMENTED_IF(process_mode != TextureProcessMode::None && !gl_lod_supported); - - std::vector<Node> extras; - if (process_mode != TextureProcessMode::None && gl_lod_supported) { - if (process_mode == TextureProcessMode::LZ) { - extras.push_back(Immediate(0.0f)); - } else { - // If present, lod or bias are always stored in the register indexed by the gpr20 - // field with an offset depending on the usage of the other registers - extras.push_back(GetRegister(instr.gpr20.Value() + bias_offset)); - } - } - - Node4 values; - for (u32 element = 0; element < values.size(); ++element) { - auto copy_coords = coords; - MetaTexture meta{sampler, array, depth_compare, extras, element}; - values[element] = Operation(read_method, meta, std::move(copy_coords)); - } - - return values; -} - -Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type, - TextureProcessMode process_mode, bool depth_compare, bool is_array) { - const bool lod_bias_enabled = - (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ); - - const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement( - texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5); - // If enabled arrays index is always stored in the gpr8 field - const u64 array_register = instr.gpr8.Value(); - // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used - const u64 coord_register = array_register + (is_array ? 1 : 0); - - std::vector<Node> coords; - for (std::size_t i = 0; i < coord_count; ++i) { - coords.push_back(GetRegister(coord_register + i)); - } - // 1D.DC in OpenGL the 2nd component is ignored. 
- if (depth_compare && !is_array && texture_type == TextureType::Texture1D) { - coords.push_back(Immediate(0.0f)); - } - - const Node array = is_array ? GetRegister(array_register) : nullptr; - - Node dc{}; - if (depth_compare) { - // Depth is always stored in the register signaled by gpr20 or in the next register if lod - // or bias are used - const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0); - dc = GetRegister(depth_register); - } - - return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, 0); -} - -Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type, - TextureProcessMode process_mode, bool depth_compare, bool is_array) { - const bool lod_bias_enabled = - (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ); - - const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement( - texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4); - // If enabled arrays index is always stored in the gpr8 field - const u64 array_register = instr.gpr8.Value(); - // First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used - const u64 coord_register = array_register + (is_array ? 1 : 0); - const u64 last_coord_register = - (is_array || !(lod_bias_enabled || depth_compare) || (coord_count > 2)) - ? static_cast<u64>(instr.gpr20.Value()) - : coord_register + 1; - const u32 bias_offset = coord_count > 2 ? 1 : 0; - - std::vector<Node> coords; - for (std::size_t i = 0; i < coord_count; ++i) { - const bool last = (i == (coord_count - 1)) && (coord_count > 1); - coords.push_back(GetRegister(last ? last_coord_register : coord_register + i)); - } - - const Node array = is_array ? GetRegister(array_register) : nullptr; - - Node dc{}; - if (depth_compare) { - // Depth is always stored in the register signaled by gpr20 or in the next register if lod - // or bias are used - const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0); - dc = GetRegister(depth_register); - } - - return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, bias_offset); -} - -Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare, - bool is_array) { - const std::size_t coord_count = GetCoordCount(texture_type); - const std::size_t total_coord_count = coord_count + (is_array ? 1 : 0); - const std::size_t total_reg_count = total_coord_count + (depth_compare ? 1 : 0); - - // If enabled arrays index is always stored in the gpr8 field - const u64 array_register = instr.gpr8.Value(); - // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used - const u64 coord_register = array_register + (is_array ? 
1 : 0); - - std::vector<Node> coords; - for (size_t i = 0; i < coord_count; ++i) - coords.push_back(GetRegister(coord_register + i)); - - const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare); - - Node4 values; - for (u32 element = 0; element < values.size(); ++element) { - auto coords_copy = coords; - MetaTexture meta{sampler, GetRegister(array_register), {}, {}, element}; - values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); - } - - return values; -} - -Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) { - const std::size_t type_coord_count = GetCoordCount(texture_type); - const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL; - - // If enabled arrays index is always stored in the gpr8 field - const u64 array_register = instr.gpr8.Value(); - // if is array gpr20 is used - const u64 coord_register = is_array ? instr.gpr20.Value() : instr.gpr8.Value(); - - const u64 last_coord_register = - ((type_coord_count > 2) || (type_coord_count == 2 && !lod_enabled)) && !is_array - ? static_cast<u64>(instr.gpr20.Value()) - : coord_register + 1; - - std::vector<Node> coords; - for (std::size_t i = 0; i < type_coord_count; ++i) { - const bool last = (i == (type_coord_count - 1)) && (type_coord_count > 1); - coords.push_back(GetRegister(last ? last_coord_register : coord_register + i)); - } - - const Node array = is_array ? GetRegister(array_register) : nullptr; - // When lod is used always is in gpr20 - const Node lod = lod_enabled ? GetRegister(instr.gpr20) : Immediate(0); - - const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false); - - Node4 values; - for (u32 element = 0; element < values.size(); ++element) { - auto coords_copy = coords; - MetaTexture meta{sampler, array, {}, {lod}, element}; - values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy)); - } - return values; -} - -std::tuple<std::size_t, std::size_t> ShaderIR::ValidateAndGetCoordinateElement( - TextureType texture_type, bool depth_compare, bool is_array, bool lod_bias_enabled, - std::size_t max_coords, std::size_t max_inputs) { - const std::size_t coord_count = GetCoordCount(texture_type); - - std::size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0); - const std::size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0); - if (total_coord_count > max_coords || total_reg_count > max_inputs) { - UNIMPLEMENTED_MSG("Unsupported Texture operation"); - total_coord_count = std::min(total_coord_count, max_coords); - } - // 1D.DC OpenGL is using a vec3 but 2nd component is ignored later. - total_coord_count += - (depth_compare && !is_array && texture_type == TextureType::Texture1D) ? 
1 : 0; - - return {coord_count, total_coord_count}; -} - } // namespace VideoCommon::Shader diff --git a/src/video_core/shader/decode/other.cpp b/src/video_core/shader/decode/other.cpp index f9502e3d0..d750a2936 100644 --- a/src/video_core/shader/decode/other.cpp +++ b/src/video_core/shader/decode/other.cpp @@ -135,7 +135,18 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { instr.ipa.sample_mode.Value()}; const Node attr = GetInputAttribute(attribute.index, attribute.element, input_mode); - const Node value = GetSaturatedFloat(attr, instr.ipa.saturate); + Node value = attr; + const Tegra::Shader::Attribute::Index index = attribute.index.Value(); + if (index >= Tegra::Shader::Attribute::Index::Attribute_0 && + index <= Tegra::Shader::Attribute::Index::Attribute_31) { + // TODO(Blinkhawk): There are cases where a perspective attribute uses PASS. + // In theory by setting them as perspective, OpenGL does the perspective correction. + // A way must be figured out to reverse the last step of it. + if (input_mode.interpolation_mode == Tegra::Shader::IpaInterpMode::Multiply) { + value = Operation(OperationCode::FMul, PRECISE, value, GetRegister(instr.gpr20)); + } + } + value = GetSaturatedFloat(value, instr.ipa.saturate); SetRegister(bb, instr.gpr0, value); break; @@ -175,4 +186,4 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { return pc; } -} // namespace VideoCommon::Shader
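For readers unfamiliar with the IPA hunk above: when the interpolation mode is Multiply, the decoded IR simply scales the raw interpolated attribute by whatever value is already in gpr20 before applying saturation. A minimal standalone sketch of that arithmetic (the function and parameter names are illustrative, not yuzu code):

    #include <algorithm>

    // Sketch only: mirrors the decoded sequence FMul(attr, gpr20) followed by
    // GetSaturatedFloat when the instruction's saturate flag is set.
    float DecodeIpaMultiply(float interpolated_attribute, float gpr20_value, bool saturate) {
        float value = interpolated_attribute * gpr20_value;
        if (saturate) {
            value = std::clamp(value, 0.0f, 1.0f);
        }
        return value;
    }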
\ No newline at end of file +} // namespace VideoCommon::Shader diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp new file mode 100644 index 000000000..a99ae19bf --- /dev/null +++ b/src/video_core/shader/decode/texture.cpp @@ -0,0 +1,534 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <algorithm> +#include <vector> +#include <fmt/format.h> + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Register; +using Tegra::Shader::TextureMiscMode; +using Tegra::Shader::TextureProcessMode; +using Tegra::Shader::TextureType; + +static std::size_t GetCoordCount(TextureType texture_type) { + switch (texture_type) { + case TextureType::Texture1D: + return 1; + case TextureType::Texture2D: + return 2; + case TextureType::Texture3D: + case TextureType::TextureCube: + return 3; + default: + UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast<u32>(texture_type)); + return 0; + } +} + +u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + switch (opcode->get().GetId()) { + case OpCode::Id::TEX: { + UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + + if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TEX.NODEP implementation is incomplete"); + } + + const TextureType texture_type{instr.tex.texture_type}; + const bool is_array = instr.tex.array != 0; + const bool depth_compare = instr.tex.UsesMiscMode(TextureMiscMode::DC); + const auto process_mode = instr.tex.GetTextureProcessMode(); + WriteTexInstructionFloat( + bb, instr, GetTexCode(instr, texture_type, process_mode, depth_compare, is_array)); + break; + } + case OpCode::Id::TEXS: { + const TextureType texture_type{instr.texs.GetTextureType()}; + const bool is_array{instr.texs.IsArrayTexture()}; + const bool depth_compare = instr.texs.UsesMiscMode(TextureMiscMode::DC); + const auto process_mode = instr.texs.GetTextureProcessMode(); + + if (instr.texs.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TEXS.NODEP implementation is incomplete"); + } + + const Node4 components = + GetTexsCode(instr, texture_type, process_mode, depth_compare, is_array); + + if (instr.texs.fp32_flag) { + WriteTexsInstructionFloat(bb, instr, components); + } else { + WriteTexsInstructionHalfFloat(bb, instr, components); + } + break; + } + case OpCode::Id::TLD4: { + ASSERT(instr.tld4.array == 0); + UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV), + "NDV is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::PTP), + "PTP is not implemented"); + + if (instr.tld4.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TLD4.NODEP implementation is incomplete"); + } + + const auto texture_type = instr.tld4.texture_type.Value(); + const bool depth_compare = instr.tld4.UsesMiscMode(TextureMiscMode::DC); + const bool is_array = instr.tld4.array != 0; + WriteTexInstructionFloat(bb, instr, + GetTld4Code(instr, texture_type, depth_compare, is_array)); + break; + } + case 
OpCode::Id::TLD4S: { + UNIMPLEMENTED_IF_MSG(instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + if (instr.tld4s.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TLD4S.NODEP implementation is incomplete"); + } + + const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC); + const Node op_a = GetRegister(instr.gpr8); + const Node op_b = GetRegister(instr.gpr20); + + // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction. + std::vector<Node> coords; + if (depth_compare) { + // Note: TLD4S coordinate encoding works just like TEXS's + const Node op_y = GetRegister(instr.gpr8.Value() + 1); + coords.push_back(op_a); + coords.push_back(op_y); + coords.push_back(op_b); + } else { + coords.push_back(op_a); + coords.push_back(op_b); + } + const Node component = Immediate(static_cast<u32>(instr.tld4s.component)); + + const auto& sampler = + GetSampler(instr.sampler, TextureType::Texture2D, false, depth_compare); + + Node4 values; + for (u32 element = 0; element < values.size(); ++element) { + auto coords_copy = coords; + MetaTexture meta{sampler, {}, {}, {}, {}, component, element}; + values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); + } + + WriteTexsInstructionFloat(bb, instr, values); + break; + } + case OpCode::Id::TXQ: { + if (instr.txq.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TXQ.NODEP implementation is incomplete"); + } + + // TODO: The new commits on the texture refactor, change the way samplers work. + // Sadly, not all texture instructions specify the type of texture their sampler + // uses. This must be fixed at a later instance. + const auto& sampler = + GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false); + + u32 indexer = 0; + switch (instr.txq.query_type) { + case Tegra::Shader::TextureQueryType::Dimension: { + for (u32 element = 0; element < 4; ++element) { + if (!instr.txq.IsComponentEnabled(element)) { + continue; + } + MetaTexture meta{sampler, {}, {}, {}, {}, {}, element}; + const Node value = + Operation(OperationCode::TextureQueryDimensions, meta, GetRegister(instr.gpr8)); + SetTemporal(bb, indexer++, value); + } + for (u32 i = 0; i < indexer; ++i) { + SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); + } + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled texture query type: {}", + static_cast<u32>(instr.txq.query_type.Value())); + } + break; + } + case OpCode::Id::TMML: { + UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), + "NDV is not implemented"); + + if (instr.tmml.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete"); + } + + auto texture_type = instr.tmml.texture_type.Value(); + const bool is_array = instr.tmml.array != 0; + const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false); + + std::vector<Node> coords; + + // TODO: Add coordinates for different samplers once other texture types are implemented. 
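The TXQ dimension query above compacts only the enabled components into consecutive destination registers, the same enable-mask packing used by the TEX write helpers in this file. A small host-side sketch of that pattern (the helper name and callback are assumptions for illustration):

    #include <array>
    #include <cstdint>
    #include <functional>

    // Disabled components are skipped entirely; the survivors land in
    // gpr0, gpr0+1, ... in their original order.
    void PackEnabledComponents(const std::array<float, 4>& components, std::uint32_t component_mask,
                               std::uint32_t dest_base_register,
                               const std::function<void(std::uint32_t, float)>& write_register) {
        std::uint32_t dest = 0;
        for (std::uint32_t elem = 0; elem < 4; ++elem) {
            if ((component_mask & (1u << elem)) == 0) {
                continue; // disabled component: not written, not even as zero
            }
            write_register(dest_base_register + dest++, components[elem]);
        }
    }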
+ switch (texture_type) { + case TextureType::Texture1D: + coords.push_back(GetRegister(instr.gpr8)); + break; + case TextureType::Texture2D: + coords.push_back(GetRegister(instr.gpr8.Value() + 0)); + coords.push_back(GetRegister(instr.gpr8.Value() + 1)); + break; + default: + UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type)); + + // Fallback to interpreting as a 2D texture for now + coords.push_back(GetRegister(instr.gpr8.Value() + 0)); + coords.push_back(GetRegister(instr.gpr8.Value() + 1)); + texture_type = TextureType::Texture2D; + } + + for (u32 element = 0; element < 2; ++element) { + auto params = coords; + MetaTexture meta{sampler, {}, {}, {}, {}, {}, element}; + const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params)); + SetTemporal(bb, element, value); + } + for (u32 element = 0; element < 2; ++element) { + SetRegister(bb, instr.gpr0.Value() + element, GetTemporal(element)); + } + + break; + } + case OpCode::Id::TLDS: { + const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()}; + const bool is_array{instr.tlds.IsArrayTexture()}; + + UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::MZ), "MZ is not implemented"); + + if (instr.tlds.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TLDS.NODEP implementation is incomplete"); + } + + WriteTexsInstructionFloat(bb, instr, GetTldsCode(instr, texture_type, is_array)); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, TextureType type, + bool is_array, bool is_shadow) { + const auto offset = static_cast<std::size_t>(sampler.index.Value()); + + // If this sampler has already been used, return the existing mapping. + const auto itr = + std::find_if(used_samplers.begin(), used_samplers.end(), + [&](const Sampler& entry) { return entry.GetOffset() == offset; }); + if (itr != used_samplers.end()) { + ASSERT(itr->GetType() == type && itr->IsArray() == is_array && + itr->IsShadow() == is_shadow); + return *itr; + } + + // Otherwise create a new mapping for this sampler + const std::size_t next_index = used_samplers.size(); + const Sampler entry{offset, next_index, type, is_array, is_shadow}; + return *used_samplers.emplace(entry).first; +} + +void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) { + u32 dest_elem = 0; + for (u32 elem = 0; elem < 4; ++elem) { + if (!instr.tex.IsComponentEnabled(elem)) { + // Skip disabled components + continue; + } + SetTemporal(bb, dest_elem++, components[elem]); + } + // After writing values in temporals, move them to the real registers + for (u32 i = 0; i < dest_elem; ++i) { + SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); + } +} + +void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr, + const Node4& components) { + // TEXS has two destination registers and a swizzle. 
The first two elements in the swizzle + // go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1 + + u32 dest_elem = 0; + for (u32 component = 0; component < 4; ++component) { + if (!instr.texs.IsComponentEnabled(component)) + continue; + SetTemporal(bb, dest_elem++, components[component]); + } + + for (u32 i = 0; i < dest_elem; ++i) { + if (i < 2) { + // Write the first two swizzle components to gpr0 and gpr0+1 + SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporal(i)); + } else { + ASSERT(instr.texs.HasTwoDestinations()); + // Write the rest of the swizzle components to gpr28 and gpr28+1 + SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporal(i)); + } + } +} + +void ShaderIR::WriteTexsInstructionHalfFloat(NodeBlock& bb, Instruction instr, + const Node4& components) { + // TEXS.F16 destionation registers are packed in two registers in pairs (just like any half + // float instruction). + + Node4 values; + u32 dest_elem = 0; + for (u32 component = 0; component < 4; ++component) { + if (!instr.texs.IsComponentEnabled(component)) + continue; + values[dest_elem++] = components[component]; + } + if (dest_elem == 0) + return; + + std::generate(values.begin() + dest_elem, values.end(), [&]() { return Immediate(0); }); + + const Node first_value = Operation(OperationCode::HPack2, values[0], values[1]); + if (dest_elem <= 2) { + SetRegister(bb, instr.gpr0, first_value); + return; + } + + SetTemporal(bb, 0, first_value); + SetTemporal(bb, 1, Operation(OperationCode::HPack2, values[2], values[3])); + + SetRegister(bb, instr.gpr0, GetTemporal(0)); + SetRegister(bb, instr.gpr28, GetTemporal(1)); +} + +Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type, + TextureProcessMode process_mode, std::vector<Node> coords, + Node array, Node depth_compare, u32 bias_offset) { + const bool is_array = array; + const bool is_shadow = depth_compare; + + UNIMPLEMENTED_IF_MSG((texture_type == TextureType::Texture3D && (is_array || is_shadow)) || + (texture_type == TextureType::TextureCube && is_array && is_shadow), + "This method is not supported."); + + const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, is_shadow); + + const bool lod_needed = process_mode == TextureProcessMode::LZ || + process_mode == TextureProcessMode::LL || + process_mode == TextureProcessMode::LLA; + + // LOD selection (either via bias or explicit textureLod) not supported in GL for + // sampler2DArrayShadow and samplerCubeArrayShadow. + const bool gl_lod_supported = + !((texture_type == Tegra::Shader::TextureType::Texture2D && is_array && is_shadow) || + (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && is_shadow)); + + const OperationCode read_method = + (lod_needed && gl_lod_supported) ? 
OperationCode::TextureLod : OperationCode::Texture; + + UNIMPLEMENTED_IF(process_mode != TextureProcessMode::None && !gl_lod_supported); + + Node bias = {}; + Node lod = {}; + if (process_mode != TextureProcessMode::None && gl_lod_supported) { + switch (process_mode) { + case TextureProcessMode::LZ: + lod = Immediate(0.0f); + break; + case TextureProcessMode::LB: + // If present, lod or bias are always stored in the register indexed by the gpr20 + // field with an offset depending on the usage of the other registers + bias = GetRegister(instr.gpr20.Value() + bias_offset); + break; + case TextureProcessMode::LL: + lod = GetRegister(instr.gpr20.Value() + bias_offset); + break; + default: + UNIMPLEMENTED_MSG("Unimplemented process mode={}", static_cast<u32>(process_mode)); + break; + } + } + + Node4 values; + for (u32 element = 0; element < values.size(); ++element) { + auto copy_coords = coords; + MetaTexture meta{sampler, array, depth_compare, bias, lod, {}, element}; + values[element] = Operation(read_method, meta, std::move(copy_coords)); + } + + return values; +} + +Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type, + TextureProcessMode process_mode, bool depth_compare, bool is_array) { + const bool lod_bias_enabled = + (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ); + + const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement( + texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5); + // If enabled arrays index is always stored in the gpr8 field + const u64 array_register = instr.gpr8.Value(); + // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used + const u64 coord_register = array_register + (is_array ? 1 : 0); + + std::vector<Node> coords; + for (std::size_t i = 0; i < coord_count; ++i) { + coords.push_back(GetRegister(coord_register + i)); + } + // 1D.DC in OpenGL the 2nd component is ignored. + if (depth_compare && !is_array && texture_type == TextureType::Texture1D) { + coords.push_back(Immediate(0.0f)); + } + + const Node array = is_array ? GetRegister(array_register) : nullptr; + + Node dc{}; + if (depth_compare) { + // Depth is always stored in the register signaled by gpr20 or in the next register if lod + // or bias are used + const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0); + dc = GetRegister(depth_register); + } + + return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, 0); +} + +Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type, + TextureProcessMode process_mode, bool depth_compare, bool is_array) { + const bool lod_bias_enabled = + (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ); + + const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement( + texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4); + // If enabled arrays index is always stored in the gpr8 field + const u64 array_register = instr.gpr8.Value(); + // First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used + const u64 coord_register = array_register + (is_array ? 1 : 0); + const u64 last_coord_register = + (is_array || !(lod_bias_enabled || depth_compare) || (coord_count > 2)) + ? static_cast<u64>(instr.gpr20.Value()) + : coord_register + 1; + const u32 bias_offset = coord_count > 2 ? 
1 : 0; + + std::vector<Node> coords; + for (std::size_t i = 0; i < coord_count; ++i) { + const bool last = (i == (coord_count - 1)) && (coord_count > 1); + coords.push_back(GetRegister(last ? last_coord_register : coord_register + i)); + } + + const Node array = is_array ? GetRegister(array_register) : nullptr; + + Node dc{}; + if (depth_compare) { + // Depth is always stored in the register signaled by gpr20 or in the next register if lod + // or bias are used + const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0); + dc = GetRegister(depth_register); + } + + return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, bias_offset); +} + +Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare, + bool is_array) { + const std::size_t coord_count = GetCoordCount(texture_type); + const std::size_t total_coord_count = coord_count + (is_array ? 1 : 0); + const std::size_t total_reg_count = total_coord_count + (depth_compare ? 1 : 0); + + // If enabled arrays index is always stored in the gpr8 field + const u64 array_register = instr.gpr8.Value(); + // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used + const u64 coord_register = array_register + (is_array ? 1 : 0); + + std::vector<Node> coords; + for (size_t i = 0; i < coord_count; ++i) + coords.push_back(GetRegister(coord_register + i)); + + const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare); + + Node4 values; + for (u32 element = 0; element < values.size(); ++element) { + auto coords_copy = coords; + MetaTexture meta{sampler, GetRegister(array_register), {}, {}, {}, {}, element}; + values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); + } + + return values; +} + +Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) { + const std::size_t type_coord_count = GetCoordCount(texture_type); + const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL; + + // If enabled arrays index is always stored in the gpr8 field + const u64 array_register = instr.gpr8.Value(); + // if is array gpr20 is used + const u64 coord_register = is_array ? instr.gpr20.Value() : instr.gpr8.Value(); + + const u64 last_coord_register = + ((type_coord_count > 2) || (type_coord_count == 2 && !lod_enabled)) && !is_array + ? static_cast<u64>(instr.gpr20.Value()) + : coord_register + 1; + + std::vector<Node> coords; + for (std::size_t i = 0; i < type_coord_count; ++i) { + const bool last = (i == (type_coord_count - 1)) && (type_coord_count > 1); + coords.push_back(GetRegister(last ? last_coord_register : coord_register + i)); + } + + const Node array = is_array ? GetRegister(array_register) : nullptr; + // When lod is used always is in gpr20 + const Node lod = lod_enabled ? 
GetRegister(instr.gpr20) : Immediate(0); + + const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false); + + Node4 values; + for (u32 element = 0; element < values.size(); ++element) { + auto coords_copy = coords; + MetaTexture meta{sampler, array, {}, {}, lod, {}, element}; + values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy)); + } + return values; +} + +std::tuple<std::size_t, std::size_t> ShaderIR::ValidateAndGetCoordinateElement( + TextureType texture_type, bool depth_compare, bool is_array, bool lod_bias_enabled, + std::size_t max_coords, std::size_t max_inputs) { + const std::size_t coord_count = GetCoordCount(texture_type); + + std::size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0); + const std::size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0); + if (total_coord_count > max_coords || total_reg_count > max_inputs) { + UNIMPLEMENTED_MSG("Unsupported Texture operation"); + total_coord_count = std::min(total_coord_count, max_coords); + } + // 1D.DC OpenGL is using a vec3 but 2nd component is ignored later. + total_coord_count += + (depth_compare && !is_array && texture_type == TextureType::Texture1D) ? 1 : 0; + + return {coord_count, total_coord_count}; +} + +} // namespace VideoCommon::Shader
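One detail of the new file worth spelling out is WriteTexsInstructionHalfFloat: TEXS.F16 results are packed two per register with HPack2, so the first pair of enabled components ends up in gpr0 and the second pair in gpr28. A rough standalone equivalent of that packing; the half-float conversion below is deliberately naive (it truncates the mantissa and ignores rounding, denormals and NaN payloads) and is an assumption used only to show the bit layout:

    #include <cstdint>
    #include <cstring>

    // Naive float -> binary16 conversion, good enough to illustrate the layout.
    std::uint16_t ToHalfApprox(float value) {
        std::uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        const std::uint32_t sign = (bits >> 16) & 0x8000;
        const std::int32_t exponent = static_cast<std::int32_t>((bits >> 23) & 0xFF) - 127 + 15;
        const std::uint32_t mantissa = (bits >> 13) & 0x3FF;
        if (exponent <= 0) {
            return static_cast<std::uint16_t>(sign); // flush small values to signed zero
        }
        if (exponent >= 31) {
            return static_cast<std::uint16_t>(sign | 0x7C00); // clamp to infinity
        }
        return static_cast<std::uint16_t>(sign | (static_cast<std::uint32_t>(exponent) << 10) | mantissa);
    }

    // HPack2: two half floats share one 32-bit destination register
    // (first component in bits 0-15, second in bits 16-31).
    std::uint32_t HPack2(float low, float high) {
        return ToHalfApprox(low) | (static_cast<std::uint32_t>(ToHalfApprox(high)) << 16);
    }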
\ No newline at end of file diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h index 52c7f2c4e..5bc3a3900 100644 --- a/src/video_core/shader/shader_ir.h +++ b/src/video_core/shader/shader_ir.h @@ -290,7 +290,9 @@ struct MetaTexture { const Sampler& sampler; Node array{}; Node depth_compare{}; - std::vector<Node> extras; + Node bias{}; + Node lod{}; + Node component{}; u32 element{}; }; @@ -614,6 +616,7 @@ private: u32 DecodeHfma2(NodeBlock& bb, u32 pc); u32 DecodeConversion(NodeBlock& bb, u32 pc); u32 DecodeMemory(NodeBlock& bb, u32 pc); + u32 DecodeTexture(NodeBlock& bb, u32 pc); u32 DecodeFloatSetPredicate(NodeBlock& bb, u32 pc); u32 DecodeIntegerSetPredicate(NodeBlock& bb, u32 pc); u32 DecodeHalfSetPredicate(NodeBlock& bb, u32 pc); diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp index be4635342..33b071747 100644 --- a/src/video_core/shader/track.cpp +++ b/src/video_core/shader/track.cpp @@ -20,9 +20,9 @@ std::pair<Node, s64> FindOperation(const NodeBlock& code, s64 cursor, return {node, cursor}; } if (const auto conditional = std::get_if<ConditionalNode>(node)) { - const auto& code = conditional->GetCode(); - const auto [found, internal_cursor] = - FindOperation(code, static_cast<s64>(code.size() - 1), operation_code); + const auto& conditional_code = conditional->GetCode(); + const auto [found, internal_cursor] = FindOperation( + conditional_code, static_cast<s64>(conditional_code.size() - 1), operation_code); if (found) return {found, cursor}; } @@ -58,8 +58,8 @@ Node ShaderIR::TrackCbuf(Node tracked, const NodeBlock& code, s64 cursor) { return nullptr; } if (const auto conditional = std::get_if<ConditionalNode>(tracked)) { - const auto& code = conditional->GetCode(); - return TrackCbuf(tracked, code, static_cast<s64>(code.size())); + const auto& conditional_code = conditional->GetCode(); + return TrackCbuf(tracked, conditional_code, static_cast<s64>(conditional_code.size())); } return nullptr; } diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp index 044ba116a..a7ac26d71 100644 --- a/src/video_core/surface.cpp +++ b/src/video_core/surface.cpp @@ -89,8 +89,6 @@ PixelFormat PixelFormatFromDepthFormat(Tegra::DepthFormat format) { PixelFormat PixelFormatFromRenderTargetFormat(Tegra::RenderTargetFormat format) { switch (format) { - // TODO (Hexagon12): Converting SRGBA to RGBA is a hack and doesn't completely correct the - // gamma. 
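The shader_ir.h hunk above replaces MetaTexture's untyped extras vector with named bias, lod and component nodes, which is why every MetaTexture initializer in the new texture.cpp passes empty braces for the slots a given instruction does not use. A compilable mock of that field order (Node is reduced to a plain pointer here; only the field layout follows the diff):

    #include <cstdint>

    struct Sampler;           // opaque in this sketch
    using Node = const void*; // stand-in for the real tracked IR node type

    struct MetaTextureSketch {
        const Sampler& sampler;
        Node array{};
        Node depth_compare{};
        Node bias{};
        Node lod{};
        Node component{};
        std::uint32_t element{};
    };

    // Mirrors the TLDS call site: array and lod are set, everything else stays empty.
    MetaTextureSketch MakeTexelFetchMeta(const Sampler& sampler, Node array, Node lod,
                                         std::uint32_t element) {
        return MetaTextureSketch{sampler, array, {}, {}, lod, {}, element};
    }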
case Tegra::RenderTargetFormat::RGBA8_SRGB: return PixelFormat::RGBA8_SRGB; case Tegra::RenderTargetFormat::RGBA8_UNORM: diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp index bc50a4876..b508d64e9 100644 --- a/src/video_core/textures/astc.cpp +++ b/src/video_core/textures/astc.cpp @@ -23,28 +23,12 @@ #include "video_core/textures/astc.h" -class BitStream { +class InputBitStream { public: - explicit BitStream(unsigned char* ptr, int nBits = 0, int start_offset = 0) + explicit InputBitStream(const unsigned char* ptr, int nBits = 0, int start_offset = 0) : m_NumBits(nBits), m_CurByte(ptr), m_NextBit(start_offset % 8) {} - ~BitStream() = default; - - int GetBitsWritten() const { - return m_BitsWritten; - } - - void WriteBitsR(unsigned int val, unsigned int nBits) { - for (unsigned int i = 0; i < nBits; i++) { - WriteBit((val >> (nBits - i - 1)) & 1); - } - } - - void WriteBits(unsigned int val, unsigned int nBits) { - for (unsigned int i = 0; i < nBits; i++) { - WriteBit((val >> i) & 1); - } - } + ~InputBitStream() = default; int GetBitsRead() const { return m_BitsRead; @@ -71,6 +55,38 @@ public: } private: + const int m_NumBits; + const unsigned char* m_CurByte; + int m_NextBit = 0; + int m_BitsRead = 0; + + bool done = false; +}; + +class OutputBitStream { +public: + explicit OutputBitStream(unsigned char* ptr, int nBits = 0, int start_offset = 0) + : m_NumBits(nBits), m_CurByte(ptr), m_NextBit(start_offset % 8) {} + + ~OutputBitStream() = default; + + int GetBitsWritten() const { + return m_BitsWritten; + } + + void WriteBitsR(unsigned int val, unsigned int nBits) { + for (unsigned int i = 0; i < nBits; i++) { + WriteBit((val >> (nBits - i - 1)) & 1); + } + } + + void WriteBits(unsigned int val, unsigned int nBits) { + for (unsigned int i = 0; i < nBits; i++) { + WriteBit((val >> i) & 1); + } + } + +private: void WriteBit(int b) { if (done) @@ -238,8 +254,8 @@ public: // Fills result with the values that are encoded in the given // bitstream. We must know beforehand what the maximum possible // value is, and how many values we're decoding. 
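The BitStream split above separates the read-only decode path (InputBitStream) from the write path used to re-pack color endpoint data (OutputBitStream). A simplified standalone reader in the same spirit, consuming bits LSB-first within each byte as the ASTC integer-sequence decoder expects; this is an illustration, not the class from astc.cpp:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    class SimpleInputBitStream {
    public:
        SimpleInputBitStream(const std::uint8_t* data, std::size_t bit_count)
            : data{data}, bit_count{bit_count} {}

        std::uint32_t ReadBit() {
            assert(bits_read < bit_count);
            const std::uint32_t bit = (data[bits_read / 8] >> (bits_read % 8)) & 1;
            ++bits_read;
            return bit;
        }

        // Reads n bits and assembles them LSB first.
        std::uint32_t ReadBits(std::size_t n) {
            std::uint32_t value = 0;
            for (std::size_t i = 0; i < n; ++i) {
                value |= ReadBit() << i;
            }
            return value;
        }

    private:
        const std::uint8_t* data;
        std::size_t bit_count;
        std::size_t bits_read = 0;
    };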
- static void DecodeIntegerSequence(std::vector<IntegerEncodedValue>& result, BitStream& bits, - uint32_t maxRange, uint32_t nValues) { + static void DecodeIntegerSequence(std::vector<IntegerEncodedValue>& result, + InputBitStream& bits, uint32_t maxRange, uint32_t nValues) { // Determine encoding parameters IntegerEncodedValue val = IntegerEncodedValue::CreateEncoding(maxRange); @@ -267,7 +283,7 @@ public: } private: - static void DecodeTritBlock(BitStream& bits, std::vector<IntegerEncodedValue>& result, + static void DecodeTritBlock(InputBitStream& bits, std::vector<IntegerEncodedValue>& result, uint32_t nBitsPerValue) { // Implement the algorithm in section C.2.12 uint32_t m[5]; @@ -327,7 +343,7 @@ private: } } - static void DecodeQuintBlock(BitStream& bits, std::vector<IntegerEncodedValue>& result, + static void DecodeQuintBlock(InputBitStream& bits, std::vector<IntegerEncodedValue>& result, uint32_t nBitsPerValue) { // Implement the algorithm in section C.2.12 uint32_t m[3]; @@ -406,7 +422,7 @@ struct TexelWeightParams { } }; -static TexelWeightParams DecodeBlockInfo(BitStream& strm) { +static TexelWeightParams DecodeBlockInfo(InputBitStream& strm) { TexelWeightParams params; // Read the entire block mode all at once @@ -605,7 +621,7 @@ static TexelWeightParams DecodeBlockInfo(BitStream& strm) { return params; } -static void FillVoidExtentLDR(BitStream& strm, uint32_t* const outBuf, uint32_t blockWidth, +static void FillVoidExtentLDR(InputBitStream& strm, uint32_t* const outBuf, uint32_t blockWidth, uint32_t blockHeight) { // Don't actually care about the void extent, just read the bits... for (int i = 0; i < 4; ++i) { @@ -821,7 +837,7 @@ static void DecodeColorValues(uint32_t* out, uint8_t* data, const uint32_t* mode // We now have enough to decode our integer sequence. std::vector<IntegerEncodedValue> decodedColorValues; - BitStream colorStream(data); + InputBitStream colorStream(data); IntegerEncodedValue::DecodeIntegerSequence(decodedColorValues, colorStream, range, nValues); // Once we have the decoded values, we need to dequantize them to the 0-255 range @@ -1365,9 +1381,9 @@ static void ComputeEndpoints(Pixel& ep1, Pixel& ep2, const uint32_t*& colorValue #undef READ_INT_VALUES } -static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth, +static void DecompressBlock(const uint8_t inBuf[16], const uint32_t blockWidth, const uint32_t blockHeight, uint32_t* outBuf) { - BitStream strm(inBuf); + InputBitStream strm(inBuf); TexelWeightParams weightParams = DecodeBlockInfo(strm); // Was there an error? @@ -1421,7 +1437,7 @@ static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth, // Define color data. uint8_t colorEndpointData[16]; memset(colorEndpointData, 0, sizeof(colorEndpointData)); - BitStream colorEndpointStream(colorEndpointData, 16 * 8, 0); + OutputBitStream colorEndpointStream(colorEndpointData, 16 * 8, 0); // Read extra config data... 
uint32_t baseCEM = 0; @@ -1549,7 +1565,7 @@ static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth, memset(texelWeightData + clearByteStart, 0, 16 - clearByteStart); std::vector<IntegerEncodedValue> texelWeightValues; - BitStream weightStream(texelWeightData); + InputBitStream weightStream(texelWeightData); IntegerEncodedValue::DecodeIntegerSequence(texelWeightValues, weightStream, weightParams.m_MaxWeight, @@ -1597,7 +1613,7 @@ static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth, namespace Tegra::Texture::ASTC { -std::vector<uint8_t> Decompress(std::vector<uint8_t>& data, uint32_t width, uint32_t height, +std::vector<uint8_t> Decompress(const uint8_t* data, uint32_t width, uint32_t height, uint32_t depth, uint32_t block_width, uint32_t block_height) { uint32_t blockIdx = 0; std::vector<uint8_t> outData(height * width * depth * 4); @@ -1605,7 +1621,7 @@ std::vector<uint8_t> Decompress(std::vector<uint8_t>& data, uint32_t width, uint for (uint32_t j = 0; j < height; j += block_height) { for (uint32_t i = 0; i < width; i += block_width) { - uint8_t* blockPtr = data.data() + blockIdx * 16; + const uint8_t* blockPtr = data + blockIdx * 16; // Blocks can be at most 12x12 uint32_t uncompData[144]; diff --git a/src/video_core/textures/astc.h b/src/video_core/textures/astc.h index d419dd025..991cdba72 100644 --- a/src/video_core/textures/astc.h +++ b/src/video_core/textures/astc.h @@ -9,7 +9,7 @@ namespace Tegra::Texture::ASTC { -std::vector<uint8_t> Decompress(std::vector<uint8_t>& data, uint32_t width, uint32_t height, +std::vector<uint8_t> Decompress(const uint8_t* data, uint32_t width, uint32_t height, uint32_t depth, uint32_t block_width, uint32_t block_height); } // namespace Tegra::Texture::ASTC diff --git a/src/video_core/textures/convert.cpp b/src/video_core/textures/convert.cpp new file mode 100644 index 000000000..5e439f036 --- /dev/null +++ b/src/video_core/textures/convert.cpp @@ -0,0 +1,92 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include <algorithm> +#include <cstring> +#include <tuple> +#include <vector> + +#include "common/assert.h" +#include "common/common_types.h" +#include "common/logging/log.h" +#include "video_core/textures/astc.h" +#include "video_core/textures/convert.h" + +namespace Tegra::Texture { + +using VideoCore::Surface::PixelFormat; + +template <bool reverse> +void SwapS8Z24ToZ24S8(u8* data, u32 width, u32 height) { + union S8Z24 { + BitField<0, 24, u32> z24; + BitField<24, 8, u32> s8; + }; + static_assert(sizeof(S8Z24) == 4, "S8Z24 is incorrect size"); + + union Z24S8 { + BitField<0, 8, u32> s8; + BitField<8, 24, u32> z24; + }; + static_assert(sizeof(Z24S8) == 4, "Z24S8 is incorrect size"); + + S8Z24 s8z24_pixel{}; + Z24S8 z24s8_pixel{}; + constexpr auto bpp{ + VideoCore::Surface::GetBytesPerPixel(VideoCore::Surface::PixelFormat::S8Z24)}; + for (std::size_t y = 0; y < height; ++y) { + for (std::size_t x = 0; x < width; ++x) { + const std::size_t offset{bpp * (y * width + x)}; + if constexpr (reverse) { + std::memcpy(&z24s8_pixel, &data[offset], sizeof(Z24S8)); + s8z24_pixel.s8.Assign(z24s8_pixel.s8); + s8z24_pixel.z24.Assign(z24s8_pixel.z24); + std::memcpy(&data[offset], &s8z24_pixel, sizeof(S8Z24)); + } else { + std::memcpy(&s8z24_pixel, &data[offset], sizeof(S8Z24)); + z24s8_pixel.s8.Assign(s8z24_pixel.s8); + z24s8_pixel.z24.Assign(s8z24_pixel.z24); + std::memcpy(&data[offset], &z24s8_pixel, sizeof(Z24S8)); + } + } + } +} + +static void ConvertS8Z24ToZ24S8(u8* data, u32 width, u32 height) { + SwapS8Z24ToZ24S8<false>(data, width, height); +} + +static void ConvertZ24S8ToS8Z24(u8* data, u32 width, u32 height) { + SwapS8Z24ToZ24S8<true>(data, width, height); +} + +void ConvertFromGuestToHost(u8* data, PixelFormat pixel_format, u32 width, u32 height, u32 depth, + bool convert_astc, bool convert_s8z24) { + if (convert_astc && IsPixelFormatASTC(pixel_format)) { + // Convert ASTC pixel formats to RGBA8, as most desktop GPUs do not support ASTC. + u32 block_width{}; + u32 block_height{}; + std::tie(block_width, block_height) = GetASTCBlockSize(pixel_format); + const std::vector<u8> rgba8_data = + Tegra::Texture::ASTC::Decompress(data, width, height, depth, block_width, block_height); + std::copy(rgba8_data.begin(), rgba8_data.end(), data); + + } else if (convert_s8z24 && pixel_format == PixelFormat::S8Z24) { + Tegra::Texture::ConvertS8Z24ToZ24S8(data, width, height); + } +} + +void ConvertFromHostToGuest(u8* data, PixelFormat pixel_format, u32 width, u32 height, u32 depth, + bool convert_astc, bool convert_s8z24) { + if (convert_astc && IsPixelFormatASTC(pixel_format)) { + LOG_CRITICAL(HW_GPU, "Conversion of format {} after texture flushing is not implemented", + static_cast<u32>(pixel_format)); + UNREACHABLE(); + + } else if (convert_s8z24 && pixel_format == PixelFormat::S8Z24) { + Tegra::Texture::ConvertZ24S8ToS8Z24(data, width, height); + } +} + +} // namespace Tegra::Texture
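The in-place swap in convert.cpp is easier to follow when written as plain shifts on a single 32-bit pixel; the bit positions below are taken directly from the S8Z24 and Z24S8 unions above, while the helper names are illustrative:

    #include <cstdint>

    // Guest S8Z24 pixel: depth in bits 0-23, stencil in bits 24-31.
    // Host  Z24S8 pixel: stencil in bits 0-7, depth in bits 8-31.
    inline std::uint32_t S8Z24ToZ24S8(std::uint32_t s8z24) {
        const std::uint32_t depth = s8z24 & 0x00FFFFFF;
        const std::uint32_t stencil = s8z24 >> 24;
        return (depth << 8) | stencil;
    }

    inline std::uint32_t Z24S8ToS8Z24(std::uint32_t z24s8) {
        const std::uint32_t stencil = z24s8 & 0xFF;
        const std::uint32_t depth = z24s8 >> 8;
        return (stencil << 24) | depth;
    }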
\ No newline at end of file diff --git a/src/video_core/textures/convert.h b/src/video_core/textures/convert.h new file mode 100644 index 000000000..07cd8b5da --- /dev/null +++ b/src/video_core/textures/convert.h @@ -0,0 +1,18 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_types.h" +#include "video_core/surface.h" + +namespace Tegra::Texture { + +void ConvertFromGuestToHost(u8* data, VideoCore::Surface::PixelFormat pixel_format, u32 width, + u32 height, u32 depth, bool convert_astc, bool convert_s8z24); + +void ConvertFromHostToGuest(u8* data, VideoCore::Surface::PixelFormat pixel_format, u32 width, + u32 height, u32 depth, bool convert_astc, bool convert_s8z24); + +} // namespace Tegra::Texture
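A hypothetical call site for the two entry points declared above; the function name, dimensions and the choice of S8Z24 are illustrative only and not taken from an actual caller in this change:

    #include <vector>

    #include "common/common_types.h"
    #include "video_core/surface.h"
    #include "video_core/textures/convert.h"

    void UploadExampleDepthStencilSurface(std::vector<u8>& pixel_data, u32 width, u32 height) {
        using VideoCore::Surface::PixelFormat;
        // Swizzle the guest S8Z24 data into the host Z24S8 layout in place; ASTC
        // conversion is requested too but is a no-op for this pixel format.
        Tegra::Texture::ConvertFromGuestToHost(pixel_data.data(), PixelFormat::S8Z24, width, height,
                                               /*depth=*/1, /*convert_astc=*/true,
                                               /*convert_s8z24=*/true);
    }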
\ No newline at end of file diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp index 5db75de22..cad7340f5 100644 --- a/src/video_core/textures/decoders.cpp +++ b/src/video_core/textures/decoders.cpp @@ -103,8 +103,8 @@ void FastProcessBlock(u8* const swizzled_data, u8* const unswizzled_data, const const u32 swizzle_offset{y_address + table[(xb / fast_swizzle_align) % 4]}; const u32 out_x = xb * out_bytes_per_pixel / bytes_per_pixel; const u32 pixel_index{out_x + pixel_base}; - data_ptrs[unswizzle] = swizzled_data + swizzle_offset; - data_ptrs[!unswizzle] = unswizzled_data + pixel_index; + data_ptrs[unswizzle ? 1 : 0] = swizzled_data + swizzle_offset; + data_ptrs[unswizzle ? 0 : 1] = unswizzled_data + pixel_index; std::memcpy(data_ptrs[0], data_ptrs[1], fast_swizzle_align); } pixel_base += stride_x; @@ -154,7 +154,7 @@ void SwizzledData(u8* const swizzled_data, u8* const unswizzled_data, const bool for (u32 xb = 0; xb < blocks_on_x; xb++) { const u32 x_start = xb * block_x_elements; const u32 x_end = std::min(width, x_start + block_x_elements); - if (fast) { + if constexpr (fast) { FastProcessBlock(swizzled_data, unswizzled_data, unswizzle, x_start, y_start, z_start, x_end, y_end, z_end, tile_offset, xy_block_size, layer_z, stride_x, bytes_per_pixel, out_bytes_per_pixel); diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h index 85b7e9f7b..65df86890 100644 --- a/src/video_core/textures/decoders.h +++ b/src/video_core/textures/decoders.h @@ -16,16 +16,13 @@ inline std::size_t GetGOBSize() { return 512; } -/** - * Unswizzles a swizzled texture without changing its format. - */ +/// Unswizzles a swizzled texture without changing its format. void UnswizzleTexture(u8* unswizzled_data, VAddr address, u32 tile_size_x, u32 tile_size_y, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, u32 block_height = TICEntry::DefaultBlockHeight, u32 block_depth = TICEntry::DefaultBlockHeight, u32 width_spacing = 0); -/** - * Unswizzles a swizzled texture without changing its format. - */ + +/// Unswizzles a swizzled texture without changing its format. std::vector<u8> UnswizzleTexture(VAddr address, u32 tile_size_x, u32 tile_size_y, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, u32 block_height = TICEntry::DefaultBlockHeight, @@ -37,15 +34,11 @@ void CopySwizzledData(u32 width, u32 height, u32 depth, u32 bytes_per_pixel, u32 out_bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, bool unswizzle, u32 block_height, u32 block_depth, u32 width_spacing); -/** - * Decodes an unswizzled texture into a A8R8G8B8 texture. - */ +/// Decodes an unswizzled texture into a A8R8G8B8 texture. std::vector<u8> DecodeTexture(const std::vector<u8>& texture_data, TextureFormat format, u32 width, u32 height); -/** - * This function calculates the correct size of a texture depending if it's tiled or not. - */ +/// This function calculates the correct size of a texture depending if it's tiled or not. std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth); @@ -53,6 +46,7 @@ std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data, u32 block_height); + /// Copies a tiled subrectangle into a linear surface. 
void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width, u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data, diff --git a/src/video_core/textures/texture.h b/src/video_core/textures/texture.h index 0fc5530f2..93ecc6e31 100644 --- a/src/video_core/textures/texture.h +++ b/src/video_core/textures/texture.h @@ -4,6 +4,7 @@ #pragma once +#include <array> #include "common/assert.h" #include "common/bit_field.h" #include "common/common_funcs.h" @@ -282,34 +283,62 @@ enum class TextureMipmapFilter : u32 { struct TSCEntry { union { - BitField<0, 3, WrapMode> wrap_u; - BitField<3, 3, WrapMode> wrap_v; - BitField<6, 3, WrapMode> wrap_p; - BitField<9, 1, u32> depth_compare_enabled; - BitField<10, 3, DepthCompareFunc> depth_compare_func; - BitField<13, 1, u32> srgb_conversion; - BitField<20, 3, u32> max_anisotropy; + struct { + union { + BitField<0, 3, WrapMode> wrap_u; + BitField<3, 3, WrapMode> wrap_v; + BitField<6, 3, WrapMode> wrap_p; + BitField<9, 1, u32> depth_compare_enabled; + BitField<10, 3, DepthCompareFunc> depth_compare_func; + BitField<13, 1, u32> srgb_conversion; + BitField<20, 3, u32> max_anisotropy; + }; + union { + BitField<0, 2, TextureFilter> mag_filter; + BitField<4, 2, TextureFilter> min_filter; + BitField<6, 2, TextureMipmapFilter> mipmap_filter; + BitField<9, 1, u32> cubemap_interface_filtering; + BitField<12, 13, u32> mip_lod_bias; + }; + union { + BitField<0, 12, u32> min_lod_clamp; + BitField<12, 12, u32> max_lod_clamp; + BitField<24, 8, u32> srgb_border_color_r; + }; + union { + BitField<12, 8, u32> srgb_border_color_g; + BitField<20, 8, u32> srgb_border_color_b; + }; + std::array<f32, 4> border_color; + }; + std::array<u8, 0x20> raw; }; - union { - BitField<0, 2, TextureFilter> mag_filter; - BitField<4, 2, TextureFilter> min_filter; - BitField<6, 2, TextureMipmapFilter> mip_filter; - BitField<9, 1, u32> cubemap_interface_filtering; - BitField<12, 13, u32> mip_lod_bias; - }; - union { - BitField<0, 12, u32> min_lod_clamp; - BitField<12, 12, u32> max_lod_clamp; - BitField<24, 8, u32> srgb_border_color_r; - }; - union { - BitField<12, 8, u32> srgb_border_color_g; - BitField<20, 8, u32> srgb_border_color_b; - }; - float border_color_r; - float border_color_g; - float border_color_b; - float border_color_a; + + float GetMaxAnisotropy() const { + return static_cast<float>(1U << max_anisotropy); + } + + float GetMinLod() const { + return static_cast<float>(min_lod_clamp) / 256.0f; + } + + float GetMaxLod() const { + return static_cast<float>(max_lod_clamp) / 256.0f; + } + + float GetLodBias() const { + // Sign extend the 13-bit value. 
+ constexpr u32 mask = 1U << (13 - 1); + return static_cast<s32>((mip_lod_bias ^ mask) - mask) / 256.0f; + } + + std::array<float, 4> GetBorderColor() const { + if (srgb_conversion) { + return {srgb_border_color_r / 255.0f, srgb_border_color_g / 255.0f, + srgb_border_color_b / 255.0f, border_color[3]}; + } + return border_color; + } }; static_assert(sizeof(TSCEntry) == 0x20, "TSCEntry has wrong size"); diff --git a/src/web_service/verify_login.h b/src/web_service/verify_login.h index 39db32dbb..821b345d7 100644 --- a/src/web_service/verify_login.h +++ b/src/web_service/verify_login.h @@ -4,8 +4,6 @@ #pragma once -#include <functional> -#include <future> #include <string> namespace WebService { diff --git a/src/web_service/web_backend.cpp b/src/web_service/web_backend.cpp index b7737b615..40da1a4e2 100644 --- a/src/web_service/web_backend.cpp +++ b/src/web_service/web_backend.cpp @@ -10,7 +10,6 @@ #include "common/common_types.h" #include "common/logging/log.h" #include "common/web_result.h" -#include "core/settings.h" #include "web_service/web_backend.h" namespace WebService { diff --git a/src/yuzu/applets/web_browser.cpp b/src/yuzu/applets/web_browser.cpp index 6a9138d53..979b9ec14 100644 --- a/src/yuzu/applets/web_browser.cpp +++ b/src/yuzu/applets/web_browser.cpp @@ -56,6 +56,8 @@ constexpr char NX_SHIM_INJECT_SCRIPT[] = R"( window.nx.endApplet = function() { applet_done = true; }; + + window.onkeypress = function(e) { if (e.keyCode === 13) { applet_done = true; } }; )"; QString GetNXShimInjectionScript() { diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp index 73b04b749..d2c97b1f8 100644 --- a/src/yuzu/bootmanager.cpp +++ b/src/yuzu/bootmanager.cpp @@ -20,10 +20,7 @@ EmuThread::EmuThread(GRenderWindow* render_window) : render_window(render_window) {} void EmuThread::run() { - if (!Settings::values.use_multi_core) { - // Single core mode must acquire OpenGL context for entire emulation session - render_window->MakeCurrent(); - } + render_window->MakeCurrent(); MicroProfileOnThreadCreate("EmuThread"); @@ -38,6 +35,11 @@ void EmuThread::run() { emit LoadProgress(VideoCore::LoadCallbackStage::Complete, 0, 0); + if (Settings::values.use_asynchronous_gpu_emulation) { + // Release OpenGL context for the GPU thread + render_window->DoneCurrent(); + } + // holds whether the cpu was running during the last iteration, // so that the DebugModeLeft signal can be emitted before the // next execution step @@ -121,7 +123,6 @@ GRenderWindow::GRenderWindow(QWidget* parent, EmuThread* emu_thread) setAttribute(Qt::WA_AcceptTouchEvents); InputCommon::Init(); - InputCommon::StartJoystickEventHandler(); connect(this, &GRenderWindow::FirstFrameDisplayed, static_cast<GMainWindow*>(parent), &GMainWindow::OnLoadComplete); } diff --git a/src/yuzu/compatdb.cpp b/src/yuzu/compatdb.cpp index 5f0896f84..c8b0a5ec0 100644 --- a/src/yuzu/compatdb.cpp +++ b/src/yuzu/compatdb.cpp @@ -53,15 +53,15 @@ void CompatDB::Submit() { case CompatDBPage::Final: back(); LOG_DEBUG(Frontend, "Compatibility Rating: {}", compatibility->checkedId()); - Core::Telemetry().AddField(Telemetry::FieldType::UserFeedback, "Compatibility", - compatibility->checkedId()); + Core::System::GetInstance().TelemetrySession().AddField( + Telemetry::FieldType::UserFeedback, "Compatibility", compatibility->checkedId()); button(NextButton)->setEnabled(false); button(NextButton)->setText(tr("Submitting")); button(QWizard::CancelButton)->setVisible(false); testcase_watcher.setFuture(QtConcurrent::run( - [this]() { return 
Core::System::GetInstance().TelemetrySession().SubmitTestcase(); })); + [] { return Core::System::GetInstance().TelemetrySession().SubmitTestcase(); })); break; default: LOG_ERROR(Frontend, "Unexpected page: {}", currentId()); diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp index e9546dadf..4650f96a3 100644 --- a/src/yuzu/configuration/config.cpp +++ b/src/yuzu/configuration/config.cpp @@ -209,7 +209,7 @@ void Config::ReadPlayerValues() { for (std::size_t p = 0; p < Settings::values.players.size(); ++p) { auto& player = Settings::values.players[p]; - player.connected = qt_config->value(QString("player_%1_connected").arg(p), false).toBool(); + player.connected = ReadSetting(QString("player_%1_connected").arg(p), false).toBool(); player.type = static_cast<Settings::ControllerType>( qt_config @@ -269,7 +269,7 @@ void Config::ReadPlayerValues() { } void Config::ReadDebugValues() { - Settings::values.debug_pad_enabled = qt_config->value("debug_pad_enabled", false).toBool(); + Settings::values.debug_pad_enabled = ReadSetting("debug_pad_enabled", false).toBool(); for (int i = 0; i < Settings::NativeButton::NumButtons; ++i) { std::string default_param = InputCommon::GenerateKeyboardParam(default_buttons[i]); Settings::values.debug_pad_buttons[i] = @@ -298,7 +298,7 @@ void Config::ReadDebugValues() { } void Config::ReadKeyboardValues() { - Settings::values.keyboard_enabled = qt_config->value("keyboard_enabled", false).toBool(); + Settings::values.keyboard_enabled = ReadSetting("keyboard_enabled", false).toBool(); std::transform(default_keyboard_keys.begin(), default_keyboard_keys.end(), Settings::values.keyboard_keys.begin(), InputCommon::GenerateKeyboardParam); @@ -311,7 +311,7 @@ void Config::ReadKeyboardValues() { } void Config::ReadMouseValues() { - Settings::values.mouse_enabled = qt_config->value("mouse_enabled", false).toBool(); + Settings::values.mouse_enabled = ReadSetting("mouse_enabled", false).toBool(); for (int i = 0; i < Settings::NativeMouseButton::NumMouseButtons; ++i) { std::string default_param = InputCommon::GenerateKeyboardParam(default_mouse_buttons[i]); @@ -327,16 +327,14 @@ void Config::ReadMouseValues() { } void Config::ReadTouchscreenValues() { - Settings::values.touchscreen.enabled = qt_config->value("touchscreen_enabled", true).toBool(); + Settings::values.touchscreen.enabled = ReadSetting("touchscreen_enabled", true).toBool(); Settings::values.touchscreen.device = - qt_config->value("touchscreen_device", "engine:emu_window").toString().toStdString(); - - Settings::values.touchscreen.finger = qt_config->value("touchscreen_finger", 0).toUInt(); - Settings::values.touchscreen.rotation_angle = qt_config->value("touchscreen_angle", 0).toUInt(); - Settings::values.touchscreen.diameter_x = - qt_config->value("touchscreen_diameter_x", 15).toUInt(); - Settings::values.touchscreen.diameter_y = - qt_config->value("touchscreen_diameter_y", 15).toUInt(); + ReadSetting("touchscreen_device", "engine:emu_window").toString().toStdString(); + + Settings::values.touchscreen.finger = ReadSetting("touchscreen_finger", 0).toUInt(); + Settings::values.touchscreen.rotation_angle = ReadSetting("touchscreen_angle", 0).toUInt(); + Settings::values.touchscreen.diameter_x = ReadSetting("touchscreen_diameter_x", 15).toUInt(); + Settings::values.touchscreen.diameter_y = ReadSetting("touchscreen_diameter_y", 15).toUInt(); qt_config->endGroup(); } @@ -357,40 +355,41 @@ void Config::ReadValues() { ReadTouchscreenValues(); Settings::values.motion_device = - 
qt_config->value("motion_device", "engine:motion_emu,update_period:100,sensitivity:0.01") + ReadSetting("motion_device", "engine:motion_emu,update_period:100,sensitivity:0.01") .toString() .toStdString(); qt_config->beginGroup("Core"); - Settings::values.use_cpu_jit = qt_config->value("use_cpu_jit", true).toBool(); - Settings::values.use_multi_core = qt_config->value("use_multi_core", false).toBool(); + Settings::values.use_cpu_jit = ReadSetting("use_cpu_jit", true).toBool(); + Settings::values.use_multi_core = ReadSetting("use_multi_core", false).toBool(); qt_config->endGroup(); qt_config->beginGroup("Renderer"); - Settings::values.resolution_factor = qt_config->value("resolution_factor", 1.0).toFloat(); - Settings::values.use_frame_limit = qt_config->value("use_frame_limit", true).toBool(); - Settings::values.frame_limit = qt_config->value("frame_limit", 100).toInt(); - Settings::values.use_disk_shader_cache = - qt_config->value("use_disk_shader_cache", false).toBool(); + Settings::values.resolution_factor = ReadSetting("resolution_factor", 1.0).toFloat(); + Settings::values.use_frame_limit = ReadSetting("use_frame_limit", true).toBool(); + Settings::values.frame_limit = ReadSetting("frame_limit", 100).toInt(); + Settings::values.use_disk_shader_cache = ReadSetting("use_disk_shader_cache", true).toBool(); Settings::values.use_accurate_gpu_emulation = - qt_config->value("use_accurate_gpu_emulation", false).toBool(); + ReadSetting("use_accurate_gpu_emulation", false).toBool(); + Settings::values.use_asynchronous_gpu_emulation = + ReadSetting("use_asynchronous_gpu_emulation", false).toBool(); - Settings::values.bg_red = qt_config->value("bg_red", 0.0).toFloat(); - Settings::values.bg_green = qt_config->value("bg_green", 0.0).toFloat(); - Settings::values.bg_blue = qt_config->value("bg_blue", 0.0).toFloat(); + Settings::values.bg_red = ReadSetting("bg_red", 0.0).toFloat(); + Settings::values.bg_green = ReadSetting("bg_green", 0.0).toFloat(); + Settings::values.bg_blue = ReadSetting("bg_blue", 0.0).toFloat(); qt_config->endGroup(); qt_config->beginGroup("Audio"); - Settings::values.sink_id = qt_config->value("output_engine", "auto").toString().toStdString(); + Settings::values.sink_id = ReadSetting("output_engine", "auto").toString().toStdString(); Settings::values.enable_audio_stretching = - qt_config->value("enable_audio_stretching", true).toBool(); + ReadSetting("enable_audio_stretching", true).toBool(); Settings::values.audio_device_id = - qt_config->value("output_device", "auto").toString().toStdString(); - Settings::values.volume = qt_config->value("volume", 1).toFloat(); + ReadSetting("output_device", "auto").toString().toStdString(); + Settings::values.volume = ReadSetting("volume", 1).toFloat(); qt_config->endGroup(); qt_config->beginGroup("Data Storage"); - Settings::values.use_virtual_sd = qt_config->value("use_virtual_sd", true).toBool(); + Settings::values.use_virtual_sd = ReadSetting("use_virtual_sd", true).toBool(); FileUtil::GetUserPath( FileUtil::UserPath::NANDDir, qt_config @@ -408,30 +407,30 @@ void Config::ReadValues() { qt_config->endGroup(); qt_config->beginGroup("Core"); - Settings::values.use_cpu_jit = qt_config->value("use_cpu_jit", true).toBool(); - Settings::values.use_multi_core = qt_config->value("use_multi_core", false).toBool(); + Settings::values.use_cpu_jit = ReadSetting("use_cpu_jit", true).toBool(); + Settings::values.use_multi_core = ReadSetting("use_multi_core", false).toBool(); qt_config->endGroup(); qt_config->beginGroup("System"); - 
Settings::values.use_docked_mode = qt_config->value("use_docked_mode", false).toBool(); - Settings::values.enable_nfc = qt_config->value("enable_nfc", true).toBool(); + Settings::values.use_docked_mode = ReadSetting("use_docked_mode", false).toBool(); + Settings::values.enable_nfc = ReadSetting("enable_nfc", true).toBool(); - Settings::values.current_user = std::clamp<int>(qt_config->value("current_user", 0).toInt(), 0, - Service::Account::MAX_USERS - 1); + Settings::values.current_user = + std::clamp<int>(ReadSetting("current_user", 0).toInt(), 0, Service::Account::MAX_USERS - 1); - Settings::values.language_index = qt_config->value("language_index", 1).toInt(); + Settings::values.language_index = ReadSetting("language_index", 1).toInt(); - const auto rng_seed_enabled = qt_config->value("rng_seed_enabled", false).toBool(); + const auto rng_seed_enabled = ReadSetting("rng_seed_enabled", false).toBool(); if (rng_seed_enabled) { - Settings::values.rng_seed = qt_config->value("rng_seed", 0).toULongLong(); + Settings::values.rng_seed = ReadSetting("rng_seed", 0).toULongLong(); } else { Settings::values.rng_seed = std::nullopt; } - const auto custom_rtc_enabled = qt_config->value("custom_rtc_enabled", false).toBool(); + const auto custom_rtc_enabled = ReadSetting("custom_rtc_enabled", false).toBool(); if (custom_rtc_enabled) { Settings::values.custom_rtc = - std::chrono::seconds(qt_config->value("custom_rtc", 0).toULongLong()); + std::chrono::seconds(ReadSetting("custom_rtc", 0).toULongLong()); } else { Settings::values.custom_rtc = std::nullopt; } @@ -439,35 +438,35 @@ void Config::ReadValues() { qt_config->endGroup(); qt_config->beginGroup("Miscellaneous"); - Settings::values.log_filter = qt_config->value("log_filter", "*:Info").toString().toStdString(); - Settings::values.use_dev_keys = qt_config->value("use_dev_keys", false).toBool(); + Settings::values.log_filter = ReadSetting("log_filter", "*:Info").toString().toStdString(); + Settings::values.use_dev_keys = ReadSetting("use_dev_keys", false).toBool(); qt_config->endGroup(); qt_config->beginGroup("Debugging"); - Settings::values.use_gdbstub = qt_config->value("use_gdbstub", false).toBool(); - Settings::values.gdbstub_port = qt_config->value("gdbstub_port", 24689).toInt(); - Settings::values.program_args = qt_config->value("program_args", "").toString().toStdString(); - Settings::values.dump_exefs = qt_config->value("dump_exefs", false).toBool(); - Settings::values.dump_nso = qt_config->value("dump_nso", false).toBool(); + Settings::values.use_gdbstub = ReadSetting("use_gdbstub", false).toBool(); + Settings::values.gdbstub_port = ReadSetting("gdbstub_port", 24689).toInt(); + Settings::values.program_args = ReadSetting("program_args", "").toString().toStdString(); + Settings::values.dump_exefs = ReadSetting("dump_exefs", false).toBool(); + Settings::values.dump_nso = ReadSetting("dump_nso", false).toBool(); qt_config->endGroup(); qt_config->beginGroup("WebService"); - Settings::values.enable_telemetry = qt_config->value("enable_telemetry", true).toBool(); + Settings::values.enable_telemetry = ReadSetting("enable_telemetry", true).toBool(); Settings::values.web_api_url = - qt_config->value("web_api_url", "https://api.yuzu-emu.org").toString().toStdString(); - Settings::values.yuzu_username = qt_config->value("yuzu_username").toString().toStdString(); - Settings::values.yuzu_token = qt_config->value("yuzu_token").toString().toStdString(); + ReadSetting("web_api_url", "https://api.yuzu-emu.org").toString().toStdString(); + 
Settings::values.yuzu_username = ReadSetting("yuzu_username").toString().toStdString(); + Settings::values.yuzu_token = ReadSetting("yuzu_token").toString().toStdString(); qt_config->endGroup(); const auto size = qt_config->beginReadArray("DisabledAddOns"); for (int i = 0; i < size; ++i) { qt_config->setArrayIndex(i); - const auto title_id = qt_config->value("title_id", 0).toULongLong(); + const auto title_id = ReadSetting("title_id", 0).toULongLong(); std::vector<std::string> out; const auto d_size = qt_config->beginReadArray("disabled"); for (int j = 0; j < d_size; ++j) { qt_config->setArrayIndex(j); - out.push_back(qt_config->value("d", "").toString().toStdString()); + out.push_back(ReadSetting("d", "").toString().toStdString()); } qt_config->endArray(); Settings::values.disabled_addons.insert_or_assign(title_id, out); @@ -475,41 +474,38 @@ void Config::ReadValues() { qt_config->endArray(); qt_config->beginGroup("UI"); - UISettings::values.theme = qt_config->value("theme", UISettings::themes[0].second).toString(); + UISettings::values.theme = ReadSetting("theme", UISettings::themes[0].second).toString(); UISettings::values.enable_discord_presence = - qt_config->value("enable_discord_presence", true).toBool(); + ReadSetting("enable_discord_presence", true).toBool(); UISettings::values.screenshot_resolution_factor = - static_cast<u16>(qt_config->value("screenshot_resolution_factor", 0).toUInt()); - UISettings::values.select_user_on_boot = - qt_config->value("select_user_on_boot", false).toBool(); + static_cast<u16>(ReadSetting("screenshot_resolution_factor", 0).toUInt()); + UISettings::values.select_user_on_boot = ReadSetting("select_user_on_boot", false).toBool(); qt_config->beginGroup("UIGameList"); - UISettings::values.show_unknown = qt_config->value("show_unknown", true).toBool(); - UISettings::values.show_add_ons = qt_config->value("show_add_ons", true).toBool(); - UISettings::values.icon_size = qt_config->value("icon_size", 64).toUInt(); - UISettings::values.row_1_text_id = qt_config->value("row_1_text_id", 3).toUInt(); - UISettings::values.row_2_text_id = qt_config->value("row_2_text_id", 2).toUInt(); + UISettings::values.show_unknown = ReadSetting("show_unknown", true).toBool(); + UISettings::values.show_add_ons = ReadSetting("show_add_ons", true).toBool(); + UISettings::values.icon_size = ReadSetting("icon_size", 64).toUInt(); + UISettings::values.row_1_text_id = ReadSetting("row_1_text_id", 3).toUInt(); + UISettings::values.row_2_text_id = ReadSetting("row_2_text_id", 2).toUInt(); qt_config->endGroup(); qt_config->beginGroup("UILayout"); - UISettings::values.geometry = qt_config->value("geometry").toByteArray(); - UISettings::values.state = qt_config->value("state").toByteArray(); - UISettings::values.renderwindow_geometry = - qt_config->value("geometryRenderWindow").toByteArray(); - UISettings::values.gamelist_header_state = - qt_config->value("gameListHeaderState").toByteArray(); + UISettings::values.geometry = ReadSetting("geometry").toByteArray(); + UISettings::values.state = ReadSetting("state").toByteArray(); + UISettings::values.renderwindow_geometry = ReadSetting("geometryRenderWindow").toByteArray(); + UISettings::values.gamelist_header_state = ReadSetting("gameListHeaderState").toByteArray(); UISettings::values.microprofile_geometry = - qt_config->value("microProfileDialogGeometry").toByteArray(); + ReadSetting("microProfileDialogGeometry").toByteArray(); UISettings::values.microprofile_visible = - qt_config->value("microProfileDialogVisible", false).toBool(); + 
ReadSetting("microProfileDialogVisible", false).toBool(); qt_config->endGroup(); qt_config->beginGroup("Paths"); - UISettings::values.roms_path = qt_config->value("romsPath").toString(); - UISettings::values.symbols_path = qt_config->value("symbolsPath").toString(); - UISettings::values.gamedir = qt_config->value("gameListRootDir", ".").toString(); - UISettings::values.gamedir_deepscan = qt_config->value("gameListDeepScan", false).toBool(); - UISettings::values.recent_files = qt_config->value("recentFiles").toStringList(); + UISettings::values.roms_path = ReadSetting("romsPath").toString(); + UISettings::values.symbols_path = ReadSetting("symbolsPath").toString(); + UISettings::values.gamedir = ReadSetting("gameListRootDir", ".").toString(); + UISettings::values.gamedir_deepscan = ReadSetting("gameListDeepScan", false).toBool(); + UISettings::values.recent_files = ReadSetting("recentFiles").toStringList(); qt_config->endGroup(); qt_config->beginGroup("Shortcuts"); @@ -522,8 +518,8 @@ void Config::ReadValues() { qt_config->beginGroup(hotkey); UISettings::values.shortcuts.emplace_back(UISettings::Shortcut( group + "/" + hotkey, - UISettings::ContextualShortcut(qt_config->value("KeySeq").toString(), - qt_config->value("Context").toInt()))); + UISettings::ContextualShortcut(ReadSetting("KeySeq").toString(), + ReadSetting("Context").toInt()))); qt_config->endGroup(); } @@ -531,16 +527,16 @@ void Config::ReadValues() { } qt_config->endGroup(); - UISettings::values.single_window_mode = qt_config->value("singleWindowMode", true).toBool(); - UISettings::values.fullscreen = qt_config->value("fullscreen", false).toBool(); - UISettings::values.display_titlebar = qt_config->value("displayTitleBars", true).toBool(); - UISettings::values.show_filter_bar = qt_config->value("showFilterBar", true).toBool(); - UISettings::values.show_status_bar = qt_config->value("showStatusBar", true).toBool(); - UISettings::values.confirm_before_closing = qt_config->value("confirmClose", true).toBool(); - UISettings::values.first_start = qt_config->value("firstStart", true).toBool(); - UISettings::values.callout_flags = qt_config->value("calloutFlags", 0).toUInt(); - UISettings::values.show_console = qt_config->value("showConsole", false).toBool(); - UISettings::values.profile_index = qt_config->value("profileIndex", 0).toUInt(); + UISettings::values.single_window_mode = ReadSetting("singleWindowMode", true).toBool(); + UISettings::values.fullscreen = ReadSetting("fullscreen", false).toBool(); + UISettings::values.display_titlebar = ReadSetting("displayTitleBars", true).toBool(); + UISettings::values.show_filter_bar = ReadSetting("showFilterBar", true).toBool(); + UISettings::values.show_status_bar = ReadSetting("showStatusBar", true).toBool(); + UISettings::values.confirm_before_closing = ReadSetting("confirmClose", true).toBool(); + UISettings::values.first_start = ReadSetting("firstStart", true).toBool(); + UISettings::values.callout_flags = ReadSetting("calloutFlags", 0).toUInt(); + UISettings::values.show_console = ReadSetting("showConsole", false).toBool(); + UISettings::values.profile_index = ReadSetting("profileIndex", 0).toUInt(); ApplyDefaultProfileIfInputInvalid(); @@ -551,62 +547,79 @@ void Config::SavePlayerValues() { for (std::size_t p = 0; p < Settings::values.players.size(); ++p) { const auto& player = Settings::values.players[p]; - qt_config->setValue(QString("player_%1_connected").arg(p), player.connected); - qt_config->setValue(QString("player_%1_type").arg(p), static_cast<u8>(player.type)); + 
WriteSetting(QString("player_%1_connected").arg(p), player.connected, false); + WriteSetting(QString("player_%1_type").arg(p), static_cast<u8>(player.type), + static_cast<u8>(Settings::ControllerType::DualJoycon)); - qt_config->setValue(QString("player_%1_body_color_left").arg(p), player.body_color_left); - qt_config->setValue(QString("player_%1_body_color_right").arg(p), player.body_color_right); - qt_config->setValue(QString("player_%1_button_color_left").arg(p), - player.button_color_left); - qt_config->setValue(QString("player_%1_button_color_right").arg(p), - player.button_color_right); + WriteSetting(QString("player_%1_body_color_left").arg(p), player.body_color_left, + Settings::JOYCON_BODY_NEON_BLUE); + WriteSetting(QString("player_%1_body_color_right").arg(p), player.body_color_right, + Settings::JOYCON_BODY_NEON_RED); + WriteSetting(QString("player_%1_button_color_left").arg(p), player.button_color_left, + Settings::JOYCON_BUTTONS_NEON_BLUE); + WriteSetting(QString("player_%1_button_color_right").arg(p), player.button_color_right, + Settings::JOYCON_BUTTONS_NEON_RED); for (int i = 0; i < Settings::NativeButton::NumButtons; ++i) { - qt_config->setValue(QString("player_%1_").arg(p) + - QString::fromStdString(Settings::NativeButton::mapping[i]), - QString::fromStdString(player.buttons[i])); + std::string default_param = InputCommon::GenerateKeyboardParam(default_buttons[i]); + WriteSetting(QString("player_%1_").arg(p) + + QString::fromStdString(Settings::NativeButton::mapping[i]), + QString::fromStdString(player.buttons[i]), + QString::fromStdString(default_param)); } for (int i = 0; i < Settings::NativeAnalog::NumAnalogs; ++i) { - qt_config->setValue(QString("player_%1_").arg(p) + - QString::fromStdString(Settings::NativeAnalog::mapping[i]), - QString::fromStdString(player.analogs[i])); + std::string default_param = InputCommon::GenerateAnalogParamFromKeys( + default_analogs[i][0], default_analogs[i][1], default_analogs[i][2], + default_analogs[i][3], default_analogs[i][4], 0.5f); + WriteSetting(QString("player_%1_").arg(p) + + QString::fromStdString(Settings::NativeAnalog::mapping[i]), + QString::fromStdString(player.analogs[i]), + QString::fromStdString(default_param)); } } } void Config::SaveDebugValues() { - qt_config->setValue("debug_pad_enabled", Settings::values.debug_pad_enabled); + WriteSetting("debug_pad_enabled", Settings::values.debug_pad_enabled, false); for (int i = 0; i < Settings::NativeButton::NumButtons; ++i) { - qt_config->setValue(QString("debug_pad_") + - QString::fromStdString(Settings::NativeButton::mapping[i]), - QString::fromStdString(Settings::values.debug_pad_buttons[i])); + std::string default_param = InputCommon::GenerateKeyboardParam(default_buttons[i]); + WriteSetting(QString("debug_pad_") + + QString::fromStdString(Settings::NativeButton::mapping[i]), + QString::fromStdString(Settings::values.debug_pad_buttons[i]), + QString::fromStdString(default_param)); } for (int i = 0; i < Settings::NativeAnalog::NumAnalogs; ++i) { - qt_config->setValue(QString("debug_pad_") + - QString::fromStdString(Settings::NativeAnalog::mapping[i]), - QString::fromStdString(Settings::values.debug_pad_analogs[i])); + std::string default_param = InputCommon::GenerateAnalogParamFromKeys( + default_analogs[i][0], default_analogs[i][1], default_analogs[i][2], + default_analogs[i][3], default_analogs[i][4], 0.5f); + WriteSetting(QString("debug_pad_") + + QString::fromStdString(Settings::NativeAnalog::mapping[i]), + QString::fromStdString(Settings::values.debug_pad_analogs[i]), + 
QString::fromStdString(default_param)); } } void Config::SaveMouseValues() { - qt_config->setValue("mouse_enabled", Settings::values.mouse_enabled); + WriteSetting("mouse_enabled", Settings::values.mouse_enabled, false); for (int i = 0; i < Settings::NativeMouseButton::NumMouseButtons; ++i) { - qt_config->setValue(QString("mouse_") + - QString::fromStdString(Settings::NativeMouseButton::mapping[i]), - QString::fromStdString(Settings::values.mouse_buttons[i])); + std::string default_param = InputCommon::GenerateKeyboardParam(default_mouse_buttons[i]); + WriteSetting(QString("mouse_") + + QString::fromStdString(Settings::NativeMouseButton::mapping[i]), + QString::fromStdString(Settings::values.mouse_buttons[i]), + QString::fromStdString(default_param)); } } void Config::SaveTouchscreenValues() { - qt_config->setValue("touchscreen_enabled", Settings::values.touchscreen.enabled); - qt_config->setValue("touchscreen_device", - QString::fromStdString(Settings::values.touchscreen.device)); - - qt_config->setValue("touchscreen_finger", Settings::values.touchscreen.finger); - qt_config->setValue("touchscreen_angle", Settings::values.touchscreen.rotation_angle); - qt_config->setValue("touchscreen_diameter_x", Settings::values.touchscreen.diameter_x); - qt_config->setValue("touchscreen_diameter_y", Settings::values.touchscreen.diameter_y); + WriteSetting("touchscreen_enabled", Settings::values.touchscreen.enabled, true); + WriteSetting("touchscreen_device", QString::fromStdString(Settings::values.touchscreen.device), + "engine:emu_window"); + + WriteSetting("touchscreen_finger", Settings::values.touchscreen.finger, 0); + WriteSetting("touchscreen_angle", Settings::values.touchscreen.rotation_angle, 0); + WriteSetting("touchscreen_diameter_x", Settings::values.touchscreen.diameter_x, 15); + WriteSetting("touchscreen_diameter_y", Settings::values.touchscreen.diameter_y, 15); } void Config::SaveValues() { @@ -617,89 +630,96 @@ void Config::SaveValues() { SaveMouseValues(); SaveTouchscreenValues(); - qt_config->setValue("motion_device", QString::fromStdString(Settings::values.motion_device)); - qt_config->setValue("keyboard_enabled", Settings::values.keyboard_enabled); + WriteSetting("motion_device", QString::fromStdString(Settings::values.motion_device), + "engine:motion_emu,update_period:100,sensitivity:0.01"); + WriteSetting("keyboard_enabled", Settings::values.keyboard_enabled, false); qt_config->endGroup(); qt_config->beginGroup("Core"); - qt_config->setValue("use_cpu_jit", Settings::values.use_cpu_jit); - qt_config->setValue("use_multi_core", Settings::values.use_multi_core); + WriteSetting("use_cpu_jit", Settings::values.use_cpu_jit, true); + WriteSetting("use_multi_core", Settings::values.use_multi_core, false); qt_config->endGroup(); qt_config->beginGroup("Renderer"); - qt_config->setValue("resolution_factor", (double)Settings::values.resolution_factor); - qt_config->setValue("use_frame_limit", Settings::values.use_frame_limit); - qt_config->setValue("frame_limit", Settings::values.frame_limit); - qt_config->setValue("use_disk_shader_cache", Settings::values.use_disk_shader_cache); - qt_config->setValue("use_accurate_gpu_emulation", Settings::values.use_accurate_gpu_emulation); + WriteSetting("resolution_factor", (double)Settings::values.resolution_factor, 1.0); + WriteSetting("use_frame_limit", Settings::values.use_frame_limit, true); + WriteSetting("frame_limit", Settings::values.frame_limit, 100); + WriteSetting("use_disk_shader_cache", Settings::values.use_disk_shader_cache, true); + 
WriteSetting("use_accurate_gpu_emulation", Settings::values.use_accurate_gpu_emulation, false); + WriteSetting("use_asynchronous_gpu_emulation", Settings::values.use_asynchronous_gpu_emulation, + false); // Cast to double because Qt's written float values are not human-readable - qt_config->setValue("bg_red", (double)Settings::values.bg_red); - qt_config->setValue("bg_green", (double)Settings::values.bg_green); - qt_config->setValue("bg_blue", (double)Settings::values.bg_blue); + WriteSetting("bg_red", (double)Settings::values.bg_red, 0.0); + WriteSetting("bg_green", (double)Settings::values.bg_green, 0.0); + WriteSetting("bg_blue", (double)Settings::values.bg_blue, 0.0); qt_config->endGroup(); qt_config->beginGroup("Audio"); - qt_config->setValue("output_engine", QString::fromStdString(Settings::values.sink_id)); - qt_config->setValue("enable_audio_stretching", Settings::values.enable_audio_stretching); - qt_config->setValue("output_device", QString::fromStdString(Settings::values.audio_device_id)); - qt_config->setValue("volume", Settings::values.volume); + WriteSetting("output_engine", QString::fromStdString(Settings::values.sink_id), "auto"); + WriteSetting("enable_audio_stretching", Settings::values.enable_audio_stretching, true); + WriteSetting("output_device", QString::fromStdString(Settings::values.audio_device_id), "auto"); + WriteSetting("volume", Settings::values.volume, 1.0f); qt_config->endGroup(); qt_config->beginGroup("Data Storage"); - qt_config->setValue("use_virtual_sd", Settings::values.use_virtual_sd); - qt_config->setValue("nand_directory", - QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::NANDDir))); - qt_config->setValue("sdmc_directory", - QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::SDMCDir))); + WriteSetting("use_virtual_sd", Settings::values.use_virtual_sd, true); + WriteSetting("nand_directory", + QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::NANDDir)), + QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::NANDDir))); + WriteSetting("sdmc_directory", + QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::SDMCDir)), + QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::SDMCDir))); qt_config->endGroup(); qt_config->beginGroup("System"); - qt_config->setValue("use_docked_mode", Settings::values.use_docked_mode); - qt_config->setValue("enable_nfc", Settings::values.enable_nfc); - qt_config->setValue("current_user", Settings::values.current_user); - qt_config->setValue("language_index", Settings::values.language_index); + WriteSetting("use_docked_mode", Settings::values.use_docked_mode, false); + WriteSetting("enable_nfc", Settings::values.enable_nfc, true); + WriteSetting("current_user", Settings::values.current_user, 0); + WriteSetting("language_index", Settings::values.language_index, 1); - qt_config->setValue("rng_seed_enabled", Settings::values.rng_seed.has_value()); - qt_config->setValue("rng_seed", Settings::values.rng_seed.value_or(0)); + WriteSetting("rng_seed_enabled", Settings::values.rng_seed.has_value(), false); + WriteSetting("rng_seed", Settings::values.rng_seed.value_or(0), 0); - qt_config->setValue("custom_rtc_enabled", Settings::values.custom_rtc.has_value()); - qt_config->setValue("custom_rtc", - QVariant::fromValue<long long>( - Settings::values.custom_rtc.value_or(std::chrono::seconds{}).count())); + WriteSetting("custom_rtc_enabled", Settings::values.custom_rtc.has_value(), false); + WriteSetting("custom_rtc", + QVariant::fromValue<long long>( + 
Settings::values.custom_rtc.value_or(std::chrono::seconds{}).count()), + 0); qt_config->endGroup(); qt_config->beginGroup("Miscellaneous"); - qt_config->setValue("log_filter", QString::fromStdString(Settings::values.log_filter)); - qt_config->setValue("use_dev_keys", Settings::values.use_dev_keys); + WriteSetting("log_filter", QString::fromStdString(Settings::values.log_filter), "*:Info"); + WriteSetting("use_dev_keys", Settings::values.use_dev_keys, false); qt_config->endGroup(); qt_config->beginGroup("Debugging"); - qt_config->setValue("use_gdbstub", Settings::values.use_gdbstub); - qt_config->setValue("gdbstub_port", Settings::values.gdbstub_port); - qt_config->setValue("program_args", QString::fromStdString(Settings::values.program_args)); - qt_config->setValue("dump_exefs", Settings::values.dump_exefs); - qt_config->setValue("dump_nso", Settings::values.dump_nso); + WriteSetting("use_gdbstub", Settings::values.use_gdbstub, false); + WriteSetting("gdbstub_port", Settings::values.gdbstub_port, 24689); + WriteSetting("program_args", QString::fromStdString(Settings::values.program_args), ""); + WriteSetting("dump_exefs", Settings::values.dump_exefs, false); + WriteSetting("dump_nso", Settings::values.dump_nso, false); qt_config->endGroup(); qt_config->beginGroup("WebService"); - qt_config->setValue("enable_telemetry", Settings::values.enable_telemetry); - qt_config->setValue("web_api_url", QString::fromStdString(Settings::values.web_api_url)); - qt_config->setValue("yuzu_username", QString::fromStdString(Settings::values.yuzu_username)); - qt_config->setValue("yuzu_token", QString::fromStdString(Settings::values.yuzu_token)); + WriteSetting("enable_telemetry", Settings::values.enable_telemetry, true); + WriteSetting("web_api_url", QString::fromStdString(Settings::values.web_api_url), + "https://api.yuzu-emu.org"); + WriteSetting("yuzu_username", QString::fromStdString(Settings::values.yuzu_username)); + WriteSetting("yuzu_token", QString::fromStdString(Settings::values.yuzu_token)); qt_config->endGroup(); qt_config->beginWriteArray("DisabledAddOns"); int i = 0; for (const auto& elem : Settings::values.disabled_addons) { qt_config->setArrayIndex(i); - qt_config->setValue("title_id", QVariant::fromValue<u64>(elem.first)); + WriteSetting("title_id", QVariant::fromValue<u64>(elem.first), 0); qt_config->beginWriteArray("disabled"); for (std::size_t j = 0; j < elem.second.size(); ++j) { qt_config->setArrayIndex(static_cast<int>(j)); - qt_config->setValue("d", QString::fromStdString(elem.second[j])); + WriteSetting("d", QString::fromStdString(elem.second[j]), ""); } qt_config->endArray(); ++i; @@ -707,60 +727,86 @@ void Config::SaveValues() { qt_config->endArray(); qt_config->beginGroup("UI"); - qt_config->setValue("theme", UISettings::values.theme); - qt_config->setValue("enable_discord_presence", UISettings::values.enable_discord_presence); - qt_config->setValue("screenshot_resolution_factor", - UISettings::values.screenshot_resolution_factor); - qt_config->setValue("select_user_on_boot", UISettings::values.select_user_on_boot); + WriteSetting("theme", UISettings::values.theme, UISettings::themes[0].second); + WriteSetting("enable_discord_presence", UISettings::values.enable_discord_presence, true); + WriteSetting("screenshot_resolution_factor", UISettings::values.screenshot_resolution_factor, + 0); + WriteSetting("select_user_on_boot", UISettings::values.select_user_on_boot, false); qt_config->beginGroup("UIGameList"); - qt_config->setValue("show_unknown", UISettings::values.show_unknown); - 
qt_config->setValue("show_add_ons", UISettings::values.show_add_ons); - qt_config->setValue("icon_size", UISettings::values.icon_size); - qt_config->setValue("row_1_text_id", UISettings::values.row_1_text_id); - qt_config->setValue("row_2_text_id", UISettings::values.row_2_text_id); + WriteSetting("show_unknown", UISettings::values.show_unknown, true); + WriteSetting("show_add_ons", UISettings::values.show_add_ons, true); + WriteSetting("icon_size", UISettings::values.icon_size, 64); + WriteSetting("row_1_text_id", UISettings::values.row_1_text_id, 3); + WriteSetting("row_2_text_id", UISettings::values.row_2_text_id, 2); qt_config->endGroup(); qt_config->beginGroup("UILayout"); - qt_config->setValue("geometry", UISettings::values.geometry); - qt_config->setValue("state", UISettings::values.state); - qt_config->setValue("geometryRenderWindow", UISettings::values.renderwindow_geometry); - qt_config->setValue("gameListHeaderState", UISettings::values.gamelist_header_state); - qt_config->setValue("microProfileDialogGeometry", UISettings::values.microprofile_geometry); - qt_config->setValue("microProfileDialogVisible", UISettings::values.microprofile_visible); + WriteSetting("geometry", UISettings::values.geometry); + WriteSetting("state", UISettings::values.state); + WriteSetting("geometryRenderWindow", UISettings::values.renderwindow_geometry); + WriteSetting("gameListHeaderState", UISettings::values.gamelist_header_state); + WriteSetting("microProfileDialogGeometry", UISettings::values.microprofile_geometry); + WriteSetting("microProfileDialogVisible", UISettings::values.microprofile_visible, false); qt_config->endGroup(); qt_config->beginGroup("Paths"); - qt_config->setValue("romsPath", UISettings::values.roms_path); - qt_config->setValue("symbolsPath", UISettings::values.symbols_path); - qt_config->setValue("screenshotPath", UISettings::values.screenshot_path); - qt_config->setValue("gameListRootDir", UISettings::values.gamedir); - qt_config->setValue("gameListDeepScan", UISettings::values.gamedir_deepscan); - qt_config->setValue("recentFiles", UISettings::values.recent_files); + WriteSetting("romsPath", UISettings::values.roms_path); + WriteSetting("symbolsPath", UISettings::values.symbols_path); + WriteSetting("screenshotPath", UISettings::values.screenshot_path); + WriteSetting("gameListRootDir", UISettings::values.gamedir, "."); + WriteSetting("gameListDeepScan", UISettings::values.gamedir_deepscan, false); + WriteSetting("recentFiles", UISettings::values.recent_files); qt_config->endGroup(); qt_config->beginGroup("Shortcuts"); for (auto shortcut : UISettings::values.shortcuts) { - qt_config->setValue(shortcut.first + "/KeySeq", shortcut.second.first); - qt_config->setValue(shortcut.first + "/Context", shortcut.second.second); + WriteSetting(shortcut.first + "/KeySeq", shortcut.second.first); + WriteSetting(shortcut.first + "/Context", shortcut.second.second); } qt_config->endGroup(); - qt_config->setValue("singleWindowMode", UISettings::values.single_window_mode); - qt_config->setValue("fullscreen", UISettings::values.fullscreen); - qt_config->setValue("displayTitleBars", UISettings::values.display_titlebar); - qt_config->setValue("showFilterBar", UISettings::values.show_filter_bar); - qt_config->setValue("showStatusBar", UISettings::values.show_status_bar); - qt_config->setValue("confirmClose", UISettings::values.confirm_before_closing); - qt_config->setValue("firstStart", UISettings::values.first_start); - qt_config->setValue("calloutFlags", UISettings::values.callout_flags); - 
qt_config->setValue("showConsole", UISettings::values.show_console); - qt_config->setValue("profileIndex", UISettings::values.profile_index); + WriteSetting("singleWindowMode", UISettings::values.single_window_mode, true); + WriteSetting("fullscreen", UISettings::values.fullscreen, false); + WriteSetting("displayTitleBars", UISettings::values.display_titlebar, true); + WriteSetting("showFilterBar", UISettings::values.show_filter_bar, true); + WriteSetting("showStatusBar", UISettings::values.show_status_bar, true); + WriteSetting("confirmClose", UISettings::values.confirm_before_closing, true); + WriteSetting("firstStart", UISettings::values.first_start, true); + WriteSetting("calloutFlags", UISettings::values.callout_flags, 0); + WriteSetting("showConsole", UISettings::values.show_console, false); + WriteSetting("profileIndex", UISettings::values.profile_index, 0); qt_config->endGroup(); } +QVariant Config::ReadSetting(const QString& name) const { + return qt_config->value(name); +} + +QVariant Config::ReadSetting(const QString& name, const QVariant& default_value) const { + QVariant result; + if (qt_config->value(name + "/default", false).toBool()) { + result = default_value; + } else { + result = qt_config->value(name, default_value); + } + return result; +} + +void Config::WriteSetting(const QString& name, const QVariant& value) { + qt_config->setValue(name, value); +} + +void Config::WriteSetting(const QString& name, const QVariant& value, + const QVariant& default_value) { + qt_config->setValue(name + "/default", value == default_value); + qt_config->setValue(name, value); +} + void Config::Reload() { ReadValues(); + // To apply default value changes + SaveValues(); Settings::Apply(); } diff --git a/src/yuzu/configuration/config.h b/src/yuzu/configuration/config.h index e73ad19bb..f4185db18 100644 --- a/src/yuzu/configuration/config.h +++ b/src/yuzu/configuration/config.h @@ -42,6 +42,11 @@ private: void SaveMouseValues(); void SaveTouchscreenValues(); + QVariant ReadSetting(const QString& name) const; + QVariant ReadSetting(const QString& name, const QVariant& default_value) const; + void WriteSetting(const QString& name, const QVariant& value); + void WriteSetting(const QString& name, const QVariant& value, const QVariant& default_value); + std::unique_ptr<QSettings> qt_config; std::string qt_config_loc; }; diff --git a/src/yuzu/configuration/configure_graphics.cpp b/src/yuzu/configuration/configure_graphics.cpp index 0f5dd534b..dd1d67488 100644 --- a/src/yuzu/configuration/configure_graphics.cpp +++ b/src/yuzu/configuration/configure_graphics.cpp @@ -75,6 +75,8 @@ void ConfigureGraphics::setConfiguration() { ui->frame_limit->setValue(Settings::values.frame_limit); ui->use_disk_shader_cache->setChecked(Settings::values.use_disk_shader_cache); ui->use_accurate_gpu_emulation->setChecked(Settings::values.use_accurate_gpu_emulation); + ui->use_asynchronous_gpu_emulation->setEnabled(!Core::System::GetInstance().IsPoweredOn()); + ui->use_asynchronous_gpu_emulation->setChecked(Settings::values.use_asynchronous_gpu_emulation); UpdateBackgroundColorButton(QColor::fromRgbF(Settings::values.bg_red, Settings::values.bg_green, Settings::values.bg_blue)); } @@ -86,6 +88,8 @@ void ConfigureGraphics::applyConfiguration() { Settings::values.frame_limit = ui->frame_limit->value(); Settings::values.use_disk_shader_cache = ui->use_disk_shader_cache->isChecked(); Settings::values.use_accurate_gpu_emulation = ui->use_accurate_gpu_emulation->isChecked(); + Settings::values.use_asynchronous_gpu_emulation = 
+ ui->use_asynchronous_gpu_emulation->isChecked(); Settings::values.bg_red = static_cast<float>(bg_color.redF()); Settings::values.bg_green = static_cast<float>(bg_color.greenF()); Settings::values.bg_blue = static_cast<float>(bg_color.blueF()); diff --git a/src/yuzu/configuration/configure_graphics.ui b/src/yuzu/configuration/configure_graphics.ui index 824f5810a..c6767e0ca 100644 --- a/src/yuzu/configuration/configure_graphics.ui +++ b/src/yuzu/configuration/configure_graphics.ui @@ -64,6 +64,13 @@ </widget> </item> <item> + <widget class="QCheckBox" name="use_asynchronous_gpu_emulation"> + <property name="text"> + <string>Use asynchronous GPU emulation</string> + </property> + </widget> + </item> + <item> <layout class="QHBoxLayout" name="horizontalLayout"> <item> <widget class="QLabel" name="label"> diff --git a/src/yuzu/debugger/graphics/graphics_surface.cpp b/src/yuzu/debugger/graphics/graphics_surface.cpp index 209798521..71683da8e 100644 --- a/src/yuzu/debugger/graphics/graphics_surface.cpp +++ b/src/yuzu/debugger/graphics/graphics_surface.cpp @@ -398,7 +398,7 @@ void GraphicsSurfaceWidget::OnUpdate() { for (unsigned int y = 0; y < surface_height; ++y) { for (unsigned int x = 0; x < surface_width; ++x) { - Math::Vec4<u8> color; + Common::Vec4<u8> color; color[0] = texture_data[x + y * surface_width + 0]; color[1] = texture_data[x + y * surface_width + 1]; color[2] = texture_data[x + y * surface_width + 2]; diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp index f50225d5f..06ad74ffe 100644 --- a/src/yuzu/debugger/wait_tree.cpp +++ b/src/yuzu/debugger/wait_tree.cpp @@ -81,9 +81,8 @@ QString WaitTreeText::GetText() const { return text; } -WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address) : mutex_address(mutex_address) { - const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); - +WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address, const Kernel::HandleTable& handle_table) + : mutex_address(mutex_address) { mutex_value = Memory::Read32(mutex_address); owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Mutex::MutexOwnerMask); owner = handle_table.Get<Kernel::Thread>(owner_handle); @@ -316,7 +315,8 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const { const VAddr mutex_wait_address = thread.GetMutexWaitAddress(); if (mutex_wait_address != 0) { - list.push_back(std::make_unique<WaitTreeMutexInfo>(mutex_wait_address)); + const auto& handle_table = thread.GetOwnerProcess()->GetHandleTable(); + list.push_back(std::make_unique<WaitTreeMutexInfo>(mutex_wait_address, handle_table)); } else { list.push_back(std::make_unique<WaitTreeText>(tr("not waiting for mutex"))); } diff --git a/src/yuzu/debugger/wait_tree.h b/src/yuzu/debugger/wait_tree.h index 365c3dbfe..62886609d 100644 --- a/src/yuzu/debugger/wait_tree.h +++ b/src/yuzu/debugger/wait_tree.h @@ -17,6 +17,7 @@ class EmuThread; namespace Kernel { +class HandleTable; class ReadableEvent; class WaitObject; class Thread; @@ -72,7 +73,7 @@ public: class WaitTreeMutexInfo : public WaitTreeExpandableItem { Q_OBJECT public: - explicit WaitTreeMutexInfo(VAddr mutex_address); + explicit WaitTreeMutexInfo(VAddr mutex_address, const Kernel::HandleTable& handle_table); ~WaitTreeMutexInfo() override; QString GetText() const override; diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp index 1d460c189..41ba3c4c6 100644 --- a/src/yuzu/main.cpp +++ b/src/yuzu/main.cpp @@ -11,6 +11,7 @@ #include "applets/profile_select.h" #include "applets/software_keyboard.h" 
#include "applets/web_browser.h" +#include "configuration/configure_input.h" #include "configuration/configure_per_general.h" #include "core/file_sys/vfs.h" #include "core/file_sys/vfs_real.h" @@ -339,6 +340,11 @@ void GMainWindow::WebBrowserOpenPage(std::string_view filename, std::string_view .arg(QString::fromStdString(std::to_string(key_code)))); }; + QMessageBox::information( + this, tr("Exit"), + tr("To exit the web application, use the game provided controls to select exit, select the " + "'Exit Web Applet' option in the menu bar, or press the 'Enter' key.")); + bool running_exit_check = false; while (!finished) { QApplication::processEvents(); @@ -522,6 +528,7 @@ void GMainWindow::InitializeHotkeys() { Qt::ApplicationShortcut); hotkey_registry.RegisterHotkey("Main Window", "Capture Screenshot", QKeySequence(QKeySequence::Print)); + hotkey_registry.RegisterHotkey("Main Window", "Change Docked Mode", QKeySequence(Qt::Key_F10)); hotkey_registry.LoadHotkeys(); @@ -561,7 +568,10 @@ void GMainWindow::InitializeHotkeys() { Settings::values.use_frame_limit = !Settings::values.use_frame_limit; UpdateStatusBar(); }); - constexpr u16 SPEED_LIMIT_STEP = 5; + // TODO: Remove this comment/static whenever the next major release of + // MSVC occurs and we make it a requirement (see: + // https://developercommunity.visualstudio.com/content/problem/93922/constexprs-are-trying-to-be-captured-in-lambda-fun.html) + static constexpr u16 SPEED_LIMIT_STEP = 5; connect(hotkey_registry.GetHotkey("Main Window", "Increase Speed Limit", this), &QShortcut::activated, this, [&] { if (Settings::values.frame_limit < 9999 - SPEED_LIMIT_STEP) { @@ -588,6 +598,12 @@ void GMainWindow::InitializeHotkeys() { OnCaptureScreenshot(); } }); + connect(hotkey_registry.GetHotkey("Main Window", "Change Docked Mode", this), + &QShortcut::activated, this, [&] { + Settings::values.use_docked_mode = !Settings::values.use_docked_mode; + OnDockedModeChanged(!Settings::values.use_docked_mode, + Settings::values.use_docked_mode); + }); } void GMainWindow::SetDefaultUIGeometry() { @@ -846,7 +862,7 @@ bool GMainWindow::LoadROM(const QString& filename) { } game_path = filename; - Core::Telemetry().AddField(Telemetry::FieldType::App, "Frontend", "Qt"); + system.TelemetrySession().AddField(Telemetry::FieldType::App, "Frontend", "Qt"); return true; } diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp index ff05b3179..32e78049c 100644 --- a/src/yuzu_cmd/config.cpp +++ b/src/yuzu_cmd/config.cpp @@ -346,7 +346,7 @@ void Config::ReadValues() { // Renderer Settings::values.resolution_factor = - (float)sdl2_config->GetReal("Renderer", "resolution_factor", 1.0); + static_cast<float>(sdl2_config->GetReal("Renderer", "resolution_factor", 1.0)); Settings::values.use_frame_limit = sdl2_config->GetBoolean("Renderer", "use_frame_limit", true); Settings::values.frame_limit = static_cast<u16>(sdl2_config->GetInteger("Renderer", "frame_limit", 100)); @@ -354,17 +354,20 @@ void Config::ReadValues() { sdl2_config->GetBoolean("Renderer", "use_disk_shader_cache", false); Settings::values.use_accurate_gpu_emulation = sdl2_config->GetBoolean("Renderer", "use_accurate_gpu_emulation", false); + Settings::values.use_asynchronous_gpu_emulation = + sdl2_config->GetBoolean("Renderer", "use_asynchronous_gpu_emulation", false); - Settings::values.bg_red = (float)sdl2_config->GetReal("Renderer", "bg_red", 0.0); - Settings::values.bg_green = (float)sdl2_config->GetReal("Renderer", "bg_green", 0.0); - Settings::values.bg_blue = 
(float)sdl2_config->GetReal("Renderer", "bg_blue", 0.0); + Settings::values.bg_red = static_cast<float>(sdl2_config->GetReal("Renderer", "bg_red", 0.0)); + Settings::values.bg_green = + static_cast<float>(sdl2_config->GetReal("Renderer", "bg_green", 0.0)); + Settings::values.bg_blue = static_cast<float>(sdl2_config->GetReal("Renderer", "bg_blue", 0.0)); // Audio Settings::values.sink_id = sdl2_config->Get("Audio", "output_engine", "auto"); Settings::values.enable_audio_stretching = sdl2_config->GetBoolean("Audio", "enable_audio_stretching", true); Settings::values.audio_device_id = sdl2_config->Get("Audio", "output_device", "auto"); - Settings::values.volume = sdl2_config->GetReal("Audio", "volume", 1); + Settings::values.volume = static_cast<float>(sdl2_config->GetReal("Audio", "volume", 1)); Settings::values.language_index = sdl2_config->GetInteger("System", "language_index", 1); diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h index a81986f8e..6538af098 100644 --- a/src/yuzu_cmd/default_ini.h +++ b/src/yuzu_cmd/default_ini.h @@ -118,6 +118,10 @@ use_disk_shader_cache = # 0 (default): Off (fast), 1 : On (slow) use_accurate_gpu_emulation = +# Whether to use asynchronous GPU emulation +# 0 : Off (slow), 1 (default): On (fast) +use_asynchronous_gpu_emulation = + # The clear color for the renderer. What shows up on the sides of the bottom screen. # Must be in range of 0.0-1.0. Defaults to 1.0 for all. bg_red = diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp b/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp index 7df8eff53..de7a26e14 100644 --- a/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp +++ b/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp @@ -135,16 +135,16 @@ bool EmuWindow_SDL2::SupportsRequiredGLExtensions() { } EmuWindow_SDL2::EmuWindow_SDL2(bool fullscreen) { - InputCommon::Init(); - - SDL_SetMainReady(); - // Initialize the window if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_JOYSTICK) < 0) { LOG_CRITICAL(Frontend, "Failed to initialize SDL2! Exiting..."); exit(1); } + InputCommon::Init(); + + SDL_SetMainReady(); + SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4); SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3); SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE); @@ -201,11 +201,9 @@ EmuWindow_SDL2::EmuWindow_SDL2(bool fullscreen) { } EmuWindow_SDL2::~EmuWindow_SDL2() { - InputCommon::SDL::CloseSDLJoysticks(); + InputCommon::Shutdown(); SDL_GL_DeleteContext(gl_context); SDL_Quit(); - - InputCommon::Shutdown(); } void EmuWindow_SDL2::SwapBuffers() { @@ -262,7 +260,6 @@ void EmuWindow_SDL2::PollEvents() { is_open = false; break; default: - InputCommon::SDL::HandleGameControllerEvent(event); break; } } diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp index c34b5467f..c6c66a787 100644 --- a/src/yuzu_cmd/yuzu.cpp +++ b/src/yuzu_cmd/yuzu.cpp @@ -216,7 +216,7 @@ int main(int argc, char** argv) { } } - Core::Telemetry().AddField(Telemetry::FieldType::App, "Frontend", "SDL"); + system.TelemetrySession().AddField(Telemetry::FieldType::App, "Frontend", "SDL"); system.Renderer().Rasterizer().LoadDiskResources(); |
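
The config.cpp changes above route every Qt option through ReadSetting/WriteSetting wrappers that keep a companion "<name>/default" flag beside each stored value: a value last written while it still equaled its default is re-resolved against the current compiled-in default on the next read, which is how the flipped use_disk_shader_cache default can reach existing configs. The standalone sketch below reproduces that pattern with a plain QSettings object; the SettingStore class, the INI file name, and the version-to-version default flip are illustrative assumptions, not code from the patch.

// Minimal sketch of the "<name>/default" tracking added to src/yuzu/configuration/config.cpp.
#include <QSettings>
#include <QVariant>

class SettingStore {
public:
    explicit SettingStore(const QString& path) : cfg(path, QSettings::IniFormat) {}

    // Mirrors Config::ReadSetting(name, default_value): if the stored value was
    // last written while it still matched the default, prefer the (possibly
    // newer) compiled-in default over the on-disk copy.
    QVariant Read(const QString& name, const QVariant& default_value) {
        if (cfg.value(name + "/default", false).toBool()) {
            return default_value;
        }
        return cfg.value(name, default_value);
    }

    // Mirrors Config::WriteSetting(name, value, default_value): record whether
    // the user actually deviated from the default alongside the value itself.
    void Write(const QString& name, const QVariant& value, const QVariant& default_value) {
        cfg.setValue(name + "/default", value == default_value);
        cfg.setValue(name, value);
    }

private:
    QSettings cfg;
};

int main() {
    SettingStore store("qt-config-sketch.ini"); // hypothetical file name
    // Version 1 ships use_disk_shader_cache = false and the user never changes it,
    // so the value is saved with its "/default" flag set to true.
    store.Write("use_disk_shader_cache", false, false);
    // Version 2 flips the shipped default to true; because the flag is set, the
    // read resolves to the new default instead of the stale false on disk.
    const bool cache = store.Read("use_disk_shader_cache", true).toBool();
    return cache ? 0 : 1;
}

This is also why Config::Reload() now calls SaveValues() right after ReadValues() ("To apply default value changes"): re-saving refreshes the stored values and their "/default" flags against the current defaults.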
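
Alongside the Qt frontend, the yuzu_cmd configuration gains the same use_asynchronous_gpu_emulation switch, read from the [Renderer] section of the SDL INI file and documented in default_ini.h. The fragment below shows that GetBoolean lookup in isolation, assuming sdl2_config is the inih INIReader that yuzu_cmd wraps; the file name and the RendererSettings struct are placeholders for this sketch.

// Reads the new flag the way src/yuzu_cmd/config.cpp does, but standalone.
#include <INIReader.h>
#include <cstdio>

struct RendererSettings {
    bool use_asynchronous_gpu_emulation = false;
};

int main() {
    INIReader config("sdl2-config.ini"); // hypothetical path
    if (config.ParseError() < 0) {
        std::puts("could not open sdl2-config.ini, keeping defaults");
        return 1;
    }

    RendererSettings settings;
    // Same fallback as the patch: the feature stays off unless the INI enables it.
    settings.use_asynchronous_gpu_emulation =
        config.GetBoolean("Renderer", "use_asynchronous_gpu_emulation", false);

    std::printf("asynchronous GPU emulation: %s\n",
                settings.use_asynchronous_gpu_emulation ? "on" : "off");
    return 0;
}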
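
main.cpp also registers a new "Change Docked Mode" hotkey on F10 that flips Settings::values.use_docked_mode and notifies the frontend through OnDockedModeChanged(). The sketch below wires the same idea with a bare QShortcut on a throwaway window rather than yuzu's HotkeyRegistry; the window, the local flag, and the log line are illustrative only.

// Toy version of the F10 docked-mode toggle added to GMainWindow::InitializeHotkeys().
#include <QApplication>
#include <QKeySequence>
#include <QShortcut>
#include <QWidget>

int main(int argc, char** argv) {
    QApplication app(argc, argv);
    QWidget window;
    bool use_docked_mode = false; // stand-in for Settings::values.use_docked_mode

    // Pressing F10 flips the flag; the real handler then calls
    // OnDockedModeChanged(previous, current) so input devices are remapped.
    auto* toggle = new QShortcut(QKeySequence(Qt::Key_F10), &window);
    QObject::connect(toggle, &QShortcut::activated, [&use_docked_mode] {
        use_docked_mode = !use_docked_mode;
        qInfo("docked mode is now %s", use_docked_mode ? "on" : "off");
    });

    window.show();
    return app.exec();
}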