summaryrefslogtreecommitdiff
path: root/src/common/ring_buffer.h
diff options
context:
space:
mode:
authorZephyron <zephyron@citron-emu.org>2025-01-06 12:41:42 +1000
committerZephyron <zephyron@citron-emu.org>2025-01-06 12:41:42 +1000
commit08f1ed40116d859ff680dcd733dac5e3d1abe386 (patch)
tree846c189310493d301de52a961809631fd0a4b1c9 /src/common/ring_buffer.h
parent21f94d5825d350bdee54f72eede880186e689c76 (diff)
common: Use consistent cache line size in RingBuffer
Replace hardcoded 128-byte alignment with a defined CACHE_LINE_SIZE constant of 64 bytes for the atomic indices in RingBuffer. This value is more appropriate for most modern CPU architectures and simplifies the implementation by using a consistent value regardless of compiler support for hardware_interference_size. Changes: - Add CACHE_LINE_SIZE constant set to 64 bytes - Use CACHE_LINE_SIZE for atomic index alignment in both code paths - Remove outdated TODO comment about hardware_destructive_interference_size
Diffstat (limited to 'src/common/ring_buffer.h')
-rw-r--r--src/common/ring_buffer.h10
1 files changed, 6 insertions, 4 deletions
diff --git a/src/common/ring_buffer.h b/src/common/ring_buffer.h
index 218896660..8d193ce03 100644
--- a/src/common/ring_buffer.h
+++ b/src/common/ring_buffer.h
@@ -31,6 +31,8 @@ class RingBuffer {
// Ensure lock-free.
static_assert(std::atomic_size_t::is_always_lock_free);
+ static constexpr size_t CACHE_LINE_SIZE = 64;
+
public:
/// Pushes slots into the ring buffer
/// @param new_slots Pointer to the slots to push
@@ -105,11 +107,11 @@ private:
// TODO: Remove this ifdef whenever clang and GCC support
// std::hardware_destructive_interference_size.
#ifdef __cpp_lib_hardware_interference_size
- alignas(std::hardware_destructive_interference_size) std::atomic_size_t m_read_index{0};
- alignas(std::hardware_destructive_interference_size) std::atomic_size_t m_write_index{0};
+ alignas(CACHE_LINE_SIZE) std::atomic_size_t m_read_index{0};
+ alignas(CACHE_LINE_SIZE) std::atomic_size_t m_write_index{0};
#else
- alignas(128) std::atomic_size_t m_read_index{0};
- alignas(128) std::atomic_size_t m_write_index{0};
+ alignas(CACHE_LINE_SIZE) std::atomic_size_t m_read_index{0};
+ alignas(CACHE_LINE_SIZE) std::atomic_size_t m_write_index{0};
#endif
std::array<T, capacity> m_data;