author    Weiyi Wang <wwylele@gmail.com>    2019-02-08 10:43:06 -0500
committer fearlessTobi <thm.frey@gmail.com>    2019-03-02 15:20:05 +0100
commit    5159f4eee882027928098962cdcd71a0b56bfd38 (patch)
tree      3f4898bdaeb0a06ee5c6f93f387423b5e933d572 /src/core/memory.cpp
parent    3c39b39bbc4363aeaf0a163d283b40c3d4453d49 (diff)
Memory: don't lock hle mutex in memory read/write
The comment already invalidates itself: neither MMIO nor the rasterizer cache belongs to HLE kernel state. This mutex has too large a scope if MMIO or the cache is included, which is prone to deadlock when multiple threads acquire these resources at the same time. If necessary, each MMIO component or the rasterizer should have its own lock.
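As a minimal sketch of the per-component locking the message suggests, the code below shows a hypothetical MMIO device that guards only its own state instead of taking the global HLE::g_hle_lock on every memory access; the names ExampleMMIODevice, Read32, Write32, state_mutex, and regs are illustrative assumptions, not Citra's actual interface.

#include <cstddef>
#include <cstdint>
#include <mutex>

// Sketch: a hypothetical MMIO component that protects only its own state,
// rather than relying on a process-wide recursive HLE lock.
class ExampleMMIODevice {
public:
    std::uint32_t Read32(std::uint32_t offset) {
        // The critical section covers only this device's registers, so threads
        // touching unrelated components do not contend here and cannot form
        // the cross-lock deadlock described in the commit message.
        std::lock_guard<std::mutex> lock(state_mutex);
        return regs[(offset / sizeof(std::uint32_t)) % NumRegs];
    }

    void Write32(std::uint32_t offset, std::uint32_t value) {
        std::lock_guard<std::mutex> lock(state_mutex);
        regs[(offset / sizeof(std::uint32_t)) % NumRegs] = value;
    }

private:
    static constexpr std::size_t NumRegs = 0x100;
    std::mutex state_mutex;            // per-component lock, not a global one
    std::uint32_t regs[NumRegs] = {};  // illustrative backing storage
};

The same idea would apply to the rasterizer cache: whatever synchronization it needs would live inside the cache itself rather than in the generic memory Read/Write path.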
Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r--  src/core/memory.cpp | 6
1 file changed, 0 insertions, 6 deletions
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index f809567b6..ec279cef8 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -171,9 +171,6 @@ T Read(const VAddr vaddr) {
         return value;
     }
 
-    // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
-
     PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
@@ -204,9 +201,6 @@ void Write(const VAddr vaddr, const T data) {
         return;
     }
 
-    // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
-
     PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped: