summaryrefslogtreecommitdiff
path: root/src/core
diff options
context:
space:
mode:
author: danzel 2017-08-29 20:39:55 +1200
committer: danzel 2017-08-29 20:39:55 +1200
commit: 826606479682234c98e4dfa6e616e637a28d4fcc (patch)
tree: be6cbcc451182e0ec9801470b9c454e94ba124c5 /src/core
parent: Merge pull request #2839 from Subv/global_kernel_lock (diff)
downloadyuzu-826606479682234c98e4dfa6e616e637a28d4fcc.tar.gz
yuzu-826606479682234c98e4dfa6e616e637a28d4fcc.tar.xz
yuzu-826606479682234c98e4dfa6e616e637a28d4fcc.zip
Use recursive_mutex instead of mutex to fix #2902
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/hle/lock.cpp  | 2
-rw-r--r--  src/core/hle/lock.h    | 2
-rw-r--r--  src/core/hle/svc.cpp   | 2
-rw-r--r--  src/core/memory.cpp    | 4
4 files changed, 5 insertions, 5 deletions
diff --git a/src/core/hle/lock.cpp b/src/core/hle/lock.cpp
index 082f689c8..1c24c7ce9 100644
--- a/src/core/hle/lock.cpp
+++ b/src/core/hle/lock.cpp
@@ -7,5 +7,5 @@
7#include <core/hle/lock.h> 7#include <core/hle/lock.h>
8 8
9namespace HLE { 9namespace HLE {
10std::mutex g_hle_lock; 10std::recursive_mutex g_hle_lock;
11} 11}
diff --git a/src/core/hle/lock.h b/src/core/hle/lock.h
index 8265621e1..5c99fe996 100644
--- a/src/core/hle/lock.h
+++ b/src/core/hle/lock.h
@@ -14,5 +14,5 @@ namespace HLE {
14 * to the emulated memory is not protected by this mutex, and should be avoided in any threads other 14 * to the emulated memory is not protected by this mutex, and should be avoided in any threads other
15 * than the CPU thread. 15 * than the CPU thread.
16 */ 16 */
17extern std::mutex g_hle_lock; 17extern std::recursive_mutex g_hle_lock;
18} // namespace HLE 18} // namespace HLE
diff --git a/src/core/hle/svc.cpp b/src/core/hle/svc.cpp
index b98938cb4..dfc36748c 100644
--- a/src/core/hle/svc.cpp
+++ b/src/core/hle/svc.cpp
@@ -1334,7 +1334,7 @@ void CallSVC(u32 immediate) {
1334 MICROPROFILE_SCOPE(Kernel_SVC); 1334 MICROPROFILE_SCOPE(Kernel_SVC);
1335 1335
1336 // Lock the global kernel mutex when we enter the kernel HLE. 1336 // Lock the global kernel mutex when we enter the kernel HLE.
1337 std::lock_guard<std::mutex> lock(HLE::g_hle_lock); 1337 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
1338 1338
1339 const FunctionDef* info = GetSVCInfo(immediate); 1339 const FunctionDef* info = GetSVCInfo(immediate);
1340 if (info) { 1340 if (info) {
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index a3c5f4a9d..097bc5b47 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -183,7 +183,7 @@ T Read(const VAddr vaddr) {
183 } 183 }
184 184
185 // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state 185 // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
186 std::lock_guard<std::mutex> lock(HLE::g_hle_lock); 186 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
187 187
188 PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; 188 PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
189 switch (type) { 189 switch (type) {
@@ -224,7 +224,7 @@ void Write(const VAddr vaddr, const T data) {
224 } 224 }
225 225
226 // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state 226 // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
227 std::lock_guard<std::mutex> lock(HLE::g_hle_lock); 227 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
228 228
229 PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; 229 PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
230 switch (type) { 230 switch (type) {