Diffstat (limited to 'src')
 src/CMakeLists.txt | 4
 src/audio_core/audio_renderer.cpp | 15
 src/audio_core/audio_renderer.h | 6
 src/common/CMakeLists.txt | 2
 src/common/alignment.h | 7
 src/common/common_funcs.h | 32
 src/common/page_table.cpp | 34
 src/common/page_table.h | 37
 src/common/scope_exit.h | 9
 src/common/virtual_buffer.cpp | 52
 src/common/virtual_buffer.h | 58
 src/core/CMakeLists.txt | 29
 src/core/arm/arm_interface.cpp | 6
 src/core/arm/arm_interface.h | 32
 src/core/arm/dynarmic/arm_dynarmic_32.cpp | 2
 src/core/arm/dynarmic/arm_dynarmic_32.h | 2
 src/core/arm/dynarmic/arm_dynarmic_64.cpp | 7
 src/core/arm/dynarmic/arm_dynarmic_64.h | 4
 src/core/arm/exclusive_monitor.h | 2
 src/core/arm/unicorn/arm_unicorn.cpp | 2
 src/core/core.cpp | 17
 src/core/core.h | 20
 src/core/core_manager.h | 2
 src/core/device_memory.cpp | 15
 src/core/device_memory.h | 51
 src/core/file_sys/patch_manager.cpp | 11
 src/core/file_sys/patch_manager.h | 4
 src/core/file_sys/registered_cache.cpp | 23
 src/core/file_sys/vfs_libzip.cpp | 4
 src/core/frontend/framebuffer_layout.cpp | 2
 src/core/gdbstub/gdbstub.cpp | 4
 src/core/hle/kernel/client_session.cpp | 3
 src/core/hle/kernel/client_session.h | 4
 src/core/hle/kernel/errors.h | 1
 src/core/hle/kernel/hle_ipc.cpp | 24
 src/core/hle/kernel/kernel.cpp | 124
 src/core/hle/kernel/kernel.h | 44
 src/core/hle/kernel/memory/address_space_info.cpp | 118
 src/core/hle/kernel/memory/address_space_info.h | 54
 src/core/hle/kernel/memory/memory_block.h | 318
 src/core/hle/kernel/memory/memory_block_manager.cpp | 190
 src/core/hle/kernel/memory/memory_block_manager.h | 64
 src/core/hle/kernel/memory/memory_layout.h | 73
 src/core/hle/kernel/memory/memory_manager.cpp | 176
 src/core/hle/kernel/memory/memory_manager.h | 97
 src/core/hle/kernel/memory/memory_types.h | 18
 src/core/hle/kernel/memory/page_heap.cpp | 119
 src/core/hle/kernel/memory/page_heap.h | 370
 src/core/hle/kernel/memory/page_linked_list.h | 93
 src/core/hle/kernel/memory/page_table.cpp | 1130
 src/core/hle/kernel/memory/page_table.h | 276
 src/core/hle/kernel/memory/slab_heap.h | 164
 src/core/hle/kernel/memory/system_control.cpp | 41
 src/core/hle/kernel/memory/system_control.h | 18
 src/core/hle/kernel/physical_memory.h | 2
 src/core/hle/kernel/process.cpp | 200
 src/core/hle/kernel/process.h | 45
 src/core/hle/kernel/process_capability.cpp | 24
 src/core/hle/kernel/process_capability.h | 24
 src/core/hle/kernel/resource_limit.cpp | 50
 src/core/hle/kernel/resource_limit.h | 12
 src/core/hle/kernel/server_session.cpp | 5
 src/core/hle/kernel/server_session.h | 6
 src/core/hle/kernel/shared_memory.cpp | 151
 src/core/hle/kernel/shared_memory.h | 127
 src/core/hle/kernel/svc.cpp | 621
 src/core/hle/kernel/svc.h | 6
 src/core/hle/kernel/svc_types.h | 68
 src/core/hle/kernel/thread.cpp | 2
 src/core/hle/kernel/transfer_memory.cpp | 104
 src/core/hle/kernel/transfer_memory.h | 55
 src/core/hle/kernel/vm_manager.cpp | 1175
 src/core/hle/kernel/vm_manager.h | 796
 src/core/hle/service/audio/audout_u.cpp | 2
 src/core/hle/service/audio/audren_u.cpp | 2
 src/core/hle/service/filesystem/filesystem.cpp | 3
 src/core/hle/service/filesystem/fsp_srv.cpp | 3
 src/core/hle/service/hid/hid.cpp | 5
 src/core/hle/service/hid/irs.cpp | 6
 src/core/hle/service/ldr/ldr.cpp | 362
 src/core/hle/service/lm/lm.cpp | 8
 src/core/hle/service/ns/pl_u.cpp | 12
 src/core/hle/service/time/interface.cpp | 2
 src/core/hle/service/time/time.cpp | 23
 src/core/hle/service/time/time.h | 1
 src/core/hle/service/time/time_sharedmemory.cpp | 5
 src/core/hle/service/time/time_zone_manager.cpp | 14
 src/core/hle/service/vi/vi.cpp | 4
 src/core/loader/deconstructed_rom_directory.cpp | 47
 src/core/loader/elf.cpp | 6
 src/core/loader/kip.cpp | 6
 src/core/loader/nro.cpp | 8
 src/core/loader/nso.cpp | 54
 src/core/loader/nso.h | 3
 src/core/memory.cpp | 201
 src/core/memory.h | 19
 src/core/memory/cheat_engine.cpp | 17
 src/core/memory/cheat_engine.h | 4
 src/core/memory/dmnt_cheat_types.h | 4
 src/core/memory/dmnt_cheat_vm.cpp | 9
 src/core/memory/dmnt_cheat_vm.h | 4
 src/core/reporter.cpp | 10
 src/core/telemetry_session.cpp | 6
 src/core/tools/freezer.cpp | 6
 src/core/tools/freezer.h | 6
 src/input_common/CMakeLists.txt | 2
 src/input_common/sdl/sdl_impl.cpp | 1
 src/tests/core/arm/arm_test_common.cpp | 8
 src/video_core/buffer_cache/buffer_cache.h | 47
 src/video_core/engines/shader_bytecode.h | 10
 src/video_core/gpu_asynch.cpp | 5
 src/video_core/memory_manager.cpp | 99
 src/video_core/memory_manager.h | 2
 src/video_core/rasterizer_accelerated.cpp | 10
 src/video_core/rasterizer_accelerated.h | 6
 src/video_core/renderer_opengl/gl_buffer_cache.cpp | 18
 src/video_core/renderer_opengl/gl_buffer_cache.h | 12
 src/video_core/renderer_opengl/gl_device.cpp | 2
 src/video_core/renderer_opengl/gl_query_cache.cpp | 2
 src/video_core/renderer_opengl/gl_rasterizer.cpp | 39
 src/video_core/renderer_opengl/gl_rasterizer.h | 4
 src/video_core/renderer_opengl/gl_shader_decompiler.cpp | 27
 src/video_core/renderer_opengl/renderer_opengl.cpp | 4
 src/video_core/renderer_opengl/utils.cpp | 62
 src/video_core/renderer_opengl/utils.h | 43
 src/video_core/renderer_vulkan/vk_buffer_cache.cpp | 12
 src/video_core/renderer_vulkan/vk_buffer_cache.h | 10
 src/video_core/renderer_vulkan/vk_compute_pass.cpp | 16
 src/video_core/renderer_vulkan/vk_compute_pass.h | 4
 src/video_core/renderer_vulkan/vk_memory_manager.h | 2
 src/video_core/renderer_vulkan/vk_rasterizer.cpp | 30
 src/video_core/renderer_vulkan/vk_shader_decompiler.cpp | 65
 src/video_core/renderer_vulkan/vk_update_descriptor.cpp | 5
 src/video_core/renderer_vulkan/vk_update_descriptor.h | 16
 src/video_core/shader/control_flow.cpp | 6
 src/video_core/shader/decode/image.cpp | 11
 src/video_core/shader/decode/memory.cpp | 100
 src/video_core/shader/decode/shift.cpp | 1
 src/video_core/shader/node.h | 14
 src/video_core/shader/shader_ir.cpp | 7
 src/video_core/shader/track.cpp | 7
 src/video_core/texture_cache/surface_base.cpp | 6
 src/video_core/texture_cache/surface_base.h | 4
 src/video_core/texture_cache/surface_params.cpp | 1
 src/video_core/texture_cache/surface_view.cpp | 4
 src/video_core/texture_cache/surface_view.h | 1
 src/video_core/texture_cache/texture_cache.h | 11
 src/web_service/CMakeLists.txt | 7
 src/web_service/web_backend.cpp | 14
 src/yuzu/debugger/profiler.cpp | 3
 src/yuzu/game_list_worker.cpp | 3
 src/yuzu/main.cpp | 8
 152 files changed, 5200 insertions(+), 3920 deletions(-)
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 9d0af02fd..0913be72c 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -53,7 +53,11 @@ if (MSVC)
 else()
     add_compile_options(
         -Wall
+        -Werror=implicit-fallthrough
+        -Werror=reorder
+        -Wextra
         -Wno-attributes
+        -Wno-unused-parameter
     )
 
     if (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL Clang)
diff --git a/src/audio_core/audio_renderer.cpp b/src/audio_core/audio_renderer.cpp
index c187d8ac5..7a9dc61d4 100644
--- a/src/audio_core/audio_renderer.cpp
+++ b/src/audio_core/audio_renderer.cpp
@@ -36,9 +36,9 @@ public:
     }
 
     void SetWaveIndex(std::size_t index);
-    std::vector<s16> DequeueSamples(std::size_t sample_count, Memory::Memory& memory);
+    std::vector<s16> DequeueSamples(std::size_t sample_count, Core::Memory::Memory& memory);
     void UpdateState();
-    void RefreshBuffer(Memory::Memory& memory);
+    void RefreshBuffer(Core::Memory::Memory& memory);
 
 private:
     bool is_in_use{};
@@ -66,13 +66,14 @@ public:
         return info;
     }
 
-    void UpdateState(Memory::Memory& memory);
+    void UpdateState(Core::Memory::Memory& memory);
 
 private:
     EffectOutStatus out_status{};
     EffectInStatus info{};
 };
-AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Memory::Memory& memory_,
+
+AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_,
                              AudioRendererParameter params,
                              std::shared_ptr<Kernel::WritableEvent> buffer_event,
                              std::size_t instance_number)
@@ -208,7 +209,7 @@ void AudioRenderer::VoiceState::SetWaveIndex(std::size_t index) {
 }
 
 std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(std::size_t sample_count,
-                                                           Memory::Memory& memory) {
+                                                           Core::Memory::Memory& memory) {
     if (!IsPlaying()) {
         return {};
     }
@@ -258,7 +259,7 @@ void AudioRenderer::VoiceState::UpdateState() {
     is_in_use = info.is_in_use;
 }
 
-void AudioRenderer::VoiceState::RefreshBuffer(Memory::Memory& memory) {
+void AudioRenderer::VoiceState::RefreshBuffer(Core::Memory::Memory& memory) {
     const auto wave_buffer_address = info.wave_buffer[wave_index].buffer_addr;
     const auto wave_buffer_size = info.wave_buffer[wave_index].buffer_sz;
     std::vector<s16> new_samples(wave_buffer_size / sizeof(s16));
@@ -310,7 +311,7 @@ void AudioRenderer::VoiceState::RefreshBuffer(Memory::Memory& memory) {
     is_refresh_pending = false;
 }
 
-void AudioRenderer::EffectState::UpdateState(Memory::Memory& memory) {
+void AudioRenderer::EffectState::UpdateState(Core::Memory::Memory& memory) {
     if (info.is_new) {
         out_status.state = EffectStatus::New;
     } else {
diff --git a/src/audio_core/audio_renderer.h b/src/audio_core/audio_renderer.h
index c0fae669e..62faf9f19 100644
--- a/src/audio_core/audio_renderer.h
+++ b/src/audio_core/audio_renderer.h
@@ -22,7 +22,7 @@ namespace Kernel {
 class WritableEvent;
 }
 
-namespace Memory {
+namespace Core::Memory {
 class Memory;
 }
 
@@ -221,7 +221,7 @@ static_assert(sizeof(UpdateDataHeader) == 0x40, "UpdateDataHeader has wrong size
 
 class AudioRenderer {
 public:
-    AudioRenderer(Core::Timing::CoreTiming& core_timing, Memory::Memory& memory_,
+    AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_,
                   AudioRendererParameter params,
                   std::shared_ptr<Kernel::WritableEvent> buffer_event, std::size_t instance_number);
     ~AudioRenderer();
@@ -244,7 +244,7 @@ private:
     std::vector<EffectState> effects;
     std::unique_ptr<AudioOut> audio_out;
     StreamPtr stream;
-    Memory::Memory& memory;
+    Core::Memory::Memory& memory;
 };
 
 } // namespace AudioCore
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index eeceaa655..6ffc612e7 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -155,6 +155,8 @@ add_library(common STATIC
     uuid.cpp
     uuid.h
     vector_math.h
+    virtual_buffer.cpp
+    virtual_buffer.h
     web_result.h
     zstd_compression.cpp
     zstd_compression.h
diff --git a/src/common/alignment.h b/src/common/alignment.h
index cdd4833f8..f8c49e079 100644
--- a/src/common/alignment.h
+++ b/src/common/alignment.h
@@ -38,6 +38,13 @@ constexpr bool IsWordAligned(T value) {
     return (value & 0b11) == 0;
 }
 
+template <typename T>
+constexpr bool IsAligned(T value, std::size_t alignment) {
+    using U = typename std::make_unsigned<T>::type;
+    const U mask = static_cast<U>(alignment - 1);
+    return (value & mask) == 0;
+}
+
 template <typename T, std::size_t Align = 16>
 class AlignmentAllocator {
 public:
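
The new Common::IsAligned helper generalizes the fixed-width checks above to any power-of-two alignment. A minimal usage sketch (the caller name and page size below are illustrative, not part of the patch):

    // Hypothetical caller: true when address is 4 KiB aligned, i.e. its low
    // 12 bits are clear. The mask trick requires a power-of-two alignment.
    bool IsPageAligned(u64 address) {
        return Common::IsAligned(address, 0x1000);
    }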
diff --git a/src/common/common_funcs.h b/src/common/common_funcs.h
index 052254678..88cf5250a 100644
--- a/src/common/common_funcs.h
+++ b/src/common/common_funcs.h
@@ -55,6 +55,38 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
 // Defined in Misc.cpp.
 std::string GetLastErrorMsg();
 
+#define DECLARE_ENUM_FLAG_OPERATORS(type)                                 \
+    constexpr type operator|(type a, type b) noexcept {                   \
+        using T = std::underlying_type_t<type>;                           \
+        return static_cast<type>(static_cast<T>(a) | static_cast<T>(b));  \
+    }                                                                     \
+    constexpr type operator&(type a, type b) noexcept {                   \
+        using T = std::underlying_type_t<type>;                           \
+        return static_cast<type>(static_cast<T>(a) & static_cast<T>(b));  \
+    }                                                                     \
+    constexpr type& operator|=(type& a, type b) noexcept {                \
+        using T = std::underlying_type_t<type>;                           \
+        a = static_cast<type>(static_cast<T>(a) | static_cast<T>(b));     \
+        return a;                                                         \
+    }                                                                     \
+    constexpr type& operator&=(type& a, type b) noexcept {                \
+        using T = std::underlying_type_t<type>;                           \
+        a = static_cast<type>(static_cast<T>(a) & static_cast<T>(b));     \
+        return a;                                                         \
+    }                                                                     \
+    constexpr type operator~(type key) noexcept {                         \
+        using T = std::underlying_type_t<type>;                           \
+        return static_cast<type>(~static_cast<T>(key));                   \
+    }                                                                     \
+    constexpr bool True(type key) noexcept {                              \
+        using T = std::underlying_type_t<type>;                           \
+        return static_cast<T>(key) != 0;                                  \
+    }                                                                     \
+    constexpr bool False(type key) noexcept {                             \
+        using T = std::underlying_type_t<type>;                           \
+        return static_cast<T>(key) == 0;                                  \
+    }
+
 namespace Common {
 
 constexpr u32 MakeMagic(char a, char b, char c, char d) {
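
DECLARE_ENUM_FLAG_OPERATORS generates the bitwise operators for a scoped enum so flag types no longer need a static_cast at every use site. A hedged sketch with a hypothetical flag enum (the kernel types added later in this change use the macro the same way):

    // ExampleFlags is illustrative only, not a type from this patch.
    enum class ExampleFlags : u32 {
        None = 0,
        Read = 1 << 0,
        Write = 1 << 1,
    };
    DECLARE_ENUM_FLAG_OPERATORS(ExampleFlags)

    // |, &, |=, &=, ~ now work directly on the enum; True()/False() test for
    // set bits without converting back to the underlying integer.
    constexpr ExampleFlags rw = ExampleFlags::Read | ExampleFlags::Write;
    static_assert(True(rw & ExampleFlags::Read));
    static_assert(False(rw & ~rw));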
diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp
index 566b57b62..e5d3090d5 100644
--- a/src/common/page_table.cpp
+++ b/src/common/page_table.cpp
@@ -6,36 +6,20 @@
 
 namespace Common {
 
-PageTable::PageTable(std::size_t page_size_in_bits) : page_size_in_bits{page_size_in_bits} {}
+PageTable::PageTable() = default;
 
 PageTable::~PageTable() = default;
 
-void PageTable::Resize(std::size_t address_space_width_in_bits) {
-    const std::size_t num_page_table_entries = 1ULL
-                                               << (address_space_width_in_bits - page_size_in_bits);
-
+void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits,
+                       bool has_attribute) {
+    const std::size_t num_page_table_entries{1ULL
+                                             << (address_space_width_in_bits - page_size_in_bits)};
     pointers.resize(num_page_table_entries);
-    attributes.resize(num_page_table_entries);
-
-    // The default is a 39-bit address space, which causes an initial 1GB allocation size. If the
-    // vector size is subsequently decreased (via resize), the vector might not automatically
-    // actually reallocate/resize its underlying allocation, which wastes up to ~800 MB for
-    // 36-bit titles. Call shrink_to_fit to reduce capacity to what's actually in use.
-
-    pointers.shrink_to_fit();
-    attributes.shrink_to_fit();
-}
-
-BackingPageTable::BackingPageTable(std::size_t page_size_in_bits) : PageTable{page_size_in_bits} {}
-
-BackingPageTable::~BackingPageTable() = default;
-
-void BackingPageTable::Resize(std::size_t address_space_width_in_bits) {
-    PageTable::Resize(address_space_width_in_bits);
-    const std::size_t num_page_table_entries = 1ULL
-                                               << (address_space_width_in_bits - page_size_in_bits);
     backing_addr.resize(num_page_table_entries);
-    backing_addr.shrink_to_fit();
+
+    if (has_attribute) {
+        attributes.resize(num_page_table_entries);
+    }
 }
 
 } // namespace Common
diff --git a/src/common/page_table.h b/src/common/page_table.h
index dbc272ab7..1e8bd3187 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
@@ -5,9 +5,12 @@
 #pragma once
 
 #include <vector>
+
 #include <boost/icl/interval_map.hpp>
+
 #include "common/common_types.h"
 #include "common/memory_hook.h"
+#include "common/virtual_buffer.h"
 
 namespace Common {
 
@@ -47,7 +50,7 @@ struct SpecialRegion {
  * mimics the way a real CPU page table works.
  */
 struct PageTable {
-    explicit PageTable(std::size_t page_size_in_bits);
+    PageTable();
     ~PageTable();
 
     /**
@@ -56,40 +59,18 @@ struct PageTable {
      *
     * @param address_space_width_in_bits The address size width in bits.
      */
-    void Resize(std::size_t address_space_width_in_bits);
+    void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits,
+                bool has_attribute);
 
     /**
      * Vector of memory pointers backing each page. An entry can only be non-null if the
      * corresponding entry in the `attributes` vector is of type `Memory`.
      */
-    std::vector<u8*> pointers;
-
-    /**
-     * Contains MMIO handlers that back memory regions whose entries in the `attribute` vector is
-     * of type `Special`.
-     */
-    boost::icl::interval_map<u64, std::set<SpecialRegion>> special_regions;
-
-    /**
-     * Vector of fine grained page attributes. If it is set to any value other than `Memory`, then
-     * the corresponding entry in `pointers` MUST be set to null.
-     */
-    std::vector<PageType> attributes;
-
-    const std::size_t page_size_in_bits{};
-};
-
-/**
- * A more advanced Page Table with the ability to save a backing address when using it
- * depends on another MMU.
- */
-struct BackingPageTable : PageTable {
-    explicit BackingPageTable(std::size_t page_size_in_bits);
-    ~BackingPageTable();
+    VirtualBuffer<u8*> pointers;
 
-    void Resize(std::size_t address_space_width_in_bits);
+    VirtualBuffer<u64> backing_addr;
 
-    std::vector<u64> backing_addr;
+    VirtualBuffer<PageType> attributes;
 };
 
 } // namespace Common
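
With BackingPageTable merged into PageTable, callers now pass the page size and whether attribute storage is needed at Resize time rather than at construction. A hypothetical call site under that assumption:

    Common::PageTable page_table;
    // 39-bit guest address space with 4 KiB (2^12) pages; also allocate the
    // attribute buffer. This sizes each VirtualBuffer to 2^27 entries.
    page_table.Resize(39, 12, true);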
diff --git a/src/common/scope_exit.h b/src/common/scope_exit.h
index 1176a72b1..68ef5f197 100644
--- a/src/common/scope_exit.h
+++ b/src/common/scope_exit.h
@@ -12,10 +12,17 @@ template <typename Func>
 struct ScopeExitHelper {
     explicit ScopeExitHelper(Func&& func) : func(std::move(func)) {}
     ~ScopeExitHelper() {
-        func();
+        if (active) {
+            func();
+        }
+    }
+
+    void Cancel() {
+        active = false;
     }
 
     Func func;
+    bool active{true};
 };
 
 template <typename Func>
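
Cancel() lets a scope-exit cleanup be disarmed once the guarded operation has succeeded. A sketch, assuming the helper is constructed directly (the names ReleaseResources/DoFallibleWork are hypothetical; the SCOPE_EXIT macro in this header wraps the same type):

    ScopeExitHelper guard{[&] { ReleaseResources(); }};
    if (!DoFallibleWork()) {
        return false; // destructor runs the cleanup
    }
    guard.Cancel(); // success: the destructor now skips func()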
diff --git a/src/common/virtual_buffer.cpp b/src/common/virtual_buffer.cpp
new file mode 100644
index 000000000..b426f4747
--- /dev/null
+++ b/src/common/virtual_buffer.cpp
@@ -0,0 +1,52 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#if defined __APPLE__ || defined __FreeBSD__ || defined __OpenBSD__
+#include <sys/sysctl.h>
+#elif defined __HAIKU__
+#include <OS.h>
+#else
+#include <sys/sysinfo.h>
+#endif
+#endif
+
+#include "common/assert.h"
+#include "common/virtual_buffer.h"
+
+namespace Common {
+
+void* AllocateMemoryPages(std::size_t size) {
+#ifdef _WIN32
+    void* base{VirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_READWRITE)};
+#else
+    void* base{mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0)};
+
+    if (base == MAP_FAILED) {
+        base = nullptr;
+    }
+#endif
+
+    ASSERT(base);
+
+    return base;
+}
+
+void FreeMemoryPages(void* base, std::size_t size) {
+    if (!base) {
+        return;
+    }
+#ifdef _WIN32
+    ASSERT(VirtualFree(base, 0, MEM_RELEASE));
+#else
+    ASSERT(munmap(base, size) == 0);
+#endif
+}
+
+} // namespace Common
diff --git a/src/common/virtual_buffer.h b/src/common/virtual_buffer.h
new file mode 100644
index 000000000..da064e59e
--- /dev/null
+++ b/src/common/virtual_buffer.h
@@ -0,0 +1,58 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_funcs.h"
+
+namespace Common {
+
+void* AllocateMemoryPages(std::size_t size);
+void FreeMemoryPages(void* base, std::size_t size);
+
+template <typename T>
+class VirtualBuffer final : NonCopyable {
+public:
+    constexpr VirtualBuffer() = default;
+    explicit VirtualBuffer(std::size_t count) : alloc_size{count * sizeof(T)} {
+        base_ptr = reinterpret_cast<T*>(AllocateMemoryPages(alloc_size));
+    }
+
+    ~VirtualBuffer() {
+        FreeMemoryPages(base_ptr, alloc_size);
+    }
+
+    void resize(std::size_t count) {
+        FreeMemoryPages(base_ptr, alloc_size);
+
+        alloc_size = count * sizeof(T);
+        base_ptr = reinterpret_cast<T*>(AllocateMemoryPages(alloc_size));
+    }
+
+    constexpr const T& operator[](std::size_t index) const {
+        return base_ptr[index];
+    }
+
+    constexpr T& operator[](std::size_t index) {
+        return base_ptr[index];
+    }
+
+    constexpr T* data() {
+        return base_ptr;
+    }
+
+    constexpr const T* data() const {
+        return base_ptr;
+    }
+
+    constexpr std::size_t size() const {
+        return alloc_size / sizeof(T);
+    }
+
+private:
+    std::size_t alloc_size{};
+    T* base_ptr{};
+};
+
+} // namespace Common
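
VirtualBuffer trades std::vector's growth semantics for a single page-backed mapping: construction and resize() map fresh zeroed pages, so resize discards prior contents instead of preserving them, which is what the page table arrays above rely on. A brief usage sketch (the sizes are illustrative):

    // Reserve 1 Mi u64 entries (8 MiB of zero-initialized pages) in one mapping.
    Common::VirtualBuffer<u64> backing{1ULL << 20};
    backing[0] = 42;
    backing.resize(1ULL << 10); // re-maps: old contents are NOT carried over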
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 66497a386..8546d3602 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -35,6 +35,8 @@ add_library(core STATIC
     crypto/ctr_encryption_layer.h
     crypto/xts_encryption_layer.cpp
     crypto/xts_encryption_layer.h
+    device_memory.cpp
+    device_memory.h
     file_sys/bis_factory.cpp
     file_sys/bis_factory.h
     file_sys/card_image.cpp
@@ -152,6 +154,23 @@ add_library(core STATIC
     hle/kernel/hle_ipc.h
     hle/kernel/kernel.cpp
     hle/kernel/kernel.h
+    hle/kernel/memory/address_space_info.cpp
+    hle/kernel/memory/address_space_info.h
+    hle/kernel/memory/memory_block.h
+    hle/kernel/memory/memory_block_manager.cpp
+    hle/kernel/memory/memory_block_manager.h
+    hle/kernel/memory/memory_layout.h
+    hle/kernel/memory/memory_manager.cpp
+    hle/kernel/memory/memory_manager.h
+    hle/kernel/memory/memory_types.h
+    hle/kernel/memory/page_linked_list.h
+    hle/kernel/memory/page_heap.cpp
+    hle/kernel/memory/page_heap.h
+    hle/kernel/memory/page_table.cpp
+    hle/kernel/memory/page_table.h
+    hle/kernel/memory/slab_heap.h
+    hle/kernel/memory/system_control.cpp
+    hle/kernel/memory/system_control.h
     hle/kernel/mutex.cpp
     hle/kernel/mutex.h
     hle/kernel/object.cpp
@@ -178,6 +197,7 @@ add_library(core STATIC
     hle/kernel/shared_memory.h
     hle/kernel/svc.cpp
     hle/kernel/svc.h
+    hle/kernel/svc_types.h
     hle/kernel/svc_wrap.h
     hle/kernel/synchronization_object.cpp
     hle/kernel/synchronization_object.h
@@ -189,8 +209,6 @@ add_library(core STATIC
     hle/kernel/time_manager.h
     hle/kernel/transfer_memory.cpp
     hle/kernel/transfer_memory.h
-    hle/kernel/vm_manager.cpp
-    hle/kernel/vm_manager.h
     hle/kernel/writable_event.cpp
     hle/kernel/writable_event.h
     hle/lock.cpp
@@ -591,11 +609,8 @@ target_link_libraries(core PUBLIC common PRIVATE audio_core video_core)
 target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt json-headers mbedtls opus unicorn)
 
 if (YUZU_ENABLE_BOXCAT)
-    get_directory_property(OPENSSL_LIBS
-        DIRECTORY ${PROJECT_SOURCE_DIR}/externals/libressl
-        DEFINITION OPENSSL_LIBS)
-    target_compile_definitions(core PRIVATE -DCPPHTTPLIB_OPENSSL_SUPPORT -DYUZU_ENABLE_BOXCAT)
-    target_link_libraries(core PRIVATE httplib json-headers ${OPENSSL_LIBS} zip)
+    target_compile_definitions(core PRIVATE -DYUZU_ENABLE_BOXCAT)
+    target_link_libraries(core PRIVATE httplib json-headers zip)
 endif()
 
 if (ENABLE_WEB_SERVICE)
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index 7e846ddd5..d079a1bc8 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -60,7 +60,7 @@ static_assert(sizeof(ELFSymbol) == 0x18, "ELFSymbol has incorrect size.");
 
 using Symbols = std::vector<std::pair<ELFSymbol, std::string>>;
 
-Symbols GetSymbols(VAddr text_offset, Memory::Memory& memory) {
+Symbols GetSymbols(VAddr text_offset, Core::Memory::Memory& memory) {
     const auto mod_offset = text_offset + memory.Read32(text_offset + 4);
 
     if (mod_offset < text_offset || (mod_offset & 0b11) != 0 ||
@@ -123,7 +123,7 @@ Symbols GetSymbols(VAddr text_offset, Memory::Memory& memory) {
 std::optional<std::string> GetSymbolName(const Symbols& symbols, VAddr func_address) {
     const auto iter =
         std::find_if(symbols.begin(), symbols.end(), [func_address](const auto& pair) {
-            const auto& [symbol, name] = pair;
+            const auto& symbol = pair.first;
             const auto end_address = symbol.value + symbol.size;
             return func_address >= symbol.value && func_address < end_address;
         });
@@ -146,7 +146,7 @@ std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktrace() const {
     auto fp = GetReg(29);
     auto lr = GetReg(30);
     while (true) {
-        out.push_back({"", 0, lr, 0});
+        out.push_back({"", 0, lr, 0, ""});
         if (!fp) {
             break;
         }
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index 57eae839e..cb2e640e2 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -26,28 +26,28 @@ public:
     virtual ~ARM_Interface() = default;
 
     struct ThreadContext32 {
-        std::array<u32, 16> cpu_registers;
-        u32 cpsr;
-        std::array<u8, 4> padding;
-        std::array<u64, 32> fprs;
-        u32 fpscr;
-        u32 fpexc;
-        u32 tpidr;
+        std::array<u32, 16> cpu_registers{};
+        u32 cpsr{};
+        std::array<u8, 4> padding{};
+        std::array<u64, 32> fprs{};
+        u32 fpscr{};
+        u32 fpexc{};
+        u32 tpidr{};
     };
     // Internally within the kernel, it expects the AArch32 version of the
     // thread context to be 344 bytes in size.
     static_assert(sizeof(ThreadContext32) == 0x158);
 
     struct ThreadContext64 {
-        std::array<u64, 31> cpu_registers;
-        u64 sp;
-        u64 pc;
-        u32 pstate;
-        std::array<u8, 4> padding;
-        std::array<u128, 32> vector_registers;
-        u32 fpcr;
-        u32 fpsr;
-        u64 tpidr;
+        std::array<u64, 31> cpu_registers{};
+        u64 sp{};
+        u64 pc{};
+        u32 pstate{};
+        std::array<u8, 4> padding{};
+        std::array<u128, 32> vector_registers{};
+        u32 fpcr{};
+        u32 fpsr{};
+        u64 tpidr{};
     };
     // Internally within the kernel, it expects the AArch64 version of the
     // thread context to be 800 bytes in size.
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 187a972ac..9bc86e3b9 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -67,7 +67,7 @@ public:
     }
 
     void CallSVC(u32 swi) override {
-        Kernel::CallSVC(parent.system, swi);
+        Kernel::Svc::Call(parent.system, swi);
     }
 
     void AddTicks(u64 ticks) override {
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h
index 143e46e4d..8ba9cea8f 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.h
@@ -15,7 +15,7 @@
 #include "core/arm/arm_interface.h"
 #include "core/arm/exclusive_monitor.h"
 
-namespace Memory {
+namespace Core::Memory {
 class Memory;
 }
 
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index a53a58ba0..9add5d363 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -8,6 +8,7 @@
 #include <dynarmic/A64/config.h>
 #include "common/logging/log.h"
 #include "common/microprofile.h"
+#include "common/page_table.h"
 #include "core/arm/dynarmic/arm_dynarmic_64.h"
 #include "core/core.h"
 #include "core/core_manager.h"
@@ -18,7 +19,6 @@
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/svc.h"
-#include "core/hle/kernel/vm_manager.h"
 #include "core/memory.h"
 
 namespace Core {
@@ -103,7 +103,7 @@ public:
     }
 
     void CallSVC(u32 swi) override {
-        Kernel::CallSVC(parent.system, swi);
+        Kernel::Svc::Call(parent.system, swi);
     }
 
     void AddTicks(u64 ticks) override {
@@ -159,6 +159,9 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
     // Unpredictable instructions
     config.define_unpredictable_behaviour = true;
 
+    config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
+    config.only_detect_misalignment_via_page_table_on_page_boundary = true;
+
     return std::make_shared<Dynarmic::A64::Jit>(config);
 }
 
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h
index e71240a96..647cecaf0 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.h
@@ -15,7 +15,7 @@
 #include "core/arm/exclusive_monitor.h"
 #include "core/arm/unicorn/arm_unicorn.h"
 
-namespace Memory {
+namespace Core::Memory {
 class Memory;
 }
 
@@ -92,7 +92,7 @@ public:
 private:
     friend class ARM_Dynarmic_64;
     Dynarmic::A64::ExclusiveMonitor monitor;
-    Memory::Memory& memory;
+    Core::Memory::Memory& memory;
 };
 
 } // namespace Core
diff --git a/src/core/arm/exclusive_monitor.h b/src/core/arm/exclusive_monitor.h
index 4ef418b90..ccd73b80f 100644
--- a/src/core/arm/exclusive_monitor.h
+++ b/src/core/arm/exclusive_monitor.h
@@ -8,7 +8,7 @@
 
 #include "common/common_types.h"
 
-namespace Memory {
+namespace Core::Memory {
 class Memory;
 }
 
diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp
index 8a9800a96..d189efb63 100644
--- a/src/core/arm/unicorn/arm_unicorn.cpp
+++ b/src/core/arm/unicorn/arm_unicorn.cpp
@@ -266,7 +266,7 @@ void ARM_Unicorn::InterruptHook(uc_engine* uc, u32 int_no, void* user_data) {
 
     switch (ec) {
     case 0x15: // SVC
-        Kernel::CallSVC(arm_instance->system, iss);
+        Kernel::Svc::Call(arm_instance->system, iss);
         break;
     }
 }
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 3bd90d79f..f9f8a3000 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -14,6 +14,7 @@
 #include "core/core_manager.h"
 #include "core/core_timing.h"
 #include "core/cpu_manager.h"
+#include "core/device_memory.h"
 #include "core/file_sys/bis_factory.h"
 #include "core/file_sys/card_image.h"
 #include "core/file_sys/mode.h"
@@ -140,6 +141,8 @@ struct System::Impl {
     ResultStatus Init(System& system, Frontend::EmuWindow& emu_window) {
         LOG_DEBUG(HW_Memory, "initialized OK");
 
+        device_memory = std::make_unique<Core::DeviceMemory>(system);
+
         core_timing.Initialize();
         kernel.Initialize();
         cpu_manager.Initialize();
@@ -276,6 +279,7 @@ struct System::Impl {
         telemetry_session.reset();
         perf_stats.reset();
         gpu_core.reset();
+        device_memory.reset();
 
         // Close all CPU/threading state
         cpu_manager.Shutdown();
@@ -346,7 +350,8 @@ struct System::Impl {
     std::unique_ptr<Loader::AppLoader> app_loader;
     std::unique_ptr<Tegra::GPU> gpu_core;
     std::unique_ptr<Hardware::InterruptManager> interrupt_manager;
-    Memory::Memory memory;
+    std::unique_ptr<Core::DeviceMemory> device_memory;
+    Core::Memory::Memory memory;
     CpuManager cpu_manager;
     bool is_powered_on = false;
     bool exit_lock = false;
@@ -472,6 +477,14 @@ Kernel::Process* System::CurrentProcess() {
     return impl->kernel.CurrentProcess();
 }
 
+Core::DeviceMemory& System::DeviceMemory() {
+    return *impl->device_memory;
+}
+
+const Core::DeviceMemory& System::DeviceMemory() const {
+    return *impl->device_memory;
+}
+
 const Kernel::Process* System::CurrentProcess() const {
     return impl->kernel.CurrentProcess();
 }
@@ -505,7 +518,7 @@ Memory::Memory& System::Memory() {
     return impl->memory;
 }
 
-const Memory::Memory& System::Memory() const {
+const Core::Memory::Memory& System::Memory() const {
     return impl->memory;
 }
 
diff --git a/src/core/core.h b/src/core/core.h
index 8d862a8e6..acc53d6a1 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -36,9 +36,10 @@ class AppLoader;
 enum class ResultStatus : u16;
 } // namespace Loader
 
-namespace Memory {
+namespace Core::Memory {
 struct CheatEntry;
-} // namespace Memory
+class Memory;
+} // namespace Core::Memory
 
 namespace Service {
 
@@ -86,14 +87,11 @@ namespace Core::Hardware {
 class InterruptManager;
 }
 
-namespace Memory {
-class Memory;
-}
-
 namespace Core {
 
 class ARM_Interface;
 class CoreManager;
+class DeviceMemory;
 class ExclusiveMonitor;
 class FrameLimiter;
 class PerfStats;
@@ -230,10 +228,10 @@ public:
     const ExclusiveMonitor& Monitor() const;
 
     /// Gets a mutable reference to the system memory instance.
-    Memory::Memory& Memory();
+    Core::Memory::Memory& Memory();
 
     /// Gets a constant reference to the system memory instance.
-    const Memory::Memory& Memory() const;
+    const Core::Memory::Memory& Memory() const;
 
     /// Gets a mutable reference to the GPU interface
     Tegra::GPU& GPU();
@@ -259,6 +257,12 @@ public:
     /// Gets the global scheduler
     const Kernel::GlobalScheduler& GlobalScheduler() const;
 
+    /// Gets the manager for the guest device memory
+    Core::DeviceMemory& DeviceMemory();
+
+    /// Gets the manager for the guest device memory
+    const Core::DeviceMemory& DeviceMemory() const;
+
     /// Provides a pointer to the current process
     Kernel::Process* CurrentProcess();
 
diff --git a/src/core/core_manager.h b/src/core/core_manager.h
index b14e723d7..d525de00a 100644
--- a/src/core/core_manager.h
+++ b/src/core/core_manager.h
@@ -22,7 +22,7 @@ namespace Core::Timing {
 class CoreTiming;
 }
 
-namespace Memory {
+namespace Core::Memory {
 class Memory;
 }
 
diff --git a/src/core/device_memory.cpp b/src/core/device_memory.cpp
new file mode 100644
index 000000000..51097ced3
--- /dev/null
+++ b/src/core/device_memory.cpp
@@ -0,0 +1,15 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/core.h"
+#include "core/device_memory.h"
+#include "core/memory.h"
+
+namespace Core {
+
+DeviceMemory::DeviceMemory(System& system) : buffer{DramMemoryMap::Size}, system{system} {}
+
+DeviceMemory::~DeviceMemory() = default;
+
+} // namespace Core
diff --git a/src/core/device_memory.h b/src/core/device_memory.h
new file mode 100644
index 000000000..9efa088d0
--- /dev/null
+++ b/src/core/device_memory.h
@@ -0,0 +1,51 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/assert.h"
8#include "common/common_funcs.h"
9#include "common/virtual_buffer.h"
10
11namespace Core {
12
13class System;
14
15namespace DramMemoryMap {
16enum : u64 {
17 Base = 0x80000000ULL,
18 Size = 0x100000000ULL,
19 End = Base + Size,
20 KernelReserveBase = Base + 0x60000,
21 SlabHeapBase = KernelReserveBase + 0x85000,
22 SlapHeapSize = 0xa21000,
23 SlabHeapEnd = SlabHeapBase + SlapHeapSize,
24};
25}; // namespace DramMemoryMap
26
27class DeviceMemory : NonCopyable {
28public:
29 explicit DeviceMemory(Core::System& system);
30 ~DeviceMemory();
31
32 template <typename T>
33 PAddr GetPhysicalAddr(const T* ptr) const {
34 return (reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(buffer.data())) +
35 DramMemoryMap::Base;
36 }
37
38 u8* GetPointer(PAddr addr) {
39 return buffer.data() + (addr - DramMemoryMap::Base);
40 }
41
42 const u8* GetPointer(PAddr addr) const {
43 return buffer.data() + (addr - DramMemoryMap::Base);
44 }
45
46private:
47 Common::VirtualBuffer<u8> buffer;
48 Core::System& system;
49};
50
51} // namespace Core
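
DeviceMemory backs the emulated DRAM with one VirtualBuffer reservation, so converting between host pointers and guest physical addresses is a constant offset against DramMemoryMap::Base. A sketch of the round trip (assuming an already-initialized Core::System& system):

    Core::DeviceMemory device_memory{system}; // reserves DramMemoryMap::Size bytes
    u8* host = device_memory.GetPointer(Core::DramMemoryMap::KernelReserveBase);
    const PAddr phys = device_memory.GetPhysicalAddr(host);
    // phys == Core::DramMemoryMap::KernelReserveBase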
diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp
index e77e82b8d..b93aa6935 100644
--- a/src/core/file_sys/patch_manager.cpp
+++ b/src/core/file_sys/patch_manager.cpp
@@ -249,7 +249,7 @@ bool PatchManager::HasNSOPatch(const std::array<u8, 32>& build_id_) const {
 }
 
 namespace {
-std::optional<std::vector<Memory::CheatEntry>> ReadCheatFileFromFolder(
+std::optional<std::vector<Core::Memory::CheatEntry>> ReadCheatFileFromFolder(
     const Core::System& system, u64 title_id, const std::array<u8, 0x20>& build_id_,
     const VirtualDir& base_path, bool upper) {
     const auto build_id_raw = Common::HexToString(build_id_, upper);
@@ -269,14 +269,14 @@ std::optional<std::vector<Memory::CheatEntry>> ReadCheatFileFromFolder(
         return std::nullopt;
     }
 
-    Memory::TextCheatParser parser;
+    Core::Memory::TextCheatParser parser;
     return parser.Parse(
         system, std::string_view(reinterpret_cast<const char* const>(data.data()), data.size()));
 }
 
 } // Anonymous namespace
 
-std::vector<Memory::CheatEntry> PatchManager::CreateCheatList(
+std::vector<Core::Memory::CheatEntry> PatchManager::CreateCheatList(
     const Core::System& system, const std::array<u8, 32>& build_id_) const {
     const auto load_dir = system.GetFileSystemController().GetModificationLoadRoot(title_id);
     if (load_dir == nullptr) {
@@ -289,7 +289,7 @@ std::vector<Memory::CheatEntry> PatchManager::CreateCheatList(
     std::sort(patch_dirs.begin(), patch_dirs.end(),
               [](const VirtualDir& l, const VirtualDir& r) { return l->GetName() < r->GetName(); });
 
-    std::vector<Memory::CheatEntry> out;
+    std::vector<Core::Memory::CheatEntry> out;
     for (const auto& subdir : patch_dirs) {
         if (std::find(disabled.cbegin(), disabled.cend(), subdir->GetName()) != disabled.cend()) {
             continue;
@@ -440,7 +440,8 @@ std::map<std::string, std::string, std::less<>> PatchManager::GetPatchVersionNam
     // Game Updates
     const auto update_tid = GetUpdateTitleID(title_id);
     PatchManager update{update_tid};
-    auto [nacp, discard_icon_file] = update.GetControlMetadata();
+    const auto metadata = update.GetControlMetadata();
+    const auto& nacp = metadata.first;
 
     const auto update_disabled =
         std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend();
diff --git a/src/core/file_sys/patch_manager.h b/src/core/file_sys/patch_manager.h
index e857e6e82..ec6db524d 100644
--- a/src/core/file_sys/patch_manager.h
+++ b/src/core/file_sys/patch_manager.h
@@ -51,8 +51,8 @@ public:
     bool HasNSOPatch(const std::array<u8, 0x20>& build_id) const;
 
     // Creates a CheatList object with all
-    std::vector<Memory::CheatEntry> CreateCheatList(const Core::System& system,
-                                                    const std::array<u8, 0x20>& build_id) const;
+    std::vector<Core::Memory::CheatEntry> CreateCheatList(
+        const Core::System& system, const std::array<u8, 0x20>& build_id) const;
 
     // Currently tracked RomFS patches:
     // - Game Updates
diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp
index 6e9cf67ef..ba5f76288 100644
--- a/src/core/file_sys/registered_cache.cpp
+++ b/src/core/file_sys/registered_cache.cpp
@@ -591,14 +591,18 @@ InstallResult RegisteredCache::InstallEntry(const NSP& nsp, bool overwrite_if_ex
 InstallResult RegisteredCache::InstallEntry(const NCA& nca, TitleType type,
                                             bool overwrite_if_exists, const VfsCopyFunction& copy) {
     CNMTHeader header{
-        nca.GetTitleId(), ///< Title ID
-        0,                ///< Ignore/Default title version
-        type,             ///< Type
-        {},               ///< Padding
-        0x10,             ///< Default table offset
-        1,                ///< 1 Content Entry
-        0,                ///< No Meta Entries
-        {},               ///< Padding
+        nca.GetTitleId(), // Title ID
+        0,                // Ignore/Default title version
+        type,             // Type
+        {},               // Padding
+        0x10,             // Default table offset
+        1,                // 1 Content Entry
+        0,                // No Meta Entries
+        {},               // Padding
+        {},               // Reserved 1
+        0,                // Is committed
+        0,                // Required download system version
+        {},               // Reserved 2
     };
     OptionalHeader opt_header{0, 0};
     ContentRecord c_rec{{}, {}, {}, GetCRTypeFromNCAType(nca.GetType()), {}};
@@ -848,7 +852,8 @@ VirtualFile ManualContentProvider::GetEntryUnparsed(u64 title_id, ContentRecordT
 VirtualFile ManualContentProvider::GetEntryRaw(u64 title_id, ContentRecordType type) const {
     const auto iter =
         std::find_if(entries.begin(), entries.end(), [title_id, type](const auto& entry) {
-            const auto [title_type, content_type, e_title_id] = entry.first;
+            const auto content_type = std::get<1>(entry.first);
+            const auto e_title_id = std::get<2>(entry.first);
             return content_type == type && e_title_id == title_id;
         });
     if (iter == entries.end())
diff --git a/src/core/file_sys/vfs_libzip.cpp b/src/core/file_sys/vfs_libzip.cpp
index 11d1978ea..d69952940 100644
--- a/src/core/file_sys/vfs_libzip.cpp
+++ b/src/core/file_sys/vfs_libzip.cpp
@@ -42,11 +42,11 @@ VirtualDir ExtractZIP(VirtualFile file) {
             continue;
 
         if (name.back() != '/') {
-            std::unique_ptr<zip_file_t, decltype(&zip_fclose)> file{
+            std::unique_ptr<zip_file_t, decltype(&zip_fclose)> file2{
                 zip_fopen_index(zip.get(), i, 0), zip_fclose};
 
             std::vector<u8> buf(stat.size);
-            if (zip_fread(file.get(), buf.data(), buf.size()) != buf.size())
+            if (zip_fread(file2.get(), buf.data(), buf.size()) != s64(buf.size()))
                 return nullptr;
 
             const auto parts = FileUtil::SplitPathComponents(stat.name);
diff --git a/src/core/frontend/framebuffer_layout.cpp b/src/core/frontend/framebuffer_layout.cpp
index 68a0e0906..d0c43447c 100644
--- a/src/core/frontend/framebuffer_layout.cpp
+++ b/src/core/frontend/framebuffer_layout.cpp
@@ -25,7 +25,7 @@ FramebufferLayout DefaultFrameLayout(u32 width, u32 height) {
     ASSERT(height > 0);
     // The drawing code needs at least somewhat valid values for both screens
     // so just calculate them both even if the other isn't showing.
-    FramebufferLayout res{width, height};
+    FramebufferLayout res{width, height, false, {}};
 
     const float window_aspect_ratio = static_cast<float>(height) / width;
     const float emulation_aspect_ratio = EmulationAspectRatio(
diff --git a/src/core/gdbstub/gdbstub.cpp b/src/core/gdbstub/gdbstub.cpp
index 6d15aeed9..2f15635c5 100644
--- a/src/core/gdbstub/gdbstub.cpp
+++ b/src/core/gdbstub/gdbstub.cpp
@@ -37,9 +37,9 @@
 #include "core/core.h"
 #include "core/core_manager.h"
 #include "core/gdbstub/gdbstub.h"
+#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/vm_manager.h"
 #include "core/loader/loader.h"
 #include "core/memory.h"
 
@@ -643,7 +643,7 @@ static void HandleQuery() {
         SendReply(target_xml);
     } else if (strncmp(query, "Offsets", strlen("Offsets")) == 0) {
         const VAddr base_address =
-            Core::System::GetInstance().CurrentProcess()->VMManager().GetCodeRegionBaseAddress();
+            Core::System::GetInstance().CurrentProcess()->PageTable().GetCodeRegionStart();
         std::string buffer = fmt::format("TextSeg={:0x}", base_address);
         SendReply(buffer.c_str());
     } else if (strncmp(query, "fThreadInfo", strlen("fThreadInfo")) == 0) {
diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp
index 6d66276bc..5ab204b9b 100644
--- a/src/core/hle/kernel/client_session.cpp
+++ b/src/core/hle/kernel/client_session.cpp
@@ -47,7 +47,8 @@ ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kern
     return MakeResult(std::move(client_session));
 }
 
-ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory) {
+ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread,
+                                          Core::Memory::Memory& memory) {
     // Keep ServerSession alive until we're done working with it.
     if (!parent->Server()) {
         return ERR_SESSION_CLOSED_BY_REMOTE;
diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h
index d15b09554..c5f760d7d 100644
--- a/src/core/hle/kernel/client_session.h
+++ b/src/core/hle/kernel/client_session.h
@@ -12,7 +12,7 @@
 
 union ResultCode;
 
-namespace Memory {
+namespace Core::Memory {
 class Memory;
 }
 
@@ -42,7 +42,7 @@ public:
         return HANDLE_TYPE;
     }
 
-    ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory);
+    ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
 
     bool ShouldWait(const Thread* thread) const override;
 
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h
index 8097b3863..29bfa3621 100644
--- a/src/core/hle/kernel/errors.h
+++ b/src/core/hle/kernel/errors.h
@@ -14,6 +14,7 @@ constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
 constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
 constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
 constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
+constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
 constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104};
 constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105};
 constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106};
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index c558a2f33..d65dae3ae 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -284,17 +284,17 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
 
 std::vector<u8> HLERequestContext::ReadBuffer(int buffer_index) const {
     std::vector<u8> buffer;
-    const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
+    const bool is_buffer_a{BufferDescriptorA().size() > std::size_t(buffer_index) &&
                            BufferDescriptorA()[buffer_index].Size()};
     auto& memory = Core::System::GetInstance().Memory();
 
     if (is_buffer_a) {
-        ASSERT_MSG(BufferDescriptorA().size() > buffer_index,
+        ASSERT_MSG(BufferDescriptorA().size() > std::size_t(buffer_index),
                    "BufferDescriptorA invalid buffer_index {}", buffer_index);
         buffer.resize(BufferDescriptorA()[buffer_index].Size());
         memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size());
     } else {
-        ASSERT_MSG(BufferDescriptorX().size() > buffer_index,
+        ASSERT_MSG(BufferDescriptorX().size() > std::size_t(buffer_index),
                    "BufferDescriptorX invalid buffer_index {}", buffer_index);
         buffer.resize(BufferDescriptorX()[buffer_index].Size());
         memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size());
@@ -310,7 +310,7 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
         return 0;
     }
 
-    const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
+    const bool is_buffer_b{BufferDescriptorB().size() > std::size_t(buffer_index) &&
                            BufferDescriptorB()[buffer_index].Size()};
     const std::size_t buffer_size{GetWriteBufferSize(buffer_index)};
     if (size > buffer_size) {
@@ -321,13 +321,13 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
 
     auto& memory = Core::System::GetInstance().Memory();
     if (is_buffer_b) {
-        ASSERT_MSG(BufferDescriptorB().size() > buffer_index,
+        ASSERT_MSG(BufferDescriptorB().size() > std::size_t(buffer_index),
                    "BufferDescriptorB invalid buffer_index {}", buffer_index);
         ASSERT_MSG(BufferDescriptorB()[buffer_index].Size() >= size,
                    "BufferDescriptorB buffer_index {} is not large enough", buffer_index);
         memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
     } else {
-        ASSERT_MSG(BufferDescriptorC().size() > buffer_index,
+        ASSERT_MSG(BufferDescriptorC().size() > std::size_t(buffer_index),
                    "BufferDescriptorC invalid buffer_index {}", buffer_index);
         ASSERT_MSG(BufferDescriptorC()[buffer_index].Size() >= size,
                    "BufferDescriptorC buffer_index {} is not large enough", buffer_index);
@@ -338,16 +338,16 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
 }
 
 std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
-    const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
+    const bool is_buffer_a{BufferDescriptorA().size() > std::size_t(buffer_index) &&
                            BufferDescriptorA()[buffer_index].Size()};
     if (is_buffer_a) {
-        ASSERT_MSG(BufferDescriptorA().size() > buffer_index,
+        ASSERT_MSG(BufferDescriptorA().size() > std::size_t(buffer_index),
                    "BufferDescriptorA invalid buffer_index {}", buffer_index);
         ASSERT_MSG(BufferDescriptorA()[buffer_index].Size() > 0,
                    "BufferDescriptorA buffer_index {} is empty", buffer_index);
         return BufferDescriptorA()[buffer_index].Size();
     } else {
-        ASSERT_MSG(BufferDescriptorX().size() > buffer_index,
+        ASSERT_MSG(BufferDescriptorX().size() > std::size_t(buffer_index),
                    "BufferDescriptorX invalid buffer_index {}", buffer_index);
         ASSERT_MSG(BufferDescriptorX()[buffer_index].Size() > 0,
                    "BufferDescriptorX buffer_index {} is empty", buffer_index);
@@ -356,14 +356,14 @@ std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
 }
 
 std::size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const {
-    const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
+    const bool is_buffer_b{BufferDescriptorB().size() > std::size_t(buffer_index) &&
                            BufferDescriptorB()[buffer_index].Size()};
     if (is_buffer_b) {
-        ASSERT_MSG(BufferDescriptorB().size() > buffer_index,
+        ASSERT_MSG(BufferDescriptorB().size() > std::size_t(buffer_index),
                    "BufferDescriptorB invalid buffer_index {}", buffer_index);
         return BufferDescriptorB()[buffer_index].Size();
     } else {
-        ASSERT_MSG(BufferDescriptorC().size() > buffer_index,
+        ASSERT_MSG(BufferDescriptorC().size() > std::size_t(buffer_index),
                    "BufferDescriptorC invalid buffer_index {}", buffer_index);
         return BufferDescriptorC()[buffer_index].Size();
     }
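
The std::size_t casts in these hunks fix signed/unsigned comparison warnings: the descriptor containers report their size as std::size_t, while buffer_index is a signed int, so the old comparisons promoted the signed operand implicitly. A minimal, self-contained sketch of what that promotion does (illustration only, not yuzu code):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        const std::vector<int> items{1, 2, 3};
        const int index = -1; // a hypothetical caller bug

        // The signed operand is converted first, so -1 becomes SIZE_MAX and
        // the comparison is 3 > 18446744073709551615, i.e. false. Harmless
        // here, but easy to misread, and -Wsign-compare flags it.
        std::cout << (items.size() > std::size_t(index)) << '\n'; // prints 0
        return 0;
    }

Making the conversion explicit, as the patch does, keeps the behaviour while documenting the intent and silencing the warning.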
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index e47f1deed..7655382fa 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -18,15 +18,20 @@
18#include "core/core.h" 18#include "core/core.h"
19#include "core/core_timing.h" 19#include "core/core_timing.h"
20#include "core/core_timing_util.h" 20#include "core/core_timing_util.h"
21#include "core/device_memory.h"
21#include "core/hardware_properties.h" 22#include "core/hardware_properties.h"
22#include "core/hle/kernel/client_port.h" 23#include "core/hle/kernel/client_port.h"
23#include "core/hle/kernel/errors.h" 24#include "core/hle/kernel/errors.h"
24#include "core/hle/kernel/handle_table.h" 25#include "core/hle/kernel/handle_table.h"
25#include "core/hle/kernel/kernel.h" 26#include "core/hle/kernel/kernel.h"
27#include "core/hle/kernel/memory/memory_layout.h"
28#include "core/hle/kernel/memory/memory_manager.h"
29#include "core/hle/kernel/memory/slab_heap.h"
26#include "core/hle/kernel/physical_core.h" 30#include "core/hle/kernel/physical_core.h"
27#include "core/hle/kernel/process.h" 31#include "core/hle/kernel/process.h"
28#include "core/hle/kernel/resource_limit.h" 32#include "core/hle/kernel/resource_limit.h"
29#include "core/hle/kernel/scheduler.h" 33#include "core/hle/kernel/scheduler.h"
34#include "core/hle/kernel/shared_memory.h"
30#include "core/hle/kernel/synchronization.h" 35#include "core/hle/kernel/synchronization.h"
31#include "core/hle/kernel/thread.h" 36#include "core/hle/kernel/thread.h"
32#include "core/hle/kernel/time_manager.h" 37#include "core/hle/kernel/time_manager.h"
@@ -103,13 +108,14 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
 
 struct KernelCore::Impl {
     explicit Impl(Core::System& system, KernelCore& kernel)
-        : system{system}, global_scheduler{kernel}, synchronization{system}, time_manager{system} {}
+        : global_scheduler{kernel}, synchronization{system}, time_manager{system}, system{system} {}
 
     void Initialize(KernelCore& kernel) {
         Shutdown();
 
         InitializePhysicalCores();
         InitializeSystemResourceLimit(kernel);
+        InitializeMemoryLayout();
         InitializeThreads();
         InitializePreemption();
     }
@@ -154,12 +160,17 @@ struct KernelCore::Impl {
         system_resource_limit = ResourceLimit::Create(kernel);
 
         // If setting the default system values fails, then something seriously wrong has occurred.
-        ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x200000000)
+        ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x100000000)
                    .IsSuccess());
         ASSERT(system_resource_limit->SetLimitValue(ResourceType::Threads, 800).IsSuccess());
         ASSERT(system_resource_limit->SetLimitValue(ResourceType::Events, 700).IsSuccess());
         ASSERT(system_resource_limit->SetLimitValue(ResourceType::TransferMemory, 200).IsSuccess());
         ASSERT(system_resource_limit->SetLimitValue(ResourceType::Sessions, 900).IsSuccess());
+
+        if (!system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0) ||
+            !system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0x60000)) {
+            UNREACHABLE();
+        }
     }
 
     void InitializeThreads() {
@@ -237,6 +248,57 @@ struct KernelCore::Impl {
         return result;
     }
 
+    void InitializeMemoryLayout() {
+        // Initialize memory layout
+        constexpr Memory::MemoryLayout layout{Memory::MemoryLayout::GetDefaultLayout()};
+        constexpr std::size_t hid_size{0x40000};
+        constexpr std::size_t font_size{0x1100000};
+        constexpr std::size_t irs_size{0x8000};
+        constexpr std::size_t time_size{0x1000};
+        constexpr PAddr hid_addr{layout.System().StartAddress()};
+        constexpr PAddr font_pa{layout.System().StartAddress() + hid_size};
+        constexpr PAddr irs_addr{layout.System().StartAddress() + hid_size + font_size};
+        constexpr PAddr time_addr{layout.System().StartAddress() + hid_size + font_size + irs_size};
+
+        // Initialize memory manager
+        memory_manager = std::make_unique<Memory::MemoryManager>();
+        memory_manager->InitializeManager(Memory::MemoryManager::Pool::Application,
+                                          layout.Application().StartAddress(),
+                                          layout.Application().EndAddress());
+        memory_manager->InitializeManager(Memory::MemoryManager::Pool::Applet,
+                                          layout.Applet().StartAddress(),
+                                          layout.Applet().EndAddress());
+        memory_manager->InitializeManager(Memory::MemoryManager::Pool::System,
+                                          layout.System().StartAddress(),
+                                          layout.System().EndAddress());
+
+        hid_shared_mem = Kernel::SharedMemory::Create(
+            system.Kernel(), system.DeviceMemory(), nullptr,
+            {hid_addr, hid_size / Memory::PageSize}, Memory::MemoryPermission::None,
+            Memory::MemoryPermission::Read, hid_addr, hid_size, "HID:SharedMemory");
+        font_shared_mem = Kernel::SharedMemory::Create(
+            system.Kernel(), system.DeviceMemory(), nullptr,
+            {font_pa, font_size / Memory::PageSize}, Memory::MemoryPermission::None,
+            Memory::MemoryPermission::Read, font_pa, font_size, "Font:SharedMemory");
+        irs_shared_mem = Kernel::SharedMemory::Create(
+            system.Kernel(), system.DeviceMemory(), nullptr,
+            {irs_addr, irs_size / Memory::PageSize}, Memory::MemoryPermission::None,
+            Memory::MemoryPermission::Read, irs_addr, irs_size, "IRS:SharedMemory");
+        time_shared_mem = Kernel::SharedMemory::Create(
+            system.Kernel(), system.DeviceMemory(), nullptr,
+            {time_addr, time_size / Memory::PageSize}, Memory::MemoryPermission::None,
+            Memory::MemoryPermission::Read, time_addr, time_size, "Time:SharedMemory");
+
+        // Allocate slab heaps
+        user_slab_heap_pages = std::make_unique<Memory::SlabHeap<Memory::Page>>();
+
+        // Initialize slab heaps
+        constexpr u64 user_slab_heap_size{0x3de000};
+        user_slab_heap_pages->Initialize(
+            system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
+            user_slab_heap_size);
+    }
+
     std::atomic<u32> next_object_id{0};
     std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
     std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
@@ -271,6 +333,16 @@ struct KernelCore::Impl {
     std::bitset<Core::Hardware::NUM_CPU_CORES> registered_core_threads;
     std::mutex register_thread_mutex;
 
+    // Kernel memory management
+    std::unique_ptr<Memory::MemoryManager> memory_manager;
+    std::unique_ptr<Memory::SlabHeap<Memory::Page>> user_slab_heap_pages;
+
+    // Shared memory for services
+    std::shared_ptr<Kernel::SharedMemory> hid_shared_mem;
+    std::shared_ptr<Kernel::SharedMemory> font_shared_mem;
+    std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
+    std::shared_ptr<Kernel::SharedMemory> time_shared_mem;
+
     // System context
     Core::System& system;
 };
@@ -437,4 +509,52 @@ Core::EmuThreadHandle KernelCore::GetCurrentEmuThreadID() const {
     return impl->GetCurrentEmuThreadID();
 }
 
+Memory::MemoryManager& KernelCore::MemoryManager() {
+    return *impl->memory_manager;
+}
+
+const Memory::MemoryManager& KernelCore::MemoryManager() const {
+    return *impl->memory_manager;
+}
+
+Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() {
+    return *impl->user_slab_heap_pages;
+}
+
+const Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() const {
+    return *impl->user_slab_heap_pages;
+}
+
+Kernel::SharedMemory& KernelCore::GetHidSharedMem() {
+    return *impl->hid_shared_mem;
+}
+
+const Kernel::SharedMemory& KernelCore::GetHidSharedMem() const {
+    return *impl->hid_shared_mem;
+}
+
+Kernel::SharedMemory& KernelCore::GetFontSharedMem() {
+    return *impl->font_shared_mem;
+}
+
+const Kernel::SharedMemory& KernelCore::GetFontSharedMem() const {
+    return *impl->font_shared_mem;
+}
+
+Kernel::SharedMemory& KernelCore::GetIrsSharedMem() {
+    return *impl->irs_shared_mem;
+}
+
+const Kernel::SharedMemory& KernelCore::GetIrsSharedMem() const {
+    return *impl->irs_shared_mem;
+}
+
+Kernel::SharedMemory& KernelCore::GetTimeSharedMem() {
+    return *impl->time_shared_mem;
+}
+
+const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
+    return *impl->time_shared_mem;
+}
+
 } // namespace Kernel
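
InitializeMemoryLayout above carves the four service shared-memory regions back to back from the start of the System pool: HID at the base, then font, IRS, and time. A standalone sketch of that address arithmetic, with the pool base as a placeholder value (the real base comes from MemoryLayout::GetDefaultLayout()):

    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr std::uint64_t system_pool_start = 0x80460000; // placeholder
        constexpr std::uint64_t hid_size = 0x40000;
        constexpr std::uint64_t font_size = 0x1100000;
        constexpr std::uint64_t irs_size = 0x8000;

        // Each region begins where the previous one ends.
        constexpr std::uint64_t hid_addr = system_pool_start;
        constexpr std::uint64_t font_addr = hid_addr + hid_size;
        constexpr std::uint64_t irs_addr = font_addr + font_size;
        constexpr std::uint64_t time_addr = irs_addr + irs_size;

        std::printf("hid=%#llx font=%#llx irs=%#llx time=%#llx\n",
                    (unsigned long long)hid_addr, (unsigned long long)font_addr,
                    (unsigned long long)irs_addr, (unsigned long long)time_addr);
        return 0;
    }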
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index c4f78ab71..83de1f542 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -8,6 +8,7 @@
 #include <string>
 #include <unordered_map>
 #include <vector>
+#include "core/hle/kernel/memory/memory_types.h"
 #include "core/hle/kernel/object.h"
 
 namespace Core {
@@ -23,6 +24,12 @@ struct EventType;
 
 namespace Kernel {
 
+namespace Memory {
+class MemoryManager;
+template <typename T>
+class SlabHeap;
+} // namespace Memory
+
 class AddressArbiter;
 class ClientPort;
 class GlobalScheduler;
@@ -31,6 +38,7 @@ class PhysicalCore;
 class Process;
 class ResourceLimit;
 class Scheduler;
+class SharedMemory;
 class Synchronization;
 class Thread;
 class TimeManager;
@@ -147,6 +155,42 @@ public:
     /// Register the current thread as a non CPU core thread.
     void RegisterHostThread();
 
+    /// Gets the virtual memory manager for the kernel.
+    Memory::MemoryManager& MemoryManager();
+
+    /// Gets the virtual memory manager for the kernel.
+    const Memory::MemoryManager& MemoryManager() const;
+
+    /// Gets the slab heap allocated for user space pages.
+    Memory::SlabHeap<Memory::Page>& GetUserSlabHeapPages();
+
+    /// Gets the slab heap allocated for user space pages.
+    const Memory::SlabHeap<Memory::Page>& GetUserSlabHeapPages() const;
+
+    /// Gets the shared memory object for HID services.
+    Kernel::SharedMemory& GetHidSharedMem();
+
+    /// Gets the shared memory object for HID services.
+    const Kernel::SharedMemory& GetHidSharedMem() const;
+
+    /// Gets the shared memory object for font services.
+    Kernel::SharedMemory& GetFontSharedMem();
+
+    /// Gets the shared memory object for font services.
+    const Kernel::SharedMemory& GetFontSharedMem() const;
+
+    /// Gets the shared memory object for IRS services.
+    Kernel::SharedMemory& GetIrsSharedMem();
+
+    /// Gets the shared memory object for IRS services.
+    const Kernel::SharedMemory& GetIrsSharedMem() const;
+
+    /// Gets the shared memory object for Time services.
+    Kernel::SharedMemory& GetTimeSharedMem();
+
+    /// Gets the shared memory object for Time services.
+    const Kernel::SharedMemory& GetTimeSharedMem() const;
+
 private:
     friend class Object;
     friend class Process;
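
A hypothetical call site for the new accessors, assuming a Core::System& named system is in scope (roughly what service code would write; not a quote from this change):

    Kernel::SharedMemory& hid_mem = system.Kernel().GetHidSharedMem();
    Kernel::Memory::SlabHeap<Kernel::Memory::Page>& slab =
        system.Kernel().GetUserSlabHeapPages();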
diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp
new file mode 100644
index 000000000..27fae05e7
--- /dev/null
+++ b/src/core/hle/kernel/memory/address_space_info.cpp
@@ -0,0 +1,118 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphère, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX.
7
8#include <array>
9
10#include "common/assert.h"
11#include "core/hle/kernel/memory/address_space_info.h"
12
13namespace Kernel::Memory {
14
15namespace {
16
17enum : u64 {
18 Size_1_MB = 0x100000,
19 Size_2_MB = 2 * Size_1_MB,
20 Size_128_MB = 128 * Size_1_MB,
21 Size_1_GB = 0x40000000,
22 Size_2_GB = 2 * Size_1_GB,
23 Size_4_GB = 4 * Size_1_GB,
24 Size_6_GB = 6 * Size_1_GB,
25 Size_64_GB = 64 * Size_1_GB,
26 Size_512_GB = 512 * Size_1_GB,
27 Invalid = std::numeric_limits<u64>::max(),
28};
29
30// clang-format off
31constexpr std::array<AddressSpaceInfo, 13> AddressSpaceInfos{{
32 { 32 /*bit_width*/, Size_2_MB /*addr*/, Size_1_GB - Size_2_MB /*size*/, AddressSpaceInfo::Type::Is32Bit, },
33 { 32 /*bit_width*/, Size_1_GB /*addr*/, Size_4_GB - Size_1_GB /*size*/, AddressSpaceInfo::Type::Small64Bit, },
34 { 32 /*bit_width*/, Invalid /*addr*/, Size_1_GB /*size*/, AddressSpaceInfo::Type::Heap, },
35 { 32 /*bit_width*/, Invalid /*addr*/, Size_1_GB /*size*/, AddressSpaceInfo::Type::Alias, },
36 { 36 /*bit_width*/, Size_128_MB /*addr*/, Size_2_GB - Size_128_MB /*size*/, AddressSpaceInfo::Type::Is32Bit, },
37 { 36 /*bit_width*/, Size_2_GB /*addr*/, Size_64_GB - Size_2_GB /*size*/, AddressSpaceInfo::Type::Small64Bit, },
38 { 36 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Heap, },
39 { 36 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Alias, },
40 { 39 /*bit_width*/, Size_128_MB /*addr*/, Size_512_GB - Size_128_MB /*size*/, AddressSpaceInfo::Type::Large64Bit, },
41 { 39 /*bit_width*/, Invalid /*addr*/, Size_64_GB /*size*/, AddressSpaceInfo::Type::Is32Bit },
42 { 39 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Heap, },
43 { 39 /*bit_width*/, Invalid /*addr*/, Size_64_GB /*size*/, AddressSpaceInfo::Type::Alias, },
44 { 39 /*bit_width*/, Invalid /*addr*/, Size_2_GB /*size*/, AddressSpaceInfo::Type::Stack, },
45}};
46// clang-format on
47
48constexpr bool IsAllowedIndexForAddress(std::size_t index) {
49 return index < std::size(AddressSpaceInfos) && AddressSpaceInfos[index].GetAddress() != Invalid;
50}
51
52constexpr std::size_t
53 AddressSpaceIndices32Bit[static_cast<std::size_t>(AddressSpaceInfo::Type::Count)]{
54 0, 1, 0, 2, 0, 3,
55 };
56
57constexpr std::size_t
58 AddressSpaceIndices36Bit[static_cast<std::size_t>(AddressSpaceInfo::Type::Count)]{
59 4, 5, 4, 6, 4, 7,
60 };
61
62constexpr std::size_t
63 AddressSpaceIndices39Bit[static_cast<std::size_t>(AddressSpaceInfo::Type::Count)]{
64 9, 8, 8, 10, 12, 11,
65 };
66
67constexpr bool IsAllowed32BitType(AddressSpaceInfo::Type type) {
68 return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
69 type != AddressSpaceInfo::Type::Stack;
70}
71
72constexpr bool IsAllowed36BitType(AddressSpaceInfo::Type type) {
73 return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
74 type != AddressSpaceInfo::Type::Stack;
75}
76
77constexpr bool IsAllowed39BitType(AddressSpaceInfo::Type type) {
78 return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Small64Bit;
79}
80
81} // namespace
82
83u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, AddressSpaceInfo::Type type) {
84 const std::size_t index{static_cast<std::size_t>(type)};
85 switch (width) {
86 case 32:
87 ASSERT(IsAllowed32BitType(type));
88 ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices32Bit[index]));
89 return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].GetAddress();
90 case 36:
91 ASSERT(IsAllowed36BitType(type));
92 ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices36Bit[index]));
93 return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].GetAddress();
94 case 39:
95 ASSERT(IsAllowed39BitType(type));
96 ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
97 return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].GetAddress();
98 }
99 UNREACHABLE();
100}
101
102std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, AddressSpaceInfo::Type type) {
103 const std::size_t index{static_cast<std::size_t>(type)};
104 switch (width) {
105 case 32:
106 ASSERT(IsAllowed32BitType(type));
107 return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].GetSize();
108 case 36:
109 ASSERT(IsAllowed36BitType(type));
110 return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].GetSize();
111 case 39:
112 ASSERT(IsAllowed39BitType(type));
113 return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].GetSize();
114 }
115 UNREACHABLE();
116}
117
118} // namespace Kernel::Memory
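
GetAddressSpaceStart/GetAddressSpaceSize resolve a (bit width, region type) pair through the per-width index tables into the shared AddressSpaceInfos array. Note that in the 39-bit table only the Large64Bit entry (index 8) carries a valid base address, so a caller takes the map base from Large64Bit and only the sizes for heap, alias, and stack. A sketch of the expected usage (illustrative, inside Kernel::Memory):

    const u64 map_start{
        AddressSpaceInfo::GetAddressSpaceStart(39, AddressSpaceInfo::Type::Large64Bit)};
    const std::size_t map_size{
        AddressSpaceInfo::GetAddressSpaceSize(39, AddressSpaceInfo::Type::Large64Bit)};
    const std::size_t heap_size{
        AddressSpaceInfo::GetAddressSpaceSize(39, AddressSpaceInfo::Type::Heap)};
    // Per the table: map_start = 128 MiB, map_size = 512 GiB - 128 MiB, heap_size = 6 GiB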
diff --git a/src/core/hle/kernel/memory/address_space_info.h b/src/core/hle/kernel/memory/address_space_info.h
new file mode 100644
index 000000000..cc9a6421e
--- /dev/null
+++ b/src/core/hle/kernel/memory/address_space_info.h
@@ -0,0 +1,54 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphère, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX.
7
8#pragma once
9
10#include "common/common_funcs.h"
11#include "common/common_types.h"
12
13namespace Kernel::Memory {
14
15class AddressSpaceInfo final : NonCopyable {
16public:
17 enum class Type : u32 {
18 Is32Bit = 0,
19 Small64Bit = 1,
20 Large64Bit = 2,
21 Heap = 3,
22 Stack = 4,
23 Alias = 5,
24 Count,
25 };
26
27private:
28 std::size_t bit_width{};
29 std::size_t addr{};
30 std::size_t size{};
31 Type type{};
32
33public:
34 static u64 GetAddressSpaceStart(std::size_t width, Type type);
35 static std::size_t GetAddressSpaceSize(std::size_t width, Type type);
36
37 constexpr AddressSpaceInfo(std::size_t bit_width, std::size_t addr, std::size_t size, Type type)
38 : bit_width{bit_width}, addr{addr}, size{size}, type{type} {}
39
40 constexpr std::size_t GetWidth() const {
41 return bit_width;
42 }
43 constexpr std::size_t GetAddress() const {
44 return addr;
45 }
46 constexpr std::size_t GetSize() const {
47 return size;
48 }
49 constexpr Type GetType() const {
50 return type;
51 }
52};
53
54} // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/memory_block.h b/src/core/hle/kernel/memory/memory_block.h
new file mode 100644
index 000000000..e11043b60
--- /dev/null
+++ b/src/core/hle/kernel/memory/memory_block.h
@@ -0,0 +1,318 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphère, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX.
7
8#pragma once
9
10#include "common/alignment.h"
11#include "common/assert.h"
12#include "common/common_types.h"
13#include "core/hle/kernel/memory/memory_types.h"
14#include "core/hle/kernel/svc_types.h"
15
16namespace Kernel::Memory {
17
18enum class MemoryState : u32 {
19 None = 0,
20 Mask = 0xFFFFFFFF, // TODO(bunnei): This should probably be 0xFF
21 All = ~None,
22
23 FlagCanReprotect = (1 << 8),
24 FlagCanDebug = (1 << 9),
25 FlagCanUseIpc = (1 << 10),
26 FlagCanUseNonDeviceIpc = (1 << 11),
27 FlagCanUseNonSecureIpc = (1 << 12),
28 FlagMapped = (1 << 13),
29 FlagCode = (1 << 14),
30 FlagCanAlias = (1 << 15),
31 FlagCanCodeAlias = (1 << 16),
32 FlagCanTransfer = (1 << 17),
33 FlagCanQueryPhysical = (1 << 18),
34 FlagCanDeviceMap = (1 << 19),
35 FlagCanAlignedDeviceMap = (1 << 20),
36 FlagCanIpcUserBuffer = (1 << 21),
37 FlagReferenceCounted = (1 << 22),
38 FlagCanMapProcess = (1 << 23),
39 FlagCanChangeAttribute = (1 << 24),
40 FlagCanCodeMemory = (1 << 25),
41
42 FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
43 FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
44 FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer |
45 FlagReferenceCounted | FlagCanChangeAttribute,
46
47 FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
48 FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap |
49 FlagCanAlignedDeviceMap | FlagReferenceCounted,
50
51 FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap,
52
53 Free = static_cast<u32>(Svc::MemoryState::Free),
54 Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped,
55 Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
56 Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
57 CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
58 FlagCanCodeMemory,
59 Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted,
60 Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
61
62 AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
63 FlagCanCodeAlias,
64 AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData |
65 FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory,
66
67 Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap |
68 FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
69
70 Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
71 FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
72
73 ThreadLocal =
74 static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,
75
76 Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
77 FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
78 FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
79
80 SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |
81 FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
82
83 SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
84 FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
85
86 Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible),
87
88 NonSecureIpc = static_cast<u32>(Svc::MemoryState::NonSecureIpc) | FlagsMisc |
89 FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
90
91 NonDeviceIpc =
92 static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc,
93
94 Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,
95
96 GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
97 FlagReferenceCounted | FlagCanDebug,
98 CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted,
99};
100DECLARE_ENUM_FLAG_OPERATORS(MemoryState);
101
102static_assert(static_cast<u32>(MemoryState::Free) == 0x00000000);
103static_assert(static_cast<u32>(MemoryState::Io) == 0x00002001);
104static_assert(static_cast<u32>(MemoryState::Static) == 0x00042002);
105static_assert(static_cast<u32>(MemoryState::Code) == 0x00DC7E03);
106static_assert(static_cast<u32>(MemoryState::CodeData) == 0x03FEBD04);
107static_assert(static_cast<u32>(MemoryState::Normal) == 0x037EBD05);
108static_assert(static_cast<u32>(MemoryState::Shared) == 0x00402006);
109static_assert(static_cast<u32>(MemoryState::AliasCode) == 0x00DD7E08);
110static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09);
111static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A);
112static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B);
113static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C);
114static_assert(static_cast<u32>(MemoryState::Transfered) == 0x015C3C0D);
115static_assert(static_cast<u32>(MemoryState::SharedTransfered) == 0x005C380E);
116static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F);
117static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010);
118static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811);
119static_assert(static_cast<u32>(MemoryState::NonDeviceIpc) == 0x004C2812);
120static_assert(static_cast<u32>(MemoryState::Kernel) == 0x00002013);
121static_assert(static_cast<u32>(MemoryState::GeneratedCode) == 0x00402214);
122static_assert(static_cast<u32>(MemoryState::CodeOut) == 0x00402015);
123
124enum class MemoryPermission : u8 {
125 None = 0,
126 Mask = static_cast<u8>(~None),
127
128 Read = 1 << 0,
129 Write = 1 << 1,
130 Execute = 1 << 2,
131
132 ReadAndWrite = Read | Write,
133 ReadAndExecute = Read | Execute,
134
135 UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
136 Svc::MemoryPermission::Execute),
137};
138DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission);
139
140enum class MemoryAttribute : u8 {
141 None = 0x00,
142 Mask = 0x7F,
143 All = Mask,
144 DontCareMask = 0x80,
145
146 Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
147 IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
148 DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
149 Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
150
151 IpcAndDeviceMapped = IpcLocked | DeviceShared,
152 LockedAndIpcLocked = Locked | IpcLocked,
153 DeviceSharedAndUncached = DeviceShared | Uncached
154};
155DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);
156
157static_assert((static_cast<u8>(MemoryAttribute::Mask) &
158 static_cast<u8>(MemoryAttribute::DontCareMask)) == 0);
159
160struct MemoryInfo {
161 VAddr addr{};
162 std::size_t size{};
163 MemoryState state{};
164 MemoryPermission perm{};
165 MemoryAttribute attribute{};
166 MemoryPermission original_perm{};
167 u16 ipc_lock_count{};
168 u16 device_use_count{};
169
170 constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
171 return {
172 addr,
173 size,
174 static_cast<Svc::MemoryState>(state & MemoryState::Mask),
175 static_cast<Svc::MemoryAttribute>(attribute & MemoryAttribute::Mask),
176 static_cast<Svc::MemoryPermission>(perm & MemoryPermission::UserMask),
177 ipc_lock_count,
178 device_use_count,
179 };
180 }
181
182 constexpr VAddr GetAddress() const {
183 return addr;
184 }
185 constexpr std::size_t GetSize() const {
186 return size;
187 }
188 constexpr std::size_t GetNumPages() const {
189 return GetSize() / PageSize;
190 }
191 constexpr VAddr GetEndAddress() const {
192 return GetAddress() + GetSize();
193 }
194 constexpr VAddr GetLastAddress() const {
195 return GetEndAddress() - 1;
196 }
197};
198
199class MemoryBlock final {
200 friend class MemoryBlockManager;
201
202private:
203 VAddr addr{};
204 std::size_t num_pages{};
205 MemoryState state{MemoryState::None};
206 u16 ipc_lock_count{};
207 u16 device_use_count{};
208 MemoryPermission perm{MemoryPermission::None};
209 MemoryPermission original_perm{MemoryPermission::None};
210 MemoryAttribute attribute{MemoryAttribute::None};
211
212public:
213 static constexpr int Compare(const MemoryBlock& lhs, const MemoryBlock& rhs) {
214 if (lhs.GetAddress() < rhs.GetAddress()) {
215 return -1;
216 } else if (lhs.GetAddress() <= rhs.GetLastAddress()) {
217 return 0;
218 } else {
219 return 1;
220 }
221 }
222
223public:
224 constexpr MemoryBlock() = default;
225 constexpr MemoryBlock(VAddr addr, std::size_t num_pages, MemoryState state,
226 MemoryPermission perm, MemoryAttribute attribute)
227 : addr{addr}, num_pages(num_pages), state{state}, perm{perm}, attribute{attribute} {}
228
229 constexpr VAddr GetAddress() const {
230 return addr;
231 }
232
233 constexpr std::size_t GetNumPages() const {
234 return num_pages;
235 }
236
237 constexpr std::size_t GetSize() const {
238 return GetNumPages() * PageSize;
239 }
240
241 constexpr VAddr GetEndAddress() const {
242 return GetAddress() + GetSize();
243 }
244
245 constexpr VAddr GetLastAddress() const {
246 return GetEndAddress() - 1;
247 }
248
249 constexpr MemoryInfo GetMemoryInfo() const {
250 return {
251 GetAddress(), GetSize(), state, perm,
252 attribute, original_perm, ipc_lock_count, device_use_count,
253 };
254 }
255
256private:
257 constexpr bool HasProperties(MemoryState s, MemoryPermission p, MemoryAttribute a) const {
258 constexpr MemoryAttribute AttributeIgnoreMask{MemoryAttribute::DontCareMask |
259 MemoryAttribute::IpcLocked |
260 MemoryAttribute::DeviceShared};
261 return state == s && perm == p &&
262 (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
263 }
264
265 constexpr bool HasSameProperties(const MemoryBlock& rhs) const {
266 return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
267 attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
268 device_use_count == rhs.device_use_count;
269 }
270
271 constexpr bool Contains(VAddr start) const {
272 return GetAddress() <= start && start <= GetEndAddress();
273 }
274
275 constexpr void Add(std::size_t count) {
276 ASSERT(count > 0);
277 ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);
278
279 num_pages += count;
280 }
281
282 constexpr void Update(MemoryState new_state, MemoryPermission new_perm,
283 MemoryAttribute new_attribute) {
284 ASSERT(original_perm == MemoryPermission::None);
285 ASSERT((attribute & MemoryAttribute::IpcLocked) == MemoryAttribute::None);
286
287 state = new_state;
288 perm = new_perm;
289
290 // TODO(bunnei): Is this right?
291 attribute = static_cast<MemoryAttribute>(
292 new_attribute /*| (attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared))*/);
293 }
294
295 constexpr MemoryBlock Split(VAddr split_addr) {
296 ASSERT(GetAddress() < split_addr);
297 ASSERT(Contains(split_addr));
298 ASSERT(Common::IsAligned(split_addr, PageSize));
299
300 MemoryBlock block;
301 block.addr = addr;
302 block.num_pages = (split_addr - GetAddress()) / PageSize;
303 block.state = state;
304 block.ipc_lock_count = ipc_lock_count;
305 block.device_use_count = device_use_count;
306 block.perm = perm;
307 block.original_perm = original_perm;
308 block.attribute = attribute;
309
310 addr = split_addr;
311 num_pages -= block.num_pages;
312
313 return block;
314 }
315};
316static_assert(std::is_trivially_destructible<MemoryBlock>::value);
317
318} // namespace Kernel::Memory
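
Each MemoryState value packs the SVC-visible state into its low byte and the capability flags above it, which is exactly what the static_asserts after the enum pin down. For example, reproducing two of the asserted values by hand:

    // Io     = Svc state 0x01 | FlagMapped (bit 13) -> 0x00002001
    // Kernel = Svc state 0x13 | FlagMapped (bit 13) -> 0x00002013
    static_assert((0x01 | (1 << 13)) == 0x00002001);
    static_assert((0x13 | (1 << 13)) == 0x00002013);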
diff --git a/src/core/hle/kernel/memory/memory_block_manager.cpp b/src/core/hle/kernel/memory/memory_block_manager.cpp
new file mode 100644
index 000000000..1ebc126c0
--- /dev/null
+++ b/src/core/hle/kernel/memory/memory_block_manager.cpp
@@ -0,0 +1,190 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/memory/memory_block_manager.h"
6#include "core/hle/kernel/memory/memory_types.h"
7
8namespace Kernel::Memory {
9
10MemoryBlockManager::MemoryBlockManager(VAddr start_addr, VAddr end_addr)
11 : start_addr{start_addr}, end_addr{end_addr} {
12 const u64 num_pages{(end_addr - start_addr) / PageSize};
13 memory_block_tree.emplace_back(start_addr, num_pages, MemoryState::Free, MemoryPermission::None,
14 MemoryAttribute::None);
15}
16
17MemoryBlockManager::iterator MemoryBlockManager::FindIterator(VAddr addr) {
18 auto node{memory_block_tree.begin()};
19 while (node != end()) {
20 const VAddr end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
21 if (node->GetAddress() <= addr && end_addr - 1 >= addr) {
22 return node;
23 }
24 node = std::next(node);
25 }
26 return end();
27}
28
29VAddr MemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
30 std::size_t num_pages, std::size_t align, std::size_t offset,
31 std::size_t guard_pages) {
32 if (num_pages == 0) {
33 return {};
34 }
35
36 const VAddr region_end{region_start + region_num_pages * PageSize};
37 const VAddr region_last{region_end - 1};
38 for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
39 const auto info{it->GetMemoryInfo()};
40 if (region_last < info.GetAddress()) {
41 break;
42 }
43
44 if (info.state != MemoryState::Free) {
45 continue;
46 }
47
48 VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
49 area += guard_pages * PageSize;
50
51 const VAddr offset_area{Common::AlignDown(area, align) + offset};
52 area = (area <= offset_area) ? offset_area : offset_area + align;
53
54 const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
55 const VAddr area_last{area_end - 1};
56
57 if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
58 area_last <= info.GetLastAddress()) {
59 return area;
60 }
61 }
62
63 return {};
64}
65
66void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
67 MemoryPermission prev_perm, MemoryAttribute prev_attribute,
68 MemoryState state, MemoryPermission perm,
69 MemoryAttribute attribute) {
70 const std::size_t prev_count{memory_block_tree.size()};
71 const VAddr end_addr{addr + num_pages * PageSize};
72 iterator node{memory_block_tree.begin()};
73
74 prev_attribute |= MemoryAttribute::IpcAndDeviceMapped;
75
76 while (node != memory_block_tree.end()) {
77 MemoryBlock* block{&(*node)};
78 iterator next_node{std::next(node)};
79 const VAddr cur_addr{block->GetAddress()};
80 const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
81
82 if (addr < cur_end_addr && cur_addr < end_addr) {
83 if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
84 node = next_node;
85 continue;
86 }
87
88 iterator new_node{node};
89 if (addr > cur_addr) {
90 memory_block_tree.insert(node, block->Split(addr));
91 }
92
93 if (end_addr < cur_end_addr) {
94 new_node = memory_block_tree.insert(node, block->Split(end_addr));
95 }
96
97 new_node->Update(state, perm, attribute);
98
99 MergeAdjacent(new_node, next_node);
100 }
101
102 if (cur_end_addr - 1 >= end_addr - 1) {
103 break;
104 }
105
106 node = next_node;
107 }
108}
109
110void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState state,
111 MemoryPermission perm, MemoryAttribute attribute) {
112 const std::size_t prev_count{memory_block_tree.size()};
113 const VAddr end_addr{addr + num_pages * PageSize};
114 iterator node{memory_block_tree.begin()};
115
116 while (node != memory_block_tree.end()) {
117 MemoryBlock* block{&(*node)};
118 iterator next_node{std::next(node)};
119 const VAddr cur_addr{block->GetAddress()};
120 const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
121
122 if (addr < cur_end_addr && cur_addr < end_addr) {
123 iterator new_node{node};
124
125 if (addr > cur_addr) {
126 memory_block_tree.insert(node, block->Split(addr));
127 }
128
129 if (end_addr < cur_end_addr) {
130 new_node = memory_block_tree.insert(node, block->Split(end_addr));
131 }
132
133 new_node->Update(state, perm, attribute);
134
135 MergeAdjacent(new_node, next_node);
136 }
137
138 if (cur_end_addr - 1 >= end_addr - 1) {
139 break;
140 }
141
142 node = next_node;
143 }
144}
145
146void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
147 const_iterator it{FindIterator(start)};
148 MemoryInfo info{};
149 do {
150 info = it->GetMemoryInfo();
151 func(info);
152 it = std::next(it);
153 } while (info.addr + info.size - 1 < end - 1 && it != cend());
154}
155
156void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
157 MemoryBlock* block{&(*it)};
158
159 auto EraseIt = [&](const iterator it_to_erase) {
160 if (next_it == it_to_erase) {
161 next_it = std::next(next_it);
162 }
163 memory_block_tree.erase(it_to_erase);
164 };
165
166 if (it != memory_block_tree.begin()) {
167 MemoryBlock* prev{&(*std::prev(it))};
168
169 if (block->HasSameProperties(*prev)) {
170 const iterator prev_it{std::prev(it)};
171
172 prev->Add(block->GetNumPages());
173 EraseIt(it);
174
175 it = prev_it;
176 block = prev;
177 }
178 }
179
180 if (it != cend()) {
181 const MemoryBlock* const next{&(*std::next(it))};
182
183 if (block->HasSameProperties(*next)) {
184 block->Add(next->GetNumPages());
185 EraseIt(std::next(it));
186 }
187 }
188}
189
190} // namespace Kernel::Memory
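
Update finds every block overlapping the requested range, splits off any head or tail that falls outside it, stamps the new state on the middle piece, and re-merges neighbours whose properties become identical. A simplified, self-contained analogue of the split-and-update part (a plain int stands in for the state/permission/attribute triple; merging is omitted):

    #include <cstdio>
    #include <list>

    struct Block { int first_page, num_pages, state; };

    int main() {
        std::list<Block> tree{{0, 100, 0}}; // one free block covering pages [0, 100)

        const int addr = 10, count = 20, new_state = 1; // update pages [10, 30)
        for (auto it = tree.begin(); it != tree.end(); ++it) {
            const int begin = it->first_page;
            const int end = begin + it->num_pages;
            if (end <= addr || begin >= addr + count) {
                continue; // no overlap with the requested range
            }
            if (addr > begin) { // split off the head, which keeps its old state
                tree.insert(it, {begin, addr - begin, it->state});
                it->first_page = addr;
                it->num_pages = end - addr;
            }
            if (addr + count < end) { // split off the tail
                tree.insert(std::next(it), {addr + count, end - (addr + count), it->state});
                it->num_pages = addr + count - it->first_page;
            }
            it->state = new_state; // the middle piece gets the new properties
        }

        for (const auto& b : tree) {
            std::printf("[%3d, %3d) state=%d\n", b.first_page,
                        b.first_page + b.num_pages, b.state);
        }
        // Prints [0,10) state=0, [10,30) state=1, [30,100) state=0
        return 0;
    }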
diff --git a/src/core/hle/kernel/memory/memory_block_manager.h b/src/core/hle/kernel/memory/memory_block_manager.h
new file mode 100644
index 000000000..0f2270f0f
--- /dev/null
+++ b/src/core/hle/kernel/memory/memory_block_manager.h
@@ -0,0 +1,64 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <functional>
8#include <list>
9#include <memory>
10
11#include "common/common_types.h"
12#include "core/hle/kernel/memory/memory_block.h"
13
14namespace Kernel::Memory {
15
16class MemoryBlockManager final {
17public:
18 using MemoryBlockTree = std::list<MemoryBlock>;
19 using iterator = MemoryBlockTree::iterator;
20 using const_iterator = MemoryBlockTree::const_iterator;
21
22public:
23 MemoryBlockManager(VAddr start_addr, VAddr end_addr);
24
25 iterator end() {
26 return memory_block_tree.end();
27 }
28 const_iterator end() const {
29 return memory_block_tree.end();
30 }
31 const_iterator cend() const {
32 return memory_block_tree.cend();
33 }
34
35 iterator FindIterator(VAddr addr);
36
37 VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
38 std::size_t align, std::size_t offset, std::size_t guard_pages);
39
40 void Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
41 MemoryPermission prev_perm, MemoryAttribute prev_attribute, MemoryState state,
42 MemoryPermission perm, MemoryAttribute attribute);
43
44 void Update(VAddr addr, std::size_t num_pages, MemoryState state,
45 MemoryPermission perm = MemoryPermission::None,
46 MemoryAttribute attribute = MemoryAttribute::None);
47
48 using IterateFunc = std::function<void(const MemoryInfo&)>;
49 void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
50
51 MemoryBlock& FindBlock(VAddr addr) {
52 return *FindIterator(addr);
53 }
54
55private:
56 void MergeAdjacent(iterator it, iterator& next_it);
57
58 const VAddr start_addr;
59 const VAddr end_addr;
60
61 MemoryBlockTree memory_block_tree;
62};
63
64} // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/memory_layout.h b/src/core/hle/kernel/memory/memory_layout.h
new file mode 100644
index 000000000..830c6f0d7
--- /dev/null
+++ b/src/core/hle/kernel/memory/memory_layout.h
@@ -0,0 +1,73 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8
9namespace Kernel::Memory {
10
11class MemoryRegion final {
12 friend class MemoryLayout;
13
14public:
15 constexpr PAddr StartAddress() const {
16 return start_address;
17 }
18
19 constexpr PAddr EndAddress() const {
20 return end_address;
21 }
22
23private:
24 constexpr MemoryRegion() = default;
25 constexpr MemoryRegion(PAddr start_address, PAddr end_address)
26 : start_address{start_address}, end_address{end_address} {}
27
28 const PAddr start_address{};
29 const PAddr end_address{};
30};
31
32class MemoryLayout final {
33public:
34 constexpr const MemoryRegion& Application() const {
35 return application;
36 }
37
38 constexpr const MemoryRegion& Applet() const {
39 return applet;
40 }
41
42 constexpr const MemoryRegion& System() const {
43 return system;
44 }
45
46 static constexpr MemoryLayout GetDefaultLayout() {
47 constexpr std::size_t application_size{0xcd500000};
48 constexpr std::size_t applet_size{0x1fb00000};
49 constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size};
50 constexpr PAddr application_end_address{Core::DramMemoryMap::End};
51 constexpr PAddr applet_start_address{application_start_address - applet_size};
52 constexpr PAddr applet_end_address{applet_start_address + applet_size};
53 constexpr PAddr system_start_address{Core::DramMemoryMap::SlabHeapEnd};
54 constexpr PAddr system_end_address{applet_start_address};
55 return {application_start_address, application_end_address, applet_start_address,
56 applet_end_address, system_start_address, system_end_address};
57 }
58
59private:
60 constexpr MemoryLayout(PAddr application_start_address, PAddr application_end_address,
61 PAddr applet_start_address, PAddr applet_end_address,
62 PAddr system_start_address, PAddr system_end_address)
63 : application{application_start_address, application_end_address},
64 applet{applet_start_address, applet_end_address}, system{system_start_address, system_end_address} {}
65
66 const MemoryRegion application;
67 const MemoryRegion applet;
68 const MemoryRegion system;
69
70 const PAddr start_address{};
71};
72
73} // namespace Kernel::Memory
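
GetDefaultLayout assigns DRAM from the top down: the application pool takes the last 0xcd500000 bytes, the applet pool sits directly below it, and the system pool covers everything from the end of the slab heap up to the applet base. A standalone recreation of that arithmetic; the two DramMemoryMap constants are placeholders, since core/device_memory.h is not part of this excerpt:

    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr std::uint64_t dram_end = 0x180000000;     // assumed DramMemoryMap::End
        constexpr std::uint64_t slab_heap_end = 0x80460000; // assumed DramMemoryMap::SlabHeapEnd

        constexpr std::uint64_t application_size = 0xcd500000;
        constexpr std::uint64_t applet_size = 0x1fb00000;

        constexpr std::uint64_t application_start = dram_end - application_size;
        constexpr std::uint64_t applet_start = application_start - applet_size;

        std::printf("system      [%#llx, %#llx)\n",
                    (unsigned long long)slab_heap_end, (unsigned long long)applet_start);
        std::printf("applet      [%#llx, %#llx)\n",
                    (unsigned long long)applet_start, (unsigned long long)application_start);
        std::printf("application [%#llx, %#llx)\n",
                    (unsigned long long)application_start, (unsigned long long)dram_end);
        return 0;
    }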
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp
new file mode 100644
index 000000000..3cd4f9e85
--- /dev/null
+++ b/src/core/hle/kernel/memory/memory_manager.cpp
@@ -0,0 +1,176 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6
7#include "common/alignment.h"
8#include "common/assert.h"
9#include "common/common_types.h"
10#include "common/scope_exit.h"
11#include "core/hle/kernel/errors.h"
12#include "core/hle/kernel/memory/memory_manager.h"
13#include "core/hle/kernel/memory/page_linked_list.h"
14
15namespace Kernel::Memory {
16
17std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
18 const auto size{end_address - start_address};
19
20 // Calculate metadata sizes
21 const auto ref_count_size{(size / PageSize) * sizeof(u16)};
22 const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)};
23 const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)};
24 const auto page_heap_size{PageHeap::CalculateMetadataOverheadSize(size)};
25 const auto total_metadata_size{manager_size + page_heap_size};
26 ASSERT(manager_size <= total_metadata_size);
27 ASSERT(Common::IsAligned(total_metadata_size, PageSize));
28
29 // Setup region
30 pool = new_pool;
31
32 // Initialize the manager's KPageHeap
33 heap.Initialize(start_address, size, page_heap_size);
34
35 // Free the memory to the heap
36 heap.Free(start_address, size / PageSize);
37
38 // Update the heap's used size
39 heap.UpdateUsedSize();
40
41 return total_metadata_size;
42}
43
44void MemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
45 ASSERT(pool < Pool::Count);
46 managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address);
47}
48
49VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
50 Direction dir) {
51 // Early return if we're allocating no pages
52 if (num_pages == 0) {
53 return {};
54 }
55
56 // Lock the pool that we're allocating from
57 const auto pool_index{static_cast<std::size_t>(pool)};
58 std::lock_guard lock{pool_locks[pool_index]};
59
60 // Choose a heap based on our page size request
61 const s32 heap_index{PageHeap::GetAlignedBlockIndex(num_pages, align_pages)};
62
63 // Loop, trying to iterate from each block
64 // TODO (bunnei): Support multiple managers
65 Impl& chosen_manager{managers[pool_index]};
66 VAddr allocated_block{chosen_manager.AllocateBlock(heap_index)};
67
68 // If we failed to allocate, quit now
69 if (!allocated_block) {
70 return {};
71 }
72
73 // If we allocated more than we need, free some
74 const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
75 if (allocated_pages > num_pages) {
76 chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
77 }
78
79 return allocated_block;
80}
81
82ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
83 Direction dir) {
84 ASSERT(page_list.GetNumPages() == 0);
85
86 // Early return if we're allocating no pages
87 if (num_pages == 0) {
88 return RESULT_SUCCESS;
89 }
90
91 // Lock the pool that we're allocating from
92 const auto pool_index{static_cast<std::size_t>(pool)};
93 std::lock_guard lock{pool_locks[pool_index]};
94
95 // Choose a heap based on our page size request
96 const s32 heap_index{PageHeap::GetBlockIndex(num_pages)};
97 if (heap_index < 0) {
98 return ERR_OUT_OF_MEMORY;
99 }
100
101 // TODO (bunnei): Support multiple managers
102 Impl& chosen_manager{managers[pool_index]};
103
104 // Ensure that we don't leave anything un-freed
105 auto group_guard = detail::ScopeExit([&] {
106 for (const auto& it : page_list.Nodes()) {
107 const auto num_pages{std::min(
108 it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
109 chosen_manager.Free(it.GetAddress(), num_pages);
110 }
111 });
112
113 // Keep allocating until we've allocated all our pages
114 for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
115 const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};
116
117 while (num_pages >= pages_per_alloc) {
118 // Allocate a block
119 VAddr allocated_block{chosen_manager.AllocateBlock(index)};
120 if (!allocated_block) {
121 break;
122 }
123
124 // Safely add it to our group
125 {
126 auto block_guard = detail::ScopeExit(
127 [&] { chosen_manager.Free(allocated_block, pages_per_alloc); });
128
129 if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)};
130 result.IsError()) {
131 return result;
132 }
133
134 block_guard.Cancel();
135 }
136
137 num_pages -= pages_per_alloc;
138 }
139 }
140
141 // Only succeed if we allocated as many pages as we wanted
142 ASSERT(num_pages >= 0);
143 if (num_pages) {
144 return ERR_OUT_OF_MEMORY;
145 }
146
147 // We succeeded!
148 group_guard.Cancel();
149 return RESULT_SUCCESS;
150}
151
152ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
153 Direction dir) {
154 // Early return if we're freeing no pages
155 if (!num_pages) {
156 return RESULT_SUCCESS;
157 }
158
159 // Lock the pool that we're freeing from
160 const auto pool_index{static_cast<std::size_t>(pool)};
161 std::lock_guard lock{pool_locks[pool_index]};
162
163 // TODO (bunnei): Support multiple managers
164 Impl& chosen_manager{managers[pool_index]};
165
166 // Free all of the pages
167 for (const auto& it : page_list.Nodes()) {
168 const auto num_pages{std::min(
169 it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
170 chosen_manager.Free(it.GetAddress(), num_pages);
171 }
172
173 return RESULT_SUCCESS;
174}
175
176} // namespace Kernel::Memory
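
Allocate leans on cancelable scope guards for error safety: the outer guard frees everything already linked into page_list, the inner guard frees the block just popped from the heap, and each is cancelled once ownership has safely transferred. A minimal standalone version of that pattern (the real detail::ScopeExit lives in common/scope_exit.h):

    #include <cstdio>
    #include <utility>

    // Minimal cancelable scope guard, analogous in spirit to detail::ScopeExit.
    template <typename F>
    class ScopeGuard {
    public:
        explicit ScopeGuard(F func) : func_{std::move(func)} {}
        ~ScopeGuard() {
            if (active_) {
                func_(); // runs the cleanup unless ownership was transferred
            }
        }
        void Cancel() {
            active_ = false;
        }

    private:
        F func_;
        bool active_{true};
    };

    bool TryRegister(int block) {
        return block % 2 == 0; // stand-in for page_list.AddBlock succeeding
    }

    int main() {
        const int block = 42;
        ScopeGuard guard{[&] { std::printf("freeing block %d\n", block); }};
        if (TryRegister(block)) {
            guard.Cancel(); // ownership transferred; do not free on scope exit
        }
        return 0;
    }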
diff --git a/src/core/hle/kernel/memory/memory_manager.h b/src/core/hle/kernel/memory/memory_manager.h
new file mode 100644
index 000000000..b078d7a5e
--- /dev/null
+++ b/src/core/hle/kernel/memory/memory_manager.h
@@ -0,0 +1,97 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <mutex>
9
10#include "common/common_funcs.h"
11#include "common/common_types.h"
12#include "core/hle/kernel/memory/page_heap.h"
13#include "core/hle/result.h"
14
15namespace Kernel::Memory {
16
17class PageLinkedList;
18
19class MemoryManager final : NonCopyable {
20public:
21 enum class Pool : u32 {
22 Application = 0,
23 Applet = 1,
24 System = 2,
25 SystemNonSecure = 3,
26
27 Count,
28
29 Shift = 4,
30 Mask = (0xF << Shift),
31 };
32
33 enum class Direction : u32 {
34 FromFront = 0,
35 FromBack = 1,
36
37 Shift = 0,
38 Mask = (0xF << Shift),
39 };
40
41 MemoryManager() = default;
42
43 constexpr std::size_t GetSize(Pool pool) const {
44 return managers[static_cast<std::size_t>(pool)].GetSize();
45 }
46
47 void InitializeManager(Pool pool, u64 start_address, u64 end_address);
48 VAddr AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
49 Direction dir = Direction::FromFront);
50 ResultCode Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
51 Direction dir = Direction::FromFront);
52 ResultCode Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
53 Direction dir = Direction::FromFront);
54
55 static constexpr std::size_t MaxManagerCount = 10;
56
57private:
58 class Impl final : NonCopyable {
59 private:
60 using RefCount = u16;
61
62 private:
63 PageHeap heap;
64 Pool pool{};
65
66 public:
67 Impl() = default;
68
69 std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);
70
71 VAddr AllocateBlock(s32 index) {
72 return heap.AllocateBlock(index);
73 }
74
75 void Free(VAddr addr, std::size_t num_pages) {
76 heap.Free(addr, num_pages);
77 }
78
79 constexpr std::size_t GetSize() const {
80 return heap.GetSize();
81 }
82
83 constexpr VAddr GetAddress() const {
84 return heap.GetAddress();
85 }
86
87 constexpr VAddr GetEndAddress() const {
88 return heap.GetEndAddress();
89 }
90 };
91
92private:
93 std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks;
94 std::array<Impl, MaxManagerCount> managers;
95};
96
97} // namespace Kernel::Memory
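
A hypothetical caller, assuming a KernelCore& named kernel and a PageLinkedList from page_linked_list.h; this mirrors how Allocate reports failure through ResultCode rather than exceptions:

    Memory::PageLinkedList page_list;
    if (const ResultCode result{kernel.MemoryManager().Allocate(
            page_list, 16, Memory::MemoryManager::Pool::Application)};
        result.IsError()) {
        return result; // e.g. ERR_OUT_OF_MEMORY when the pool is exhausted
    }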
diff --git a/src/core/hle/kernel/memory/memory_types.h b/src/core/hle/kernel/memory/memory_types.h
new file mode 100644
index 000000000..a75bf77c0
--- /dev/null
+++ b/src/core/hle/kernel/memory/memory_types.h
@@ -0,0 +1,18 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8
9#include "common/common_types.h"
10
11namespace Kernel::Memory {
12
13constexpr std::size_t PageBits{12};
14constexpr std::size_t PageSize{1 << PageBits};
15
16using Page = std::array<u8, PageSize>;
17
18} // namespace Kernel::Memory
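
PageBits/PageSize give shift-based page arithmetic throughout the new code; the helpers below illustrate the conversions this header exists for (they are illustrative only and not declared anywhere in this patch):

#include "core/hle/kernel/memory/memory_types.h"

namespace Kernel::Memory {
// Illustrative helpers layered on PageBits/PageSize.
constexpr std::size_t BytesToPages(std::size_t bytes) {
    return bytes >> PageBits; // e.g. 0x3000 bytes -> 3 pages
}
constexpr std::size_t PagesToBytes(std::size_t num_pages) {
    return num_pages << PageBits; // e.g. 3 pages -> 0x3000 bytes
}
static_assert(BytesToPages(PageSize * 3) == 3);
static_assert(PagesToBytes(3) == 0x3000);
} // namespace Kernel::Memory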
diff --git a/src/core/hle/kernel/memory/page_heap.cpp b/src/core/hle/kernel/memory/page_heap.cpp
new file mode 100644
index 000000000..efcbb3cad
--- /dev/null
+++ b/src/core/hle/kernel/memory/page_heap.cpp
@@ -0,0 +1,119 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphère, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX.
7
8#include "core/core.h"
9#include "core/hle/kernel/memory/page_heap.h"
10#include "core/memory.h"
11
12namespace Kernel::Memory {
13
14void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
15 // Check our assumptions
16 ASSERT(Common::IsAligned((address), PageSize));
17 ASSERT(Common::IsAligned(size, PageSize));
18
19 // Set our members
20 heap_address = address;
21 heap_size = size;
22
23 // Setup bitmaps
24 metadata.resize(metadata_size / sizeof(u64));
25 u64* cur_bitmap_storage{metadata.data()};
26 for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
27 const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
28 const std::size_t next_block_shift{
29 (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
30 cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift,
31 next_block_shift, cur_bitmap_storage);
32 }
33}
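// Note on the loop above: each Block::Initialize consumes a slice of `metadata`
// for its multi-level bitmap and returns the first unused u64*, so the seven
// blocks partition one flat allocation whose size was computed up front by
// CalculateMetadataOverheadSize().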
34
35VAddr PageHeap::AllocateBlock(s32 index) {
36 const std::size_t needed_size{blocks[index].GetSize()};
37
38 for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
39 if (const VAddr addr{blocks[i].PopBlock()}; addr) {
40 if (const std::size_t allocated_size{blocks[i].GetSize()};
41 allocated_size > needed_size) {
42 Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
43 }
44 return addr;
45 }
46 }
47
48 return 0;
49}
50
51void PageHeap::FreeBlock(VAddr block, s32 index) {
52 do {
53 block = blocks[index++].PushBlock(block);
54 } while (block != 0);
55}
56
57void PageHeap::Free(VAddr addr, std::size_t num_pages) {
58 // Freeing no pages is a no-op
59 if (num_pages == 0) {
60 return;
61 }
62
63 // Find the largest block size that we can free, and free as many as possible
64 s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1};
65 const VAddr start{addr};
66 const VAddr end{(num_pages * PageSize) + addr};
67 VAddr before_start{start};
68 VAddr before_end{start};
69 VAddr after_start{end};
70 VAddr after_end{end};
71 while (big_index >= 0) {
72 const std::size_t block_size{blocks[big_index].GetSize()};
73 const VAddr big_start{Common::AlignUp((start), block_size)};
74 const VAddr big_end{Common::AlignDown((end), block_size)};
75 if (big_start < big_end) {
76 // Free as many big blocks as we can
77 for (auto block{big_start}; block < big_end; block += block_size) {
78 FreeBlock(block, big_index);
79 }
80 before_end = big_start;
81 after_start = big_end;
82 break;
83 }
84 big_index--;
85 }
86 ASSERT(big_index >= 0);
87
88 // Free space before the big blocks
89 for (s32 i{big_index - 1}; i >= 0; i--) {
90 const std::size_t block_size{blocks[i].GetSize()};
91 while (before_start + block_size <= before_end) {
92 before_end -= block_size;
93 FreeBlock(before_end, i);
94 }
95 }
96
97 // Free space after the big blocks
98 for (s32 i{big_index - 1}; i >= 0; i--) {
99 const std::size_t block_size{blocks[i].GetSize()};
100 while (after_start + block_size <= after_end) {
101 FreeBlock(after_start, i);
102 after_start += block_size;
103 }
104 }
105}
106
107std::size_t PageHeap::CalculateMetadataOverheadSize(std::size_t region_size) {
108 std::size_t overhead_size = 0;
109 for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
110 const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
111 const std::size_t next_block_shift{
112 (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
113 overhead_size += PageHeap::Block::CalculateMetadataOverheadSize(
114 region_size, cur_block_shift, next_block_shift);
115 }
116 return Common::AlignUp(overhead_size, PageSize);
117}
118
119} // namespace Kernel::Memory
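
To make the split in AllocateBlock concrete, here is a hand-worked trace (addresses made up) for a 4 KiB request served from a 64 KiB block, per the shifts in page_heap.h:

// AllocateBlock(0) when only a 64 KiB block (index 1) is free:
//   needed_size    = blocks[0].GetSize();            // 0x1000
//   blocks[1].PopBlock()                             // returns e.g. 0x80000000
//   allocated_size = blocks[1].GetSize();            // 0x10000 > 0x1000
//   Free(0x80001000, (0x10000 - 0x1000) / PageSize); // 15 pages returned
//   return 0x80000000;                               // caller owns 4 KiB
//
// Free() re-buckets those 15 pages as fifteen 4 KiB blocks; the sixteenth
// buddy (0x80000000) is still allocated, so PushBlock's ClearRange fails and
// no 64 KiB block re-forms until it is freed too.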
diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h
new file mode 100644
index 000000000..380c3f5a1
--- /dev/null
+++ b/src/core/hle/kernel/memory/page_heap.h
@@ -0,0 +1,370 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphère, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX.
7
8#pragma once
9
10#include <array>
11#include <vector>
12
13#include "common/alignment.h"
14#include "common/assert.h"
15#include "common/bit_util.h"
16#include "common/common_funcs.h"
17#include "common/common_types.h"
18#include "core/hle/kernel/memory/memory_types.h"
19
20namespace Kernel::Memory {
21
22class PageHeap final : NonCopyable {
23public:
24 static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
25 const auto target_pages{std::max(num_pages, align_pages)};
26 for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
27 if (target_pages <=
28 (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
29 return static_cast<s32>(i);
30 }
31 }
32 return -1;
33 }
34
35 static constexpr s32 GetBlockIndex(std::size_t num_pages) {
36 for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
37 if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
38 return i;
39 }
40 }
41 return -1;
42 }
43
44 static constexpr std::size_t GetBlockSize(std::size_t index) {
45 return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
46 }
47
48 static constexpr std::size_t GetBlockNumPages(std::size_t index) {
49 return GetBlockSize(index) / PageSize;
50 }
51
52private:
53 static constexpr std::size_t NumMemoryBlockPageShifts{7};
54 static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
55 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
56 };
57
58 class Block final : NonCopyable {
59 private:
60 class Bitmap final : NonCopyable {
61 public:
62 static constexpr std::size_t MaxDepth{4};
63
64 private:
65 std::array<u64*, MaxDepth> bit_storages{};
66 std::size_t num_bits{};
67 std::size_t used_depths{};
68
69 public:
70 constexpr Bitmap() = default;
71
72 constexpr std::size_t GetNumBits() const {
73 return num_bits;
74 }
75 constexpr s32 GetHighestDepthIndex() const {
76 return static_cast<s32>(used_depths) - 1;
77 }
78
79 constexpr u64* Initialize(u64* storage, std::size_t size) {
80 // Initially, everything is unset
81 num_bits = 0;
82
83 // Calculate the needed bitmap depth
84 used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
85 ASSERT(used_depths <= MaxDepth);
86
87 // Set the bitmap pointers
88 for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
89 bit_storages[depth] = storage;
90 size = Common::AlignUp(size, 64) / 64;
91 storage += size;
92 }
93
94 return storage;
95 }
96
97 s64 FindFreeBlock() const {
98 uintptr_t offset{};
99 s32 depth{};
100
101 do {
102 const u64 v{bit_storages[depth][offset]};
103 if (v == 0) {
104 // A zero word is only valid at depth 0 (no free blocks at all); deeper levels were promised a set bit by their parent
105 ASSERT(depth == 0);
106 return -1;
107 }
108 offset = offset * 64 + Common::CountTrailingZeroes64(v);
109 ++depth;
110 } while (depth < static_cast<s32>(used_depths));
111
112 return static_cast<s64>(offset);
113 }
114
115 constexpr void SetBit(std::size_t offset) {
116 SetBit(GetHighestDepthIndex(), offset);
117 num_bits++;
118 }
119
120 constexpr void ClearBit(std::size_t offset) {
121 ClearBit(GetHighestDepthIndex(), offset);
122 num_bits--;
123 }
124
125 constexpr bool ClearRange(std::size_t offset, std::size_t count) {
126 const s32 depth{GetHighestDepthIndex()};
127 const auto bit_ind{offset / 64};
128 u64* bits{bit_storages[depth]};
129 if (count < 64) {
130 const auto shift{offset % 64};
131 ASSERT(shift + count <= 64);
132 // Check that all the bits are set
133 const u64 mask{((1ULL << count) - 1) << shift};
134 u64 v{bits[bit_ind]};
135 if ((v & mask) != mask) {
136 return false;
137 }
138
139 // Clear the bits
140 v &= ~mask;
141 bits[bit_ind] = v;
142 if (v == 0) {
143 ClearBit(depth - 1, bit_ind);
144 }
145 } else {
146 ASSERT(offset % 64 == 0);
147 ASSERT(count % 64 == 0);
148 // Check that all the bits are set
149 std::size_t remaining{count};
150 std::size_t i = 0;
151 do {
152 if (bits[bit_ind + i++] != ~u64(0)) {
153 return false;
154 }
155 remaining -= 64;
156 } while (remaining > 0);
157
158 // Clear the bits
159 remaining = count;
160 i = 0;
161 do {
162 bits[bit_ind + i] = 0;
163 ClearBit(depth - 1, bit_ind + i);
164 i++;
165 remaining -= 64;
166 } while (remaining > 0);
167 }
168
169 num_bits -= count;
170 return true;
171 }
172
173 private:
174 constexpr void SetBit(s32 depth, std::size_t offset) {
175 while (depth >= 0) {
176 const auto ind{offset / 64};
177 const auto which{offset % 64};
178 const u64 mask{1ULL << which};
179
180 u64* bit{std::addressof(bit_storages[depth][ind])};
181 const u64 v{*bit};
182 ASSERT((v & mask) == 0);
183 *bit = v | mask;
184 if (v) {
185 break;
186 }
187 offset = ind;
188 depth--;
189 }
190 }
191
192 constexpr void ClearBit(s32 depth, std::size_t offset) {
193 while (depth >= 0) {
194 const auto ind{offset / 64};
195 const auto which{offset % 64};
196 const u64 mask{1ULL << which};
197
198 u64* bit{std::addressof(bit_storages[depth][ind])};
199 u64 v{*bit};
200 ASSERT((v & mask) != 0);
201 v &= ~mask;
202 *bit = v;
203 if (v) {
204 break;
205 }
206 offset = ind;
207 depth--;
208 }
209 }
210
211 private:
212 static constexpr s32 GetRequiredDepth(std::size_t region_size) {
213 s32 depth = 0;
214 while (true) {
215 region_size /= 64;
216 depth++;
217 if (region_size == 0) {
218 return depth;
219 }
220 }
221 }
222
223 public:
224 static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size) {
225 std::size_t overhead_bits = 0;
226 for (s32 depth{GetRequiredDepth(region_size) - 1}; depth >= 0; depth--) {
227 region_size = Common::AlignUp(region_size, 64) / 64;
228 overhead_bits += region_size;
229 }
230 return overhead_bits * sizeof(u64);
231 }
232 };
233
234 private:
235 Bitmap bitmap;
236 VAddr heap_address{};
237 uintptr_t end_offset{};
238 std::size_t block_shift{};
239 std::size_t next_block_shift{};
240
241 public:
242 constexpr Block() = default;
243
244 constexpr std::size_t GetShift() const {
245 return block_shift;
246 }
247 constexpr std::size_t GetNextShift() const {
248 return next_block_shift;
249 }
250 constexpr std::size_t GetSize() const {
251 return static_cast<std::size_t>(1) << GetShift();
252 }
253 constexpr std::size_t GetNumPages() const {
254 return GetSize() / PageSize;
255 }
256 constexpr std::size_t GetNumFreeBlocks() const {
257 return bitmap.GetNumBits();
258 }
259 constexpr std::size_t GetNumFreePages() const {
260 return GetNumFreeBlocks() * GetNumPages();
261 }
262
263 constexpr u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
264 u64* bit_storage) {
265 // Set shifts
266 block_shift = bs;
267 next_block_shift = nbs;
268
269 // Align the range outward to the block size
270 VAddr end{addr + size};
271 const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
272 : (1ULL << block_shift)};
273 addr = Common::AlignDown((addr), align);
274 end = Common::AlignUp((end), align);
275
276 heap_address = addr;
277 end_offset = (end - addr) / (1ULL << block_shift);
278 return bitmap.Initialize(bit_storage, end_offset);
279 }
280
281 constexpr VAddr PushBlock(VAddr address) {
282 // Set the bit for the free block
283 std::size_t offset{(address - heap_address) >> GetShift()};
284 bitmap.SetBit(offset);
285
286 // If we have a next shift, try to clear the blocks below and return the address
287 if (GetNextShift()) {
288 const auto diff{1ULL << (GetNextShift() - GetShift())};
289 offset = Common::AlignDown(offset, diff);
290 if (bitmap.ClearRange(offset, diff)) {
291 return heap_address + (offset << GetShift());
292 }
293 }
294
295 // We couldn't coalesce, or we're already as big as possible
296 return 0;
297 }
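// Coalescing sketch for the 4 KiB / 64 KiB pair: pushing the last free 4 KiB
// buddy of an otherwise-free 64 KiB span sets the final leaf bit, ClearRange
// then drops all sixteen buddies at once, and FreeBlock re-pushes the merged
// block at the next index up.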
298
299 VAddr PopBlock() {
300 // Find a free block
301 const s64 soffset{bitmap.FindFreeBlock()};
302 if (soffset < 0) {
303 return 0;
304 }
305 const auto offset{static_cast<std::size_t>(soffset)};
306
307 // Update our tracking and return it
308 bitmap.ClearBit(offset);
309 return heap_address + (offset << GetShift());
310 }
311
312 public:
313 static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size,
314 std::size_t cur_block_shift,
315 std::size_t next_block_shift) {
316 const auto cur_block_size{(1ULL << cur_block_shift)};
317 const auto next_block_size{(1ULL << next_block_shift)};
318 const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
319 return Bitmap::CalculateMetadataOverheadSize(
320 (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
321 }
322 };
323
324public:
325 PageHeap() = default;
326
327 constexpr VAddr GetAddress() const {
328 return heap_address;
329 }
330 constexpr std::size_t GetSize() const {
331 return heap_size;
332 }
333 constexpr VAddr GetEndAddress() const {
334 return GetAddress() + GetSize();
335 }
336 constexpr std::size_t GetPageOffset(VAddr block) const {
337 return (block - GetAddress()) / PageSize;
338 }
339
340 void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
341 VAddr AllocateBlock(s32 index);
342 void Free(VAddr addr, std::size_t num_pages);
343
344 void UpdateUsedSize() {
345 used_size = heap_size - (GetNumFreePages() * PageSize);
346 }
347
348 static std::size_t CalculateMetadataOverheadSize(std::size_t region_size);
349
350private:
351 constexpr std::size_t GetNumFreePages() const {
352 std::size_t num_free{};
353
354 for (const auto& block : blocks) {
355 num_free += block.GetNumFreePages();
356 }
357
358 return num_free;
359 }
360
361 void FreeBlock(VAddr block, s32 index);
362
363 VAddr heap_address{};
364 std::size_t heap_size{};
365 std::size_t used_size{};
366 std::array<Block, NumMemoryBlockPageShifts> blocks{};
367 std::vector<u64> metadata;
368};
369
370} // namespace Kernel::Memory
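
The Bitmap is a radix-64 summary tree: each u64 at depth d records which of 64 words at depth d+1 are non-zero, so GetRequiredDepth is effectively a ceiling log base 64. A hand-checked instance of the overhead formula, for a region of 16384 blocks (64 MiB of 4 KiB pages):

// GetRequiredDepth(16384): 16384 -> 256 -> 4 -> 0, i.e. 3 levels.
// CalculateMetadataOverheadSize then walks those levels leaf-first:
//   AlignUp(16384, 64) / 64 = 256 words  (leaf bits, one per block)
//   AlignUp(  256, 64) / 64 =   4 words  (summary of the leaves)
//   AlignUp(    4, 64) / 64 =   1 word   (root)
// Total: (256 + 4 + 1) * sizeof(u64) = 2088 bytes of metadata.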
diff --git a/src/core/hle/kernel/memory/page_linked_list.h b/src/core/hle/kernel/memory/page_linked_list.h
new file mode 100644
index 000000000..0668d00c6
--- /dev/null
+++ b/src/core/hle/kernel/memory/page_linked_list.h
@@ -0,0 +1,93 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <list>
8
9#include "common/assert.h"
10#include "common/common_funcs.h"
11#include "common/common_types.h"
12#include "core/hle/kernel/memory/memory_types.h"
13#include "core/hle/result.h"
14
15namespace Kernel::Memory {
16
17class PageLinkedList final {
18public:
19 class Node final {
20 public:
21 constexpr Node(u64 addr, std::size_t num_pages) : addr{addr}, num_pages{num_pages} {}
22
23 constexpr u64 GetAddress() const {
24 return addr;
25 }
26
27 constexpr std::size_t GetNumPages() const {
28 return num_pages;
29 }
30
31 private:
32 u64 addr{};
33 std::size_t num_pages{};
34 };
35
36public:
37 PageLinkedList() = default;
38 PageLinkedList(u64 address, u64 num_pages) {
39 ASSERT(AddBlock(address, num_pages).IsSuccess());
40 }
41
42 constexpr std::list<Node>& Nodes() {
43 return nodes;
44 }
45
46 constexpr const std::list<Node>& Nodes() const {
47 return nodes;
48 }
49
50 std::size_t GetNumPages() const {
51 std::size_t num_pages = 0;
52 for (const Node& node : nodes) {
53 num_pages += node.GetNumPages();
54 }
55 return num_pages;
56 }
57
58 bool IsEqual(PageLinkedList& other) const {
59 auto this_node = nodes.begin();
60 auto other_node = other.nodes.begin();
61 while (this_node != nodes.end() && other_node != other.nodes.end()) {
62 if (this_node->GetAddress() != other_node->GetAddress() ||
63 this_node->GetNumPages() != other_node->GetNumPages()) {
64 return false;
65 }
66 this_node = std::next(this_node);
67 other_node = std::next(other_node);
68 }
69
70 return this_node == nodes.end() && other_node == other.nodes.end();
71 }
72
73 ResultCode AddBlock(u64 address, u64 num_pages) {
74 if (!num_pages) {
75 return RESULT_SUCCESS;
76 }
77 if (!nodes.empty()) {
78 const auto node = nodes.back();
79 if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
80 address = node.GetAddress();
81 num_pages += node.GetNumPages();
82 nodes.pop_back();
83 }
84 }
85 nodes.push_back({address, num_pages});
86 return RESULT_SUCCESS;
87 }
88
89private:
90 std::list<Node> nodes;
91};
92
93} // namespace Kernel::Memory
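
AddBlock folds a new block into the tail node whenever the two are physically contiguous, so page-by-page appends (as in PageTable::AddRegionToPages) do not balloon the list. A small sketch of the resulting node shapes:

Kernel::Memory::PageLinkedList list;
list.AddBlock(0x1000, 1); // nodes: {0x1000, 1}
list.AddBlock(0x2000, 1); // contiguous with the tail -> {0x1000, 2}
list.AddBlock(0x3000, 1); // still contiguous         -> {0x1000, 3}
list.AddBlock(0x8000, 1); // gap, new node            -> {0x1000, 3}, {0x8000, 1}
// list.GetNumPages() == 4; list.Nodes().size() == 2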
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
new file mode 100644
index 000000000..091e52ca4
--- /dev/null
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -0,0 +1,1130 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/alignment.h"
6#include "common/assert.h"
7#include "common/scope_exit.h"
8#include "core/core.h"
9#include "core/device_memory.h"
10#include "core/hle/kernel/errors.h"
11#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/memory/address_space_info.h"
13#include "core/hle/kernel/memory/memory_block.h"
14#include "core/hle/kernel/memory/memory_block_manager.h"
15#include "core/hle/kernel/memory/page_linked_list.h"
16#include "core/hle/kernel/memory/page_table.h"
17#include "core/hle/kernel/memory/system_control.h"
18#include "core/hle/kernel/process.h"
19#include "core/hle/kernel/resource_limit.h"
20#include "core/memory.h"
21
22namespace Kernel::Memory {
23
24namespace {
25
26constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
27 switch (as_type) {
28 case FileSys::ProgramAddressSpaceType::Is32Bit:
29 case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
30 return 32;
31 case FileSys::ProgramAddressSpaceType::Is36Bit:
32 return 36;
33 case FileSys::ProgramAddressSpaceType::Is39Bit:
34 return 39;
35 default:
36 UNREACHABLE();
37 return {};
38 }
39}
40
41constexpr u64 GetAddressInRange(const MemoryInfo& info, VAddr addr) {
42 if (info.GetAddress() < addr) {
43 return addr;
44 }
45 return info.GetAddress();
46}
47
48constexpr std::size_t GetSizeInRange(const MemoryInfo& info, VAddr start, VAddr end) {
49 std::size_t size{info.GetSize()};
50 if (info.GetAddress() < start) {
51 size -= start - info.GetAddress();
52 }
53 if (info.GetEndAddress() > end) {
54 size -= info.GetEndAddress() - end;
55 }
56 return size;
57}
58
59} // namespace
60
61PageTable::PageTable(Core::System& system) : system{system} {}
62
63ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type,
64 bool enable_aslr, VAddr code_addr, std::size_t code_size,
65 Memory::MemoryManager::Pool pool) {
66
67 const auto GetSpaceStart = [this](AddressSpaceInfo::Type type) {
68 return AddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
69 };
70 const auto GetSpaceSize = [this](AddressSpaceInfo::Type type) {
71 return AddressSpaceInfo::GetAddressSpaceSize(address_space_width, type);
72 };
73
74 // Set our width and heap/alias sizes
75 address_space_width = GetAddressSpaceWidthFromType(as_type);
76 const VAddr start = 0;
77 const VAddr end{1ULL << address_space_width};
78 std::size_t alias_region_size{GetSpaceSize(AddressSpaceInfo::Type::Alias)};
79 std::size_t heap_region_size{GetSpaceSize(AddressSpaceInfo::Type::Heap)};
80
81 ASSERT(start <= code_addr);
82 ASSERT(code_addr < code_addr + code_size);
83 ASSERT(code_addr + code_size - 1 <= end - 1);
84
85 // Adjust heap/alias size if we don't have an alias region
86 if (as_type == FileSys::ProgramAddressSpaceType::Is32BitNoMap) {
87 heap_region_size += alias_region_size;
88 alias_region_size = 0;
89 }
90
91 // Set code regions and determine remaining
92 constexpr std::size_t RegionAlignment{2 * 1024 * 1024};
93 VAddr process_code_start{};
94 VAddr process_code_end{};
95 std::size_t stack_region_size{};
96 std::size_t kernel_map_region_size{};
97
98 if (address_space_width == 39) {
99 alias_region_size = GetSpaceSize(AddressSpaceInfo::Type::Alias);
100 heap_region_size = GetSpaceSize(AddressSpaceInfo::Type::Heap);
101 stack_region_size = GetSpaceSize(AddressSpaceInfo::Type::Stack);
102 kernel_map_region_size = GetSpaceSize(AddressSpaceInfo::Type::Is32Bit);
103 code_region_start = GetSpaceStart(AddressSpaceInfo::Type::Large64Bit);
104 code_region_end = code_region_start + GetSpaceSize(AddressSpaceInfo::Type::Large64Bit);
105 alias_code_region_start = code_region_start;
106 alias_code_region_end = code_region_end;
107 process_code_start = Common::AlignDown(code_addr, RegionAlignment);
108 process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment);
109 } else {
110 stack_region_size = 0;
111 kernel_map_region_size = 0;
112 code_region_start = GetSpaceStart(AddressSpaceInfo::Type::Is32Bit);
113 code_region_end = code_region_start + GetSpaceSize(AddressSpaceInfo::Type::Is32Bit);
114 stack_region_start = code_region_start;
115 alias_code_region_start = code_region_start;
116 alias_code_region_end = GetSpaceStart(AddressSpaceInfo::Type::Small64Bit) +
117 GetSpaceSize(AddressSpaceInfo::Type::Small64Bit);
118 stack_region_end = code_region_end;
119 kernel_map_region_start = code_region_start;
120 kernel_map_region_end = code_region_end;
121 process_code_start = code_region_start;
122 process_code_end = code_region_end;
123 }
124
125 // Set other basic fields
126 is_aslr_enabled = enable_aslr;
127 address_space_start = start;
128 address_space_end = end;
129 is_kernel = false;
130
131 // Determine the region in which we can place the remaining, not-yet-placed regions
132 VAddr alloc_start{};
133 std::size_t alloc_size{};
134 if ((process_code_start - code_region_start) >= (end - process_code_end)) {
135 alloc_start = code_region_start;
136 alloc_size = process_code_start - code_region_start;
137 } else {
138 alloc_start = process_code_end;
139 alloc_size = end - process_code_end;
140 }
141 const std::size_t needed_size{
142 (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
143 if (alloc_size < needed_size) {
144 UNREACHABLE();
145 return ERR_OUT_OF_MEMORY;
146 }
147
148 const std::size_t remaining_size{alloc_size - needed_size};
149
150 // Determine random placements for each region
151 std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
152 if (enable_aslr) {
153 alias_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
154 RegionAlignment;
155 heap_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
156 RegionAlignment;
157 stack_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
158 RegionAlignment;
159 kmap_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
160 RegionAlignment;
161 }
162
163 // Setup heap and alias regions
164 alias_region_start = alloc_start + alias_rnd;
165 alias_region_end = alias_region_start + alias_region_size;
166 heap_region_start = alloc_start + heap_rnd;
167 heap_region_end = heap_region_start + heap_region_size;
168
169 if (alias_rnd <= heap_rnd) {
170 heap_region_start += alias_region_size;
171 heap_region_end += alias_region_size;
172 } else {
173 alias_region_start += heap_region_size;
174 alias_region_end += heap_region_size;
175 }
176
177 // Setup stack region
178 if (stack_region_size) {
179 stack_region_start = alloc_start + stack_rnd;
180 stack_region_end = stack_region_start + stack_region_size;
181
182 if (alias_rnd < stack_rnd) {
183 stack_region_start += alias_region_size;
184 stack_region_end += alias_region_size;
185 } else {
186 alias_region_start += stack_region_size;
187 alias_region_end += stack_region_size;
188 }
189
190 if (heap_rnd < stack_rnd) {
191 stack_region_start += heap_region_size;
192 stack_region_end += heap_region_size;
193 } else {
194 heap_region_start += stack_region_size;
195 heap_region_end += stack_region_size;
196 }
197 }
198
199 // Setup kernel map region
200 if (kernel_map_region_size) {
201 kernel_map_region_start = alloc_start + kmap_rnd;
202 kernel_map_region_end = kernel_map_region_start + kernel_map_region_size;
203
204 if (alias_rnd < kmap_rnd) {
205 kernel_map_region_start += alias_region_size;
206 kernel_map_region_end += alias_region_size;
207 } else {
208 alias_region_start += kernel_map_region_size;
209 alias_region_end += kernel_map_region_size;
210 }
211
212 if (heap_rnd < kmap_rnd) {
213 kernel_map_region_start += heap_region_size;
214 kernel_map_region_end += heap_region_size;
215 } else {
216 heap_region_start += kernel_map_region_size;
217 heap_region_end += kernel_map_region_size;
218 }
219
220 if (stack_region_size) {
221 if (stack_rnd < kmap_rnd) {
222 kernel_map_region_start += stack_region_size;
223 kernel_map_region_end += stack_region_size;
224 } else {
225 stack_region_start += kernel_map_region_size;
226 stack_region_end += kernel_map_region_size;
227 }
228 }
229 }
230
231 // Set heap members
232 current_heap_end = heap_region_start;
233 max_heap_size = 0;
234 max_physical_memory_size = 0;
235
236 // Ensure that our regions are contained within the address space
237 auto IsInAddressSpace = [&](VAddr addr) {
238 return address_space_start <= addr && addr <= address_space_end;
239 };
240 ASSERT(IsInAddressSpace(alias_region_start));
241 ASSERT(IsInAddressSpace(alias_region_end));
242 ASSERT(IsInAddressSpace(heap_region_start));
243 ASSERT(IsInAddressSpace(heap_region_end));
244 ASSERT(IsInAddressSpace(stack_region_start));
245 ASSERT(IsInAddressSpace(stack_region_end));
246 ASSERT(IsInAddressSpace(kernel_map_region_start));
247 ASSERT(IsInAddressSpace(kernel_map_region_end));
248
249 // Ensure that we selected regions that don't overlap
250 const VAddr alias_start{alias_region_start};
251 const VAddr alias_last{alias_region_end - 1};
252 const VAddr heap_start{heap_region_start};
253 const VAddr heap_last{heap_region_end - 1};
254 const VAddr stack_start{stack_region_start};
255 const VAddr stack_last{stack_region_end - 1};
256 const VAddr kmap_start{kernel_map_region_start};
257 const VAddr kmap_last{kernel_map_region_end - 1};
258 ASSERT(alias_last < heap_start || heap_last < alias_start);
259 ASSERT(alias_last < stack_start || stack_last < alias_start);
260 ASSERT(alias_last < kmap_start || kmap_last < alias_start);
261 ASSERT(heap_last < stack_start || stack_last < heap_start);
262 ASSERT(heap_last < kmap_start || kmap_last < heap_start);
263
264 current_heap_addr = heap_region_start;
265 heap_capacity = 0;
266 physical_memory_usage = 0;
267 memory_pool = pool;
268
269 page_table_impl.Resize(address_space_width, PageBits, true);
270
271 return InitializeMemoryLayout(start, end);
272}
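// Note on the placement logic above: each *_rnd is a random multiple of the
// 2 MiB RegionAlignment within the leftover space, and the pairwise if/else
// fix-ups slide whichever region drew the later offset past the earlier one's
// size, so alias/heap/stack/kernel-map stay disjoint for any combination of
// draws.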
273
274ResultCode PageTable::MapProcessCode(VAddr addr, std::size_t num_pages, MemoryState state,
275 MemoryPermission perm) {
276 std::lock_guard lock{page_table_lock};
277
278 const u64 size{num_pages * PageSize};
279
280 if (!CanContain(addr, size, state)) {
281 return ERR_INVALID_ADDRESS_STATE;
282 }
283
284 if (IsRegionMapped(addr, size)) {
285 return ERR_INVALID_ADDRESS_STATE;
286 }
287
288 PageLinkedList page_linked_list;
289 CASCADE_CODE(
290 system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool));
291 CASCADE_CODE(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup));
292
293 block_manager->Update(addr, num_pages, state, perm);
294
295 return RESULT_SUCCESS;
296}
297
298ResultCode PageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
299 std::lock_guard lock{page_table_lock};
300
301 const std::size_t num_pages{size / PageSize};
302
303 MemoryState state{};
304 MemoryPermission perm{};
305 CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, src_addr, size, MemoryState::All,
306 MemoryState::Normal, MemoryPermission::Mask,
307 MemoryPermission::ReadAndWrite, MemoryAttribute::Mask,
308 MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped));
309
310 if (IsRegionMapped(dst_addr, size)) {
311 return ERR_INVALID_ADDRESS_STATE;
312 }
313
314 PageLinkedList page_linked_list;
315 AddRegionToPages(src_addr, num_pages, page_linked_list);
316
317 {
318 auto block_guard = detail::ScopeExit(
319 [&] { Operate(src_addr, num_pages, perm, OperationType::ChangePermissions); });
320
321 CASCADE_CODE(
322 Operate(src_addr, num_pages, MemoryPermission::None, OperationType::ChangePermissions));
323 CASCADE_CODE(MapPages(dst_addr, page_linked_list, MemoryPermission::None));
324
325 block_guard.Cancel();
326 }
327
328 block_manager->Update(src_addr, num_pages, state, MemoryPermission::None,
329 MemoryAttribute::Locked);
330 block_manager->Update(dst_addr, num_pages, MemoryState::AliasCode);
331
332 return RESULT_SUCCESS;
333}
334
335ResultCode PageTable::UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
336 std::lock_guard lock{page_table_lock};
337
338 if (!size) {
339 return RESULT_SUCCESS;
340 }
341
342 const std::size_t num_pages{size / PageSize};
343
344 CASCADE_CODE(CheckMemoryState(nullptr, nullptr, nullptr, src_addr, size, MemoryState::All,
345 MemoryState::Normal, MemoryPermission::None,
346 MemoryPermission::None, MemoryAttribute::Mask,
347 MemoryAttribute::Locked, MemoryAttribute::IpcAndDeviceMapped));
348
349 MemoryState state{};
350 CASCADE_CODE(CheckMemoryState(
351 &state, nullptr, nullptr, dst_addr, PageSize, MemoryState::FlagCanCodeAlias,
352 MemoryState::FlagCanCodeAlias, MemoryPermission::None, MemoryPermission::None,
353 MemoryAttribute::Mask, MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped));
354 CASCADE_CODE(CheckMemoryState(dst_addr, size, MemoryState::All, state, MemoryPermission::None,
355 MemoryPermission::None, MemoryAttribute::Mask,
356 MemoryAttribute::None));
357 CASCADE_CODE(Operate(dst_addr, num_pages, MemoryPermission::None, OperationType::Unmap));
358
359 block_manager->Update(dst_addr, num_pages, MemoryState::Free);
360 block_manager->Update(src_addr, num_pages, MemoryState::Normal, MemoryPermission::ReadAndWrite);
361
362 return RESULT_SUCCESS;
363}
364
365void PageTable::MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end) {
366 auto node{page_linked_list.Nodes().begin()};
367 PAddr map_addr{node->GetAddress()};
368 std::size_t src_num_pages{node->GetNumPages()};
369
370 block_manager->IterateForRange(start, end, [&](const MemoryInfo& info) {
371 if (info.state != MemoryState::Free) {
372 return;
373 }
374
375 std::size_t dst_num_pages{GetSizeInRange(info, start, end) / PageSize};
376 VAddr dst_addr{GetAddressInRange(info, start)};
377
378 while (dst_num_pages) {
379 if (!src_num_pages) {
380 node = std::next(node);
381 map_addr = node->GetAddress();
382 src_num_pages = node->GetNumPages();
383 }
384
385 const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
386 Operate(dst_addr, num_pages, MemoryPermission::ReadAndWrite, OperationType::Map,
387 map_addr);
388
389 dst_addr += num_pages * PageSize;
390 map_addr += num_pages * PageSize;
391 src_num_pages -= num_pages;
392 dst_num_pages -= num_pages;
393 }
394 });
395}
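// The helper above is a two-cursor walk: `node` advances through the freshly
// allocated physical runs while IterateForRange visits each free gap in
// [start, end), mapping min(src_num_pages, dst_num_pages) pages per step until
// both cursors are exhausted.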
396
397ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
398 std::lock_guard lock{page_table_lock};
399
400 std::size_t mapped_size{};
401 const VAddr end_addr{addr + size};
402
403 block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) {
404 if (info.state != MemoryState::Free) {
405 mapped_size += GetSizeInRange(info, addr, end_addr);
406 }
407 });
408
409 if (mapped_size == size) {
410 return RESULT_SUCCESS;
411 }
412
413 auto process{system.Kernel().CurrentProcess()};
414 const std::size_t remaining_size{size - mapped_size};
415 const std::size_t remaining_pages{remaining_size / PageSize};
416
417 if (process->GetResourceLimit() &&
418 !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) {
419 return ERR_RESOURCE_LIMIT_EXCEEDED;
420 }
421
422 PageLinkedList page_linked_list;
423 {
424 auto block_guard = detail::ScopeExit([&] {
425 system.Kernel().MemoryManager().Free(page_linked_list, remaining_pages, memory_pool);
426 process->GetResourceLimit()->Release(ResourceType::PhysicalMemory, remaining_size);
427 });
428
429 CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages,
430 memory_pool));
431
432 block_guard.Cancel();
433 }
434
435 MapPhysicalMemory(page_linked_list, addr, end_addr);
436
437 physical_memory_usage += remaining_size;
438
439 const std::size_t num_pages{size / PageSize};
440 block_manager->Update(addr, num_pages, MemoryState::Free, MemoryPermission::None,
441 MemoryAttribute::None, MemoryState::Normal,
442 MemoryPermission::ReadAndWrite, MemoryAttribute::None);
443
444 return RESULT_SUCCESS;
445}
446
447ResultCode PageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
448 std::lock_guard lock{page_table_lock};
449
450 const VAddr end_addr{addr + size};
451 ResultCode result{RESULT_SUCCESS};
452 std::size_t mapped_size{};
453
454 // Verify that the region can be unmapped
455 block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) {
456 if (info.state == MemoryState::Normal) {
457 if (info.attribute != MemoryAttribute::None) {
458 result = ERR_INVALID_ADDRESS_STATE;
459 return;
460 }
461 mapped_size += GetSizeInRange(info, addr, end_addr);
462 } else if (info.state != MemoryState::Free) {
463 result = ERR_INVALID_ADDRESS_STATE;
464 }
465 });
466
467 if (result.IsError()) {
468 return result;
469 }
470
471 if (!mapped_size) {
472 return RESULT_SUCCESS;
473 }
474
475 CASCADE_CODE(UnmapMemory(addr, size));
476
477 auto process{system.Kernel().CurrentProcess()};
478 process->GetResourceLimit()->Release(ResourceType::PhysicalMemory, mapped_size);
479 physical_memory_usage -= mapped_size;
480
481 return RESULT_SUCCESS;
482}
483
484ResultCode PageTable::UnmapMemory(VAddr addr, std::size_t size) {
485 std::lock_guard lock{page_table_lock};
486
487 const VAddr end_addr{addr + size};
488 ResultCode result{RESULT_SUCCESS};
489 PageLinkedList page_linked_list;
490
491 // Unmap each region within the range
492 block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) {
493 if (info.state == MemoryState::Normal) {
494 const std::size_t block_size{GetSizeInRange(info, addr, end_addr)};
495 const std::size_t block_num_pages{block_size / PageSize};
496 const VAddr block_addr{GetAddressInRange(info, addr)};
497
498 AddRegionToPages(block_addr, block_size / PageSize, page_linked_list);
499
500 if (result = Operate(block_addr, block_num_pages, MemoryPermission::None,
501 OperationType::Unmap);
502 result.IsError()) {
503 return;
504 }
505 }
506 });
507
508 if (result.IsError()) {
509 return result;
510 }
511
512 const std::size_t num_pages{size / PageSize};
513 system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool);
514
515 block_manager->Update(addr, num_pages, MemoryState::Free);
516
517 return RESULT_SUCCESS;
518}
519
520ResultCode PageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
521 std::lock_guard lock{page_table_lock};
522
523 MemoryState src_state{};
524 CASCADE_CODE(CheckMemoryState(
525 &src_state, nullptr, nullptr, src_addr, size, MemoryState::FlagCanAlias,
526 MemoryState::FlagCanAlias, MemoryPermission::Mask, MemoryPermission::ReadAndWrite,
527 MemoryAttribute::Mask, MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped));
528
529 if (IsRegionMapped(dst_addr, size)) {
530 return ERR_INVALID_ADDRESS_STATE;
531 }
532
533 PageLinkedList page_linked_list;
534 const std::size_t num_pages{size / PageSize};
535
536 AddRegionToPages(src_addr, num_pages, page_linked_list);
537
538 {
539 auto block_guard = detail::ScopeExit([&] {
540 Operate(src_addr, num_pages, MemoryPermission::ReadAndWrite,
541 OperationType::ChangePermissions);
542 });
543
544 CASCADE_CODE(
545 Operate(src_addr, num_pages, MemoryPermission::None, OperationType::ChangePermissions));
546 CASCADE_CODE(MapPages(dst_addr, page_linked_list, MemoryPermission::ReadAndWrite));
547
548 block_guard.Cancel();
549 }
550
551 block_manager->Update(src_addr, num_pages, src_state, MemoryPermission::None,
552 MemoryAttribute::Locked);
553 block_manager->Update(dst_addr, num_pages, MemoryState::Stack, MemoryPermission::ReadAndWrite);
554
555 return RESULT_SUCCESS;
556}
557
558ResultCode PageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
559 std::lock_guard lock{page_table_lock};
560
561 MemoryState src_state{};
562 CASCADE_CODE(CheckMemoryState(
563 &src_state, nullptr, nullptr, src_addr, size, MemoryState::FlagCanAlias,
564 MemoryState::FlagCanAlias, MemoryPermission::Mask, MemoryPermission::None,
565 MemoryAttribute::Mask, MemoryAttribute::Locked, MemoryAttribute::IpcAndDeviceMapped));
566
567 MemoryPermission dst_perm{};
568 CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, dst_addr, size, MemoryState::All,
569 MemoryState::Stack, MemoryPermission::None,
570 MemoryPermission::None, MemoryAttribute::Mask,
571 MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped));
572
573 PageLinkedList src_pages;
574 PageLinkedList dst_pages;
575 const std::size_t num_pages{size / PageSize};
576
577 AddRegionToPages(src_addr, num_pages, src_pages);
578 AddRegionToPages(dst_addr, num_pages, dst_pages);
579
580 if (!dst_pages.IsEqual(src_pages)) {
581 return ERR_INVALID_MEMORY_RANGE;
582 }
583
584 {
585 auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); });
586
587 CASCADE_CODE(Operate(dst_addr, num_pages, MemoryPermission::None, OperationType::Unmap));
588 CASCADE_CODE(Operate(src_addr, num_pages, MemoryPermission::ReadAndWrite,
589 OperationType::ChangePermissions));
590
591 block_guard.Cancel();
592 }
593
594 block_manager->Update(src_addr, num_pages, src_state, MemoryPermission::ReadAndWrite);
595 block_manager->Update(dst_addr, num_pages, MemoryState::Free);
596
597 return RESULT_SUCCESS;
598}
599
600ResultCode PageTable::MapPages(VAddr addr, const PageLinkedList& page_linked_list,
601 MemoryPermission perm) {
602 VAddr cur_addr{addr};
603
604 for (const auto& node : page_linked_list.Nodes()) {
605 if (const auto result{
606 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
607 result.IsError()) {
608 const MemoryInfo info{block_manager->FindBlock(cur_addr).GetMemoryInfo()};
609 const std::size_t num_pages{(cur_addr - addr) / PageSize};
610
611 ASSERT(
612 Operate(addr, num_pages, MemoryPermission::None, OperationType::Unmap).IsSuccess());
613
614 return result;
615 }
616
617 cur_addr += node.GetNumPages() * PageSize;
618 }
619
620 return RESULT_SUCCESS;
621}
622
623ResultCode PageTable::MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state,
624 MemoryPermission perm) {
625 std::lock_guard lock{page_table_lock};
626
627 const std::size_t num_pages{page_linked_list.GetNumPages()};
628 const std::size_t size{num_pages * PageSize};
629
630 if (!CanContain(addr, size, state)) {
631 return ERR_INVALID_ADDRESS_STATE;
632 }
633
634 if (IsRegionMapped(addr, num_pages * PageSize)) {
635 return ERR_INVALID_ADDRESS_STATE;
636 }
637
638 CASCADE_CODE(MapPages(addr, page_linked_list, perm));
639
640 block_manager->Update(addr, num_pages, state, perm);
641
642 return RESULT_SUCCESS;
643}
644
645ResultCode PageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size, MemoryPermission perm) {
646
647 std::lock_guard lock{page_table_lock};
648
649 MemoryState prev_state{};
650 MemoryPermission prev_perm{};
651
652 CASCADE_CODE(CheckMemoryState(
653 &prev_state, &prev_perm, nullptr, addr, size, MemoryState::FlagCode, MemoryState::FlagCode,
654 MemoryPermission::None, MemoryPermission::None, MemoryAttribute::Mask,
655 MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped));
656
657 MemoryState state{prev_state};
658
659 // Ensure state is mutable if permission allows write
660 if ((perm & MemoryPermission::Write) != MemoryPermission::None) {
661 if (prev_state == MemoryState::Code) {
662 state = MemoryState::CodeData;
663 } else if (prev_state == MemoryState::AliasCode) {
664 state = MemoryState::AliasCodeData;
665 } else {
666 UNREACHABLE();
667 }
668 }
669
670 // Return early if there is nothing to change
671 if (state == prev_state && perm == prev_perm) {
672 return RESULT_SUCCESS;
673 }
674
675 const std::size_t num_pages{size / PageSize};
676 const OperationType operation{(perm & MemoryPermission::Execute) != MemoryPermission::None
677 ? OperationType::ChangePermissionsAndRefresh
678 : OperationType::ChangePermissions};
679
680 CASCADE_CODE(Operate(addr, num_pages, perm, operation));
681
682 block_manager->Update(addr, num_pages, state, perm);
683
684 return RESULT_SUCCESS;
685}
686
687MemoryInfo PageTable::QueryInfoImpl(VAddr addr) {
688 std::lock_guard lock{page_table_lock};
689
690 return block_manager->FindBlock(addr).GetMemoryInfo();
691}
692
693MemoryInfo PageTable::QueryInfo(VAddr addr) {
694 if (!Contains(addr, 1)) {
695 return {address_space_end, 0 - address_space_end, MemoryState::Inaccessible,
696 MemoryPermission::None, MemoryAttribute::None, MemoryPermission::None};
697 }
698
699 return QueryInfoImpl(addr);
700}
701
702ResultCode PageTable::ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm) {
703 std::lock_guard lock{page_table_lock};
704
705 MemoryState state{};
706 MemoryAttribute attribute{};
707
708 CASCADE_CODE(CheckMemoryState(&state, nullptr, &attribute, addr, size,
709 MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted,
710 MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted,
711 MemoryPermission::Mask, MemoryPermission::ReadAndWrite,
712 MemoryAttribute::Mask, MemoryAttribute::None,
713 MemoryAttribute::IpcAndDeviceMapped));
714
715 block_manager->Update(addr, size / PageSize, state, perm, attribute | MemoryAttribute::Locked);
716
717 return RESULT_SUCCESS;
718}
719
720ResultCode PageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
721 std::lock_guard lock{page_table_lock};
722
723 MemoryState state{};
724
725 CASCADE_CODE(CheckMemoryState(&state, nullptr, nullptr, addr, size,
726 MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted,
727 MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted,
728 MemoryPermission::None, MemoryPermission::None,
729 MemoryAttribute::Mask, MemoryAttribute::Locked,
730 MemoryAttribute::IpcAndDeviceMapped));
731
732 block_manager->Update(addr, size / PageSize, state, MemoryPermission::ReadAndWrite);
733
734 return RESULT_SUCCESS;
735}
736
737ResultCode PageTable::SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask,
738 MemoryAttribute value) {
739 std::lock_guard lock{page_table_lock};
740
741 MemoryState state{};
742 MemoryPermission perm{};
743 MemoryAttribute attribute{};
744
745 CASCADE_CODE(CheckMemoryState(&state, &perm, &attribute, addr, size,
746 MemoryState::FlagCanChangeAttribute,
747 MemoryState::FlagCanChangeAttribute, MemoryPermission::None,
748 MemoryPermission::None, MemoryAttribute::LockedAndIpcLocked,
749 MemoryAttribute::None, MemoryAttribute::DeviceSharedAndUncached));
750
751 attribute = attribute & ~mask;
752 attribute = attribute | (mask & value);
753
754 block_manager->Update(addr, size / PageSize, state, perm, attribute);
755
756 return RESULT_SUCCESS;
757}
758
759ResultCode PageTable::SetHeapCapacity(std::size_t new_heap_capacity) {
760 std::lock_guard lock{page_table_lock};
761 heap_capacity = new_heap_capacity;
762 return RESULT_SUCCESS;
763}
764
765ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
766
767 if (size > heap_region_end - heap_region_start) {
768 return ERR_OUT_OF_MEMORY;
769 }
770
771 const u64 previous_heap_size{GetHeapSize()};
772
773 UNIMPLEMENTED_IF_MSG(previous_heap_size > size, "Heap shrink is unimplemented");
774
775 // Increase the heap size
776 {
777 std::lock_guard lock{page_table_lock};
778
779 const u64 delta{size - previous_heap_size};
780
781 auto process{system.Kernel().CurrentProcess()};
782 if (process->GetResourceLimit() && delta != 0 &&
783 !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) {
784 return ERR_RESOURCE_LIMIT_EXCEEDED;
785 }
786
787 PageLinkedList page_linked_list;
788 const std::size_t num_pages{delta / PageSize};
789
790 CASCADE_CODE(
791 system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool));
792
793 if (IsRegionMapped(current_heap_addr, delta)) {
794 return ERR_INVALID_ADDRESS_STATE;
795 }
796
797 CASCADE_CODE(
798 Operate(current_heap_addr, num_pages, page_linked_list, OperationType::MapGroup));
799
800 block_manager->Update(current_heap_addr, num_pages, MemoryState::Normal,
801 MemoryPermission::ReadAndWrite);
802
803 current_heap_addr = heap_region_start + size;
804 }
805
806 return MakeResult<VAddr>(heap_region_start);
807}
808
809ResultVal<VAddr> PageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
810 bool is_map_only, VAddr region_start,
811 std::size_t region_num_pages, MemoryState state,
812 MemoryPermission perm, PAddr map_addr) {
813 std::lock_guard lock{page_table_lock};
814
815 if (!CanContain(region_start, region_num_pages * PageSize, state)) {
816 return ERR_INVALID_ADDRESS_STATE;
817 }
818
819 if (region_num_pages <= needed_num_pages) {
820 return ERR_OUT_OF_MEMORY;
821 }
822
823 const VAddr addr{
824 AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
825 if (!addr) {
826 return ERR_OUT_OF_MEMORY;
827 }
828
829 if (is_map_only) {
830 CASCADE_CODE(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
831 } else {
832 PageLinkedList page_group;
833 CASCADE_CODE(
834 system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool));
835 CASCADE_CODE(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
836 }
837
838 block_manager->Update(addr, needed_num_pages, state, perm);
839
840 return MakeResult<VAddr>(addr);
841}
842
843ResultCode PageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
844 block_manager = std::make_unique<MemoryBlockManager>(start, end);
845
846 return RESULT_SUCCESS;
847}
848
849bool PageTable::IsRegionMapped(VAddr address, u64 size) {
850 return CheckMemoryState(address, size, MemoryState::All, MemoryState::Free,
851 MemoryPermission::Mask, MemoryPermission::None, MemoryAttribute::Mask,
852 MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)
853 .IsError();
854}
855
856bool PageTable::IsRegionContiguous(VAddr addr, u64 size) const {
857 auto start_ptr = system.Memory().GetPointer(addr);
858 for (u64 offset{}; offset < size; offset += PageSize) {
859 if (start_ptr != system.Memory().GetPointer(addr + offset)) {
860 return false;
861 }
862 start_ptr += PageSize;
863 }
864 return true;
865}
866
867void PageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
868 PageLinkedList& page_linked_list) {
869 VAddr addr{start};
870 while (addr < start + (num_pages * PageSize)) {
871 const PAddr paddr{GetPhysicalAddr(addr)};
872 if (!paddr) {
873 UNREACHABLE();
874 }
875 page_linked_list.AddBlock(paddr, 1);
876 addr += PageSize;
877 }
878}
879
880VAddr PageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages,
881 u64 needed_num_pages, std::size_t align) {
882 if (is_aslr_enabled) {
883 UNIMPLEMENTED();
884 }
885 return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
886 IsKernel() ? 1 : 4);
887}
888
889ResultCode PageTable::Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group,
890 OperationType operation) {
891 std::lock_guard lock{page_table_lock};
892
893 ASSERT(Common::IsAligned(addr, PageSize));
894 ASSERT(num_pages > 0);
895 ASSERT(num_pages == page_group.GetNumPages());
896
897 for (const auto& node : page_group.Nodes()) {
898 const std::size_t size{node.GetNumPages() * PageSize};
899
900 switch (operation) {
901 case OperationType::MapGroup:
902 system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
903 break;
904 default:
905 UNREACHABLE();
906 }
907
908 addr += size;
909 }
910
911 return RESULT_SUCCESS;
912}
913
914ResultCode PageTable::Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm,
915 OperationType operation, PAddr map_addr) {
916 std::lock_guard lock{page_table_lock};
917
918 ASSERT(num_pages > 0);
919 ASSERT(Common::IsAligned(addr, PageSize));
920 ASSERT(ContainsPages(addr, num_pages));
921
922 switch (operation) {
923 case OperationType::Unmap:
924 system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize);
925 break;
926 case OperationType::Map: {
927 ASSERT(map_addr);
928 ASSERT(Common::IsAligned(map_addr, PageSize));
929 system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr);
930 break;
931 }
932 case OperationType::ChangePermissions:
933 case OperationType::ChangePermissionsAndRefresh:
934 break;
935 default:
936 UNREACHABLE();
937 }
938 return RESULT_SUCCESS;
939}
940
941constexpr VAddr PageTable::GetRegionAddress(MemoryState state) const {
942 switch (state) {
943 case MemoryState::Free:
944 case MemoryState::Kernel:
945 return address_space_start;
946 case MemoryState::Normal:
947 return heap_region_start;
948 case MemoryState::Ipc:
949 case MemoryState::NonSecureIpc:
950 case MemoryState::NonDeviceIpc:
951 return alias_region_start;
952 case MemoryState::Stack:
953 return stack_region_start;
954 case MemoryState::Io:
955 case MemoryState::Static:
956 case MemoryState::ThreadLocal:
957 return kernel_map_region_start;
958 case MemoryState::Shared:
959 case MemoryState::AliasCode:
960 case MemoryState::AliasCodeData:
961 case MemoryState::Transfered:
962 case MemoryState::SharedTransfered:
963 case MemoryState::SharedCode:
964 case MemoryState::GeneratedCode:
965 case MemoryState::CodeOut:
966 return alias_code_region_start;
967 case MemoryState::Code:
968 case MemoryState::CodeData:
969 return code_region_start;
970 default:
971 UNREACHABLE();
972 return {};
973 }
974}
975
976constexpr std::size_t PageTable::GetRegionSize(MemoryState state) const {
977 switch (state) {
978 case MemoryState::Free:
979 case MemoryState::Kernel:
980 return address_space_end - address_space_start;
981 case MemoryState::Normal:
982 return heap_region_end - heap_region_start;
983 case MemoryState::Ipc:
984 case MemoryState::NonSecureIpc:
985 case MemoryState::NonDeviceIpc:
986 return alias_region_end - alias_region_start;
987 case MemoryState::Stack:
988 return stack_region_end - stack_region_start;
989 case MemoryState::Io:
990 case MemoryState::Static:
991 case MemoryState::ThreadLocal:
992 return kernel_map_region_end - kernel_map_region_start;
993 case MemoryState::Shared:
994 case MemoryState::AliasCode:
995 case MemoryState::AliasCodeData:
996 case MemoryState::Transfered:
997 case MemoryState::SharedTransfered:
998 case MemoryState::SharedCode:
999 case MemoryState::GeneratedCode:
1000 case MemoryState::CodeOut:
1001 return alias_code_region_end - alias_code_region_start;
1002 case MemoryState::Code:
1003 case MemoryState::CodeData:
1004 return code_region_end - code_region_start;
1005 default:
1006 UNREACHABLE();
1007 return {};
1008 }
1009}
1010
1011constexpr bool PageTable::CanContain(VAddr addr, std::size_t size, MemoryState state) const {
1012 const VAddr end{addr + size};
1013 const VAddr last{end - 1};
1014 const VAddr region_start{GetRegionAddress(state)};
1015 const std::size_t region_size{GetRegionSize(state)};
1016 const bool is_in_region{region_start <= addr && addr < end &&
1017 last <= region_start + region_size - 1};
1018 const bool is_in_heap{!(end <= heap_region_start || heap_region_end <= addr)};
1019 const bool is_in_alias{!(end <= alias_region_start || alias_region_end <= addr)};
1020
1021 switch (state) {
1022 case MemoryState::Free:
1023 case MemoryState::Kernel:
1024 return is_in_region;
1025 case MemoryState::Io:
1026 case MemoryState::Static:
1027 case MemoryState::Code:
1028 case MemoryState::CodeData:
1029 case MemoryState::Shared:
1030 case MemoryState::AliasCode:
1031 case MemoryState::AliasCodeData:
1032 case MemoryState::Stack:
1033 case MemoryState::ThreadLocal:
1034 case MemoryState::Transfered:
1035 case MemoryState::SharedTransfered:
1036 case MemoryState::SharedCode:
1037 case MemoryState::GeneratedCode:
1038 case MemoryState::CodeOut:
1039 return is_in_region && !is_in_heap && !is_in_alias;
1040 case MemoryState::Normal:
1041 ASSERT(is_in_heap);
1042 return is_in_region && !is_in_alias;
1043 case MemoryState::Ipc:
1044 case MemoryState::NonSecureIpc:
1045 case MemoryState::NonDeviceIpc:
1046 ASSERT(is_in_alias);
1047 return is_in_region && !is_in_heap;
1048 default:
1049 return false;
1050 }
1051}
1052
1053constexpr ResultCode PageTable::CheckMemoryState(const MemoryInfo& info, MemoryState state_mask,
1054 MemoryState state, MemoryPermission perm_mask,
1055 MemoryPermission perm, MemoryAttribute attr_mask,
1056 MemoryAttribute attr) const {
1057 // Validate the states match expectation
1058 if ((info.state & state_mask) != state) {
1059 return ERR_INVALID_ADDRESS_STATE;
1060 }
1061 if ((info.perm & perm_mask) != perm) {
1062 return ERR_INVALID_ADDRESS_STATE;
1063 }
1064 if ((info.attribute & attr_mask) != attr) {
1065 return ERR_INVALID_ADDRESS_STATE;
1066 }
1067
1068 return RESULT_SUCCESS;
1069}
1070
1071ResultCode PageTable::CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm,
1072 MemoryAttribute* out_attr, VAddr addr, std::size_t size,
1073 MemoryState state_mask, MemoryState state,
1074 MemoryPermission perm_mask, MemoryPermission perm,
1075 MemoryAttribute attr_mask, MemoryAttribute attr,
1076 MemoryAttribute ignore_attr) {
1077 std::lock_guard lock{page_table_lock};
1078
1079 // Get information about the first block
1080 const VAddr last_addr{addr + size - 1};
1081 MemoryBlockManager::const_iterator it{block_manager->FindIterator(addr)};
1082 MemoryInfo info{it->GetMemoryInfo()};
1083
1084 // Validate all blocks in the range have correct state
1085 const MemoryState first_state{info.state};
1086 const MemoryPermission first_perm{info.perm};
1087 const MemoryAttribute first_attr{info.attribute};
1088
1089 while (true) {
1090 // Validate the current block
1091 if (!(info.state == first_state)) {
1092 return ERR_INVALID_ADDRESS_STATE;
1093 }
1094 if (!(info.perm == first_perm)) {
1095 return ERR_INVALID_ADDRESS_STATE;
1096 }
1097 if (!((info.attribute | static_cast<MemoryAttribute>(ignore_attr)) ==
1098 (first_attr | static_cast<MemoryAttribute>(ignore_attr)))) {
1099 return ERR_INVALID_ADDRESS_STATE;
1100 }
1101
1102 // Validate against the provided masks
1103 CASCADE_CODE(CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
1104
1105 // Break once we're done
1106 if (last_addr <= info.GetLastAddress()) {
1107 break;
1108 }
1109
1110 // Advance our iterator
1111 it++;
1112 ASSERT(it != block_manager->cend());
1113 info = it->GetMemoryInfo();
1114 }
1115
1116 // Write output state
1117 if (out_state) {
1118 *out_state = first_state;
1119 }
1120 if (out_perm) {
1121 *out_perm = first_perm;
1122 }
1123 if (out_attr) {
1124 *out_attr = first_attr & static_cast<MemoryAttribute>(~ignore_attr);
1125 }
1126
1127 return RESULT_SUCCESS;
1128}
1129
1130} // namespace Kernel::Memory
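
Reading the CheckMemoryState triples used throughout this file: each (mask, value) pair requires (field & mask) == value for every block in the range, while ignore_attr only relaxes the requirement that attributes be uniform across blocks. Spelled out for the source check in MapProcessCodeMemory, for example:

// For every block overlapping [src_addr, src_addr + size):
//   (state & MemoryState::All)       == MemoryState::Normal
//   (perm  & MemoryPermission::Mask) == MemoryPermission::ReadAndWrite
//   (attr  & MemoryAttribute::Mask)  == MemoryAttribute::None
// and state/perm/attr must match the first block's values, except that bits in
// MemoryAttribute::IpcAndDeviceMapped may differ from block to block.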
diff --git a/src/core/hle/kernel/memory/page_table.h b/src/core/hle/kernel/memory/page_table.h
new file mode 100644
index 000000000..80384ab0f
--- /dev/null
+++ b/src/core/hle/kernel/memory/page_table.h
@@ -0,0 +1,276 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <list>
8#include <memory>
9#include <mutex>
10
11#include "common/common_funcs.h"
12#include "common/common_types.h"
13#include "common/page_table.h"
14#include "core/file_sys/program_metadata.h"
15#include "core/hle/kernel/memory/memory_block.h"
16#include "core/hle/kernel/memory/memory_manager.h"
17
18namespace Core {
19class System;
20}
21
22namespace Kernel::Memory {
23
24class MemoryBlockManager;
25
26class PageTable final : NonCopyable {
27public:
28 explicit PageTable(Core::System& system);
29
30 ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
31 VAddr code_addr, std::size_t code_size,
32 Memory::MemoryManager::Pool pool);
33 ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, MemoryState state,
34 MemoryPermission perm);
35 ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
36 ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
37 ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
38 ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
39 ResultCode UnmapMemory(VAddr addr, std::size_t size);
40 ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size);
41 ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
42 ResultCode MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state,
43 MemoryPermission perm);
44 ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, MemoryPermission perm);
45 MemoryInfo QueryInfo(VAddr addr);
46 ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm);
47 ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
48 ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask,
49 MemoryAttribute value);
50 ResultCode SetHeapCapacity(std::size_t new_heap_capacity);
51 ResultVal<VAddr> SetHeapSize(std::size_t size);
52 ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
53 bool is_map_only, VAddr region_start,
54 std::size_t region_num_pages, MemoryState state,
55 MemoryPermission perm, PAddr map_addr = 0);
56
57 Common::PageTable& PageTableImpl() {
58 return page_table_impl;
59 }
60
61 const Common::PageTable& PageTableImpl() const {
62 return page_table_impl;
63 }
64
65private:
66 enum class OperationType : u32 {
67 Map,
68 MapGroup,
69 Unmap,
70 ChangePermissions,
71 ChangePermissionsAndRefresh,
72 };
73
74 static constexpr MemoryAttribute DefaultMemoryIgnoreAttr =
75 MemoryAttribute::DontCareMask | MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared;
76
77 ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
78 ResultCode MapPages(VAddr addr, const PageLinkedList& page_linked_list, MemoryPermission perm);
79 void MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end);
80 bool IsRegionMapped(VAddr address, u64 size);
81 bool IsRegionContiguous(VAddr addr, u64 size) const;
82 void AddRegionToPages(VAddr start, std::size_t num_pages, PageLinkedList& page_linked_list);
83 MemoryInfo QueryInfoImpl(VAddr addr);
84 VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
85 std::size_t align);
86 ResultCode Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group,
87 OperationType operation);
88 ResultCode Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm,
89 OperationType operation, PAddr map_addr = 0);
90 constexpr VAddr GetRegionAddress(MemoryState state) const;
91 constexpr std::size_t GetRegionSize(MemoryState state) const;
92 constexpr bool CanContain(VAddr addr, std::size_t size, MemoryState state) const;
93
94 constexpr ResultCode CheckMemoryState(const MemoryInfo& info, MemoryState state_mask,
95 MemoryState state, MemoryPermission perm_mask,
96 MemoryPermission perm, MemoryAttribute attr_mask,
97 MemoryAttribute attr) const;
98 ResultCode CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm,
99 MemoryAttribute* out_attr, VAddr addr, std::size_t size,
100 MemoryState state_mask, MemoryState state,
101 MemoryPermission perm_mask, MemoryPermission perm,
102 MemoryAttribute attr_mask, MemoryAttribute attr,
103 MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr);
104 ResultCode CheckMemoryState(VAddr addr, std::size_t size, MemoryState state_mask,
105 MemoryState state, MemoryPermission perm_mask,
106 MemoryPermission perm, MemoryAttribute attr_mask,
107 MemoryAttribute attr,
108 MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) {
109 return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask,
110 perm, attr_mask, attr, ignore_attr);
111 }
112
113 std::recursive_mutex page_table_lock;
114 std::unique_ptr<MemoryBlockManager> block_manager;
115
116public:
117 constexpr VAddr GetAddressSpaceStart() const {
118 return address_space_start;
119 }
120 constexpr VAddr GetAddressSpaceEnd() const {
121 return address_space_end;
122 }
123 constexpr std::size_t GetAddressSpaceSize() const {
124 return address_space_end - address_space_start;
125 }
126 constexpr VAddr GetHeapRegionStart() const {
127 return heap_region_start;
128 }
129 constexpr VAddr GetHeapRegionEnd() const {
130 return heap_region_end;
131 }
132 constexpr std::size_t GetHeapRegionSize() const {
133 return heap_region_end - heap_region_start;
134 }
135 constexpr VAddr GetAliasRegionStart() const {
136 return alias_region_start;
137 }
138 constexpr VAddr GetAliasRegionEnd() const {
139 return alias_region_end;
140 }
141 constexpr std::size_t GetAliasRegionSize() const {
142 return alias_region_end - alias_region_start;
143 }
144 constexpr VAddr GetStackRegionStart() const {
145 return stack_region_start;
146 }
147 constexpr VAddr GetStackRegionEnd() const {
148 return stack_region_end;
149 }
150 constexpr std::size_t GetStackRegionSize() const {
151 return stack_region_end - stack_region_start;
152 }
153 constexpr VAddr GetKernelMapRegionStart() const {
154 return kernel_map_region_start;
155 }
156 constexpr VAddr GetKernelMapRegionEnd() const {
157 return kernel_map_region_end;
158 }
159 constexpr VAddr GetCodeRegionStart() const {
160 return code_region_start;
161 }
162 constexpr VAddr GetCodeRegionEnd() const {
163 return code_region_end;
164 }
165 constexpr VAddr GetAliasCodeRegionStart() const {
166 return alias_code_region_start;
167 }
168 constexpr std::size_t GetAliasCodeRegionSize() const {
169 return alias_code_region_end - alias_code_region_start;
170 }
171 constexpr std::size_t GetAddressSpaceWidth() const {
172 return address_space_width;
173 }
174 constexpr std::size_t GetHeapSize() {
175 return current_heap_addr - heap_region_start;
176 }
177 constexpr std::size_t GetTotalHeapSize() {
178 return GetHeapSize() + physical_memory_usage;
179 }
180 constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
181 return address_space_start <= address && address + size - 1 <= address_space_end - 1;
182 }
183 constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
184 return alias_region_start > address || address + size - 1 > alias_region_end - 1;
185 }
186 constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
187 return stack_region_start > address || address + size - 1 > stack_region_end - 1;
188 }
189 constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
190 return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
191 }
192 constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
193 return address + size > heap_region_start && heap_region_end > address;
194 }
195 constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
196 return address + size > alias_region_start && alias_region_end > address;
197 }
198 constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
199 if (IsInvalidRegion(address, size)) {
200 return true;
201 }
202 if (IsInsideHeapRegion(address, size)) {
203 return true;
204 }
205 if (IsInsideAliasRegion(address, size)) {
206 return true;
207 }
208 return false;
209 }
210 constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
211 return !IsOutsideASLRRegion(address, size);
212 }
213 constexpr PAddr GetPhysicalAddr(VAddr addr) {
214 return page_table_impl.backing_addr[addr >> Memory::PageBits] + addr;
215 }
216
217private:
218 constexpr bool Contains(VAddr addr) const {
219 return address_space_start <= addr && addr <= address_space_end - 1;
220 }
221 constexpr bool Contains(VAddr addr, std::size_t size) const {
222 return address_space_start <= addr && addr < addr + size &&
223 addr + size - 1 <= address_space_end - 1;
224 }
225 constexpr bool IsKernel() const {
226 return is_kernel;
227 }
228 constexpr bool IsAslrEnabled() const {
229 return is_aslr_enabled;
230 }
231
232 constexpr std::size_t GetNumGuardPages() const {
233 return IsKernel() ? 1 : 4;
234 }
235
236 constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
237 return (address_space_start <= addr) &&
238 (num_pages <= (address_space_end - address_space_start) / PageSize) &&
239 (addr + num_pages * PageSize - 1 <= address_space_end - 1);
240 }
241
242private:
243 VAddr address_space_start{};
244 VAddr address_space_end{};
245 VAddr heap_region_start{};
246 VAddr heap_region_end{};
247 VAddr current_heap_end{};
248 VAddr alias_region_start{};
249 VAddr alias_region_end{};
250 VAddr stack_region_start{};
251 VAddr stack_region_end{};
252 VAddr kernel_map_region_start{};
253 VAddr kernel_map_region_end{};
254 VAddr code_region_start{};
255 VAddr code_region_end{};
256 VAddr alias_code_region_start{};
257 VAddr alias_code_region_end{};
258 VAddr current_heap_addr{};
259
260 std::size_t heap_capacity{};
261 std::size_t physical_memory_usage{};
262 std::size_t max_heap_size{};
263 std::size_t max_physical_memory_size{};
264 std::size_t address_space_width{};
265
266 bool is_kernel{};
267 bool is_aslr_enabled{};
268
269 MemoryManager::Pool memory_pool{MemoryManager::Pool::Application};
270
271 Common::PageTable page_table_impl;
272
273 Core::System& system;
274};
275
276} // namespace Kernel::Memory
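
A note on the predicates above: IsInsideHeapRegion and IsInsideAliasRegion use the standard half-open interval overlap test, while the containment checks compare inclusive last addresses (address + size - 1) so that a range ending exactly at a region boundary is handled without overflow. A compile-time sketch of the overlap test, with made-up heap bounds:

    #include <cstdint>

    // [addr, addr + size) intersects [start, end) iff both conditions hold.
    constexpr bool Overlaps(std::uint64_t addr, std::uint64_t size,
                            std::uint64_t start, std::uint64_t end) {
        return addr + size > start && end > addr;
    }

    // Hypothetical heap bounds, purely for illustration.
    constexpr std::uint64_t heap_start = 0x10000;
    constexpr std::uint64_t heap_end = 0x20000;

    static_assert(Overlaps(0x0FF00, 0x200, heap_start, heap_end));   // straddles start
    static_assert(Overlaps(0x1FFFF, 0x1, heap_start, heap_end));     // last heap byte
    static_assert(!Overlaps(0x20000, 0x1000, heap_start, heap_end)); // starts at end
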
diff --git a/src/core/hle/kernel/memory/slab_heap.h b/src/core/hle/kernel/memory/slab_heap.h
new file mode 100644
index 000000000..049403e15
--- /dev/null
+++ b/src/core/hle/kernel/memory/slab_heap.h
@@ -0,0 +1,164 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphère, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX.
7
8#pragma once
9
10#include <atomic>
11#include <new>
12#include "common/assert.h"
13#include "common/common_funcs.h"
14#include "common/common_types.h"
15
16namespace Kernel::Memory {
17
18namespace impl {
19
20class SlabHeapImpl final : NonCopyable {
21public:
22 struct Node {
23 Node* next{};
24 };
25
26 constexpr SlabHeapImpl() = default;
27
28 void Initialize(std::size_t size) {
29 ASSERT(head == nullptr);
30 obj_size = size;
31 }
32
33 constexpr std::size_t GetObjectSize() const {
34 return obj_size;
35 }
36
37 Node* GetHead() const {
38 return head;
39 }
40
41 void* Allocate() {
42 Node* ret = head.load();
43
44 do {
45 if (ret == nullptr) {
46 break;
47 }
48 } while (!head.compare_exchange_weak(ret, ret->next));
49
50 return ret;
51 }
52
53 void Free(void* obj) {
54 Node* node = reinterpret_cast<Node*>(obj);
55
56 Node* cur_head = head.load();
57 do {
58 node->next = cur_head;
59 } while (!head.compare_exchange_weak(cur_head, node));
60 }
61
62private:
63 std::atomic<Node*> head{};
64 std::size_t obj_size{};
65};
66
67} // namespace impl
68
69class SlabHeapBase : NonCopyable {
70public:
71 constexpr SlabHeapBase() = default;
72
73 constexpr bool Contains(uintptr_t addr) const {
74 return start <= addr && addr < end;
75 }
76
77 constexpr std::size_t GetSlabHeapSize() const {
78 return (end - start) / GetObjectSize();
79 }
80
81 constexpr std::size_t GetObjectSize() const {
82 return impl.GetObjectSize();
83 }
84
85 constexpr uintptr_t GetSlabHeapAddress() const {
86 return start;
87 }
88
89 std::size_t GetObjectIndexImpl(const void* obj) const {
90 return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
91 }
92
93 std::size_t GetPeakIndex() const {
94 return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
95 }
96
97 void* AllocateImpl() {
98 return impl.Allocate();
99 }
100
101 void FreeImpl(void* obj) {
102 // Don't allow freeing an object that wasn't allocated from this heap
103 ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
104 impl.Free(obj);
105 }
106
107 void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
108 // Ensure we don't initialize a slab using null memory
109 ASSERT(memory != nullptr);
110
111 // Initialize the base allocator
112 impl.Initialize(obj_size);
113
114 // Set our tracking variables
115 const std::size_t num_obj = (memory_size / obj_size);
116 start = reinterpret_cast<uintptr_t>(memory);
117 end = start + num_obj * obj_size;
118 peak = start;
119
120 // Free the objects
121 u8* cur = reinterpret_cast<u8*>(end);
122
123 for (std::size_t i{}; i < num_obj; i++) {
124 cur -= obj_size;
125 impl.Free(cur);
126 }
127 }
128
129private:
130 using Impl = impl::SlabHeapImpl;
131
132 Impl impl;
133 uintptr_t peak{};
134 uintptr_t start{};
135 uintptr_t end{};
136};
137
138template <typename T>
139class SlabHeap final : public SlabHeapBase {
140public:
141 constexpr SlabHeap() : SlabHeapBase() {}
142
143 void Initialize(void* memory, std::size_t memory_size) {
144 InitializeImpl(sizeof(T), memory, memory_size);
145 }
146
147 T* Allocate() {
148 T* obj = reinterpret_cast<T*>(AllocateImpl());
149 if (obj != nullptr) {
150 new (obj) T();
151 }
152 return obj;
153 }
154
155 void Free(T* obj) {
156 FreeImpl(obj);
157 }
158
159 constexpr std::size_t GetObjectIndex(const T* obj) const {
160 return GetObjectIndexImpl(obj);
161 }
162};
163
164} // namespace Kernel::Memory
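
For orientation, a minimal usage sketch for SlabHeap<T> (hypothetical caller; the kernel normally carves the backing buffer out of the managed resource region). Allocate() is a lock-free pop from the intrusive free list followed by placement-new; Free() pushes the storage back without running the destructor:

    #include <cstddef>
    #include <cstdint>

    #include "core/hle/kernel/memory/slab_heap.h"

    namespace {

    struct Session { // stand-in object; must be at least one pointer in size
        std::uint64_t id{};
        std::uint64_t flags{};
    };

    void SlabHeapExample(void* backing, std::size_t backing_size) {
        Kernel::Memory::SlabHeap<Session> heap;

        // Threads the free list through the backing buffer, one node per object.
        heap.Initialize(backing, backing_size);

        if (Session* s = heap.Allocate()) { // lock-free pop + placement-new
            s->id = 1;
            // Returns the storage without running ~Session, which is fine here
            // because Session is trivially destructible.
            heap.Free(s);
        }
    }

    } // namespace
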
diff --git a/src/core/hle/kernel/memory/system_control.cpp b/src/core/hle/kernel/memory/system_control.cpp
new file mode 100644
index 000000000..9cae3c6cb
--- /dev/null
+++ b/src/core/hle/kernel/memory/system_control.cpp
@@ -0,0 +1,41 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <limits>
6
7#include <random>
8
9#include "core/hle/kernel/memory/system_control.h"
10
11namespace Kernel::Memory::SystemControl {
12
13u64 GenerateRandomU64ForInit() {
14 static std::random_device device;
15 static std::mt19937 gen(device());
16 static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
17 return distribution(gen);
18}
19
20template <typename F>
21u64 GenerateUniformRange(u64 min, u64 max, F f) {
22 // Handle the case where the difference is too large to represent.
23 if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
24 return f();
25 }
26
27 // Iterate until we get a value in range.
28 const u64 range_size = ((max + 1) - min);
29 const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
30 while (true) {
31 if (const u64 rnd = f(); rnd < effective_max) {
32 return min + (rnd % range_size);
33 }
34 }
35}
36
37u64 GenerateRandomRange(u64 min, u64 max) {
38 return GenerateUniformRange(min, max, GenerateRandomU64ForInit);
39}
40
41} // namespace Kernel::Memory::SystemControl
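
GenerateUniformRange is a textbook rejection sampler: values at or above effective_max are discarded so the surviving values split evenly into range_size buckets, avoiding modulo bias. (Note the template is declared in system_control.h but defined here; that only links because this translation unit contains the sole instantiation, via GenerateRandomRange.) A toy demonstration with an 8-value generator:

    #include <cstdint>
    #include <iostream>

    int main() {
        // Pretend the generator emits 0..7 uniformly and we want a range of
        // size 3. Plain "rnd % 3" would favour 0 and 1 over 2.
        const std::uint64_t range_size = 3;
        const std::uint64_t generator_values = 8;
        const std::uint64_t effective_max =
            (generator_values / range_size) * range_size; // 6

        int counts[3] = {};
        for (std::uint64_t rnd = 0; rnd < generator_values; ++rnd) {
            if (rnd < effective_max) { // the rejection step
                ++counts[rnd % range_size];
            }
        }
        std::cout << counts[0] << ' ' << counts[1] << ' ' << counts[2] << '\n'; // 2 2 2
        // The real code uses numeric_limits<u64>::max() rather than max() + 1
        // (which is unrepresentable); that can reject up to range_size extra
        // values, but the accepted set remains an exact multiple of range_size,
        // so the result stays uniform.
    }
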
diff --git a/src/core/hle/kernel/memory/system_control.h b/src/core/hle/kernel/memory/system_control.h
new file mode 100644
index 000000000..3fa93111d
--- /dev/null
+++ b/src/core/hle/kernel/memory/system_control.h
@@ -0,0 +1,18 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8
9namespace Kernel::Memory::SystemControl {
10
11u64 GenerateRandomU64ForInit();
12
13template <typename F>
14u64 GenerateUniformRange(u64 min, u64 max, F f);
15
16u64 GenerateRandomRange(u64 min, u64 max);
17
18} // namespace Kernel::Memory::SystemControl
diff --git a/src/core/hle/kernel/physical_memory.h b/src/core/hle/kernel/physical_memory.h
index b689e8e8b..7a0266780 100644
--- a/src/core/hle/kernel/physical_memory.h
+++ b/src/core/hle/kernel/physical_memory.h
@@ -4,6 +4,8 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <vector>
8
7#include "common/alignment.h" 9#include "common/alignment.h"
8 10
9namespace Kernel { 11namespace Kernel {
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index edc414d69..36724569f 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -10,15 +10,18 @@
10#include "common/assert.h" 10#include "common/assert.h"
11#include "common/logging/log.h" 11#include "common/logging/log.h"
12#include "core/core.h" 12#include "core/core.h"
13#include "core/device_memory.h"
13#include "core/file_sys/program_metadata.h" 14#include "core/file_sys/program_metadata.h"
14#include "core/hle/kernel/code_set.h" 15#include "core/hle/kernel/code_set.h"
15#include "core/hle/kernel/errors.h" 16#include "core/hle/kernel/errors.h"
16#include "core/hle/kernel/kernel.h" 17#include "core/hle/kernel/kernel.h"
18#include "core/hle/kernel/memory/memory_block_manager.h"
19#include "core/hle/kernel/memory/page_table.h"
20#include "core/hle/kernel/memory/slab_heap.h"
17#include "core/hle/kernel/process.h" 21#include "core/hle/kernel/process.h"
18#include "core/hle/kernel/resource_limit.h" 22#include "core/hle/kernel/resource_limit.h"
19#include "core/hle/kernel/scheduler.h" 23#include "core/hle/kernel/scheduler.h"
20#include "core/hle/kernel/thread.h" 24#include "core/hle/kernel/thread.h"
21#include "core/hle/kernel/vm_manager.h"
22#include "core/memory.h" 25#include "core/memory.h"
23#include "core/settings.h" 26#include "core/settings.h"
24 27
@@ -31,10 +34,8 @@ namespace {
31 * @param kernel The kernel instance to create the main thread under. 34 * @param kernel The kernel instance to create the main thread under.
32 * @param priority The priority to give the main thread 35 * @param priority The priority to give the main thread
33 */ 36 */
34void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) { 37void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority, VAddr stack_top) {
35 const auto& vm_manager = owner_process.VMManager(); 38 const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
36 const VAddr entry_point = vm_manager.GetCodeRegionBaseAddress();
37 const VAddr stack_top = vm_manager.GetTLSIORegionEndAddress();
38 auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0, 39 auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0,
39 owner_process.GetIdealCore(), stack_top, owner_process); 40 owner_process.GetIdealCore(), stack_top, owner_process);
40 41
@@ -42,6 +43,8 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) {
42 43
43 // Register 1 must be a handle to the main thread 44 // Register 1 must be a handle to the main thread
44 const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap(); 45 const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap();
46 thread->GetContext32().cpu_registers[0] = 0;
47 thread->GetContext64().cpu_registers[0] = 0;
45 thread->GetContext32().cpu_registers[1] = thread_handle; 48 thread->GetContext32().cpu_registers[1] = thread_handle;
46 thread->GetContext64().cpu_registers[1] = thread_handle; 49 thread->GetContext64().cpu_registers[1] = thread_handle;
47 50
@@ -57,7 +60,8 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) {
57// (whichever page happens to have an available slot). 60// (whichever page happens to have an available slot).
58class TLSPage { 61class TLSPage {
59public: 62public:
60 static constexpr std::size_t num_slot_entries = Memory::PAGE_SIZE / Memory::TLS_ENTRY_SIZE; 63 static constexpr std::size_t num_slot_entries =
64 Core::Memory::PAGE_SIZE / Core::Memory::TLS_ENTRY_SIZE;
61 65
62 explicit TLSPage(VAddr address) : base_address{address} {} 66 explicit TLSPage(VAddr address) : base_address{address} {}
63 67
@@ -76,7 +80,7 @@ public:
76 } 80 }
77 81
78 is_slot_used[i] = true; 82 is_slot_used[i] = true;
79 return base_address + (i * Memory::TLS_ENTRY_SIZE); 83 return base_address + (i * Core::Memory::TLS_ENTRY_SIZE);
80 } 84 }
81 85
82 return std::nullopt; 86 return std::nullopt;
@@ -86,15 +90,15 @@ public:
86 // Ensure that all given addresses are consistent with how TLS pages 90 // Ensure that all given addresses are consistent with how TLS pages
87 // are intended to be used when releasing slots. 91 // are intended to be used when releasing slots.
88 ASSERT(IsWithinPage(address)); 92 ASSERT(IsWithinPage(address));
89 ASSERT((address % Memory::TLS_ENTRY_SIZE) == 0); 93 ASSERT((address % Core::Memory::TLS_ENTRY_SIZE) == 0);
90 94
91 const std::size_t index = (address - base_address) / Memory::TLS_ENTRY_SIZE; 95 const std::size_t index = (address - base_address) / Core::Memory::TLS_ENTRY_SIZE;
92 is_slot_used[index] = false; 96 is_slot_used[index] = false;
93 } 97 }
94 98
95private: 99private:
96 bool IsWithinPage(VAddr address) const { 100 bool IsWithinPage(VAddr address) const {
97 return base_address <= address && address < base_address + Memory::PAGE_SIZE; 101 return base_address <= address && address < base_address + Core::Memory::PAGE_SIZE;
98 } 102 }
99 103
100 VAddr base_address; 104 VAddr base_address;
@@ -106,7 +110,7 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
106 110
107 std::shared_ptr<Process> process = std::make_shared<Process>(system); 111 std::shared_ptr<Process> process = std::make_shared<Process>(system);
108 process->name = std::move(name); 112 process->name = std::move(name);
109 process->resource_limit = kernel.GetSystemResourceLimit(); 113 process->resource_limit = ResourceLimit::Create(kernel);
110 process->status = ProcessStatus::Created; 114 process->status = ProcessStatus::Created;
111 process->program_id = 0; 115 process->program_id = 0;
112 process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() 116 process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
@@ -127,7 +131,14 @@ std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
127} 131}
128 132
129u64 Process::GetTotalPhysicalMemoryAvailable() const { 133u64 Process::GetTotalPhysicalMemoryAvailable() const {
130 return vm_manager.GetTotalPhysicalMemoryAvailable(); 134 const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) +
135 page_table->GetTotalHeapSize() + image_size + main_thread_stack_size};
136
137 if (capacity < memory_usage_capacity) {
138 return capacity;
139 }
140
141 return memory_usage_capacity;
131} 142}
132 143
133u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { 144u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
@@ -135,8 +146,7 @@ u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
135} 146}
136 147
137u64 Process::GetTotalPhysicalMemoryUsed() const { 148u64 Process::GetTotalPhysicalMemoryUsed() const {
138 return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size + 149 return image_size + main_thread_stack_size + page_table->GetTotalHeapSize();
139 GetSystemResourceUsage();
140} 150}
141 151
142u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { 152u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
@@ -209,33 +219,82 @@ ResultCode Process::ClearSignalState() {
209 return RESULT_SUCCESS; 219 return RESULT_SUCCESS;
210} 220}
211 221
212ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) { 222ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
223 std::size_t code_size) {
213 program_id = metadata.GetTitleID(); 224 program_id = metadata.GetTitleID();
214 ideal_core = metadata.GetMainThreadCore(); 225 ideal_core = metadata.GetMainThreadCore();
215 is_64bit_process = metadata.Is64BitProgram(); 226 is_64bit_process = metadata.Is64BitProgram();
216 system_resource_size = metadata.GetSystemResourceSize(); 227 system_resource_size = metadata.GetSystemResourceSize();
228 image_size = code_size;
229
230 // Initialize process address space
231 if (const ResultCode result{
232 page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
233 code_size, Memory::MemoryManager::Pool::Application)};
234 result.IsError()) {
235 return result;
236 }
217 237
218 vm_manager.Reset(metadata.GetAddressSpaceType()); 238 // Map process code region
239 if (const ResultCode result{page_table->MapProcessCode(
240 page_table->GetCodeRegionStart(), code_size / Memory::PageSize,
241 Memory::MemoryState::Code, Memory::MemoryPermission::None)};
242 result.IsError()) {
243 return result;
244 }
219 245
220 const auto& caps = metadata.GetKernelCapabilities(); 246 // Initialize process capabilities
221 const auto capability_init_result = 247 const auto& caps{metadata.GetKernelCapabilities()};
222 capabilities.InitializeForUserProcess(caps.data(), caps.size(), vm_manager); 248 if (const ResultCode result{
223 if (capability_init_result.IsError()) { 249 capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
224 return capability_init_result; 250 result.IsError()) {
251 return result;
225 } 252 }
226 253
254 // Set memory usage capacity
255 switch (metadata.GetAddressSpaceType()) {
256 case FileSys::ProgramAddressSpaceType::Is32Bit:
257 case FileSys::ProgramAddressSpaceType::Is36Bit:
258 case FileSys::ProgramAddressSpaceType::Is39Bit:
259 memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart();
260 break;
261
262 case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
263 memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() +
264 page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart();
265 break;
266
267 default:
268 UNREACHABLE();
269 }
270
271 // Set initial resource limits
272 resource_limit->SetLimitValue(
273 ResourceType::PhysicalMemory,
274 kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
275 resource_limit->SetLimitValue(ResourceType::Threads, 608);
276 resource_limit->SetLimitValue(ResourceType::Events, 700);
277 resource_limit->SetLimitValue(ResourceType::TransferMemory, 128);
278 resource_limit->SetLimitValue(ResourceType::Sessions, 894);
279 ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, code_size));
280
281 // Create TLS region
282 tls_region_address = CreateTLSRegion();
283
227 return handle_table.SetSize(capabilities.GetHandleTableSize()); 284 return handle_table.SetSize(capabilities.GetHandleTableSize());
228} 285}
229 286
230void Process::Run(s32 main_thread_priority, u64 stack_size) { 287void Process::Run(s32 main_thread_priority, u64 stack_size) {
231 AllocateMainThreadStack(stack_size); 288 AllocateMainThreadStack(stack_size);
232 tls_region_address = CreateTLSRegion();
233 289
234 vm_manager.LogLayout(); 290 const std::size_t heap_capacity{memory_usage_capacity - main_thread_stack_size - image_size};
291 ASSERT(!page_table->SetHeapCapacity(heap_capacity).IsError());
235 292
236 ChangeStatus(ProcessStatus::Running); 293 ChangeStatus(ProcessStatus::Running);
237 294
238 SetupMainThread(*this, kernel, main_thread_priority); 295 SetupMainThread(*this, kernel, main_thread_priority, main_thread_stack_top);
296 resource_limit->Reserve(ResourceType::Threads, 1);
297 resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size);
239} 298}
240 299
241void Process::PrepareForTermination() { 300void Process::PrepareForTermination() {
@@ -279,32 +338,37 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
279} 338}
280 339
281VAddr Process::CreateTLSRegion() { 340VAddr Process::CreateTLSRegion() {
282 auto tls_page_iter = FindTLSPageWithAvailableSlots(tls_pages); 341 if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
342 tls_page_iter != tls_pages.cend()) {
343 return *tls_page_iter->ReserveSlot();
344 }
283 345
284 if (tls_page_iter == tls_pages.cend()) { 346 Memory::Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()};
285 const auto region_address = 347 ASSERT(tls_page_ptr);
286 vm_manager.FindFreeRegion(vm_manager.GetTLSIORegionBaseAddress(),
287 vm_manager.GetTLSIORegionEndAddress(), Memory::PAGE_SIZE);
288 ASSERT(region_address.Succeeded());
289 348
290 const auto map_result = vm_manager.MapMemoryBlock( 349 const VAddr start{page_table->GetKernelMapRegionStart()};
291 *region_address, std::make_shared<PhysicalMemory>(Memory::PAGE_SIZE), 0, 350 const VAddr size{page_table->GetKernelMapRegionEnd() - start};
292 Memory::PAGE_SIZE, MemoryState::ThreadLocal); 351 const PAddr tls_map_addr{system.DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
293 ASSERT(map_result.Succeeded()); 352 const VAddr tls_page_addr{
353 page_table
354 ->AllocateAndMapMemory(1, Memory::PageSize, true, start, size / Memory::PageSize,
355 Memory::MemoryState::ThreadLocal,
356 Memory::MemoryPermission::ReadAndWrite, tls_map_addr)
357 .ValueOr(0)};
294 358
295 tls_pages.emplace_back(*region_address); 359 ASSERT(tls_page_addr);
296 360
297 const auto reserve_result = tls_pages.back().ReserveSlot(); 361 std::memset(tls_page_ptr, 0, Memory::PageSize);
298 ASSERT(reserve_result.has_value()); 362 tls_pages.emplace_back(tls_page_addr);
299 363
300 return *reserve_result; 364 const auto reserve_result{tls_pages.back().ReserveSlot()};
301 } 365 ASSERT(reserve_result.has_value());
302 366
303 return *tls_page_iter->ReserveSlot(); 367 return *reserve_result;
304} 368}
305 369
306void Process::FreeTLSRegion(VAddr tls_address) { 370void Process::FreeTLSRegion(VAddr tls_address) {
307 const VAddr aligned_address = Common::AlignDown(tls_address, Memory::PAGE_SIZE); 371 const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
308 auto iter = 372 auto iter =
309 std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { 373 std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
310 return page.GetBaseAddress() == aligned_address; 374 return page.GetBaseAddress() == aligned_address;
@@ -317,28 +381,22 @@ void Process::FreeTLSRegion(VAddr tls_address) {
317 iter->ReleaseSlot(tls_address); 381 iter->ReleaseSlot(tls_address);
318} 382}
319 383
320void Process::LoadModule(CodeSet module_, VAddr base_addr) { 384void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
321 code_memory_size += module_.memory.size(); 385 const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
322 386 Memory::MemoryPermission permission) {
323 const auto memory = std::make_shared<PhysicalMemory>(std::move(module_.memory)); 387 page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission);
324
325 const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions,
326 MemoryState memory_state) {
327 const auto vma = vm_manager
328 .MapMemoryBlock(segment.addr + base_addr, memory, segment.offset,
329 segment.size, memory_state)
330 .Unwrap();
331 vm_manager.Reprotect(vma, permissions);
332 }; 388 };
333 389
334 // Map CodeSet segments 390 system.Memory().WriteBlock(*this, base_addr, code_set.memory.data(), code_set.memory.size());
335 MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code); 391
336 MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData); 392 ReprotectSegment(code_set.CodeSegment(), Memory::MemoryPermission::ReadAndExecute);
337 MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData); 393 ReprotectSegment(code_set.RODataSegment(), Memory::MemoryPermission::Read);
394 ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite);
338} 395}
339 396
340Process::Process(Core::System& system) 397Process::Process(Core::System& system)
341 : SynchronizationObject{system.Kernel()}, vm_manager{system}, 398 : SynchronizationObject{system.Kernel()}, page_table{std::make_unique<Memory::PageTable>(
399 system)},
342 address_arbiter{system}, mutex{system}, system{system} {} 400 address_arbiter{system}, mutex{system}, system{system} {}
343 401
344Process::~Process() = default; 402Process::~Process() = default;
@@ -361,16 +419,24 @@ void Process::ChangeStatus(ProcessStatus new_status) {
361 Signal(); 419 Signal();
362} 420}
363 421
364void Process::AllocateMainThreadStack(u64 stack_size) { 422ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) {
423 ASSERT(stack_size);
424
365 // The kernel always ensures that the given stack size is page aligned. 425 // The kernel always ensures that the given stack size is page aligned.
366 main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE); 426 main_thread_stack_size = Common::AlignUp(stack_size, Memory::PageSize);
367 427
368 // Allocate and map the main thread stack 428 const VAddr start{page_table->GetStackRegionStart()};
369 const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size; 429 const std::size_t size{page_table->GetStackRegionEnd() - start};
370 vm_manager 430
371 .MapMemoryBlock(mapping_address, std::make_shared<PhysicalMemory>(main_thread_stack_size), 431 CASCADE_RESULT(main_thread_stack_top,
372 0, main_thread_stack_size, MemoryState::Stack) 432 page_table->AllocateAndMapMemory(
373 .Unwrap(); 433 main_thread_stack_size / Memory::PageSize, Memory::PageSize, false, start,
434 size / Memory::PageSize, Memory::MemoryState::Stack,
435 Memory::MemoryPermission::ReadAndWrite));
436
437 main_thread_stack_top += main_thread_stack_size;
438
439 return RESULT_SUCCESS;
374} 440}
375 441
376} // namespace Kernel 442} // namespace Kernel
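
Back-of-the-envelope for the TLS bookkeeping above, using the usual Switch constants (assumed here: Core::Memory::PAGE_SIZE = 0x1000, TLS_ENTRY_SIZE = 0x200): each page donated by GetUserSlabHeapPages() holds eight slots, and ReserveSlot/ReleaseSlot translate between slot indices and addresses as follows:

    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t kPageSize = 0x1000;    // assumed PAGE_SIZE
    constexpr std::size_t kTlsEntrySize = 0x200; // assumed TLS_ENTRY_SIZE
    constexpr std::size_t kSlotsPerPage = kPageSize / kTlsEntrySize;
    static_assert(kSlotsPerPage == 8);

    // The index <-> address mapping used by ReserveSlot()/ReleaseSlot().
    constexpr std::uint64_t SlotAddress(std::uint64_t base, std::size_t index) {
        return base + index * kTlsEntrySize;
    }
    constexpr std::size_t SlotIndex(std::uint64_t base, std::uint64_t address) {
        return static_cast<std::size_t>((address - base) / kTlsEntrySize);
    }
    static_assert(SlotIndex(0x10000, SlotAddress(0x10000, 5)) == 5);
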
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 4887132a7..9dabe3568 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -16,7 +16,6 @@
16#include "core/hle/kernel/mutex.h" 16#include "core/hle/kernel/mutex.h"
17#include "core/hle/kernel/process_capability.h" 17#include "core/hle/kernel/process_capability.h"
18#include "core/hle/kernel/synchronization_object.h" 18#include "core/hle/kernel/synchronization_object.h"
19#include "core/hle/kernel/vm_manager.h"
20#include "core/hle/result.h" 19#include "core/hle/result.h"
21 20
22namespace Core { 21namespace Core {
@@ -36,6 +35,10 @@ class TLSPage;
36 35
37struct CodeSet; 36struct CodeSet;
38 37
38namespace Memory {
39class PageTable;
40}
41
39enum class MemoryRegion : u16 { 42enum class MemoryRegion : u16 {
40 APPLICATION = 1, 43 APPLICATION = 1,
41 SYSTEM = 2, 44 SYSTEM = 2,
@@ -100,14 +103,14 @@ public:
100 return HANDLE_TYPE; 103 return HANDLE_TYPE;
101 } 104 }
102 105
103 /// Gets a reference to the process' memory manager. 106 /// Gets a reference to the process' page table.
104 Kernel::VMManager& VMManager() { 107 Memory::PageTable& PageTable() {
105 return vm_manager; 108 return *page_table;
106 } 109 }
107 110
108 /// Gets a const reference to the process' memory manager. 111 /// Gets a const reference to the process' page table.
109 const Kernel::VMManager& VMManager() const { 112 const Memory::PageTable& PageTable() const {
110 return vm_manager; 113 return *page_table;
111 } 114 }
112 115
113 /// Gets a reference to the process' handle table. 116 /// Gets a reference to the process' handle table.
@@ -273,7 +276,7 @@ public:
273 * @returns RESULT_SUCCESS if all relevant metadata was able to be 276 * @returns RESULT_SUCCESS if all relevant metadata was able to be
274 * loaded and parsed. Otherwise, an error code is returned. 277 * loaded and parsed. Otherwise, an error code is returned.
275 */ 278 */
276 ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata); 279 ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);
277 280
278 /** 281 /**
279 * Starts the main application thread for this process. 282 * Starts the main application thread for this process.
@@ -289,7 +292,7 @@ public:
289 */ 292 */
290 void PrepareForTermination(); 293 void PrepareForTermination();
291 294
292 void LoadModule(CodeSet module_, VAddr base_addr); 295 void LoadModule(CodeSet code_set, VAddr base_addr);
293 296
294 /////////////////////////////////////////////////////////////////////////////////////////////// 297 ///////////////////////////////////////////////////////////////////////////////////////////////
295 // Thread-local storage management 298 // Thread-local storage management
@@ -313,16 +316,10 @@ private:
313 void ChangeStatus(ProcessStatus new_status); 316 void ChangeStatus(ProcessStatus new_status);
314 317
315 /// Allocates the main thread stack for the process, given the stack size in bytes. 318 /// Allocates the main thread stack for the process, given the stack size in bytes.
316 void AllocateMainThreadStack(u64 stack_size); 319 ResultCode AllocateMainThreadStack(std::size_t stack_size);
317
318 /// Memory manager for this process.
319 Kernel::VMManager vm_manager;
320
321 /// Size of the main thread's stack in bytes.
322 u64 main_thread_stack_size = 0;
323 320
324 /// Size of the loaded code memory in bytes. 321 /// Page table describing this process' address space
325 u64 code_memory_size = 0; 322 std::unique_ptr<Memory::PageTable> page_table;
326 323
327 /// Current status of the process 324 /// Current status of the process
328 ProcessStatus status{}; 325 ProcessStatus status{};
@@ -390,6 +387,18 @@ private:
390 387
391 /// Name of this process 388 /// Name of this process
392 std::string name; 389 std::string name;
390
391 /// Address of the top of the main thread's stack
392 VAddr main_thread_stack_top{};
393
394 /// Size of the main thread's stack
395 std::size_t main_thread_stack_size{};
396
397 /// Memory usage capacity for the process
398 std::size_t memory_usage_capacity{};
399
400 /// Process total image size
401 std::size_t image_size{};
393}; 402};
394 403
395} // namespace Kernel 404} // namespace Kernel
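
The new fields slot directly into the accounting queries rewritten in process.cpp above: image_size and main_thread_stack_size are fixed at load/Run time, while the heap contribution is live data from the page table, so total usage is their sum. With purely hypothetical figures:

    #include <cstddef>

    constexpr std::size_t image_size = 0x1000000;            // 16 MiB of code/data
    constexpr std::size_t main_thread_stack_size = 0x100000; // 1 MiB stack
    constexpr std::size_t total_heap_size = 0x2000000;       // PageTable::GetTotalHeapSize()

    // Mirrors Process::GetTotalPhysicalMemoryUsed() from the hunk above.
    constexpr std::size_t total_used =
        image_size + main_thread_stack_size + total_heap_size;
    static_assert(total_used == 0x3100000);
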
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 583e35b79..48e5ae682 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -5,8 +5,8 @@
5#include "common/bit_util.h" 5#include "common/bit_util.h"
6#include "core/hle/kernel/errors.h" 6#include "core/hle/kernel/errors.h"
7#include "core/hle/kernel/handle_table.h" 7#include "core/hle/kernel/handle_table.h"
8#include "core/hle/kernel/memory/page_table.h"
8#include "core/hle/kernel/process_capability.h" 9#include "core/hle/kernel/process_capability.h"
9#include "core/hle/kernel/vm_manager.h"
10 10
11namespace Kernel { 11namespace Kernel {
12namespace { 12namespace {
@@ -66,7 +66,7 @@ u32 GetFlagBitOffset(CapabilityType type) {
66 66
67ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities, 67ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
68 std::size_t num_capabilities, 68 std::size_t num_capabilities,
69 VMManager& vm_manager) { 69 Memory::PageTable& page_table) {
70 Clear(); 70 Clear();
71 71
72 // Allow all cores and priorities. 72 // Allow all cores and priorities.
@@ -74,15 +74,15 @@ ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabiliti
74 priority_mask = 0xFFFFFFFFFFFFFFFF; 74 priority_mask = 0xFFFFFFFFFFFFFFFF;
75 kernel_version = PackedKernelVersion; 75 kernel_version = PackedKernelVersion;
76 76
77 return ParseCapabilities(capabilities, num_capabilities, vm_manager); 77 return ParseCapabilities(capabilities, num_capabilities, page_table);
78} 78}
79 79
80ResultCode ProcessCapabilities::InitializeForUserProcess(const u32* capabilities, 80ResultCode ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
81 std::size_t num_capabilities, 81 std::size_t num_capabilities,
82 VMManager& vm_manager) { 82 Memory::PageTable& page_table) {
83 Clear(); 83 Clear();
84 84
85 return ParseCapabilities(capabilities, num_capabilities, vm_manager); 85 return ParseCapabilities(capabilities, num_capabilities, page_table);
86} 86}
87 87
88void ProcessCapabilities::InitializeForMetadatalessProcess() { 88void ProcessCapabilities::InitializeForMetadatalessProcess() {
@@ -105,7 +105,7 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() {
105 105
106ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities, 106ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
107 std::size_t num_capabilities, 107 std::size_t num_capabilities,
108 VMManager& vm_manager) { 108 Memory::PageTable& page_table) {
109 u32 set_flags = 0; 109 u32 set_flags = 0;
110 u32 set_svc_bits = 0; 110 u32 set_svc_bits = 0;
111 111
@@ -127,13 +127,13 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
127 return ERR_INVALID_COMBINATION; 127 return ERR_INVALID_COMBINATION;
128 } 128 }
129 129
130 const auto result = HandleMapPhysicalFlags(descriptor, size_flags, vm_manager); 130 const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
131 if (result.IsError()) { 131 if (result.IsError()) {
132 return result; 132 return result;
133 } 133 }
134 } else { 134 } else {
135 const auto result = 135 const auto result =
136 ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, vm_manager); 136 ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table);
137 if (result.IsError()) { 137 if (result.IsError()) {
138 return result; 138 return result;
139 } 139 }
@@ -144,7 +144,7 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
144} 144}
145 145
146ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, 146ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits,
147 u32 flag, VMManager& vm_manager) { 147 u32 flag, Memory::PageTable& page_table) {
148 const auto type = GetCapabilityType(flag); 148 const auto type = GetCapabilityType(flag);
149 149
150 if (type == CapabilityType::Unset) { 150 if (type == CapabilityType::Unset) {
@@ -172,7 +172,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
172 case CapabilityType::Syscall: 172 case CapabilityType::Syscall:
173 return HandleSyscallFlags(set_svc_bits, flag); 173 return HandleSyscallFlags(set_svc_bits, flag);
174 case CapabilityType::MapIO: 174 case CapabilityType::MapIO:
175 return HandleMapIOFlags(flag, vm_manager); 175 return HandleMapIOFlags(flag, page_table);
176 case CapabilityType::Interrupt: 176 case CapabilityType::Interrupt:
177 return HandleInterruptFlags(flag); 177 return HandleInterruptFlags(flag);
178 case CapabilityType::ProgramType: 178 case CapabilityType::ProgramType:
@@ -269,12 +269,12 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
269} 269}
270 270
271ResultCode ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags, 271ResultCode ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
272 VMManager& vm_manager) { 272 Memory::PageTable& page_table) {
273 // TODO(Lioncache): Implement once the memory manager can handle this. 273 // TODO(Lioncache): Implement once the memory manager can handle this.
274 return RESULT_SUCCESS; 274 return RESULT_SUCCESS;
275} 275}
276 276
277ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, VMManager& vm_manager) { 277ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, Memory::PageTable& page_table) {
278 // TODO(Lioncache): Implement once the memory manager can handle this. 278 // TODO(Lioncache): Implement once the memory manager can handle this.
279 return RESULT_SUCCESS; 279 return RESULT_SUCCESS;
280} 280}
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h
index 5cdd80747..ea9d12c16 100644
--- a/src/core/hle/kernel/process_capability.h
+++ b/src/core/hle/kernel/process_capability.h
@@ -12,7 +12,9 @@ union ResultCode;
12 12
13namespace Kernel { 13namespace Kernel {
14 14
15class VMManager; 15namespace Memory {
16class PageTable;
17}
16 18
17/// The possible types of programs that may be indicated 19/// The possible types of programs that may be indicated
18/// by the program type capability descriptor. 20/// by the program type capability descriptor.
@@ -81,27 +83,27 @@ public:
81 /// 83 ///
82 /// @param capabilities The capabilities to parse 84 /// @param capabilities The capabilities to parse
83 /// @param num_capabilities The number of capabilities to parse. 85 /// @param num_capabilities The number of capabilities to parse.
84 /// @param vm_manager The memory manager to use for handling any mapping-related 86 /// @param page_table The page table to use for handling any mapping-related
85 /// operations (such as mapping IO memory, etc). 87 /// operations (such as mapping IO memory, etc).
86 /// 88 ///
87 /// @returns RESULT_SUCCESS if this capabilities instance was able to be initialized, 89 /// @returns RESULT_SUCCESS if this capabilities instance was able to be initialized,
88 /// otherwise, an error code upon failure. 90 /// otherwise, an error code upon failure.
89 /// 91 ///
90 ResultCode InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities, 92 ResultCode InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
91 VMManager& vm_manager); 93 Memory::PageTable& page_table);
92 94
93 /// Initializes this process capabilities instance for a userland process. 95 /// Initializes this process capabilities instance for a userland process.
94 /// 96 ///
95 /// @param capabilities The capabilities to parse. 97 /// @param capabilities The capabilities to parse.
96 /// @param num_capabilities The total number of capabilities to parse. 98 /// @param num_capabilities The total number of capabilities to parse.
97 /// @param vm_manager The memory manager to use for handling any mapping-related 99 /// @param page_table The page table to use for handling any mapping-related
98 /// operations (such as mapping IO memory, etc). 100 /// operations (such as mapping IO memory, etc).
99 /// 101 ///
100 /// @returns RESULT_SUCCESS if this capabilities instance was able to be initialized, 102 /// @returns RESULT_SUCCESS if this capabilities instance was able to be initialized,
101 /// otherwise, an error code upon failure. 103 /// otherwise, an error code upon failure.
102 /// 104 ///
103 ResultCode InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities, 105 ResultCode InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
104 VMManager& vm_manager); 106 Memory::PageTable& page_table);
105 107
106 /// Initializes this process capabilities instance for a process that does not 108 /// Initializes this process capabilities instance for a process that does not
107 /// have any metadata to parse. 109 /// have any metadata to parse.
@@ -181,13 +183,13 @@ private:
181 /// 183 ///
182 /// @param capabilities The sequence of capability descriptors to parse. 184 /// @param capabilities The sequence of capability descriptors to parse.
183 /// @param num_capabilities The number of descriptors within the given sequence. 185 /// @param num_capabilities The number of descriptors within the given sequence.
184 /// @param vm_manager The memory manager that will perform any memory 186 /// @param page_table The page table that will perform any memory
185 /// mapping if necessary. 187 /// mapping if necessary.
186 /// 188 ///
187 /// @return RESULT_SUCCESS if no errors occur, otherwise an error code. 189 /// @return RESULT_SUCCESS if no errors occur, otherwise an error code.
188 /// 190 ///
189 ResultCode ParseCapabilities(const u32* capabilities, std::size_t num_capabilities, 191 ResultCode ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
190 VMManager& vm_manager); 192 Memory::PageTable& page_table);
191 193
192 /// Attempts to parse a capability descriptor that is only represented by a 194 /// Attempts to parse a capability descriptor that is only represented by a
193 /// single flag set. 195 /// single flag set.
@@ -196,13 +198,13 @@ private:
196 /// flags being initialized more than once when they shouldn't be. 198 /// flags being initialized more than once when they shouldn't be.
197 /// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask. 199 /// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask.
198 /// @param flag The flag to attempt to parse. 200 /// @param flag The flag to attempt to parse.
199 /// @param vm_manager The memory manager that will perform any memory 201 /// @param page_table The page table that will perform any memory
200 /// mapping if necessary. 202 /// mapping if necessary.
201 /// 203 ///
202 /// @return RESULT_SUCCESS if no errors occurred, otherwise an error code. 204 /// @return RESULT_SUCCESS if no errors occurred, otherwise an error code.
203 /// 205 ///
204 ResultCode ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag, 206 ResultCode ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
205 VMManager& vm_manager); 207 Memory::PageTable& page_table);
206 208
207 /// Clears the internal state of this process capability instance. Necessary, 209 /// Clears the internal state of this process capability instance. Necessary,
208 /// to have a sane starting point due to us allowing running executables without 210 /// to have a sane starting point due to us allowing running executables without
@@ -226,10 +228,10 @@ private:
226 ResultCode HandleSyscallFlags(u32& set_svc_bits, u32 flags); 228 ResultCode HandleSyscallFlags(u32& set_svc_bits, u32 flags);
227 229
228 /// Handles flags related to mapping physical memory pages. 230 /// Handles flags related to mapping physical memory pages.
229 ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, VMManager& vm_manager); 231 ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, Memory::PageTable& page_table);
230 232
231 /// Handles flags related to mapping IO pages. 233 /// Handles flags related to mapping IO pages.
232 ResultCode HandleMapIOFlags(u32 flags, VMManager& vm_manager); 234 ResultCode HandleMapIOFlags(u32 flags, Memory::PageTable& page_table);
233 235
234 /// Handles flags related to the interrupt capability flags. 236 /// Handles flags related to the interrupt capability flags.
235 ResultCode HandleInterruptFlags(u32 flags); 237 ResultCode HandleInterruptFlags(u32 flags);
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index b53423462..96e5b9892 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -16,26 +16,60 @@ constexpr std::size_t ResourceTypeToIndex(ResourceType type) {
16ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {} 16ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {}
17ResourceLimit::~ResourceLimit() = default; 17ResourceLimit::~ResourceLimit() = default;
18 18
19bool ResourceLimit::Reserve(ResourceType resource, s64 amount) {
20 return Reserve(resource, amount, 10000000000);
21}
22
23bool ResourceLimit::Reserve(ResourceType resource, s64 amount, u64 timeout) {
24 const std::size_t index{ResourceTypeToIndex(resource)};
25
26 s64 new_value = current[index] + amount;
27 while (new_value > limit[index] && available[index] + amount <= limit[index]) {
28 // TODO(bunnei): This is wrong for multicore, we should wait the calling thread for timeout
29 new_value = current[index] + amount;
30
31 if (timeout >= 0) {
32 break;
33 }
34 }
35
36 if (new_value <= limit[index]) {
37 current[index] = new_value;
38 return true;
39 }
40 return false;
41}
42
43void ResourceLimit::Release(ResourceType resource, u64 amount) {
44 Release(resource, amount, amount);
45}
46
47void ResourceLimit::Release(ResourceType resource, u64 used_amount, u64 available_amount) {
48 const std::size_t index{ResourceTypeToIndex(resource)};
49
50 current[index] -= used_amount;
51 available[index] -= available_amount;
52}
53
19std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) { 54std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
20 return std::make_shared<ResourceLimit>(kernel); 55 return std::make_shared<ResourceLimit>(kernel);
21} 56}
22 57
23s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const { 58s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
24 return values.at(ResourceTypeToIndex(resource)); 59 return limit.at(ResourceTypeToIndex(resource)) - current.at(ResourceTypeToIndex(resource));
25} 60}
26 61
27s64 ResourceLimit::GetMaxResourceValue(ResourceType resource) const { 62s64 ResourceLimit::GetMaxResourceValue(ResourceType resource) const {
28 return limits.at(ResourceTypeToIndex(resource)); 63 return limit.at(ResourceTypeToIndex(resource));
29} 64}
30 65
31ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) { 66ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) {
32 const auto index = ResourceTypeToIndex(resource); 67 const std::size_t index{ResourceTypeToIndex(resource)};
33 68 if (current[index] <= value) {
34 if (value < values[index]) { 69 limit[index] = value;
70 return RESULT_SUCCESS;
71 } else {
35 return ERR_INVALID_STATE; 72 return ERR_INVALID_STATE;
36 } 73 }
37
38 values[index] = value;
39 return RESULT_SUCCESS;
40} 74}
41} // namespace Kernel 75} // namespace Kernel
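
Taken together, the new arrays behave as follows: limit is the ceiling, current tracks what has been reserved, and (despite its name) GetCurrentResourceValue now reports the remaining headroom, i.e. limit - current. A hypothetical session against a freshly created limit object:

    #include <cassert>

    #include "core/hle/kernel/resource_limit.h"

    void ResourceLimitExample(Kernel::ResourceLimit& rl) {
        using Kernel::ResourceType;

        rl.SetLimitValue(ResourceType::Threads, 608);
        assert(rl.GetMaxResourceValue(ResourceType::Threads) == 608);

        // Reserve bumps `current`; it fails once current + amount exceeds `limit`.
        assert(rl.Reserve(ResourceType::Threads, 1));
        assert(rl.GetCurrentResourceValue(ResourceType::Threads) == 607); // headroom

        // Release hands the reservation back.
        rl.Release(ResourceType::Threads, 1);
        assert(rl.GetCurrentResourceValue(ResourceType::Threads) == 608);
    }
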
diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h
index 53b89e621..936cc4d0f 100644
--- a/src/core/hle/kernel/resource_limit.h
+++ b/src/core/hle/kernel/resource_limit.h
@@ -51,6 +51,11 @@ public:
51 return HANDLE_TYPE; 51 return HANDLE_TYPE;
52 } 52 }
53 53
54 bool Reserve(ResourceType resource, s64 amount);
55 bool Reserve(ResourceType resource, s64 amount, u64 timeout);
56 void Release(ResourceType resource, u64 amount);
57 void Release(ResourceType resource, u64 used_amount, u64 available_amount);
58
54 /** 59 /**
55 * Gets the current value for the specified resource. 60 * Gets the current value for the specified resource.
56 * @param resource Requested resource type 61 * @param resource Requested resource type
@@ -91,10 +96,9 @@ private:
91 using ResourceArray = 96 using ResourceArray =
92 std::array<s64, static_cast<std::size_t>(ResourceType::ResourceTypeCount)>; 97 std::array<s64, static_cast<std::size_t>(ResourceType::ResourceTypeCount)>;
93 98
94 /// Maximum values a resource type may reach. 99 ResourceArray limit{};
95 ResourceArray limits{}; 100 ResourceArray current{};
96 /// Current resource limit values. 101 ResourceArray available{};
97 ResourceArray values{};
98}; 102};
99 103
100} // namespace Kernel 104} // namespace Kernel
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 4604e35c5..0f102ca44 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -134,7 +134,8 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
134 return RESULT_SUCCESS; 134 return RESULT_SUCCESS;
135} 135}
136 136
137ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory) { 137ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread,
138 Core::Memory::Memory& memory) {
138 u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))}; 139 u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
139 std::shared_ptr<Kernel::HLERequestContext> context{ 140 std::shared_ptr<Kernel::HLERequestContext> context{
140 std::make_shared<Kernel::HLERequestContext>(SharedFrom(this), std::move(thread))}; 141 std::make_shared<Kernel::HLERequestContext>(SharedFrom(this), std::move(thread))};
@@ -178,7 +179,7 @@ ResultCode ServerSession::CompleteSyncRequest() {
178} 179}
179 180
180ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread, 181ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
181 Memory::Memory& memory) { 182 Core::Memory::Memory& memory) {
182 Core::System::GetInstance().CoreTiming().ScheduleEvent(20000, request_event, {}); 183 Core::System::GetInstance().CoreTiming().ScheduleEvent(20000, request_event, {});
183 return QueueSyncRequest(std::move(thread), memory); 184 return QueueSyncRequest(std::move(thread), memory);
184} 185}
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index 77e4f6721..403aaf10b 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -13,7 +13,7 @@
13#include "core/hle/kernel/synchronization_object.h" 13#include "core/hle/kernel/synchronization_object.h"
14#include "core/hle/result.h" 14#include "core/hle/result.h"
15 15
16namespace Memory { 16namespace Core::Memory {
17class Memory; 17class Memory;
18} 18}
19 19
@@ -92,7 +92,7 @@ public:
      *
      * @returns ResultCode from the operation.
      */
-    ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory);
+    ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
 
     bool ShouldWait(const Thread* thread) const override;
 
@@ -126,7 +126,7 @@ public:
 
 private:
     /// Queues a sync request from the emulated application.
-    ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory);
+    ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
 
     /// Completes a sync request from the emulated application.
     ResultCode CompleteSyncRequest();
diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp
index afb2e3fc2..c67696757 100644
--- a/src/core/hle/kernel/shared_memory.cpp
+++ b/src/core/hle/kernel/shared_memory.cpp
@@ -2,149 +2,56 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#include <utility>
-
 #include "common/assert.h"
-#include "common/logging/log.h"
-#include "core/hle/kernel/errors.h"
+#include "core/core.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/shared_memory.h"
 
 namespace Kernel {
 
-SharedMemory::SharedMemory(KernelCore& kernel) : Object{kernel} {}
+SharedMemory::SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory)
+    : Object{kernel}, device_memory{device_memory} {}
+
 SharedMemory::~SharedMemory() = default;
 
-std::shared_ptr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_process,
-                                                   u64 size, MemoryPermission permissions,
-                                                   MemoryPermission other_permissions,
-                                                   VAddr address, MemoryRegion region,
-                                                   std::string name) {
-    std::shared_ptr<SharedMemory> shared_memory = std::make_shared<SharedMemory>(kernel);
+std::shared_ptr<SharedMemory> SharedMemory::Create(
+    KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
+    Memory::PageLinkedList&& page_list, Memory::MemoryPermission owner_permission,
+    Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size,
+    std::string name) {
+
+    std::shared_ptr<SharedMemory> shared_memory{
+        std::make_shared<SharedMemory>(kernel, device_memory)};
 
     shared_memory->owner_process = owner_process;
-    shared_memory->name = std::move(name);
+    shared_memory->page_list = std::move(page_list);
+    shared_memory->owner_permission = owner_permission;
+    shared_memory->user_permission = user_permission;
+    shared_memory->physical_address = physical_address;
     shared_memory->size = size;
-    shared_memory->permissions = permissions;
-    shared_memory->other_permissions = other_permissions;
+    shared_memory->name = name;
 
-    if (address == 0) {
-        shared_memory->backing_block = std::make_shared<Kernel::PhysicalMemory>(size);
-        shared_memory->backing_block_offset = 0;
-
-        // Refresh the address mappings for the current process.
-        if (kernel.CurrentProcess() != nullptr) {
-            kernel.CurrentProcess()->VMManager().RefreshMemoryBlockMappings(
-                shared_memory->backing_block.get());
-        }
-    } else {
-        const auto& vm_manager = shared_memory->owner_process->VMManager();
-
-        // The memory is already available and mapped in the owner process.
-        const auto vma = vm_manager.FindVMA(address);
-        ASSERT_MSG(vm_manager.IsValidHandle(vma), "Invalid memory address");
-        ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address");
-
-        // The returned VMA might be a bigger one encompassing the desired address.
-        const auto vma_offset = address - vma->first;
-        ASSERT_MSG(vma_offset + size <= vma->second.size,
-                   "Shared memory exceeds bounds of mapped block");
-
-        shared_memory->backing_block = vma->second.backing_block;
-        shared_memory->backing_block_offset = vma->second.offset + vma_offset;
-    }
-
-    shared_memory->base_address = address;
-
     return shared_memory;
 }
 
-std::shared_ptr<SharedMemory> SharedMemory::CreateForApplet(
-    KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset,
-    u64 size, MemoryPermission permissions, MemoryPermission other_permissions, std::string name) {
-    std::shared_ptr<SharedMemory> shared_memory = std::make_shared<SharedMemory>(kernel);
-
-    shared_memory->owner_process = nullptr;
-    shared_memory->name = std::move(name);
-    shared_memory->size = size;
-    shared_memory->permissions = permissions;
-    shared_memory->other_permissions = other_permissions;
-    shared_memory->backing_block = std::move(heap_block);
-    shared_memory->backing_block_offset = offset;
-    shared_memory->base_address =
-        kernel.CurrentProcess()->VMManager().GetHeapRegionBaseAddress() + offset;
-
-    return shared_memory;
-}
-
-ResultCode SharedMemory::Map(Process& target_process, VAddr address, MemoryPermission permissions,
-                             MemoryPermission other_permissions) {
-    const MemoryPermission own_other_permissions =
-        &target_process == owner_process ? this->permissions : this->other_permissions;
-
-    // Automatically allocated memory blocks can only be mapped with other_permissions = DontCare
-    if (base_address == 0 && other_permissions != MemoryPermission::DontCare) {
-        return ERR_INVALID_MEMORY_PERMISSIONS;
-    }
-
-    // Error out if the requested permissions don't match what the creator process allows.
-    if (static_cast<u32>(permissions) & ~static_cast<u32>(own_other_permissions)) {
-        LOG_ERROR(Kernel, "cannot map id={}, address=0x{:X} name={}, permissions don't match",
-                  GetObjectId(), address, name);
-        return ERR_INVALID_MEMORY_PERMISSIONS;
-    }
-
-    // Error out if the provided permissions are not compatible with what the creator process needs.
-    if (other_permissions != MemoryPermission::DontCare &&
-        static_cast<u32>(this->permissions) & ~static_cast<u32>(other_permissions)) {
-        LOG_ERROR(Kernel, "cannot map id={}, address=0x{:X} name={}, permissions don't match",
-                  GetObjectId(), address, name);
-        return ERR_INVALID_MEMORY_PERMISSIONS;
-    }
-
-    VAddr target_address = address;
-
-    // Map the memory block into the target process
-    auto result = target_process.VMManager().MapMemoryBlock(
-        target_address, backing_block, backing_block_offset, size, MemoryState::Shared);
-    if (result.Failed()) {
-        LOG_ERROR(
-            Kernel,
-            "cannot map id={}, target_address=0x{:X} name={}, error mapping to virtual memory",
-            GetObjectId(), target_address, name);
-        return result.Code();
-    }
-
-    return target_process.VMManager().ReprotectRange(target_address, size,
-                                                     ConvertPermissions(permissions));
-}
-
-ResultCode SharedMemory::Unmap(Process& target_process, VAddr address, u64 unmap_size) {
-    if (unmap_size != size) {
-        LOG_ERROR(Kernel,
-                  "Invalid size passed to Unmap. Size must be equal to the size of the "
-                  "memory managed. Shared memory size=0x{:016X}, Unmap size=0x{:016X}",
-                  size, unmap_size);
-        return ERR_INVALID_SIZE;
-    }
-
-    // TODO(Subv): Verify what happens if the application tries to unmap an address that is not
-    // mapped to a SharedMemory.
-    return target_process.VMManager().UnmapRange(address, size);
-}
-
-VMAPermission SharedMemory::ConvertPermissions(MemoryPermission permission) {
-    u32 masked_permissions =
-        static_cast<u32>(permission) & static_cast<u32>(MemoryPermission::ReadWriteExecute);
-    return static_cast<VMAPermission>(masked_permissions);
-}
-
-u8* SharedMemory::GetPointer(std::size_t offset) {
-    return backing_block->data() + backing_block_offset + offset;
-}
-
-const u8* SharedMemory::GetPointer(std::size_t offset) const {
-    return backing_block->data() + backing_block_offset + offset;
-}
+ResultCode SharedMemory::Map(Process& target_process, VAddr address, std::size_t size,
+                             Memory::MemoryPermission permission) {
+    const u64 page_count{(size + Memory::PageSize - 1) / Memory::PageSize};
+
+    if (page_list.GetNumPages() != page_count) {
+        UNIMPLEMENTED_MSG("Page count does not match");
+    }
+
+    Memory::MemoryPermission expected =
+        &target_process == owner_process ? owner_permission : user_permission;
+
+    if (permission != expected) {
+        UNIMPLEMENTED_MSG("Permission does not match");
+    }
+
+    return target_process.PageTable().MapPages(address, page_list, Memory::MemoryState::Shared,
+                                               permission);
+}
 
 } // namespace Kernel
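
The rewritten Map above derives the expected page count with a ceiling division and refuses mappings whose size disagrees with the backing page list. A small self-contained sketch of that arithmetic (PageSize assumed to be the 4 KiB used throughout this kernel code):

    #include <cstdint>
    #include <iostream>

    constexpr std::uint64_t PageSize = 0x1000; // 4 KiB, as on the emulated hardware

    // Round a byte size up to whole pages, as in SharedMemory::Map.
    constexpr std::uint64_t PageCount(std::uint64_t size) {
        return (size + PageSize - 1) / PageSize;
    }

    int main() {
        std::cout << PageCount(0x1000) << '\n'; // 1
        std::cout << PageCount(0x1001) << '\n'; // 2: a partial page still occupies a full page
    }
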
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index 014951d82..cd16d6412 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -8,8 +8,10 @@
 #include <string>
 
 #include "common/common_types.h"
+#include "core/device_memory.h"
+#include "core/hle/kernel/memory/memory_block.h"
+#include "core/hle/kernel/memory/page_linked_list.h"
 #include "core/hle/kernel/object.h"
-#include "core/hle/kernel/physical_memory.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/result.h"
 
@@ -17,63 +19,21 @@ namespace Kernel {
 
 class KernelCore;
 
-/// Permissions for mapped shared memory blocks
-enum class MemoryPermission : u32 {
-    None = 0,
-    Read = (1u << 0),
-    Write = (1u << 1),
-    ReadWrite = (Read | Write),
-    Execute = (1u << 2),
-    ReadExecute = (Read | Execute),
-    WriteExecute = (Write | Execute),
-    ReadWriteExecute = (Read | Write | Execute),
-    DontCare = (1u << 28)
-};
-
 class SharedMemory final : public Object {
 public:
-    explicit SharedMemory(KernelCore& kernel);
+    explicit SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory);
     ~SharedMemory() override;
 
-    /**
-     * Creates a shared memory object.
-     * @param kernel The kernel instance to create a shared memory instance under.
-     * @param owner_process Process that created this shared memory object.
-     * @param size Size of the memory block. Must be page-aligned.
-     * @param permissions Permission restrictions applied to the process which created the block.
-     * @param other_permissions Permission restrictions applied to other processes mapping the
-     * block.
-     * @param address The address from which to map the Shared Memory.
-     * @param region If the address is 0, the shared memory will be allocated in this region of the
-     * linear heap.
-     * @param name Optional object name, used for debugging purposes.
-     */
-    static std::shared_ptr<SharedMemory> Create(KernelCore& kernel, Process* owner_process,
-                                                u64 size, MemoryPermission permissions,
-                                                MemoryPermission other_permissions,
-                                                VAddr address = 0,
-                                                MemoryRegion region = MemoryRegion::BASE,
-                                                std::string name = "Unknown");
-
-    /**
-     * Creates a shared memory object from a block of memory managed by an HLE applet.
-     * @param kernel The kernel instance to create a shared memory instance under.
-     * @param heap_block Heap block of the HLE applet.
-     * @param offset The offset into the heap block that the SharedMemory will map.
-     * @param size Size of the memory block. Must be page-aligned.
-     * @param permissions Permission restrictions applied to the process which created the block.
-     * @param other_permissions Permission restrictions applied to other processes mapping the
-     * block.
-     * @param name Optional object name, used for debugging purposes.
-     */
-    static std::shared_ptr<SharedMemory> CreateForApplet(
-        KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset,
-        u64 size, MemoryPermission permissions, MemoryPermission other_permissions,
-        std::string name = "Unknown Applet");
+    static std::shared_ptr<SharedMemory> Create(
+        KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
+        Memory::PageLinkedList&& page_list, Memory::MemoryPermission owner_permission,
+        Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size,
+        std::string name);
 
     std::string GetTypeName() const override {
         return "SharedMemory";
     }
+
     std::string GetName() const override {
         return name;
     }
@@ -83,71 +43,42 @@ public:
         return HANDLE_TYPE;
     }
 
-    /// Gets the size of the underlying memory block in bytes.
-    u64 GetSize() const {
-        return size;
-    }
-
-    /**
-     * Converts the specified MemoryPermission into the equivalent VMAPermission.
-     * @param permission The MemoryPermission to convert.
-     */
-    static VMAPermission ConvertPermissions(MemoryPermission permission);
-
     /**
      * Maps a shared memory block to an address in the target process' address space
-     * @param target_process Process on which to map the memory block.
+     * @param target_process Process on which to map the memory block
      * @param address Address in system memory to map shared memory block to
+     * @param size Size of the shared memory block to map
      * @param permissions Memory block map permissions (specified by SVC field)
-     * @param other_permissions Memory block map other permissions (specified by SVC field)
-     */
-    ResultCode Map(Process& target_process, VAddr address, MemoryPermission permissions,
-                   MemoryPermission other_permissions);
-
-    /**
-     * Unmaps a shared memory block from the specified address in system memory
-     *
-     * @param target_process Process from which to unmap the memory block.
-     * @param address Address in system memory where the shared memory block is mapped.
-     * @param unmap_size The amount of bytes to unmap from this shared memory instance.
-     *
-     * @return Result code of the unmap operation
-     *
-     * @pre The given size to unmap must be the same size as the amount of memory managed by
-     * the SharedMemory instance itself, otherwise ERR_INVALID_SIZE will be returned.
      */
-    ResultCode Unmap(Process& target_process, VAddr address, u64 unmap_size);
+    ResultCode Map(Process& target_process, VAddr address, std::size_t size,
+                   Memory::MemoryPermission permission);
 
     /**
      * Gets a pointer to the shared memory block
      * @param offset Offset from the start of the shared memory block to get pointer
      * @return A pointer to the shared memory block from the specified offset
      */
-    u8* GetPointer(std::size_t offset = 0);
+    u8* GetPointer(std::size_t offset = 0) {
+        return device_memory.GetPointer(physical_address + offset);
+    }
 
     /**
-     * Gets a constant pointer to the shared memory block
+     * Gets a pointer to the shared memory block
      * @param offset Offset from the start of the shared memory block to get pointer
-     * @return A constant pointer to the shared memory block from the specified offset
+     * @return A pointer to the shared memory block from the specified offset
      */
-    const u8* GetPointer(std::size_t offset = 0) const;
+    const u8* GetPointer(std::size_t offset = 0) const {
+        return device_memory.GetPointer(physical_address + offset);
+    }
 
 private:
-    /// Backing memory for this shared memory block.
-    std::shared_ptr<PhysicalMemory> backing_block;
-    /// Offset into the backing block for this shared memory.
-    std::size_t backing_block_offset = 0;
-    /// Size of the memory block. Page-aligned.
-    u64 size = 0;
-    /// Permission restrictions applied to the process which created the block.
-    MemoryPermission permissions{};
-    /// Permission restrictions applied to other processes mapping the block.
-    MemoryPermission other_permissions{};
-    /// Process that created this shared memory block.
-    Process* owner_process;
-    /// Address of shared memory block in the owner process if specified.
-    VAddr base_address = 0;
-    /// Name of shared memory object.
+    Core::DeviceMemory& device_memory;
+    Process* owner_process{};
+    Memory::PageLinkedList page_list;
+    Memory::MemoryPermission owner_permission{};
+    Memory::MemoryPermission user_permission{};
+    PAddr physical_address{};
+    std::size_t size{};
     std::string name;
 };
 
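
With the backing block removed, GetPointer becomes plain offset arithmetic into one flat device-memory arena keyed by physical address. A sketch of the scheme — DeviceMemorySketch is a hypothetical stand-in, not the real Core::DeviceMemory API:

    #include <cstdint>
    #include <vector>

    // Illustrative stand-in for a flat, host-allocated physical memory arena.
    class DeviceMemorySketch {
    public:
        DeviceMemorySketch(std::size_t size, std::uint64_t base)
            : backing(size), base_paddr{base} {}

        // Translate an emulated physical address into a host pointer.
        std::uint8_t* GetPointer(std::uint64_t paddr) {
            return backing.data() + (paddr - base_paddr);
        }

    private:
        std::vector<std::uint8_t> backing;
        std::uint64_t base_paddr;
    };

    // Shared memory then needs only its physical base:
    //   u8* ptr = device_memory.GetPointer(physical_address + offset);
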
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 4ffc113c2..4134acf65 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -24,6 +24,8 @@
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/memory/memory_block.h"
+#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/mutex.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/readable_event.h"
@@ -31,6 +33,7 @@
 #include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/shared_memory.h"
 #include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_types.h"
 #include "core/hle/kernel/svc_wrap.h"
 #include "core/hle/kernel/synchronization.h"
 #include "core/hle/kernel/thread.h"
@@ -42,7 +45,7 @@
 #include "core/memory.h"
 #include "core/reporter.h"
 
-namespace Kernel {
+namespace Kernel::Svc {
 namespace {
 
 // Checks if address + size is greater than the given address
@@ -58,8 +61,8 @@ constexpr u64 MAIN_MEMORY_SIZE = 0x200000000;
 // Helper function that performs the common sanity checks for svcMapMemory
 // and svcUnmapMemory. This is doable, as both functions perform their sanitizing
 // in the same order.
-ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_addr, VAddr src_addr,
-                                      u64 size) {
+ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr dst_addr,
+                                      VAddr src_addr, u64 size) {
     if (!Common::Is4KBAligned(dst_addr)) {
         LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
         return ERR_INVALID_ADDRESS;
@@ -93,36 +96,33 @@ ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_add
         return ERR_INVALID_ADDRESS_STATE;
     }
 
-    if (!vm_manager.IsWithinAddressSpace(src_addr, size)) {
+    if (!manager.IsInsideAddressSpace(src_addr, size)) {
         LOG_ERROR(Kernel_SVC,
                   "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
                   src_addr, size);
         return ERR_INVALID_ADDRESS_STATE;
     }
 
-    if (!vm_manager.IsWithinStackRegion(dst_addr, size)) {
+    if (manager.IsOutsideStackRegion(dst_addr, size)) {
         LOG_ERROR(Kernel_SVC,
                   "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
                   dst_addr, size);
         return ERR_INVALID_MEMORY_RANGE;
     }
 
-    const VAddr dst_end_address = dst_addr + size;
-    if (dst_end_address > vm_manager.GetHeapRegionBaseAddress() &&
-        vm_manager.GetHeapRegionEndAddress() > dst_addr) {
+    if (manager.IsInsideHeapRegion(dst_addr, size)) {
         LOG_ERROR(Kernel_SVC,
                   "Destination does not fit within the heap region, addr=0x{:016X}, "
-                  "size=0x{:016X}, end_addr=0x{:016X}",
-                  dst_addr, size, dst_end_address);
+                  "size=0x{:016X}",
+                  dst_addr, size);
         return ERR_INVALID_MEMORY_RANGE;
     }
 
-    if (dst_end_address > vm_manager.GetMapRegionBaseAddress() &&
-        vm_manager.GetMapRegionEndAddress() > dst_addr) {
+    if (manager.IsInsideAliasRegion(dst_addr, size)) {
         LOG_ERROR(Kernel_SVC,
                   "Destination does not fit within the map region, addr=0x{:016X}, "
-                  "size=0x{:016X}, end_addr=0x{:016X}",
-                  dst_addr, size, dst_end_address);
+                  "size=0x{:016X}",
+                  dst_addr, size);
         return ERR_INVALID_MEMORY_RANGE;
     }
 
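
The heap and alias checks above replace open-coded end-address comparisons with named predicates; both reduce to a half-open interval overlap test. A sketch of the predicate the deleted lines spelled out inline (region bounds below are made up for the demo):

    #include <cstdint>
    #include <iostream>

    // True when [addr, addr + size) overlaps [region_start, region_end),
    // the same test the removed dst_end_address comparisons performed.
    constexpr bool Overlaps(std::uint64_t addr, std::uint64_t size,
                            std::uint64_t region_start, std::uint64_t region_end) {
        return addr + size > region_start && region_end > addr;
    }

    int main() {
        constexpr std::uint64_t heap_start = 0x10000000, heap_end = 0x20000000;
        std::cout << Overlaps(0x0FFFF000, 0x2000, heap_start, heap_end) << '\n'; // 1: straddles start
        std::cout << Overlaps(0x20000000, 0x1000, heap_start, heap_end) << '\n'; // 0: begins at end
    }
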
@@ -177,13 +177,10 @@ static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_s
         return ERR_INVALID_SIZE;
     }
 
-    auto& vm_manager = system.Kernel().CurrentProcess()->VMManager();
-    const auto alloc_result = vm_manager.SetHeapSize(heap_size);
-    if (alloc_result.Failed()) {
-        return alloc_result.Code();
-    }
+    auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
+
+    CASCADE_RESULT(*heap_addr, page_table.SetHeapSize(heap_size));
 
-    *heap_addr = *alloc_result;
     return RESULT_SUCCESS;
 }
 
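
CASCADE_RESULT removes the check-then-unwrap boilerplate that the deleted lines wrote out by hand. A hedged sketch of what such a macro can expand to, using simplified toy types (the project's actual macro and ResultVal differ in detail):

    #include <cstdint>
    #include <optional>

    // Toy stand-in for ResultVal<T>: an error code plus an optional value.
    template <typename T>
    struct ResultValSketch {
        int code = 0; // 0 == success
        std::optional<T> value;
        bool Failed() const { return code != 0; }
    };

    // Simplified expansion: return the error, or assign the unwrapped value.
    #define CASCADE_RESULT_SKETCH(target, expr)                                \
        do {                                                                   \
            auto cascade_rv = (expr);                                          \
            if (cascade_rv.Failed()) {                                         \
                return cascade_rv.code;                                        \
            }                                                                  \
            (target) = *cascade_rv.value;                                      \
        } while (0)

    ResultValSketch<std::uint64_t> SetHeapSizeSketch(std::uint64_t size) {
        if (size > 0x180000000ULL) {
            return {1, std::nullopt}; // ERR_INVALID_SIZE analogue
        }
        return {0, 0x8000000ULL}; // pretend heap base address
    }

    int SetHeapSizeCaller(std::uint64_t* heap_addr) {
        CASCADE_RESULT_SKETCH(*heap_addr, SetHeapSizeSketch(0x200000));
        return 0; // RESULT_SUCCESS analogue
    }
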
@@ -194,63 +191,6 @@ static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_s
     return result;
 }
 
-static ResultCode SetMemoryPermission(Core::System& system, VAddr addr, u64 size, u32 prot) {
-    LOG_TRACE(Kernel_SVC, "called, addr=0x{:X}, size=0x{:X}, prot=0x{:X}", addr, size, prot);
-
-    if (!Common::Is4KBAligned(addr)) {
-        LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (size == 0) {
-        LOG_ERROR(Kernel_SVC, "Size is 0");
-        return ERR_INVALID_SIZE;
-    }
-
-    if (!Common::Is4KBAligned(size)) {
-        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size);
-        return ERR_INVALID_SIZE;
-    }
-
-    if (!IsValidAddressRange(addr, size)) {
-        LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
-                  addr, size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    const auto permission = static_cast<MemoryPermission>(prot);
-    if (permission != MemoryPermission::None && permission != MemoryPermission::Read &&
-        permission != MemoryPermission::ReadWrite) {
-        LOG_ERROR(Kernel_SVC, "Invalid memory permission specified, Got memory permission=0x{:08X}",
-                  static_cast<u32>(permission));
-        return ERR_INVALID_MEMORY_PERMISSIONS;
-    }
-
-    auto* const current_process = system.Kernel().CurrentProcess();
-    auto& vm_manager = current_process->VMManager();
-
-    if (!vm_manager.IsWithinAddressSpace(addr, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
-                  size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    const VMManager::VMAHandle iter = vm_manager.FindVMA(addr);
-    if (!vm_manager.IsValidHandle(iter)) {
-        LOG_ERROR(Kernel_SVC, "Unable to find VMA for address=0x{:016X}", addr);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    LOG_WARNING(Kernel_SVC, "Uniformity check on protected memory is not implemented.");
-    // TODO: Performs a uniformity check to make sure only protected memory is changed (it doesn't
-    // make sense to allow changing permissions on kernel memory itself, etc).
-
-    const auto converted_permissions = SharedMemory::ConvertPermissions(permission);
-
-    return vm_manager.ReprotectRange(addr, size, converted_permissions);
-}
-
 static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
                                      u32 attribute) {
     LOG_DEBUG(Kernel_SVC,
@@ -274,30 +214,19 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
         return ERR_INVALID_ADDRESS_STATE;
     }
 
-    const auto mem_attribute = static_cast<MemoryAttribute>(attribute);
-    const auto mem_mask = static_cast<MemoryAttribute>(mask);
-    const auto attribute_with_mask = mem_attribute | mem_mask;
-
-    if (attribute_with_mask != mem_mask) {
+    const auto attributes{static_cast<Memory::MemoryAttribute>(mask | attribute)};
+    if (attributes != static_cast<Memory::MemoryAttribute>(mask) ||
+        (attributes | Memory::MemoryAttribute::Uncached) != Memory::MemoryAttribute::Uncached) {
         LOG_ERROR(Kernel_SVC,
                   "Memory attribute doesn't match the given mask (Attribute: 0x{:X}, Mask: {:X}",
                   attribute, mask);
         return ERR_INVALID_COMBINATION;
     }
 
-    if ((attribute_with_mask | MemoryAttribute::Uncached) != MemoryAttribute::Uncached) {
-        LOG_ERROR(Kernel_SVC, "Specified attribute isn't equal to MemoryAttributeUncached (8).");
-        return ERR_INVALID_COMBINATION;
-    }
-
-    auto& vm_manager = system.Kernel().CurrentProcess()->VMManager();
-    if (!vm_manager.IsWithinAddressSpace(address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Given address (0x{:016X}) is outside the bounds of the address space.", address);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
+    auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
 
-    return vm_manager.SetMemoryAttribute(address, size, mem_mask, mem_attribute);
+    return page_table.SetMemoryAttribute(address, size, static_cast<Memory::MemoryAttribute>(mask),
+                                         static_cast<Memory::MemoryAttribute>(attribute));
 }
 
 /// Maps a memory range into a different range.
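
The folded condition above checks two things at once: the attribute bits must be a subset of the mask, and only the Uncached bit may appear at all. A sketch with plain integers showing both comparisons (Uncached = 8 is assumed from the old log message):

    #include <cstdint>
    #include <iostream>

    constexpr std::uint32_t Uncached = 8; // assumed SVC attribute bit

    constexpr bool IsValidAttributeRequest(std::uint32_t mask, std::uint32_t attribute) {
        const std::uint32_t attributes = mask | attribute;
        // 1) attribute must be a subset of mask, so mask | attribute == mask;
        // 2) only the Uncached bit may appear at all.
        return attributes == mask && (attributes | Uncached) == Uncached;
    }

    int main() {
        std::cout << IsValidAttributeRequest(8, 8) << '\n'; // 1: set Uncached
        std::cout << IsValidAttributeRequest(8, 0) << '\n'; // 1: clear Uncached
        std::cout << IsValidAttributeRequest(0, 8) << '\n'; // 0: attribute outside mask
        std::cout << IsValidAttributeRequest(4, 4) << '\n'; // 0: bit other than Uncached
    }
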
@@ -305,14 +234,14 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr
     LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
               src_addr, size);
 
-    auto& vm_manager = system.Kernel().CurrentProcess()->VMManager();
-    const auto result = MapUnmapMemorySanityChecks(vm_manager, dst_addr, src_addr, size);
+    auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
 
-    if (result.IsError()) {
+    if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
+        result.IsError()) {
         return result;
     }
 
-    return vm_manager.MirrorMemory(dst_addr, src_addr, size, MemoryState::Stack);
+    return page_table.Map(dst_addr, src_addr, size);
 }
 
 /// Unmaps a region that was previously mapped with svcMapMemory
@@ -320,21 +249,14 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
     LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
               src_addr, size);
 
-    auto& vm_manager = system.Kernel().CurrentProcess()->VMManager();
-    const auto result = MapUnmapMemorySanityChecks(vm_manager, dst_addr, src_addr, size);
+    auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
 
-    if (result.IsError()) {
+    if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
+        result.IsError()) {
         return result;
     }
 
-    const auto unmap_res = vm_manager.UnmapRange(dst_addr, size);
-
-    // Reprotect the source mapping on success
-    if (unmap_res.IsSuccess()) {
-        ASSERT(vm_manager.ReprotectRange(src_addr, size, VMAPermission::ReadWrite).IsSuccess());
-    }
-
-    return unmap_res;
+    return page_table.Unmap(dst_addr, src_addr, size);
 }
 
 /// Connect to an OS service given the port name, returns the handle to the port to out
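
Both call sites above now use a C++17 if-with-initializer so the ResultCode is scoped to the check itself and cannot leak into the rest of the function. The pattern in isolation, with stand-in types rather than the project's headers:

    #include <iostream>

    struct ResultCodeSketch {
        int raw = 0;
        bool IsError() const { return raw != 0; }
    };

    ResultCodeSketch SanityChecksSketch() { return {0}; } // stand-in for the checks helper

    int main() {
        // `result` exists only inside the if/else of this statement.
        if (const ResultCodeSketch result{SanityChecksSketch()}; result.IsError()) {
            std::cout << "bail with " << result.raw << '\n';
            return result.raw;
        }
        std::cout << "checks passed\n";
    }
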
@@ -367,6 +289,8 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
         return ERR_NOT_FOUND;
     }
 
+    ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Sessions, 1));
+
     auto client_port = it->second;
 
     std::shared_ptr<ClientSession> client_session;
@@ -538,7 +462,7 @@ static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_hand
538 "requesting_current_thread_handle=0x{:08X}", 462 "requesting_current_thread_handle=0x{:08X}",
539 holding_thread_handle, mutex_addr, requesting_thread_handle); 463 holding_thread_handle, mutex_addr, requesting_thread_handle);
540 464
541 if (Memory::IsKernelVirtualAddress(mutex_addr)) { 465 if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
542 LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}", 466 LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
543 mutex_addr); 467 mutex_addr);
544 return ERR_INVALID_ADDRESS_STATE; 468 return ERR_INVALID_ADDRESS_STATE;
@@ -558,7 +482,7 @@ static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_hand
 static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
     LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);
 
-    if (Memory::IsKernelVirtualAddress(mutex_addr)) {
+    if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
         LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
                   mutex_addr);
         return ERR_INVALID_ADDRESS_STATE;
@@ -683,7 +607,6 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
     auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
     const auto thread_processor_id = current_thread->GetProcessorID();
     system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
-    ASSERT(false);
 
     system.Kernel().CurrentProcess()->PrepareForTermination();
 
@@ -785,35 +708,35 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
         return RESULT_SUCCESS;
 
     case GetInfoType::MapRegionBaseAddr:
-        *result = process->VMManager().GetMapRegionBaseAddress();
+        *result = process->PageTable().GetAliasRegionStart();
         return RESULT_SUCCESS;
 
     case GetInfoType::MapRegionSize:
-        *result = process->VMManager().GetMapRegionSize();
+        *result = process->PageTable().GetAliasRegionSize();
         return RESULT_SUCCESS;
 
     case GetInfoType::HeapRegionBaseAddr:
-        *result = process->VMManager().GetHeapRegionBaseAddress();
+        *result = process->PageTable().GetHeapRegionStart();
         return RESULT_SUCCESS;
 
     case GetInfoType::HeapRegionSize:
-        *result = process->VMManager().GetHeapRegionSize();
+        *result = process->PageTable().GetHeapRegionSize();
         return RESULT_SUCCESS;
 
     case GetInfoType::ASLRRegionBaseAddr:
-        *result = process->VMManager().GetASLRRegionBaseAddress();
+        *result = process->PageTable().GetAliasCodeRegionStart();
         return RESULT_SUCCESS;
 
     case GetInfoType::ASLRRegionSize:
-        *result = process->VMManager().GetASLRRegionSize();
+        *result = process->PageTable().GetAliasCodeRegionSize();
         return RESULT_SUCCESS;
 
     case GetInfoType::StackRegionBaseAddr:
-        *result = process->VMManager().GetStackRegionBaseAddress();
+        *result = process->PageTable().GetStackRegionStart();
         return RESULT_SUCCESS;
 
     case GetInfoType::StackRegionSize:
-        *result = process->VMManager().GetStackRegionSize();
+        *result = process->PageTable().GetStackRegionSize();
         return RESULT_SUCCESS;
 
     case GetInfoType::TotalPhysicalMemoryAvailable:
@@ -987,20 +910,29 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
         return ERR_INVALID_MEMORY_RANGE;
     }
 
-    Process* const current_process = system.Kernel().CurrentProcess();
-    auto& vm_manager = current_process->VMManager();
+    Process* const current_process{system.Kernel().CurrentProcess()};
+    auto& page_table{current_process->PageTable()};
 
     if (current_process->GetSystemResourceSize() == 0) {
         LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
         return ERR_INVALID_STATE;
     }
 
-    if (!vm_manager.IsWithinMapRegion(addr, size)) {
-        LOG_ERROR(Kernel_SVC, "Range not within map region");
+    if (!page_table.IsInsideAddressSpace(addr, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
+                  size);
+        return ERR_INVALID_MEMORY_RANGE;
+    }
+
+    if (page_table.IsOutsideAliasRegion(addr, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
+                  size);
         return ERR_INVALID_MEMORY_RANGE;
     }
 
-    return vm_manager.MapPhysicalMemory(addr, size);
+    return page_table.MapPhysicalMemory(addr, size);
 }
 
 /// Unmaps memory previously mapped via MapPhysicalMemory
@@ -1027,20 +959,29 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
         return ERR_INVALID_MEMORY_RANGE;
     }
 
-    Process* const current_process = system.Kernel().CurrentProcess();
-    auto& vm_manager = current_process->VMManager();
+    Process* const current_process{system.Kernel().CurrentProcess()};
+    auto& page_table{current_process->PageTable()};
 
     if (current_process->GetSystemResourceSize() == 0) {
         LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
         return ERR_INVALID_STATE;
     }
 
-    if (!vm_manager.IsWithinMapRegion(addr, size)) {
-        LOG_ERROR(Kernel_SVC, "Range not within map region");
+    if (!page_table.IsInsideAddressSpace(addr, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
+                  size);
+        return ERR_INVALID_MEMORY_RANGE;
+    }
+
+    if (page_table.IsOutsideAliasRegion(addr, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
+                  size);
         return ERR_INVALID_MEMORY_RANGE;
     }
 
-    return vm_manager.UnmapPhysicalMemory(addr, size);
+    return page_table.UnmapPhysicalMemory(addr, size);
 }
 
 /// Sets the thread activity
@@ -1197,74 +1138,49 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
         return ERR_INVALID_ADDRESS_STATE;
     }
 
-    const auto permissions_type = static_cast<MemoryPermission>(permissions);
-    if (permissions_type != MemoryPermission::Read &&
-        permissions_type != MemoryPermission::ReadWrite) {
+    const auto permission_type = static_cast<Memory::MemoryPermission>(permissions);
+    if ((permission_type | Memory::MemoryPermission::Write) !=
+        Memory::MemoryPermission::ReadAndWrite) {
         LOG_ERROR(Kernel_SVC, "Expected Read or ReadWrite permission but got permissions=0x{:08X}",
                   permissions);
         return ERR_INVALID_MEMORY_PERMISSIONS;
     }
 
-    auto* const current_process = system.Kernel().CurrentProcess();
-    auto shared_memory = current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle);
-    if (!shared_memory) {
-        LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}",
-                  shared_memory_handle);
-        return ERR_INVALID_HANDLE;
-    }
+    auto* const current_process{system.Kernel().CurrentProcess()};
+    auto& page_table{current_process->PageTable()};
 
-    const auto& vm_manager = current_process->VMManager();
-    if (!vm_manager.IsWithinASLRRegion(addr, size)) {
-        LOG_ERROR(Kernel_SVC, "Region is not within the ASLR region. addr=0x{:016X}, size={:016X}",
+    if (page_table.IsInvalidRegion(addr, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Addr does not fit within the valid region, addr=0x{:016X}, "
+                  "size=0x{:016X}",
                   addr, size);
         return ERR_INVALID_MEMORY_RANGE;
     }
 
-    return shared_memory->Map(*current_process, addr, permissions_type, MemoryPermission::DontCare);
-}
-
-static ResultCode UnmapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr,
-                                    u64 size) {
-    LOG_WARNING(Kernel_SVC, "called, shared_memory_handle=0x{:08X}, addr=0x{:X}, size=0x{:X}",
-                shared_memory_handle, addr, size);
-
-    if (!Common::Is4KBAligned(addr)) {
-        LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (size == 0) {
-        LOG_ERROR(Kernel_SVC, "Size is 0");
-        return ERR_INVALID_SIZE;
-    }
-
-    if (!Common::Is4KBAligned(size)) {
-        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size);
-        return ERR_INVALID_SIZE;
+    if (page_table.IsInsideHeapRegion(addr, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Addr does not fit within the heap region, addr=0x{:016X}, "
+                  "size=0x{:016X}",
+                  addr, size);
+        return ERR_INVALID_MEMORY_RANGE;
     }
 
-    if (!IsValidAddressRange(addr, size)) {
-        LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
+    if (page_table.IsInsideAliasRegion(addr, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Address does not fit within the map region, addr=0x{:016X}, "
+                  "size=0x{:016X}",
                   addr, size);
-        return ERR_INVALID_ADDRESS_STATE;
+        return ERR_INVALID_MEMORY_RANGE;
     }
 
-    auto* const current_process = system.Kernel().CurrentProcess();
-    auto shared_memory = current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle);
+    auto shared_memory{current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle)};
     if (!shared_memory) {
         LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}",
                   shared_memory_handle);
         return ERR_INVALID_HANDLE;
     }
 
-    const auto& vm_manager = current_process->VMManager();
-    if (!vm_manager.IsWithinASLRRegion(addr, size)) {
-        LOG_ERROR(Kernel_SVC, "Region is not within the ASLR region. addr=0x{:016X}, size={:016X}",
-                  addr, size);
-        return ERR_INVALID_MEMORY_RANGE;
-    }
-
-    return shared_memory->Unmap(*current_process, addr, size);
+    return shared_memory->Map(*current_process, addr, size, permission_type);
 }
 
 static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
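
The rewritten permission test above condenses "Read or ReadWrite only" into a single expression: OR-ing Write into the request must yield exactly ReadAndWrite. A sketch under the assumed SVC encoding (Read = 1, Write = 2, ReadAndWrite = 3):

    #include <cstdint>
    #include <iostream>

    enum class Perm : std::uint32_t { None = 0, Read = 1, Write = 2, ReadAndWrite = 3 };

    constexpr bool AcceptsSharedMemoryPerm(Perm p) {
        // (p | Write) == ReadAndWrite  <=>  p is Read or ReadAndWrite.
        return (static_cast<std::uint32_t>(p) | static_cast<std::uint32_t>(Perm::Write)) ==
               static_cast<std::uint32_t>(Perm::ReadAndWrite);
    }

    int main() {
        std::cout << AcceptsSharedMemoryPerm(Perm::Read) << '\n';         // 1
        std::cout << AcceptsSharedMemoryPerm(Perm::ReadAndWrite) << '\n'; // 1
        std::cout << AcceptsSharedMemoryPerm(Perm::None) << '\n';         // 0
        std::cout << AcceptsSharedMemoryPerm(Perm::Write) << '\n';        // 0
    }
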
@@ -1279,18 +1195,17 @@ static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_add
         return ERR_INVALID_HANDLE;
     }
 
-    auto& memory = system.Memory();
-    const auto& vm_manager = process->VMManager();
-    const MemoryInfo memory_info = vm_manager.QueryMemory(address);
-
-    memory.Write64(memory_info_address, memory_info.base_address);
-    memory.Write64(memory_info_address + 8, memory_info.size);
-    memory.Write32(memory_info_address + 16, memory_info.state);
-    memory.Write32(memory_info_address + 20, memory_info.attributes);
-    memory.Write32(memory_info_address + 24, memory_info.permission);
-    memory.Write32(memory_info_address + 32, memory_info.ipc_ref_count);
-    memory.Write32(memory_info_address + 28, memory_info.device_ref_count);
-    memory.Write32(memory_info_address + 36, 0);
+    auto& memory{system.Memory()};
+    const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()};
+
+    memory.Write64(memory_info_address + 0x00, memory_info.addr);
+    memory.Write64(memory_info_address + 0x08, memory_info.size);
+    memory.Write32(memory_info_address + 0x10, static_cast<u32>(memory_info.state) & 0xff);
+    memory.Write32(memory_info_address + 0x14, static_cast<u32>(memory_info.attr));
+    memory.Write32(memory_info_address + 0x18, static_cast<u32>(memory_info.perm));
+    memory.Write32(memory_info_address + 0x1c, memory_info.ipc_refcount);
+    memory.Write32(memory_info_address + 0x20, memory_info.device_refcount);
+    memory.Write32(memory_info_address + 0x24, 0);
 
     // Page info appears to be currently unused by the kernel and is always set to zero.
     memory.Write32(page_info_address, 0);
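
The hex offsets written above trace out the guest-visible MemoryInfo record. A sketch of the equivalent struct with the offsets pinned down by static_asserts (field names follow this change's svc_types naming; the trailing padding field is an assumption):

    #include <cstddef>
    #include <cstdint>

    // Guest-facing MemoryInfo record as written by svcQueryMemory.
    struct MemoryInfoSketch {
        std::uint64_t addr;            // +0x00
        std::uint64_t size;            // +0x08
        std::uint32_t state;           // +0x10
        std::uint32_t attr;            // +0x14
        std::uint32_t perm;            // +0x18
        std::uint32_t ipc_refcount;    // +0x1c
        std::uint32_t device_refcount; // +0x20
        std::uint32_t padding;         // +0x24, always written as zero
    };

    static_assert(offsetof(MemoryInfoSketch, state) == 0x10);
    static_assert(offsetof(MemoryInfoSketch, device_refcount) == 0x20);
    static_assert(sizeof(MemoryInfoSketch) == 0x28);

Note that the old code wrote ipc_ref_count at +24 decimal and device_ref_count at +28 decimal, i.e. in swapped and misaligned slots; the new offsets follow the layout above.
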
@@ -1314,142 +1229,6 @@ static ResultCode QueryMemory32(Core::System& system, u32 memory_info_address,
     return QueryMemory(system, memory_info_address, page_info_address, query_address);
 }
 
-static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
-                                       u64 src_address, u64 size) {
-    LOG_DEBUG(Kernel_SVC,
-              "called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
-              "src_address=0x{:016X}, size=0x{:016X}",
-              process_handle, dst_address, src_address, size);
-
-    if (!Common::Is4KBAligned(src_address)) {
-        LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
-                  src_address);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (!Common::Is4KBAligned(dst_address)) {
-        LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
-                  dst_address);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (size == 0 || !Common::Is4KBAligned(size)) {
-        LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
-        return ERR_INVALID_SIZE;
-    }
-
-    if (!IsValidAddressRange(dst_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Destination address range overflows the address space (dst_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  dst_address, size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    if (!IsValidAddressRange(src_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Source address range overflows the address space (src_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  src_address, size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
-    auto process = handle_table.Get<Process>(process_handle);
-    if (!process) {
-        LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
-                  process_handle);
-        return ERR_INVALID_HANDLE;
-    }
-
-    auto& vm_manager = process->VMManager();
-    if (!vm_manager.IsWithinAddressSpace(src_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Source address range is not within the address space (src_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  src_address, size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    if (!vm_manager.IsWithinASLRRegion(dst_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  dst_address, size);
-        return ERR_INVALID_MEMORY_RANGE;
-    }
-
-    return vm_manager.MapCodeMemory(dst_address, src_address, size);
-}
-
-static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_handle,
-                                         u64 dst_address, u64 src_address, u64 size) {
-    LOG_DEBUG(Kernel_SVC,
-              "called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
-              "size=0x{:016X}",
-              process_handle, dst_address, src_address, size);
-
-    if (!Common::Is4KBAligned(dst_address)) {
-        LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
-                  dst_address);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (!Common::Is4KBAligned(src_address)) {
-        LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
-                  src_address);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (size == 0 || Common::Is4KBAligned(size)) {
-        LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
-        return ERR_INVALID_SIZE;
-    }
-
-    if (!IsValidAddressRange(dst_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Destination address range overflows the address space (dst_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  dst_address, size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    if (!IsValidAddressRange(src_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Source address range overflows the address space (src_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  src_address, size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
-    auto process = handle_table.Get<Process>(process_handle);
-    if (!process) {
-        LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
-                  process_handle);
-        return ERR_INVALID_HANDLE;
-    }
-
-    auto& vm_manager = process->VMManager();
-    if (!vm_manager.IsWithinAddressSpace(src_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Source address range is not within the address space (src_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  src_address, size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    if (!vm_manager.IsWithinASLRRegion(dst_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  dst_address, size);
-        return ERR_INVALID_MEMORY_RANGE;
-    }
-
-    return vm_manager.UnmapCodeMemory(dst_address, src_address, size);
-}
-
 /// Exits the current process
 static void ExitProcess(Core::System& system) {
     auto* current_process = system.Kernel().CurrentProcess();
@@ -1506,6 +1285,9 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
     }
 
     auto& kernel = system.Kernel();
+
+    ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1));
+
     CASCADE_RESULT(std::shared_ptr<Thread> thread,
                    Thread::Create(kernel, "", entry_point, priority, arg, processor_id, stack_top,
                                   *current_process));
@@ -1610,7 +1392,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
1610 "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}", 1392 "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
1611 mutex_addr, condition_variable_addr, thread_handle, nano_seconds); 1393 mutex_addr, condition_variable_addr, thread_handle, nano_seconds);
1612 1394
1613 if (Memory::IsKernelVirtualAddress(mutex_addr)) { 1395 if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
1614 LOG_ERROR( 1396 LOG_ERROR(
1615 Kernel_SVC, 1397 Kernel_SVC,
1616 "Given mutex address must not be within the kernel address space. address=0x{:016X}", 1398 "Given mutex address must not be within the kernel address space. address=0x{:016X}",
@@ -1741,7 +1523,7 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type,
               type, value, timeout);
 
     // If the passed address is a kernel virtual address, return invalid memory state.
-    if (Memory::IsKernelVirtualAddress(address)) {
+    if (Core::Memory::IsKernelVirtualAddress(address)) {
         LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
         return ERR_INVALID_ADDRESS_STATE;
     }
@@ -1769,7 +1551,7 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type,
               address, type, value, num_to_wake);
 
     // If the passed address is a kernel virtual address, return invalid memory state.
-    if (Memory::IsKernelVirtualAddress(address)) {
+    if (Core::Memory::IsKernelVirtualAddress(address)) {
         LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
         return ERR_INVALID_ADDRESS_STATE;
     }
@@ -1865,9 +1647,9 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
         return ERR_INVALID_ADDRESS_STATE;
     }
 
-    const auto perms = static_cast<MemoryPermission>(permissions);
-    if (perms != MemoryPermission::None && perms != MemoryPermission::Read &&
-        perms != MemoryPermission::ReadWrite) {
+    const auto perms{static_cast<Memory::MemoryPermission>(permissions)};
+    if (perms > Memory::MemoryPermission::ReadAndWrite ||
+        perms == Memory::MemoryPermission::Write) {
         LOG_ERROR(Kernel_SVC, "Invalid memory permissions for transfer memory! (perms={:08X})",
                   permissions);
         return ERR_INVALID_MEMORY_PERMISSIONS;
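
For transfer memory the accepted set is None, Read, or ReadWrite, and the new form leans on the numeric encoding: anything above ReadAndWrite (3) or exactly Write (2) is rejected. A quick enumeration under the same assumed encoding as the shared-memory sketch earlier:

    #include <cstdint>
    #include <iostream>

    // Assumed SVC encoding: None = 0, Read = 1, Write = 2, ReadAndWrite = 3.
    constexpr bool AcceptsTransferMemoryPerm(std::uint32_t perms) {
        return !(perms > 3 || perms == 2); // mirrors `perms > ReadAndWrite || perms == Write`
    }

    int main() {
        for (std::uint32_t p = 0; p <= 4; ++p) {
            std::cout << p << " -> " << AcceptsTransferMemoryPerm(p) << '\n';
        }
        // 0 -> 1 (None), 1 -> 1 (Read), 2 -> 0 (Write), 3 -> 1 (ReadAndWrite), 4 -> 0
    }
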
@@ -1890,111 +1672,6 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
1890 return RESULT_SUCCESS; 1672 return RESULT_SUCCESS;
1891} 1673}
1892 1674
1893static ResultCode MapTransferMemory(Core::System& system, Handle handle, VAddr address, u64 size,
1894 u32 permission_raw) {
1895 LOG_DEBUG(Kernel_SVC,
1896 "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}, permissions=0x{:08X}",
1897 handle, address, size, permission_raw);
1898
1899 if (!Common::Is4KBAligned(address)) {
1900 LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).",
1901 address);
1902 return ERR_INVALID_ADDRESS;
1903 }
1904
1905 if (size == 0 || !Common::Is4KBAligned(size)) {
1906 LOG_ERROR(Kernel_SVC,
1907 "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
1908 size);
1909 return ERR_INVALID_SIZE;
1910 }
1911
1912 if (!IsValidAddressRange(address, size)) {
1913 LOG_ERROR(Kernel_SVC,
1914 "Given address and size overflows the 64-bit range (address=0x{:016X}, "
1915 "size=0x{:016X}).",
1916 address, size);
1917 return ERR_INVALID_ADDRESS_STATE;
1918 }
1919
1920 const auto permissions = static_cast<MemoryPermission>(permission_raw);
1921 if (permissions != MemoryPermission::None && permissions != MemoryPermission::Read &&
1922 permissions != MemoryPermission::ReadWrite) {
1923 LOG_ERROR(Kernel_SVC, "Invalid transfer memory permissions given (permissions=0x{:08X}).",
1924 permission_raw);
1925 return ERR_INVALID_STATE;
1926 }
1927
1928 const auto& kernel = system.Kernel();
1929 const auto* const current_process = kernel.CurrentProcess();
1930 const auto& handle_table = current_process->GetHandleTable();
1931
1932 auto transfer_memory = handle_table.Get<TransferMemory>(handle);
1933 if (!transfer_memory) {
1934 LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
1935 handle);
1936 return ERR_INVALID_HANDLE;
1937 }
1938
1939 if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
1940 LOG_ERROR(Kernel_SVC,
1941 "Given address and size don't fully fit within the ASLR region "
1942 "(address=0x{:016X}, size=0x{:016X}).",
1943 address, size);
1944 return ERR_INVALID_MEMORY_RANGE;
1945 }
1946
1947 return transfer_memory->MapMemory(address, size, permissions);
1948}
1949
1950static ResultCode UnmapTransferMemory(Core::System& system, Handle handle, VAddr address,
1951 u64 size) {
1952 LOG_DEBUG(Kernel_SVC, "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}", handle,
1953 address, size);
1954
1955 if (!Common::Is4KBAligned(address)) {
 1956 LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (address=0x{:016X}).",
1957 address);
1958 return ERR_INVALID_ADDRESS;
1959 }
1960
1961 if (size == 0 || !Common::Is4KBAligned(size)) {
1962 LOG_ERROR(Kernel_SVC,
1963 "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
1964 size);
1965 return ERR_INVALID_SIZE;
1966 }
1967
1968 if (!IsValidAddressRange(address, size)) {
1969 LOG_ERROR(Kernel_SVC,
1970 "Given address and size overflows the 64-bit range (address=0x{:016X}, "
1971 "size=0x{:016X}).",
1972 address, size);
1973 return ERR_INVALID_ADDRESS_STATE;
1974 }
1975
1976 const auto& kernel = system.Kernel();
1977 const auto* const current_process = kernel.CurrentProcess();
1978 const auto& handle_table = current_process->GetHandleTable();
1979
1980 auto transfer_memory = handle_table.Get<TransferMemory>(handle);
1981 if (!transfer_memory) {
1982 LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
1983 handle);
1984 return ERR_INVALID_HANDLE;
1985 }
1986
1987 if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
1988 LOG_ERROR(Kernel_SVC,
1989 "Given address and size don't fully fit within the ASLR region "
1990 "(address=0x{:016X}, size=0x{:016X}).",
1991 address, size);
1992 return ERR_INVALID_MEMORY_RANGE;
1993 }
1994
1995 return transfer_memory->UnmapMemory(address, size);
1996}
1997
1998static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core, 1675static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core,
1999 u64* mask) { 1676 u64* mask) {
2000 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); 1677 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
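Both of the deleted handlers repeated the same alignment and overflow validation before touching the handle table; their mapping duties move into the new page table code, and their SVC table slots (0x51/0x52) are nulled further down. Distilled into one helper, the shared precondition looks roughly like this (helper name hypothetical, not part of the codebase):

    #include <cstdint>

    using u64 = std::uint64_t;
    using VAddr = std::uint64_t;

    constexpr bool Is4KBAligned(u64 value) {
        return (value & 0xFFF) == 0;
    }

    // Mirrors IsValidAddressRange: the range must not wrap the 64-bit space.
    constexpr bool IsValidAddressRange(VAddr address, u64 size) {
        return address + size > address;
    }

    constexpr bool IsValidTransferMemoryRange(VAddr address, u64 size) {
        return Is4KBAligned(address) && size != 0 && Is4KBAligned(size) &&
               IsValidAddressRange(address, size);
    }

    static_assert(IsValidTransferMemoryRange(0x1000, 0x2000));
    static_assert(!IsValidTransferMemoryRange(0x1001, 0x2000)); // unaligned base
    static_assert(!IsValidTransferMemoryRange(0x1000, 0));      // zero size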
@@ -2073,52 +1750,6 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
2073 return RESULT_SUCCESS; 1750 return RESULT_SUCCESS;
2074} 1751}
2075 1752
2076static ResultCode CreateSharedMemory(Core::System& system, Handle* handle, u64 size,
2077 u32 local_permissions, u32 remote_permissions) {
2078 LOG_TRACE(Kernel_SVC, "called, size=0x{:X}, localPerms=0x{:08X}, remotePerms=0x{:08X}", size,
2079 local_permissions, remote_permissions);
2080 if (size == 0) {
2081 LOG_ERROR(Kernel_SVC, "Size is 0");
2082 return ERR_INVALID_SIZE;
2083 }
2084 if (!Common::Is4KBAligned(size)) {
2085 LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
2086 return ERR_INVALID_SIZE;
2087 }
2088
2089 if (size >= MAIN_MEMORY_SIZE) {
2090 LOG_ERROR(Kernel_SVC, "Size is not less than 8GB, 0x{:016X}", size);
2091 return ERR_INVALID_SIZE;
2092 }
2093
2094 const auto local_perms = static_cast<MemoryPermission>(local_permissions);
2095 if (local_perms != MemoryPermission::Read && local_perms != MemoryPermission::ReadWrite) {
2096 LOG_ERROR(Kernel_SVC,
2097 "Invalid local memory permissions, expected Read or ReadWrite but got "
2098 "local_permissions={}",
2099 static_cast<u32>(local_permissions));
2100 return ERR_INVALID_MEMORY_PERMISSIONS;
2101 }
2102
2103 const auto remote_perms = static_cast<MemoryPermission>(remote_permissions);
2104 if (remote_perms != MemoryPermission::Read && remote_perms != MemoryPermission::ReadWrite &&
2105 remote_perms != MemoryPermission::DontCare) {
2106 LOG_ERROR(Kernel_SVC,
2107 "Invalid remote memory permissions, expected Read, ReadWrite or DontCare but got "
2108 "remote_permissions={}",
2109 static_cast<u32>(remote_permissions));
2110 return ERR_INVALID_MEMORY_PERMISSIONS;
2111 }
2112
2113 auto& kernel = system.Kernel();
2114 auto process = kernel.CurrentProcess();
2115 auto& handle_table = process->GetHandleTable();
2116 auto shared_mem_handle = SharedMemory::Create(kernel, process, size, local_perms, remote_perms);
2117
2118 CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle));
2119 return RESULT_SUCCESS;
2120}
2121
2122static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) { 1753static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) {
2123 LOG_DEBUG(Kernel_SVC, "called"); 1754 LOG_DEBUG(Kernel_SVC, "called");
2124 1755
@@ -2305,11 +1936,10 @@ static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
2305 } 1936 }
2306 1937
2307 const auto& kernel = system.Kernel(); 1938 const auto& kernel = system.Kernel();
2308 const auto& vm_manager = kernel.CurrentProcess()->VMManager();
2309 const auto total_copy_size = out_process_ids_size * sizeof(u64); 1939 const auto total_copy_size = out_process_ids_size * sizeof(u64);
2310 1940
2311 if (out_process_ids_size > 0 && 1941 if (out_process_ids_size > 0 && !kernel.CurrentProcess()->PageTable().IsInsideAddressSpace(
2312 !vm_manager.IsWithinAddressSpace(out_process_ids, total_copy_size)) { 1942 out_process_ids, total_copy_size)) {
2313 LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}", 1943 LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
2314 out_process_ids, out_process_ids + total_copy_size); 1944 out_process_ids, out_process_ids + total_copy_size);
2315 return ERR_INVALID_ADDRESS_STATE; 1945 return ERR_INVALID_ADDRESS_STATE;
@@ -2345,11 +1975,10 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
2345 } 1975 }
2346 1976
2347 const auto* const current_process = system.Kernel().CurrentProcess(); 1977 const auto* const current_process = system.Kernel().CurrentProcess();
2348 const auto& vm_manager = current_process->VMManager();
2349 const auto total_copy_size = out_thread_ids_size * sizeof(u64); 1978 const auto total_copy_size = out_thread_ids_size * sizeof(u64);
2350 1979
2351 if (out_thread_ids_size > 0 && 1980 if (out_thread_ids_size > 0 &&
2352 !vm_manager.IsWithinAddressSpace(out_thread_ids, total_copy_size)) { 1981 !current_process->PageTable().IsInsideAddressSpace(out_thread_ids, total_copy_size)) {
2353 LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}", 1982 LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
2354 out_thread_ids, out_thread_ids + total_copy_size); 1983 out_thread_ids, out_thread_ids + total_copy_size);
2355 return ERR_INVALID_ADDRESS_STATE; 1984 return ERR_INVALID_ADDRESS_STATE;
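GetProcessList and GetThreadList now route the output-buffer bounds check through the process page table rather than the retired VMManager. The shape of that check, including the overflow-safe byte count, is approximately as follows (names hypothetical, distilled from the two call sites above):

    #include <cstddef>
    #include <cstdint>

    using VAddr = std::uint64_t;

    struct AddressSpace {
        VAddr start;
        VAddr end; // one past the last valid address

        bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
            return start <= address && address + size >= address && // no wraparound
                   address + size <= end;
        }
    };

    bool CanWriteIdList(const AddressSpace& as, VAddr out_ids, std::uint32_t count) {
        const std::size_t total_copy_size = count * sizeof(std::uint64_t);
        return count == 0 || as.IsInsideAddressSpace(out_ids, total_copy_size);
    }

    int main() {
        const AddressSpace as{0x0, 1ULL << 39};
        return CanWriteIdList(as, 0x10000, 16) ? 0 : 1;
    }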
@@ -2510,7 +2139,7 @@ static const FunctionDef SVC_Table_32[] = {
2510static const FunctionDef SVC_Table_64[] = { 2139static const FunctionDef SVC_Table_64[] = {
2511 {0x00, nullptr, "Unknown"}, 2140 {0x00, nullptr, "Unknown"},
2512 {0x01, SvcWrap64<SetHeapSize>, "SetHeapSize"}, 2141 {0x01, SvcWrap64<SetHeapSize>, "SetHeapSize"},
2513 {0x02, SvcWrap64<SetMemoryPermission>, "SetMemoryPermission"}, 2142 {0x02, nullptr, "SetMemoryPermission"},
2514 {0x03, SvcWrap64<SetMemoryAttribute>, "SetMemoryAttribute"}, 2143 {0x03, SvcWrap64<SetMemoryAttribute>, "SetMemoryAttribute"},
2515 {0x04, SvcWrap64<MapMemory>, "MapMemory"}, 2144 {0x04, SvcWrap64<MapMemory>, "MapMemory"},
2516 {0x05, SvcWrap64<UnmapMemory>, "UnmapMemory"}, 2145 {0x05, SvcWrap64<UnmapMemory>, "UnmapMemory"},
@@ -2528,7 +2157,7 @@ static const FunctionDef SVC_Table_64[] = {
2528 {0x11, SvcWrap64<SignalEvent>, "SignalEvent"}, 2157 {0x11, SvcWrap64<SignalEvent>, "SignalEvent"},
2529 {0x12, SvcWrap64<ClearEvent>, "ClearEvent"}, 2158 {0x12, SvcWrap64<ClearEvent>, "ClearEvent"},
2530 {0x13, SvcWrap64<MapSharedMemory>, "MapSharedMemory"}, 2159 {0x13, SvcWrap64<MapSharedMemory>, "MapSharedMemory"},
2531 {0x14, SvcWrap64<UnmapSharedMemory>, "UnmapSharedMemory"}, 2160 {0x14, nullptr, "UnmapSharedMemory"},
2532 {0x15, SvcWrap64<CreateTransferMemory>, "CreateTransferMemory"}, 2161 {0x15, SvcWrap64<CreateTransferMemory>, "CreateTransferMemory"},
2533 {0x16, SvcWrap64<CloseHandle>, "CloseHandle"}, 2162 {0x16, SvcWrap64<CloseHandle>, "CloseHandle"},
2534 {0x17, SvcWrap64<ResetSignal>, "ResetSignal"}, 2163 {0x17, SvcWrap64<ResetSignal>, "ResetSignal"},
@@ -2588,9 +2217,9 @@ static const FunctionDef SVC_Table_64[] = {
2588 {0x4D, nullptr, "SleepSystem"}, 2217 {0x4D, nullptr, "SleepSystem"},
2589 {0x4E, nullptr, "ReadWriteRegister"}, 2218 {0x4E, nullptr, "ReadWriteRegister"},
2590 {0x4F, nullptr, "SetProcessActivity"}, 2219 {0x4F, nullptr, "SetProcessActivity"},
2591 {0x50, SvcWrap64<CreateSharedMemory>, "CreateSharedMemory"}, 2220 {0x50, nullptr, "CreateSharedMemory"},
2592 {0x51, SvcWrap64<MapTransferMemory>, "MapTransferMemory"}, 2221 {0x51, nullptr, "MapTransferMemory"},
2593 {0x52, SvcWrap64<UnmapTransferMemory>, "UnmapTransferMemory"}, 2222 {0x52, nullptr, "UnmapTransferMemory"},
2594 {0x53, nullptr, "CreateInterruptEvent"}, 2223 {0x53, nullptr, "CreateInterruptEvent"},
2595 {0x54, nullptr, "QueryPhysicalAddress"}, 2224 {0x54, nullptr, "QueryPhysicalAddress"},
2596 {0x55, nullptr, "QueryIoMapping"}, 2225 {0x55, nullptr, "QueryIoMapping"},
@@ -2627,8 +2256,8 @@ static const FunctionDef SVC_Table_64[] = {
2627 {0x74, nullptr, "MapProcessMemory"}, 2256 {0x74, nullptr, "MapProcessMemory"},
2628 {0x75, nullptr, "UnmapProcessMemory"}, 2257 {0x75, nullptr, "UnmapProcessMemory"},
2629 {0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"}, 2258 {0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"},
2630 {0x77, SvcWrap64<MapProcessCodeMemory>, "MapProcessCodeMemory"}, 2259 {0x77, nullptr, "MapProcessCodeMemory"},
2631 {0x78, SvcWrap64<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"}, 2260 {0x78, nullptr, "UnmapProcessCodeMemory"},
2632 {0x79, nullptr, "CreateProcess"}, 2261 {0x79, nullptr, "CreateProcess"},
2633 {0x7A, nullptr, "StartProcess"}, 2262 {0x7A, nullptr, "StartProcess"},
2634 {0x7B, nullptr, "TerminateProcess"}, 2263 {0x7B, nullptr, "TerminateProcess"},
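Nulling a FunctionDef's handler keeps the SVC's name for logging while removing its implementation, so calls to 0x50-0x52 and 0x77/0x78 now take the dispatcher's unimplemented path instead of reaching stale VMManager code. A reduced sketch of that dispatch behavior (logging simplified; the real code reports through the LOG_* macros):

    #include <cstdint>
    #include <cstdio>

    struct System {}; // stand-in for Core::System

    using SvcFunc = void (*)(System&);

    struct FunctionDef {
        std::uint32_t id;
        SvcFunc func;
        const char* name;
    };

    void Dispatch(System& system, const FunctionDef* info) {
        if (info == nullptr) {
            std::puts("unknown SVC");
        } else if (info->func == nullptr) {
            // Named but nulled entries (e.g. 0x50 "CreateSharedMemory") keep
            // their name for logging while the old handler is gone.
            std::printf("unimplemented SVC: %s\n", info->name);
        } else {
            info->func(system);
        }
    }

    int main() {
        System sys;
        const FunctionDef entry{0x50, nullptr, "CreateSharedMemory"};
        Dispatch(sys, &entry);
    }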
@@ -2656,7 +2285,7 @@ static const FunctionDef* GetSVCInfo64(u32 func_num) {
2656 2285
2657MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70)); 2286MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
2658 2287
2659void CallSVC(Core::System& system, u32 immediate) { 2288void Call(Core::System& system, u32 immediate) {
2660 MICROPROFILE_SCOPE(Kernel_SVC); 2289 MICROPROFILE_SCOPE(Kernel_SVC);
2661 2290
2662 // Lock the global kernel mutex when we enter the kernel HLE. 2291 // Lock the global kernel mutex when we enter the kernel HLE.
@@ -2675,4 +2304,4 @@ void CallSVC(Core::System& system, u32 immediate) {
2675 } 2304 }
2676} 2305}
2677 2306
2678} // namespace Kernel 2307} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc.h b/src/core/hle/kernel/svc.h
index c5539ac1c..46e64277e 100644
--- a/src/core/hle/kernel/svc.h
+++ b/src/core/hle/kernel/svc.h
@@ -10,8 +10,8 @@ namespace Core {
10class System; 10class System;
11} 11}
12 12
13namespace Kernel { 13namespace Kernel::Svc {
14 14
15void CallSVC(Core::System& system, u32 immediate); 15void Call(Core::System& system, u32 immediate);
16 16
17} // namespace Kernel 17} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
new file mode 100644
index 000000000..986724beb
--- /dev/null
+++ b/src/core/hle/kernel/svc_types.h
@@ -0,0 +1,68 @@
1// Copyright 2020 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_funcs.h"
8#include "common/common_types.h"
9
10namespace Kernel::Svc {
11
12enum class MemoryState : u32 {
13 Free = 0x00,
14 Io = 0x01,
15 Static = 0x02,
16 Code = 0x03,
17 CodeData = 0x04,
18 Normal = 0x05,
19 Shared = 0x06,
20 Alias = 0x07,
21 AliasCode = 0x08,
22 AliasCodeData = 0x09,
23 Ipc = 0x0A,
24 Stack = 0x0B,
25 ThreadLocal = 0x0C,
26 Transfered = 0x0D,
27 SharedTransfered = 0x0E,
28 SharedCode = 0x0F,
29 Inaccessible = 0x10,
30 NonSecureIpc = 0x11,
31 NonDeviceIpc = 0x12,
32 Kernel = 0x13,
33 GeneratedCode = 0x14,
34 CodeOut = 0x15,
35};
36DECLARE_ENUM_FLAG_OPERATORS(MemoryState);
37
38enum class MemoryAttribute : u32 {
39 Locked = (1 << 0),
40 IpcLocked = (1 << 1),
41 DeviceShared = (1 << 2),
42 Uncached = (1 << 3),
43};
44DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);
45
46enum class MemoryPermission : u32 {
47 None = (0 << 0),
48 Read = (1 << 0),
49 Write = (1 << 1),
50 Execute = (1 << 2),
51 ReadWrite = Read | Write,
52 ReadExecute = Read | Execute,
53 DontCare = (1 << 28),
54};
55DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission);
56
57struct MemoryInfo {
58 u64 addr{};
59 u64 size{};
60 MemoryState state{};
61 MemoryAttribute attr{};
62 MemoryPermission perm{};
63 u32 ipc_refcount{};
64 u32 device_refcount{};
65 u32 padding{};
66};
67
68} // namespace Kernel::Svc
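These enums rely on DECLARE_ENUM_FLAG_OPERATORS, added to common/common_funcs.h in this change, for their bitwise operators. A self-contained approximation of what the macro is assumed to generate, written out by hand for MemoryAttribute:

    #include <cstdint>
    #include <type_traits>

    enum class MemoryAttribute : std::uint32_t {
        Locked = (1 << 0),
        IpcLocked = (1 << 1),
        DeviceShared = (1 << 2),
        Uncached = (1 << 3),
    };

    constexpr MemoryAttribute operator|(MemoryAttribute a, MemoryAttribute b) {
        using U = std::underlying_type_t<MemoryAttribute>;
        return static_cast<MemoryAttribute>(static_cast<U>(a) | static_cast<U>(b));
    }

    constexpr MemoryAttribute operator&(MemoryAttribute a, MemoryAttribute b) {
        using U = std::underlying_type_t<MemoryAttribute>;
        return static_cast<MemoryAttribute>(static_cast<U>(a) & static_cast<U>(b));
    }

    // Scoped enums have no implicit bool conversion, so tests spell it out.
    constexpr bool Any(MemoryAttribute a) {
        return static_cast<std::uint32_t>(a) != 0;
    }

    static_assert(Any((MemoryAttribute::Locked | MemoryAttribute::Uncached) &
                      MemoryAttribute::Locked));
    static_assert(!Any(MemoryAttribute::IpcLocked & MemoryAttribute::Locked));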
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 83e956036..4c0451c01 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -85,6 +85,7 @@ void Thread::ResumeFromWait() {
85 ASSERT_MSG(wait_objects.empty(), "Thread is waking up while waiting for objects"); 85 ASSERT_MSG(wait_objects.empty(), "Thread is waking up while waiting for objects");
86 86
87 switch (status) { 87 switch (status) {
88 case ThreadStatus::Paused:
88 case ThreadStatus::WaitSynch: 89 case ThreadStatus::WaitSynch:
89 case ThreadStatus::WaitHLEEvent: 90 case ThreadStatus::WaitHLEEvent:
90 case ThreadStatus::WaitSleep: 91 case ThreadStatus::WaitSleep:
@@ -92,6 +93,7 @@ void Thread::ResumeFromWait() {
92 case ThreadStatus::WaitMutex: 93 case ThreadStatus::WaitMutex:
93 case ThreadStatus::WaitCondVar: 94 case ThreadStatus::WaitCondVar:
94 case ThreadStatus::WaitArb: 95 case ThreadStatus::WaitArb:
96 case ThreadStatus::Dormant:
95 break; 97 break;
96 98
97 case ThreadStatus::Ready: 99 case ThreadStatus::Ready:
diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp
index f2d3f8b49..765f408c3 100644
--- a/src/core/hle/kernel/transfer_memory.cpp
+++ b/src/core/hle/kernel/transfer_memory.cpp
@@ -2,17 +2,16 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "core/hle/kernel/errors.h"
6#include "core/hle/kernel/kernel.h" 5#include "core/hle/kernel/kernel.h"
6#include "core/hle/kernel/memory/page_table.h"
7#include "core/hle/kernel/process.h" 7#include "core/hle/kernel/process.h"
8#include "core/hle/kernel/shared_memory.h"
9#include "core/hle/kernel/transfer_memory.h" 8#include "core/hle/kernel/transfer_memory.h"
10#include "core/hle/result.h" 9#include "core/hle/result.h"
11#include "core/memory.h" 10#include "core/memory.h"
12 11
13namespace Kernel { 12namespace Kernel {
14 13
15TransferMemory::TransferMemory(KernelCore& kernel, Memory::Memory& memory) 14TransferMemory::TransferMemory(KernelCore& kernel, Core::Memory::Memory& memory)
16 : Object{kernel}, memory{memory} {} 15 : Object{kernel}, memory{memory} {}
17 16
18TransferMemory::~TransferMemory() { 17TransferMemory::~TransferMemory() {
@@ -20,14 +19,15 @@ TransferMemory::~TransferMemory() {
20 Reset(); 19 Reset();
21} 20}
22 21
23std::shared_ptr<TransferMemory> TransferMemory::Create(KernelCore& kernel, Memory::Memory& memory, 22std::shared_ptr<TransferMemory> TransferMemory::Create(KernelCore& kernel,
24 VAddr base_address, u64 size, 23 Core::Memory::Memory& memory,
25 MemoryPermission permissions) { 24 VAddr base_address, std::size_t size,
25 Memory::MemoryPermission permissions) {
26 std::shared_ptr<TransferMemory> transfer_memory{ 26 std::shared_ptr<TransferMemory> transfer_memory{
27 std::make_shared<TransferMemory>(kernel, memory)}; 27 std::make_shared<TransferMemory>(kernel, memory)};
28 28
29 transfer_memory->base_address = base_address; 29 transfer_memory->base_address = base_address;
30 transfer_memory->memory_size = size; 30 transfer_memory->size = size;
31 transfer_memory->owner_permissions = permissions; 31 transfer_memory->owner_permissions = permissions;
32 transfer_memory->owner_process = kernel.CurrentProcess(); 32 transfer_memory->owner_process = kernel.CurrentProcess();
33 33
@@ -38,98 +38,12 @@ const u8* TransferMemory::GetPointer() const {
38 return memory.GetPointer(base_address); 38 return memory.GetPointer(base_address);
39} 39}
40 40
41u64 TransferMemory::GetSize() const {
42 return memory_size;
43}
44
45ResultCode TransferMemory::MapMemory(VAddr address, u64 size, MemoryPermission permissions) {
46 if (memory_size != size) {
47 return ERR_INVALID_SIZE;
48 }
49
50 if (owner_permissions != permissions) {
51 return ERR_INVALID_STATE;
52 }
53
54 if (is_mapped) {
55 return ERR_INVALID_STATE;
56 }
57
58 backing_block = std::make_shared<PhysicalMemory>(size);
59
60 const auto map_state = owner_permissions == MemoryPermission::None
61 ? MemoryState::TransferMemoryIsolated
62 : MemoryState::TransferMemory;
63 auto& vm_manager = owner_process->VMManager();
64 const auto map_result = vm_manager.MapMemoryBlock(address, backing_block, 0, size, map_state);
65 if (map_result.Failed()) {
66 return map_result.Code();
67 }
68
69 is_mapped = true;
70 return RESULT_SUCCESS;
71}
72
73ResultCode TransferMemory::Reserve() { 41ResultCode TransferMemory::Reserve() {
74 auto& vm_manager{owner_process->VMManager()}; 42 return owner_process->PageTable().ReserveTransferMemory(base_address, size, owner_permissions);
75 const auto check_range_result{vm_manager.CheckRangeState(
76 base_address, memory_size, MemoryState::FlagTransfer | MemoryState::FlagMemoryPoolAllocated,
77 MemoryState::FlagTransfer | MemoryState::FlagMemoryPoolAllocated, VMAPermission::All,
78 VMAPermission::ReadWrite, MemoryAttribute::Mask, MemoryAttribute::None,
79 MemoryAttribute::IpcAndDeviceMapped)};
80
81 if (check_range_result.Failed()) {
82 return check_range_result.Code();
83 }
84
85 auto [state_, permissions_, attribute] = *check_range_result;
86
87 if (const auto result{vm_manager.ReprotectRange(
88 base_address, memory_size, SharedMemory::ConvertPermissions(owner_permissions))};
89 result.IsError()) {
90 return result;
91 }
92
93 return vm_manager.SetMemoryAttribute(base_address, memory_size, MemoryAttribute::Mask,
94 attribute | MemoryAttribute::Locked);
95} 43}
96 44
97ResultCode TransferMemory::Reset() { 45ResultCode TransferMemory::Reset() {
98 auto& vm_manager{owner_process->VMManager()}; 46 return owner_process->PageTable().ResetTransferMemory(base_address, size);
99 if (const auto result{vm_manager.CheckRangeState(
100 base_address, memory_size,
101 MemoryState::FlagTransfer | MemoryState::FlagMemoryPoolAllocated,
102 MemoryState::FlagTransfer | MemoryState::FlagMemoryPoolAllocated, VMAPermission::None,
103 VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::Locked,
104 MemoryAttribute::IpcAndDeviceMapped)};
105 result.Failed()) {
106 return result.Code();
107 }
108
109 if (const auto result{
110 vm_manager.ReprotectRange(base_address, memory_size, VMAPermission::ReadWrite)};
111 result.IsError()) {
112 return result;
113 }
114
115 return vm_manager.SetMemoryAttribute(base_address, memory_size, MemoryAttribute::Mask,
116 MemoryAttribute::None);
117}
118
119ResultCode TransferMemory::UnmapMemory(VAddr address, u64 size) {
120 if (memory_size != size) {
121 return ERR_INVALID_SIZE;
122 }
123
124 auto& vm_manager = owner_process->VMManager();
125 const auto result = vm_manager.UnmapRange(address, size);
126
127 if (result.IsError()) {
128 return result;
129 }
130
131 is_mapped = false;
132 return RESULT_SUCCESS;
133} 47}
134 48
135} // namespace Kernel 49} // namespace Kernel
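TransferMemory is reduced to bookkeeping; the actual state transitions live in the process page table. A hedged sketch of the resulting shape, with the PageTable methods stubbed out (the real ReserveTransferMemory/ResetTransferMemory live in the new Memory::PageTable):

    #include <cstddef>
    #include <cstdint>

    using VAddr = std::uint64_t;
    struct ResultCode { std::uint32_t raw; };
    constexpr ResultCode RESULT_SUCCESS{0};

    enum class MemoryPermission : std::uint32_t { None, Read, Write, ReadWrite };

    struct PageTable {
        ResultCode ReserveTransferMemory(VAddr, std::size_t, MemoryPermission) {
            return RESULT_SUCCESS; // real code locks the range, drops permissions
        }
        ResultCode ResetTransferMemory(VAddr, std::size_t) {
            return RESULT_SUCCESS; // real code unlocks and restores ReadWrite
        }
    };

    class TransferMemory {
    public:
        explicit TransferMemory(PageTable& pt, VAddr base, std::size_t sz,
                                MemoryPermission perms)
            : page_table{pt}, base_address{base}, size{sz}, owner_permissions{perms} {}

        ResultCode Reserve() {
            return page_table.ReserveTransferMemory(base_address, size,
                                                    owner_permissions);
        }
        ResultCode Reset() {
            return page_table.ResetTransferMemory(base_address, size);
        }

    private:
        PageTable& page_table;
        VAddr base_address{};
        std::size_t size{};
        MemoryPermission owner_permissions{};
    };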
diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h
index 6e388536a..05e9f7464 100644
--- a/src/core/hle/kernel/transfer_memory.h
+++ b/src/core/hle/kernel/transfer_memory.h
@@ -6,12 +6,13 @@
6 6
7#include <memory> 7#include <memory>
8 8
9#include "core/hle/kernel/memory/memory_block.h"
9#include "core/hle/kernel/object.h" 10#include "core/hle/kernel/object.h"
10#include "core/hle/kernel/physical_memory.h" 11#include "core/hle/kernel/physical_memory.h"
11 12
12union ResultCode; 13union ResultCode;
13 14
14namespace Memory { 15namespace Core::Memory {
15class Memory; 16class Memory;
16} 17}
17 18
@@ -20,8 +21,6 @@ namespace Kernel {
20class KernelCore; 21class KernelCore;
21class Process; 22class Process;
22 23
23enum class MemoryPermission : u32;
24
25/// Defines the interface for transfer memory objects. 24/// Defines the interface for transfer memory objects.
26/// 25///
27/// Transfer memory is typically used for the purpose of 26/// Transfer memory is typically used for the purpose of
@@ -30,14 +29,14 @@ enum class MemoryPermission : u32;
30/// 29///
31class TransferMemory final : public Object { 30class TransferMemory final : public Object {
32public: 31public:
33 explicit TransferMemory(KernelCore& kernel, Memory::Memory& memory); 32 explicit TransferMemory(KernelCore& kernel, Core::Memory::Memory& memory);
34 ~TransferMemory() override; 33 ~TransferMemory() override;
35 34
36 static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory; 35 static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory;
37 36
38 static std::shared_ptr<TransferMemory> Create(KernelCore& kernel, Memory::Memory& memory, 37 static std::shared_ptr<TransferMemory> Create(KernelCore& kernel, Core::Memory::Memory& memory,
39 VAddr base_address, u64 size, 38 VAddr base_address, std::size_t size,
40 MemoryPermission permissions); 39 Memory::MemoryPermission permissions);
41 40
42 TransferMemory(const TransferMemory&) = delete; 41 TransferMemory(const TransferMemory&) = delete;
43 TransferMemory& operator=(const TransferMemory&) = delete; 42 TransferMemory& operator=(const TransferMemory&) = delete;
@@ -61,29 +60,9 @@ public:
61 const u8* GetPointer() const; 60 const u8* GetPointer() const;
62 61
63 /// Gets the size of the memory backing this instance in bytes. 62 /// Gets the size of the memory backing this instance in bytes.
64 u64 GetSize() const; 63 constexpr std::size_t GetSize() const {
65 64 return size;
66 /// Attempts to map transfer memory with the given range and memory permissions. 65 }
67 ///
68 /// @param address The base address to being mapping memory at.
69 /// @param size The size of the memory to map, in bytes.
70 /// @param permissions The memory permissions to check against when mapping memory.
71 ///
72 /// @pre The given address, size, and memory permissions must all match
73 /// the same values that were given when creating the transfer memory
74 /// instance.
75 ///
76 ResultCode MapMemory(VAddr address, u64 size, MemoryPermission permissions);
77
78 /// Unmaps the transfer memory with the given range
79 ///
80 /// @param address The base address to begin unmapping memory at.
81 /// @param size The size of the memory to unmap, in bytes.
82 ///
83 /// @pre The given address and size must be the same as the ones used
84 /// to create the transfer memory instance.
85 ///
86 ResultCode UnmapMemory(VAddr address, u64 size);
87 66
88 /// Reserves the region to be used for the transfer memory, called after the transfer memory is 67 /// Reserves the region to be used for the transfer memory, called after the transfer memory is
89 /// created. 68 /// created.
@@ -94,25 +73,19 @@ public:
94 ResultCode Reset(); 73 ResultCode Reset();
95 74
96private: 75private:
97 /// Memory block backing this instance.
98 std::shared_ptr<PhysicalMemory> backing_block;
99
100 /// The base address for the memory managed by this instance. 76 /// The base address for the memory managed by this instance.
101 VAddr base_address = 0; 77 VAddr base_address{};
102 78
103 /// Size of the memory, in bytes, that this instance manages. 79 /// Size of the memory, in bytes, that this instance manages.
104 u64 memory_size = 0; 80 std::size_t size{};
105 81
106 /// The memory permissions that are applied to this instance. 82 /// The memory permissions that are applied to this instance.
107 MemoryPermission owner_permissions{}; 83 Memory::MemoryPermission owner_permissions{};
108 84
109 /// The process that this transfer memory instance was created under. 85 /// The process that this transfer memory instance was created under.
110 Process* owner_process = nullptr; 86 Process* owner_process{};
111
112 /// Whether or not this transfer memory instance has mapped memory.
113 bool is_mapped = false;
114 87
115 Memory::Memory& memory; 88 Core::Memory::Memory& memory;
116}; 89};
117 90
118} // namespace Kernel 91} // namespace Kernel
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
deleted file mode 100644
index 024c22901..000000000
--- a/src/core/hle/kernel/vm_manager.cpp
+++ /dev/null
@@ -1,1175 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <cstring>
7#include <iterator>
8#include <utility>
9#include "common/alignment.h"
10#include "common/assert.h"
11#include "common/logging/log.h"
12#include "common/memory_hook.h"
13#include "core/core.h"
14#include "core/file_sys/program_metadata.h"
15#include "core/hle/kernel/errors.h"
16#include "core/hle/kernel/process.h"
17#include "core/hle/kernel/resource_limit.h"
18#include "core/hle/kernel/vm_manager.h"
19#include "core/memory.h"
20
21namespace Kernel {
22namespace {
23const char* GetMemoryStateName(MemoryState state) {
24 static constexpr const char* names[] = {
25 "Unmapped", "Io",
26 "Normal", "Code",
27 "CodeData", "Heap",
28 "Shared", "Unknown1",
29 "ModuleCode", "ModuleCodeData",
30 "IpcBuffer0", "Stack",
31 "ThreadLocal", "TransferMemoryIsolated",
32 "TransferMemory", "ProcessMemory",
33 "Inaccessible", "IpcBuffer1",
34 "IpcBuffer3", "KernelStack",
35 };
36
37 return names[ToSvcMemoryState(state)];
38}
39
40// Checks if a given address range lies within a larger address range.
41constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin,
42 VAddr address_range_end) {
43 const VAddr end_address = address + size - 1;
44 return address_range_begin <= address && end_address <= address_range_end - 1;
45}
46} // Anonymous namespace
47
48bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
49 ASSERT(base + size == next.base);
50 if (permissions != next.permissions || state != next.state || attribute != next.attribute ||
51 type != next.type) {
52 return false;
53 }
54 if ((attribute & MemoryAttribute::DeviceMapped) == MemoryAttribute::DeviceMapped) {
55 // TODO: Can device mapped memory be merged sanely?
56 // Not merging it may cause inaccuracies versus hardware when memory layout is queried.
57 return false;
58 }
59 if (type == VMAType::AllocatedMemoryBlock) {
60 return true;
61 }
62 if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
63 return false;
64 }
65 if (type == VMAType::MMIO && paddr + size != next.paddr) {
66 return false;
67 }
68 return true;
69}
70
71VMManager::VMManager(Core::System& system) : system{system} {
72 // Default to assuming a 39-bit address space. This way we have a sane
73 // starting point with executables that don't provide metadata.
74 Reset(FileSys::ProgramAddressSpaceType::Is39Bit);
75}
76
77VMManager::~VMManager() = default;
78
79void VMManager::Reset(FileSys::ProgramAddressSpaceType type) {
80 Clear();
81
82 InitializeMemoryRegionRanges(type);
83
84 page_table.Resize(address_space_width);
85
86 // Initialize the map with a single free region covering the entire managed space.
87 VirtualMemoryArea initial_vma;
88 initial_vma.size = address_space_end;
89 vma_map.emplace(initial_vma.base, initial_vma);
90
91 UpdatePageTableForVMA(initial_vma);
92}
93
94VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
95 if (target >= address_space_end) {
96 return vma_map.end();
97 } else {
98 return std::prev(vma_map.upper_bound(target));
99 }
100}
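FindVMA uses the classic std::map interval-lookup idiom: keys are region base addresses, so the region containing target is the one with the greatest key not exceeding target, found via upper_bound followed by a step back. The idiom in isolation:

    #include <cassert>
    #include <cstdint>
    #include <map>

    using VAddr = std::uint64_t;

    struct Region { VAddr base; std::uint64_t size; };

    const Region* FindRegion(const std::map<VAddr, Region>& regions, VAddr target) {
        auto it = regions.upper_bound(target); // first region starting after target
        if (it == regions.begin()) {
            return nullptr; // target precedes every region
        }
        --it; // greatest base <= target
        return &it->second;
    }

    int main() {
        std::map<VAddr, Region> m{{0x0, {0x0, 0x1000}}, {0x1000, {0x1000, 0x2000}}};
        assert(FindRegion(m, 0x1800)->base == 0x1000);
    }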
101
102bool VMManager::IsValidHandle(VMAHandle handle) const {
103 return handle != vma_map.cend();
104}
105
106ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
107 std::shared_ptr<PhysicalMemory> block,
108 std::size_t offset, u64 size,
109 MemoryState state, VMAPermission perm) {
110 ASSERT(block != nullptr);
111 ASSERT(offset + size <= block->size());
112
113 // This is the appropriately sized VMA that will turn into our allocation.
114 CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
115 VirtualMemoryArea& final_vma = vma_handle->second;
116 ASSERT(final_vma.size == size);
117
118 final_vma.type = VMAType::AllocatedMemoryBlock;
119 final_vma.permissions = perm;
120 final_vma.state = state;
121 final_vma.backing_block = std::move(block);
122 final_vma.offset = offset;
123 UpdatePageTableForVMA(final_vma);
124
125 return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
126}
127
128ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u64 size,
129 MemoryState state) {
130 ASSERT(memory != nullptr);
131
132 // This is the appropriately sized VMA that will turn into our allocation.
133 CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
134 VirtualMemoryArea& final_vma = vma_handle->second;
135 ASSERT(final_vma.size == size);
136
137 final_vma.type = VMAType::BackingMemory;
138 final_vma.permissions = VMAPermission::ReadWrite;
139 final_vma.state = state;
140 final_vma.backing_memory = memory;
141 UpdatePageTableForVMA(final_vma);
142
143 return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
144}
145
146ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
147 return FindFreeRegion(GetASLRRegionBaseAddress(), GetASLRRegionEndAddress(), size);
148}
149
150ResultVal<VAddr> VMManager::FindFreeRegion(VAddr begin, VAddr end, u64 size) const {
151 ASSERT(begin < end);
152 ASSERT(size <= end - begin);
153
154 const VMAHandle vma_handle =
155 std::find_if(vma_map.begin(), vma_map.end(), [begin, end, size](const auto& vma) {
156 if (vma.second.type != VMAType::Free) {
157 return false;
158 }
159 const VAddr vma_base = vma.second.base;
160 const VAddr vma_end = vma_base + vma.second.size;
161 const VAddr assumed_base = (begin < vma_base) ? vma_base : begin;
162 const VAddr used_range = assumed_base + size;
163
164 return vma_base <= assumed_base && assumed_base < used_range && used_range < end &&
165 used_range <= vma_end;
166 });
167
168 if (vma_handle == vma_map.cend()) {
169 // TODO(Subv): Find the correct error code here.
170 return RESULT_UNKNOWN;
171 }
172
173 const VAddr target = std::max(begin, vma_handle->second.base);
174 return MakeResult<VAddr>(target);
175}
176
177ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size,
178 MemoryState state,
179 Common::MemoryHookPointer mmio_handler) {
180 // This is the appropriately sized VMA that will turn into our allocation.
181 CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
182 VirtualMemoryArea& final_vma = vma_handle->second;
183 ASSERT(final_vma.size == size);
184
185 final_vma.type = VMAType::MMIO;
186 final_vma.permissions = VMAPermission::ReadWrite;
187 final_vma.state = state;
188 final_vma.paddr = paddr;
189 final_vma.mmio_handler = std::move(mmio_handler);
190 UpdatePageTableForVMA(final_vma);
191
192 return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
193}
194
195VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
196 VirtualMemoryArea& vma = vma_handle->second;
197 vma.type = VMAType::Free;
198 vma.permissions = VMAPermission::None;
199 vma.state = MemoryState::Unmapped;
200 vma.attribute = MemoryAttribute::None;
201
202 vma.backing_block = nullptr;
203 vma.offset = 0;
204 vma.backing_memory = nullptr;
205 vma.paddr = 0;
206
207 UpdatePageTableForVMA(vma);
208
209 return MergeAdjacent(vma_handle);
210}
211
212ResultCode VMManager::UnmapRange(VAddr target, u64 size) {
213 CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
214 const VAddr target_end = target + size;
215
216 const VMAIter end = vma_map.end();
217 // The comparison against the end of the range must be done using addresses since VMAs can be
218 // merged during this process, causing invalidation of the iterators.
219 while (vma != end && vma->second.base < target_end) {
220 vma = std::next(Unmap(vma));
221 }
222
223 ASSERT(FindVMA(target)->second.size >= size);
224
225 return RESULT_SUCCESS;
226}
227
228VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
229 VMAIter iter = StripIterConstness(vma_handle);
230
231 VirtualMemoryArea& vma = iter->second;
232 vma.permissions = new_perms;
233 UpdatePageTableForVMA(vma);
234
235 return MergeAdjacent(iter);
236}
237
238ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_perms) {
239 CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
240 const VAddr target_end = target + size;
241
242 const VMAIter end = vma_map.end();
243 // The comparison against the end of the range must be done using addresses since VMAs can be
244 // merged during this process, causing invalidation of the iterators.
245 while (vma != end && vma->second.base < target_end) {
246 vma = std::next(StripIterConstness(Reprotect(vma, new_perms)));
247 }
248
249 return RESULT_SUCCESS;
250}
251
252ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
253 if (size > GetHeapRegionSize()) {
254 return ERR_OUT_OF_MEMORY;
255 }
256
257 // No need to do any additional work if the heap is already the given size.
258 if (size == GetCurrentHeapSize()) {
259 return MakeResult(heap_region_base);
260 }
261
262 if (heap_memory == nullptr) {
263 // Initialize heap
264 heap_memory = std::make_shared<PhysicalMemory>(size);
265 heap_end = heap_region_base + size;
266 } else {
267 UnmapRange(heap_region_base, GetCurrentHeapSize());
268 }
269
270 // If necessary, expand backing vector to cover new heap extents in
271 // the case of allocating. Otherwise, shrink the backing memory,
272 // if a smaller heap has been requested.
273 heap_memory->resize(size);
274 heap_memory->shrink_to_fit();
275 RefreshMemoryBlockMappings(heap_memory.get());
276
277 heap_end = heap_region_base + size;
278 ASSERT(GetCurrentHeapSize() == heap_memory->size());
279
280 const auto mapping_result =
281 MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap);
282 if (mapping_result.Failed()) {
283 return mapping_result.Code();
284 }
285
286 return MakeResult<VAddr>(heap_region_base);
287}
288
289ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
290 // Check how much memory we've already mapped.
291 const auto mapped_size_result = SizeOfAllocatedVMAsInRange(target, size);
292 if (mapped_size_result.Failed()) {
293 return mapped_size_result.Code();
294 }
295
296 // If we've already mapped the desired amount, return early.
297 const std::size_t mapped_size = *mapped_size_result;
298 if (mapped_size == size) {
299 return RESULT_SUCCESS;
300 }
301
302 // Check that we can map the memory we want.
303 const auto res_limit = system.CurrentProcess()->GetResourceLimit();
304 const u64 physmem_remaining = res_limit->GetMaxResourceValue(ResourceType::PhysicalMemory) -
305 res_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory);
306 if (physmem_remaining < (size - mapped_size)) {
307 return ERR_RESOURCE_LIMIT_EXCEEDED;
308 }
309
310 // Keep track of the memory regions we unmap.
311 std::vector<std::pair<u64, u64>> mapped_regions;
312 ResultCode result = RESULT_SUCCESS;
313
314 // Iterate, trying to map memory.
315 {
316 const auto end_addr = target + size;
317 const auto last_addr = end_addr - 1;
318 VAddr cur_addr = target;
319
320 auto iter = FindVMA(target);
321 ASSERT(iter != vma_map.end());
322
323 while (true) {
324 const auto& vma = iter->second;
325 const auto vma_start = vma.base;
326 const auto vma_end = vma_start + vma.size;
327 const auto vma_last = vma_end - 1;
328
329 // Map the memory block
330 const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
331 if (vma.state == MemoryState::Unmapped) {
332 const auto map_res =
333 MapMemoryBlock(cur_addr, std::make_shared<PhysicalMemory>(map_size), 0,
334 map_size, MemoryState::Heap, VMAPermission::ReadWrite);
335 result = map_res.Code();
336 if (result.IsError()) {
337 break;
338 }
339
340 mapped_regions.emplace_back(cur_addr, map_size);
341 }
342
343 // Break once we hit the end of the range.
344 if (last_addr <= vma_last) {
345 break;
346 }
347
348 // Advance to the next block.
349 cur_addr = vma_end;
350 iter = FindVMA(cur_addr);
351 ASSERT(iter != vma_map.end());
352 }
353 }
354
355 // If we failed, unmap memory.
356 if (result.IsError()) {
357 for (const auto [unmap_address, unmap_size] : mapped_regions) {
358 ASSERT_MSG(UnmapRange(unmap_address, unmap_size).IsSuccess(),
359 "Failed to unmap memory range.");
360 }
361
362 return result;
363 }
364
365 // Update amount of mapped physical memory.
366 physical_memory_mapped += size - mapped_size;
367
368 return RESULT_SUCCESS;
369}
370
371ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
372 // Check how much memory is currently mapped.
373 const auto mapped_size_result = SizeOfUnmappablePhysicalMemoryInRange(target, size);
374 if (mapped_size_result.Failed()) {
375 return mapped_size_result.Code();
376 }
377
378 // If we've already unmapped all the memory, return early.
379 const std::size_t mapped_size = *mapped_size_result;
380 if (mapped_size == 0) {
381 return RESULT_SUCCESS;
382 }
383
384 // Keep track of the memory regions we unmap.
385 std::vector<std::pair<u64, u64>> unmapped_regions;
386 ResultCode result = RESULT_SUCCESS;
387
388 // Try to unmap regions.
389 {
390 const auto end_addr = target + size;
391 const auto last_addr = end_addr - 1;
392 VAddr cur_addr = target;
393
394 auto iter = FindVMA(target);
395 ASSERT(iter != vma_map.end());
396
397 while (true) {
398 const auto& vma = iter->second;
399 const auto vma_start = vma.base;
400 const auto vma_end = vma_start + vma.size;
401 const auto vma_last = vma_end - 1;
402
403 // Unmap the memory block
404 const auto unmap_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
405 if (vma.state == MemoryState::Heap) {
406 result = UnmapRange(cur_addr, unmap_size);
407 if (result.IsError()) {
408 break;
409 }
410
411 unmapped_regions.emplace_back(cur_addr, unmap_size);
412 }
413
414 // Break once we hit the end of the range.
415 if (last_addr <= vma_last) {
416 break;
417 }
418
419 // Advance to the next block.
420 cur_addr = vma_end;
421 iter = FindVMA(cur_addr);
422 ASSERT(iter != vma_map.end());
423 }
424 }
425
426 // If we failed, re-map regions.
427 // TODO: Preserve memory contents?
428 if (result.IsError()) {
429 for (const auto [map_address, map_size] : unmapped_regions) {
430 const auto remap_res =
431 MapMemoryBlock(map_address, std::make_shared<PhysicalMemory>(map_size), 0, map_size,
432 MemoryState::Heap, VMAPermission::None);
433 ASSERT_MSG(remap_res.Succeeded(), "Failed to remap a memory block.");
434 }
435
436 return result;
437 }
438
439 // Update mapped amount
440 physical_memory_mapped -= mapped_size;
441
442 return RESULT_SUCCESS;
443}
444
445ResultCode VMManager::MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) {
446 constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped;
447 const auto src_check_result = CheckRangeState(
448 src_address, size, MemoryState::All, MemoryState::Heap, VMAPermission::All,
449 VMAPermission::ReadWrite, MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute);
450
451 if (src_check_result.Failed()) {
452 return src_check_result.Code();
453 }
454
455 const auto mirror_result =
456 MirrorMemory(dst_address, src_address, size, MemoryState::ModuleCode);
457 if (mirror_result.IsError()) {
458 return mirror_result;
459 }
460
461 // Ensure we lock the source memory region.
462 const auto src_vma_result = CarveVMARange(src_address, size);
463 if (src_vma_result.Failed()) {
464 return src_vma_result.Code();
465 }
466 auto src_vma_iter = *src_vma_result;
467 src_vma_iter->second.attribute = MemoryAttribute::Locked;
468 Reprotect(src_vma_iter, VMAPermission::Read);
469
470 // The destination memory region is fine as is, however we need to make it read-only.
471 return ReprotectRange(dst_address, size, VMAPermission::Read);
472}
473
474ResultCode VMManager::UnmapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) {
475 constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped;
476 const auto src_check_result = CheckRangeState(
477 src_address, size, MemoryState::All, MemoryState::Heap, VMAPermission::None,
478 VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::Locked, ignore_attribute);
479
480 if (src_check_result.Failed()) {
481 return src_check_result.Code();
482 }
483
484 // Yes, the kernel only checks the first page of the region.
485 const auto dst_check_result =
486 CheckRangeState(dst_address, Memory::PAGE_SIZE, MemoryState::FlagModule,
487 MemoryState::FlagModule, VMAPermission::None, VMAPermission::None,
488 MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute);
489
490 if (dst_check_result.Failed()) {
491 return dst_check_result.Code();
492 }
493
494 const auto dst_memory_state = std::get<MemoryState>(*dst_check_result);
495 const auto dst_contiguous_check_result = CheckRangeState(
496 dst_address, size, MemoryState::All, dst_memory_state, VMAPermission::None,
497 VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute);
498
499 if (dst_contiguous_check_result.Failed()) {
500 return dst_contiguous_check_result.Code();
501 }
502
503 const auto unmap_result = UnmapRange(dst_address, size);
504 if (unmap_result.IsError()) {
505 return unmap_result;
506 }
507
508 // With the mirrored portion unmapped, restore the original region's traits.
509 const auto src_vma_result = CarveVMARange(src_address, size);
510 if (src_vma_result.Failed()) {
511 return src_vma_result.Code();
512 }
513 auto src_vma_iter = *src_vma_result;
514 src_vma_iter->second.state = MemoryState::Heap;
515 src_vma_iter->second.attribute = MemoryAttribute::None;
516 Reprotect(src_vma_iter, VMAPermission::ReadWrite);
517
518 if (dst_memory_state == MemoryState::ModuleCode) {
519 system.InvalidateCpuInstructionCaches();
520 }
521
522 return unmap_result;
523}
524
525MemoryInfo VMManager::QueryMemory(VAddr address) const {
526 const auto vma = FindVMA(address);
527 MemoryInfo memory_info{};
528
529 if (IsValidHandle(vma)) {
530 memory_info.base_address = vma->second.base;
531 memory_info.attributes = ToSvcMemoryAttribute(vma->second.attribute);
532 memory_info.permission = static_cast<u32>(vma->second.permissions);
533 memory_info.size = vma->second.size;
534 memory_info.state = ToSvcMemoryState(vma->second.state);
535 } else {
536 memory_info.base_address = address_space_end;
537 memory_info.permission = static_cast<u32>(VMAPermission::None);
538 memory_info.size = 0 - address_space_end;
539 memory_info.state = static_cast<u32>(MemoryState::Inaccessible);
540 }
541
542 return memory_info;
543}
544
545ResultCode VMManager::SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask,
546 MemoryAttribute attribute) {
547 constexpr auto ignore_mask =
548 MemoryAttribute::Uncached | MemoryAttribute::DeviceMapped | MemoryAttribute::Locked;
549 constexpr auto attribute_mask = ~ignore_mask;
550
551 const auto result = CheckRangeState(
552 address, size, MemoryState::FlagUncached, MemoryState::FlagUncached, VMAPermission::None,
553 VMAPermission::None, attribute_mask, MemoryAttribute::None, ignore_mask);
554
555 if (result.Failed()) {
556 return result.Code();
557 }
558
559 const auto [prev_state, prev_permissions, prev_attributes] = *result;
560 const auto new_attribute = (prev_attributes & ~mask) | (mask & attribute);
561
562 const auto carve_result = CarveVMARange(address, size);
563 if (carve_result.Failed()) {
564 return carve_result.Code();
565 }
566
567 auto vma_iter = *carve_result;
568 vma_iter->second.attribute = new_attribute;
569
570 MergeAdjacent(vma_iter);
571 return RESULT_SUCCESS;
572}
573
574ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state) {
575 const auto vma = FindVMA(src_addr);
576
577 ASSERT_MSG(vma != vma_map.end(), "Invalid memory address");
578 ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address");
579
580 // The returned VMA might be a bigger one encompassing the desired address.
581 const auto vma_offset = src_addr - vma->first;
582 ASSERT_MSG(vma_offset + size <= vma->second.size,
583 "Shared memory exceeds bounds of mapped block");
584
585 const std::shared_ptr<PhysicalMemory>& backing_block = vma->second.backing_block;
586 const std::size_t backing_block_offset = vma->second.offset + vma_offset;
587
588 CASCADE_RESULT(auto new_vma,
589 MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size, state));
590 // Protect mirror with permissions from old region
591 Reprotect(new_vma, vma->second.permissions);
592 // Remove permissions from old region
593 ReprotectRange(src_addr, size, VMAPermission::None);
594
595 return RESULT_SUCCESS;
596}
597
598void VMManager::RefreshMemoryBlockMappings(const PhysicalMemory* block) {
599 // If this ever proves to have a noticeable performance impact, allow users of the function to
600 // specify a specific range of addresses to limit the scan to.
601 for (const auto& p : vma_map) {
602 const VirtualMemoryArea& vma = p.second;
603 if (block == vma.backing_block.get()) {
604 UpdatePageTableForVMA(vma);
605 }
606 }
607}
608
609void VMManager::LogLayout() const {
610 for (const auto& p : vma_map) {
611 const VirtualMemoryArea& vma = p.second;
612 LOG_DEBUG(Kernel, "{:016X} - {:016X} size: {:016X} {}{}{} {}", vma.base,
613 vma.base + vma.size, vma.size,
614 (u8)vma.permissions & (u8)VMAPermission::Read ? 'R' : '-',
615 (u8)vma.permissions & (u8)VMAPermission::Write ? 'W' : '-',
616 (u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-',
617 GetMemoryStateName(vma.state));
618 }
619}
620
621VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
622 // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
623 // non-const access to its container.
624 return vma_map.erase(iter, iter); // Erases an empty range of elements
625}
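The erase(iter, iter) trick relies on two guarantees: std::map::erase accepts const_iterators and returns a mutable iterator, and erasing an empty range removes nothing. Reduced to a runnable example:

    #include <cassert>
    #include <map>

    int main() {
        std::map<int, int> m{{1, 10}, {2, 20}};
        std::map<int, int>::const_iterator cit = m.find(2);
        std::map<int, int>::iterator it = m.erase(cit, cit); // empty range: no-op
        it->second = 42; // the iterator is now mutable
        assert(m.at(2) == 42);
    }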
626
627ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u64 size) {
628 ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size);
629 ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", base);
630
631 VMAIter vma_handle = StripIterConstness(FindVMA(base));
632 if (vma_handle == vma_map.end()) {
633 // Target address is outside the range managed by the kernel
634 return ERR_INVALID_ADDRESS;
635 }
636
637 const VirtualMemoryArea& vma = vma_handle->second;
638 if (vma.type != VMAType::Free) {
639 // Region is already allocated
640 return ERR_INVALID_ADDRESS_STATE;
641 }
642
643 const VAddr start_in_vma = base - vma.base;
644 const VAddr end_in_vma = start_in_vma + size;
645
646 if (end_in_vma > vma.size) {
647 // Requested allocation doesn't fit inside VMA
648 return ERR_INVALID_ADDRESS_STATE;
649 }
650
651 if (end_in_vma != vma.size) {
652 // Split VMA at the end of the allocated region
653 SplitVMA(vma_handle, end_in_vma);
654 }
655 if (start_in_vma != 0) {
656 // Split VMA at the start of the allocated region
657 vma_handle = SplitVMA(vma_handle, start_in_vma);
658 }
659
660 return MakeResult<VMAIter>(vma_handle);
661}
662
663ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u64 size) {
664 ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size);
665 ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", target);
666
667 const VAddr target_end = target + size;
668 ASSERT(target_end >= target);
669 ASSERT(target_end <= address_space_end);
670 ASSERT(size > 0);
671
672 VMAIter begin_vma = StripIterConstness(FindVMA(target));
673 const VMAIter i_end = vma_map.lower_bound(target_end);
674 if (std::any_of(begin_vma, i_end,
675 [](const auto& entry) { return entry.second.type == VMAType::Free; })) {
676 return ERR_INVALID_ADDRESS_STATE;
677 }
678
679 if (target != begin_vma->second.base) {
680 begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base);
681 }
682
683 VMAIter end_vma = StripIterConstness(FindVMA(target_end));
684 if (end_vma != vma_map.end() && target_end != end_vma->second.base) {
685 end_vma = SplitVMA(end_vma, target_end - end_vma->second.base);
686 }
687
688 return MakeResult<VMAIter>(begin_vma);
689}
690
691VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
692 VirtualMemoryArea& old_vma = vma_handle->second;
693 VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA
694
695 // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
696 // a bug. This restriction might be removed later.
697 ASSERT(offset_in_vma < old_vma.size);
698 ASSERT(offset_in_vma > 0);
699
700 old_vma.size = offset_in_vma;
701 new_vma.base += offset_in_vma;
702 new_vma.size -= offset_in_vma;
703
704 switch (new_vma.type) {
705 case VMAType::Free:
706 break;
707 case VMAType::AllocatedMemoryBlock:
708 new_vma.offset += offset_in_vma;
709 break;
710 case VMAType::BackingMemory:
711 new_vma.backing_memory += offset_in_vma;
712 break;
713 case VMAType::MMIO:
714 new_vma.paddr += offset_in_vma;
715 break;
716 }
717
718 ASSERT(old_vma.CanBeMergedWith(new_vma));
719
720 return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
721}
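Splitting keeps the backing bookkeeping consistent: the right half's base, size, and backing offset all shift by the cut point. The core arithmetic in isolation:

    #include <cassert>
    #include <cstdint>

    struct Vma {
        std::uint64_t base;
        std::uint64_t size;
        std::uint64_t backing_offset;
    };

    // Returns the right half; the left half is modified in place.
    Vma Split(Vma& left, std::uint64_t offset_in_vma) {
        assert(offset_in_vma > 0 && offset_in_vma < left.size);
        Vma right = left;
        left.size = offset_in_vma;
        right.base += offset_in_vma;
        right.size -= offset_in_vma;
        right.backing_offset += offset_in_vma;
        return right;
    }

    int main() {
        Vma v{0x1000, 0x3000, 0};
        const Vma r = Split(v, 0x1000);
        assert(v.size == 0x1000 && r.base == 0x2000 && r.backing_offset == 0x1000);
    }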
722
723VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
724 const VMAIter next_vma = std::next(iter);
725 if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
726 MergeAdjacentVMA(iter->second, next_vma->second);
727 vma_map.erase(next_vma);
728 }
729
730 if (iter != vma_map.begin()) {
731 VMAIter prev_vma = std::prev(iter);
732 if (prev_vma->second.CanBeMergedWith(iter->second)) {
733 MergeAdjacentVMA(prev_vma->second, iter->second);
734 vma_map.erase(iter);
735 iter = prev_vma;
736 }
737 }
738
739 return iter;
740}
741
742void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right) {
743 ASSERT(left.CanBeMergedWith(right));
744
745 // Always merge allocated memory blocks, even when they don't share the same backing block.
746 if (left.type == VMAType::AllocatedMemoryBlock &&
747 (left.backing_block != right.backing_block || left.offset + left.size != right.offset)) {
748
749 // Check if we can save work.
750 if (left.offset == 0 && left.size == left.backing_block->size()) {
751 // Fast case: left is an entire backing block.
752 left.backing_block->resize(left.size + right.size);
753 std::memcpy(left.backing_block->data() + left.size,
754 right.backing_block->data() + right.offset, right.size);
755 } else {
756 // Slow case: make a new memory block for left and right.
757 auto new_memory = std::make_shared<PhysicalMemory>();
758 new_memory->resize(left.size + right.size);
759 std::memcpy(new_memory->data(), left.backing_block->data() + left.offset, left.size);
760 std::memcpy(new_memory->data() + left.size, right.backing_block->data() + right.offset,
761 right.size);
762
763 left.backing_block = std::move(new_memory);
764 left.offset = 0;
765 }
766
767 // Page table update is needed, because backing memory changed.
768 left.size += right.size;
769 UpdatePageTableForVMA(left);
770 } else {
771 // Just update the size.
772 left.size += right.size;
773 }
774}
775
776void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
777 auto& memory = system.Memory();
778
779 switch (vma.type) {
780 case VMAType::Free:
781 memory.UnmapRegion(page_table, vma.base, vma.size);
782 break;
783 case VMAType::AllocatedMemoryBlock:
784 memory.MapMemoryRegion(page_table, vma.base, vma.size, *vma.backing_block, vma.offset);
785 break;
786 case VMAType::BackingMemory:
787 memory.MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
788 break;
789 case VMAType::MMIO:
790 memory.MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler);
791 break;
792 }
793}
794
795void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type) {
796 u64 map_region_size = 0;
797 u64 heap_region_size = 0;
798 u64 stack_region_size = 0;
799 u64 tls_io_region_size = 0;
800
801 u64 stack_and_tls_io_end = 0;
802
803 switch (type) {
804 case FileSys::ProgramAddressSpaceType::Is32Bit:
805 case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
806 address_space_width = 32;
807 code_region_base = 0x200000;
808 code_region_end = code_region_base + 0x3FE00000;
809 aslr_region_base = 0x200000;
810 aslr_region_end = aslr_region_base + 0xFFE00000;
811 if (type == FileSys::ProgramAddressSpaceType::Is32Bit) {
812 map_region_size = 0x40000000;
813 heap_region_size = 0x40000000;
814 } else {
815 map_region_size = 0;
816 heap_region_size = 0x80000000;
817 }
818 stack_and_tls_io_end = 0x40000000;
819 break;
820 case FileSys::ProgramAddressSpaceType::Is36Bit:
821 address_space_width = 36;
822 code_region_base = 0x8000000;
823 code_region_end = code_region_base + 0x78000000;
824 aslr_region_base = 0x8000000;
825 aslr_region_end = aslr_region_base + 0xFF8000000;
826 map_region_size = 0x180000000;
827 heap_region_size = 0x180000000;
828 stack_and_tls_io_end = 0x80000000;
829 break;
830 case FileSys::ProgramAddressSpaceType::Is39Bit:
831 address_space_width = 39;
832 code_region_base = 0x8000000;
833 code_region_end = code_region_base + 0x80000000;
834 aslr_region_base = 0x8000000;
835 aslr_region_end = aslr_region_base + 0x7FF8000000;
836 map_region_size = 0x1000000000;
837 heap_region_size = 0x180000000;
838 stack_region_size = 0x80000000;
839 tls_io_region_size = 0x1000000000;
840 break;
841 default:
842 UNREACHABLE_MSG("Invalid address space type specified: {}", static_cast<u32>(type));
843 return;
844 }
845
846 const u64 stack_and_tls_io_begin = aslr_region_base;
847
848 address_space_base = 0;
849 address_space_end = 1ULL << address_space_width;
850
851 map_region_base = code_region_end;
852 map_region_end = map_region_base + map_region_size;
853
854 heap_region_base = map_region_end;
855 heap_region_end = heap_region_base + heap_region_size;
856 heap_end = heap_region_base;
857
858 stack_region_base = heap_region_end;
859 stack_region_end = stack_region_base + stack_region_size;
860
861 tls_io_region_base = stack_region_end;
862 tls_io_region_end = tls_io_region_base + tls_io_region_size;
863
864 if (stack_region_size == 0) {
865 stack_region_base = stack_and_tls_io_begin;
866 stack_region_end = stack_and_tls_io_end;
867 }
868
869 if (tls_io_region_size == 0) {
870 tls_io_region_base = stack_and_tls_io_begin;
871 tls_io_region_end = stack_and_tls_io_end;
872 }
873}
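For the 39-bit case the regions are packed back to back starting at the end of the code region; the derived bases work out as below (a worked check of the constants in this function):

    #include <cassert>
    #include <cstdint>

    int main() {
        const std::uint64_t code_region_base = 0x8000000;
        const std::uint64_t code_region_end = code_region_base + 0x80000000;
        const std::uint64_t map_region_base = code_region_end;
        const std::uint64_t map_region_end = map_region_base + 0x1000000000;
        const std::uint64_t heap_region_base = map_region_end;
        const std::uint64_t heap_region_end = heap_region_base + 0x180000000;
        const std::uint64_t stack_region_base = heap_region_end;
        const std::uint64_t address_space_end = 1ULL << 39;

        assert(map_region_base == 0x88000000);
        assert(heap_region_base == 0x1088000000);
        assert(stack_region_base == 0x1208000000);
        // stack + tls/io regions still fit inside the 39-bit space
        assert(stack_region_base + 0x80000000 + 0x1000000000 <= address_space_end);
    }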
874
875void VMManager::Clear() {
876 ClearVMAMap();
877 ClearPageTable();
878}
879
880void VMManager::ClearVMAMap() {
881 vma_map.clear();
882}
883
884void VMManager::ClearPageTable() {
885 std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
886 page_table.special_regions.clear();
887 std::fill(page_table.attributes.begin(), page_table.attributes.end(),
888 Common::PageType::Unmapped);
889}
890
891VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask,
892 MemoryState state, VMAPermission permission_mask,
893 VMAPermission permissions,
894 MemoryAttribute attribute_mask,
895 MemoryAttribute attribute,
896 MemoryAttribute ignore_mask) const {
897 auto iter = FindVMA(address);
898
899 // If we don't have a valid VMA handle at this point, then it means this is
900 // being called with an address outside of the address space, which is definitely
901 // indicative of a bug, as this function only operates on mapped memory regions.
902 DEBUG_ASSERT(IsValidHandle(iter));
903
904 const VAddr end_address = address + size - 1;
905 const MemoryAttribute initial_attributes = iter->second.attribute;
906 const VMAPermission initial_permissions = iter->second.permissions;
907 const MemoryState initial_state = iter->second.state;
908
909 while (true) {
910 // The iterator should be valid throughout the traversal. Hitting the end of
911 // the mapped VMA regions is unquestionably indicative of a bug.
912 DEBUG_ASSERT(IsValidHandle(iter));
913
914 const auto& vma = iter->second;
915
916 if (vma.state != initial_state) {
917 return ERR_INVALID_ADDRESS_STATE;
918 }
919
920 if ((vma.state & state_mask) != state) {
921 return ERR_INVALID_ADDRESS_STATE;
922 }
923
924 if (vma.permissions != initial_permissions) {
925 return ERR_INVALID_ADDRESS_STATE;
926 }
927
928 if ((vma.permissions & permission_mask) != permissions) {
929 return ERR_INVALID_ADDRESS_STATE;
930 }
931
932 if ((vma.attribute | ignore_mask) != (initial_attributes | ignore_mask)) {
933 return ERR_INVALID_ADDRESS_STATE;
934 }
935
936 if ((vma.attribute & attribute_mask) != attribute) {
937 return ERR_INVALID_ADDRESS_STATE;
938 }
939
940 if (end_address <= vma.EndAddress()) {
941 break;
942 }
943
944 ++iter;
945 }
946
947 return MakeResult(
948 std::make_tuple(initial_state, initial_permissions, initial_attributes & ~ignore_mask));
949}
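A hedged illustration of the per-VMA contract CheckRangeState enforces: every block in the range must satisfy (field & mask) == value for state, permissions, and attributes alike. The constants below are taken from the MemoryState encoding in vm_manager.h (Heap's low byte is 0x05, FlagMapped is 1 << 13):

#include <cstdint>

constexpr bool MatchesMaskedValue(std::uint32_t field, std::uint32_t mask,
                                  std::uint32_t value) {
    return (field & mask) == value;
}

// A Heap VMA passes a query for state 0x05 under the 0xFF mask;
// an Unmapped VMA (0x00) fails the same check.
static_assert(MatchesMaskedValue(0x05 | (1u << 13), 0xFF, 0x05));
static_assert(!MatchesMaskedValue(0x00, 0xFF, 0x05));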
950
951ResultVal<std::size_t> VMManager::SizeOfAllocatedVMAsInRange(VAddr address,
952 std::size_t size) const {
953 const VAddr end_addr = address + size;
954 const VAddr last_addr = end_addr - 1;
955 std::size_t mapped_size = 0;
956
957 VAddr cur_addr = address;
958 auto iter = FindVMA(cur_addr);
959 ASSERT(iter != vma_map.end());
960
961 while (true) {
962 const auto& vma = iter->second;
963 const VAddr vma_start = vma.base;
964 const VAddr vma_end = vma_start + vma.size;
965 const VAddr vma_last = vma_end - 1;
966
967 // Add size if relevant.
968 if (vma.state != MemoryState::Unmapped) {
969 mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr);
970 }
971
972 // Break once we hit the end of the range.
973 if (last_addr <= vma_last) {
974 break;
975 }
976
977 // Advance to the next block.
978 cur_addr = vma_end;
979 iter = std::next(iter);
980 ASSERT(iter != vma_map.end());
981 }
982
983 return MakeResult(mapped_size);
984}
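The accumulation above clamps each block's contribution to its intersection with the queried range; a small standalone sketch of that min() arithmetic:

#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
    const std::uint64_t cur_addr = 0x2000;
    const std::uint64_t end_addr = 0x5000; // exclusive end of the queried range
    const std::uint64_t vma_end = 0x4000;  // exclusive end of the current block

    // Only the part of the block inside [cur_addr, end_addr) is counted.
    assert(std::min(end_addr - cur_addr, vma_end - cur_addr) == 0x2000);

    // When the block extends past the queried range, the range end wins.
    assert(std::min(end_addr - 0x4000, 0x9000 - 0x4000) == 0x1000);
}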
985
986ResultVal<std::size_t> VMManager::SizeOfUnmappablePhysicalMemoryInRange(VAddr address,
987 std::size_t size) const {
988 const VAddr end_addr = address + size;
989 const VAddr last_addr = end_addr - 1;
990 std::size_t mapped_size = 0;
991
992 VAddr cur_addr = address;
993 auto iter = FindVMA(cur_addr);
994 ASSERT(iter != vma_map.end());
995
996 while (true) {
997 const auto& vma = iter->second;
998 const auto vma_start = vma.base;
999 const auto vma_end = vma_start + vma.size;
1000 const auto vma_last = vma_end - 1;
1001 const auto state = vma.state;
1002 const auto attr = vma.attribute;
1003
1004 // Memory within region must be free or mapped heap.
1005 if (!((state == MemoryState::Heap && attr == MemoryAttribute::None) ||
1006 (state == MemoryState::Unmapped))) {
1007 return ERR_INVALID_ADDRESS_STATE;
1008 }
1009
1010 // Add size if relevant.
1011 if (state != MemoryState::Unmapped) {
1012 mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr);
1013 }
1014
1015 // Break once we hit the end of the range.
1016 if (last_addr <= vma_last) {
1017 break;
1018 }
1019
1020 // Advance to the next block.
1021 cur_addr = vma_end;
1022 iter = std::next(iter);
1023 ASSERT(iter != vma_map.end());
1024 }
1025
1026 return MakeResult(mapped_size);
1027}
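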
1028
1029u64 VMManager::GetTotalPhysicalMemoryAvailable() const {
1030 LOG_WARNING(Kernel, "(STUBBED) called");
1031 return 0xF8000000;
1032}
1033
1034VAddr VMManager::GetAddressSpaceBaseAddress() const {
1035 return address_space_base;
1036}
1037
1038VAddr VMManager::GetAddressSpaceEndAddress() const {
1039 return address_space_end;
1040}
1041
1042u64 VMManager::GetAddressSpaceSize() const {
1043 return address_space_end - address_space_base;
1044}
1045
1046u64 VMManager::GetAddressSpaceWidth() const {
1047 return address_space_width;
1048}
1049
1050bool VMManager::IsWithinAddressSpace(VAddr address, u64 size) const {
1051 return IsInsideAddressRange(address, size, GetAddressSpaceBaseAddress(),
1052 GetAddressSpaceEndAddress());
1053}
1054
1055VAddr VMManager::GetASLRRegionBaseAddress() const {
1056 return aslr_region_base;
1057}
1058
1059VAddr VMManager::GetASLRRegionEndAddress() const {
1060 return aslr_region_end;
1061}
1062
1063u64 VMManager::GetASLRRegionSize() const {
1064 return aslr_region_end - aslr_region_base;
1065}
1066
1067bool VMManager::IsWithinASLRRegion(VAddr begin, u64 size) const {
1068 const VAddr range_end = begin + size;
1069 const VAddr aslr_start = GetASLRRegionBaseAddress();
1070 const VAddr aslr_end = GetASLRRegionEndAddress();
1071
1072 if (aslr_start > begin || begin > range_end || range_end - 1 > aslr_end - 1) {
1073 return false;
1074 }
1075
1076 if (range_end > heap_region_base && heap_region_end > begin) {
1077 return false;
1078 }
1079
1080 if (range_end > map_region_base && map_region_end > begin) {
1081 return false;
1082 }
1083
1084 return true;
1085}
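The heap and map rejections above are instances of the standard half-open interval overlap test; a minimal sketch:

#include <cstdint>

// [a, b) and [c, d) intersect iff each interval starts before the other ends.
constexpr bool Overlaps(std::uint64_t a, std::uint64_t b, std::uint64_t c,
                        std::uint64_t d) {
    return b > c && d > a;
}

static_assert(Overlaps(0x1000, 0x3000, 0x2000, 0x4000));  // partial overlap
static_assert(!Overlaps(0x1000, 0x2000, 0x2000, 0x3000)); // merely touching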
1086
1087VAddr VMManager::GetCodeRegionBaseAddress() const {
1088 return code_region_base;
1089}
1090
1091VAddr VMManager::GetCodeRegionEndAddress() const {
1092 return code_region_end;
1093}
1094
1095u64 VMManager::GetCodeRegionSize() const {
1096 return code_region_end - code_region_base;
1097}
1098
1099bool VMManager::IsWithinCodeRegion(VAddr address, u64 size) const {
1100 return IsInsideAddressRange(address, size, GetCodeRegionBaseAddress(),
1101 GetCodeRegionEndAddress());
1102}
1103
1104VAddr VMManager::GetHeapRegionBaseAddress() const {
1105 return heap_region_base;
1106}
1107
1108VAddr VMManager::GetHeapRegionEndAddress() const {
1109 return heap_region_end;
1110}
1111
1112u64 VMManager::GetHeapRegionSize() const {
1113 return heap_region_end - heap_region_base;
1114}
1115
1116u64 VMManager::GetCurrentHeapSize() const {
1117 return heap_end - heap_region_base;
1118}
1119
1120bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const {
1121 return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(),
1122 GetHeapRegionEndAddress());
1123}
1124
1125VAddr VMManager::GetMapRegionBaseAddress() const {
1126 return map_region_base;
1127}
1128
1129VAddr VMManager::GetMapRegionEndAddress() const {
1130 return map_region_end;
1131}
1132
1133u64 VMManager::GetMapRegionSize() const {
1134 return map_region_end - map_region_base;
1135}
1136
1137bool VMManager::IsWithinMapRegion(VAddr address, u64 size) const {
1138 return IsInsideAddressRange(address, size, GetMapRegionBaseAddress(), GetMapRegionEndAddress());
1139}
1140
1141VAddr VMManager::GetStackRegionBaseAddress() const {
1142 return stack_region_base;
1143}
1144
1145VAddr VMManager::GetStackRegionEndAddress() const {
1146 return stack_region_end;
1147}
1148
1149u64 VMManager::GetStackRegionSize() const {
1150 return stack_region_end - stack_region_base;
1151}
1152
1153bool VMManager::IsWithinStackRegion(VAddr address, u64 size) const {
1154 return IsInsideAddressRange(address, size, GetStackRegionBaseAddress(),
1155 GetStackRegionEndAddress());
1156}
1157
1158VAddr VMManager::GetTLSIORegionBaseAddress() const {
1159 return tls_io_region_base;
1160}
1161
1162VAddr VMManager::GetTLSIORegionEndAddress() const {
1163 return tls_io_region_end;
1164}
1165
1166u64 VMManager::GetTLSIORegionSize() const {
1167 return tls_io_region_end - tls_io_region_base;
1168}
1169
1170bool VMManager::IsWithinTLSIORegion(VAddr address, u64 size) const {
1171 return IsInsideAddressRange(address, size, GetTLSIORegionBaseAddress(),
1172 GetTLSIORegionEndAddress());
1173}
1174
1175} // namespace Kernel
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
deleted file mode 100644
index 90b4b006a..000000000
--- a/src/core/hle/kernel/vm_manager.h
+++ /dev/null
@@ -1,796 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <map>
8#include <memory>
9#include <tuple>
10#include <vector>
11#include "common/common_types.h"
12#include "common/memory_hook.h"
13#include "common/page_table.h"
14#include "core/hle/kernel/physical_memory.h"
15#include "core/hle/result.h"
16#include "core/memory.h"
17
18namespace Core {
19class System;
20}
21
22namespace FileSys {
23enum class ProgramAddressSpaceType : u8;
24}
25
26namespace Kernel {
27
28enum class VMAType : u8 {
29 /// VMA represents an unmapped region of the address space.
30 Free,
 31 /// VMA is backed by a ref-counted allocated memory block.
32 AllocatedMemoryBlock,
33 /// VMA is backed by a raw, unmanaged pointer.
34 BackingMemory,
35 /// VMA is mapped to MMIO registers at a fixed PAddr.
36 MMIO,
37 // TODO(yuriks): Implement MemoryAlias to support MAP/UNMAP
38};
39
40/// Permissions for mapped memory blocks
41enum class VMAPermission : u8 {
42 None = 0,
43 Read = 1,
44 Write = 2,
45 Execute = 4,
46
47 ReadWrite = Read | Write,
48 ReadExecute = Read | Execute,
49 WriteExecute = Write | Execute,
50 ReadWriteExecute = Read | Write | Execute,
51
52 // Used as a wildcard when checking permissions across memory ranges
53 All = 0xFF,
54};
55
56constexpr VMAPermission operator|(VMAPermission lhs, VMAPermission rhs) {
57 return static_cast<VMAPermission>(u32(lhs) | u32(rhs));
58}
59
60constexpr VMAPermission operator&(VMAPermission lhs, VMAPermission rhs) {
61 return static_cast<VMAPermission>(u32(lhs) & u32(rhs));
62}
63
64constexpr VMAPermission operator^(VMAPermission lhs, VMAPermission rhs) {
65 return static_cast<VMAPermission>(u32(lhs) ^ u32(rhs));
66}
67
68constexpr VMAPermission operator~(VMAPermission permission) {
69 return static_cast<VMAPermission>(~u32(permission));
70}
71
72constexpr VMAPermission& operator|=(VMAPermission& lhs, VMAPermission rhs) {
73 lhs = lhs | rhs;
74 return lhs;
75}
76
77constexpr VMAPermission& operator&=(VMAPermission& lhs, VMAPermission rhs) {
78 lhs = lhs & rhs;
79 return lhs;
80}
81
82constexpr VMAPermission& operator^=(VMAPermission& lhs, VMAPermission rhs) {
83 lhs = lhs ^ rhs;
84 return lhs;
85}
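With these operators, the composite permissions defined above fall out of the individual bits; a quick sketch relying only on the definitions in this header:

static_assert((VMAPermission::Read | VMAPermission::Write) == VMAPermission::ReadWrite);
static_assert((VMAPermission::ReadWriteExecute & ~VMAPermission::Execute) ==
              VMAPermission::ReadWrite);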
86
87/// Attribute flags that can be applied to a VMA
88enum class MemoryAttribute : u32 {
89 Mask = 0xFF,
90
91 /// No particular qualities
92 None = 0,
93 /// Memory locked/borrowed for use. e.g. This would be used by transfer memory.
94 Locked = 1,
95 /// Memory locked for use by IPC-related internals.
96 LockedForIPC = 2,
97 /// Mapped as part of the device address space.
98 DeviceMapped = 4,
99 /// Uncached memory
100 Uncached = 8,
101
102 IpcAndDeviceMapped = LockedForIPC | DeviceMapped,
103};
104
105constexpr MemoryAttribute operator|(MemoryAttribute lhs, MemoryAttribute rhs) {
106 return static_cast<MemoryAttribute>(u32(lhs) | u32(rhs));
107}
108
109constexpr MemoryAttribute operator&(MemoryAttribute lhs, MemoryAttribute rhs) {
110 return static_cast<MemoryAttribute>(u32(lhs) & u32(rhs));
111}
112
113constexpr MemoryAttribute operator^(MemoryAttribute lhs, MemoryAttribute rhs) {
114 return static_cast<MemoryAttribute>(u32(lhs) ^ u32(rhs));
115}
116
117constexpr MemoryAttribute operator~(MemoryAttribute attribute) {
118 return static_cast<MemoryAttribute>(~u32(attribute));
119}
120
121constexpr MemoryAttribute& operator|=(MemoryAttribute& lhs, MemoryAttribute rhs) {
122 lhs = lhs | rhs;
123 return lhs;
124}
125
126constexpr MemoryAttribute& operator&=(MemoryAttribute& lhs, MemoryAttribute rhs) {
127 lhs = lhs & rhs;
128 return lhs;
129}
130
131constexpr MemoryAttribute& operator^=(MemoryAttribute& lhs, MemoryAttribute rhs) {
132 lhs = lhs ^ rhs;
133 return lhs;
134}
135
136constexpr u32 ToSvcMemoryAttribute(MemoryAttribute attribute) {
137 return static_cast<u32>(attribute & MemoryAttribute::Mask);
138}
139
140// clang-format off
141/// Represents memory states and any relevant flags, as used by the kernel.
142/// svcQueryMemory interprets these by masking away all but the first eight
143/// bits when storing memory state into a MemoryInfo instance.
144enum class MemoryState : u32 {
145 Mask = 0xFF,
146 FlagProtect = 1U << 8,
147 FlagDebug = 1U << 9,
148 FlagIPC0 = 1U << 10,
149 FlagIPC3 = 1U << 11,
150 FlagIPC1 = 1U << 12,
151 FlagMapped = 1U << 13,
152 FlagCode = 1U << 14,
153 FlagAlias = 1U << 15,
154 FlagModule = 1U << 16,
155 FlagTransfer = 1U << 17,
156 FlagQueryPhysicalAddressAllowed = 1U << 18,
157 FlagSharedDevice = 1U << 19,
158 FlagSharedDeviceAligned = 1U << 20,
159 FlagIPCBuffer = 1U << 21,
160 FlagMemoryPoolAllocated = 1U << 22,
161 FlagMapProcess = 1U << 23,
162 FlagUncached = 1U << 24,
163 FlagCodeMemory = 1U << 25,
164
165 // Wildcard used in range checking to indicate all states.
166 All = 0xFFFFFFFF,
167
168 // Convenience flag sets to reduce repetition
169 IPCFlags = FlagIPC0 | FlagIPC3 | FlagIPC1,
170
171 CodeFlags = FlagDebug | IPCFlags | FlagMapped | FlagCode | FlagQueryPhysicalAddressAllowed |
172 FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
173
174 DataFlags = FlagProtect | IPCFlags | FlagMapped | FlagAlias | FlagTransfer |
175 FlagQueryPhysicalAddressAllowed | FlagSharedDevice | FlagSharedDeviceAligned |
176 FlagMemoryPoolAllocated | FlagIPCBuffer | FlagUncached,
177
178 Unmapped = 0x00,
179 Io = 0x01 | FlagMapped,
180 Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed,
181 Code = 0x03 | CodeFlags | FlagMapProcess,
182 CodeData = 0x04 | DataFlags | FlagMapProcess | FlagCodeMemory,
183 Heap = 0x05 | DataFlags | FlagCodeMemory,
184 Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated,
185 ModuleCode = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
186 ModuleCodeData = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,
187
188 IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated |
189 IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned,
190
191 Stack = 0x0B | FlagMapped | IPCFlags | FlagQueryPhysicalAddressAllowed |
192 FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
193
194 ThreadLocal = 0x0C | FlagMapped | FlagMemoryPoolAllocated,
195
196 TransferMemoryIsolated = 0x0D | IPCFlags | FlagMapped | FlagQueryPhysicalAddressAllowed |
197 FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated |
198 FlagUncached,
199
200 TransferMemory = 0x0E | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed |
201 FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
202
203 ProcessMemory = 0x0F | FlagIPC3 | FlagIPC1 | FlagMapped | FlagMemoryPoolAllocated,
204
205 // Used to signify an inaccessible or invalid memory region with memory queries
206 Inaccessible = 0x10,
207
208 IpcBuffer1 = 0x11 | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed |
209 FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
210
211 IpcBuffer3 = 0x12 | FlagIPC3 | FlagMapped | FlagQueryPhysicalAddressAllowed |
212 FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
213
214 KernelStack = 0x13 | FlagMapped,
215};
216// clang-format on
217
218constexpr MemoryState operator|(MemoryState lhs, MemoryState rhs) {
219 return static_cast<MemoryState>(u32(lhs) | u32(rhs));
220}
221
222constexpr MemoryState operator&(MemoryState lhs, MemoryState rhs) {
223 return static_cast<MemoryState>(u32(lhs) & u32(rhs));
224}
225
226constexpr MemoryState operator^(MemoryState lhs, MemoryState rhs) {
227 return static_cast<MemoryState>(u32(lhs) ^ u32(rhs));
228}
229
230constexpr MemoryState operator~(MemoryState lhs) {
231 return static_cast<MemoryState>(~u32(lhs));
232}
233
234constexpr MemoryState& operator|=(MemoryState& lhs, MemoryState rhs) {
235 lhs = lhs | rhs;
236 return lhs;
237}
238
239constexpr MemoryState& operator&=(MemoryState& lhs, MemoryState rhs) {
240 lhs = lhs & rhs;
241 return lhs;
242}
243
244constexpr MemoryState& operator^=(MemoryState& lhs, MemoryState rhs) {
245 lhs = lhs ^ rhs;
246 return lhs;
247}
248
249constexpr u32 ToSvcMemoryState(MemoryState state) {
250 return static_cast<u32>(state & MemoryState::Mask);
251}
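As a quick check of the encoding, masking a state down to its SVC-visible byte strips the internal flag bits; a sketch using only definitions from this header:

// Heap is 0x05 plus internal flags; KernelStack is 0x13 | FlagMapped.
static_assert(ToSvcMemoryState(MemoryState::Heap) == 0x05);
static_assert(ToSvcMemoryState(MemoryState::KernelStack) == 0x13);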
252
253struct MemoryInfo {
254 u64 base_address;
255 u64 size;
256 u32 state;
257 u32 attributes;
258 u32 permission;
259 u32 ipc_ref_count;
260 u32 device_ref_count;
261};
262static_assert(sizeof(MemoryInfo) == 0x28, "MemoryInfo has incorrect size.");
263
264struct PageInfo {
265 u32 flags;
266};
267
268/**
269 * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space
270 * with homogeneous attributes across its extents. In this particular implementation each VMA is
271 * also backed by a single host memory allocation.
272 */
273struct VirtualMemoryArea {
274 /// Gets the starting (base) address of this VMA.
275 VAddr StartAddress() const {
276 return base;
277 }
278
279 /// Gets the ending address of this VMA.
280 VAddr EndAddress() const {
281 return base + size - 1;
282 }
283
284 /// Virtual base address of the region.
285 VAddr base = 0;
286 /// Size of the region.
287 u64 size = 0;
288
289 VMAType type = VMAType::Free;
290 VMAPermission permissions = VMAPermission::None;
291 MemoryState state = MemoryState::Unmapped;
292 MemoryAttribute attribute = MemoryAttribute::None;
293
294 // Settings for type = AllocatedMemoryBlock
295 /// Memory block backing this VMA.
296 std::shared_ptr<PhysicalMemory> backing_block = nullptr;
 297 /// Offset into backing_block that the mapping starts from.
298 std::size_t offset = 0;
299
300 // Settings for type = BackingMemory
301 /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
302 u8* backing_memory = nullptr;
303
304 // Settings for type = MMIO
305 /// Physical address of the register area this VMA maps to.
306 PAddr paddr = 0;
307 Common::MemoryHookPointer mmio_handler = nullptr;
308
309 /// Tests if this area can be merged to the right with `next`.
310 bool CanBeMergedWith(const VirtualMemoryArea& next) const;
311};
312
313/**
314 * Manages a process' virtual addressing space. This class maintains a list of allocated and free
315 * regions in the address space, along with their attributes, and allows kernel clients to
316 * manipulate it, adjusting the page table to match.
317 *
318 * This is similar in idea and purpose to the VM manager present in operating system kernels, with
319 * the main difference being that it doesn't have to support swapping or memory mapping of files.
320 * The implementation is also simplified by not having to allocate page frames. See these articles
 321 * about the Linux kernel for an explanation of the concept and implementation:
322 * - http://duartes.org/gustavo/blog/post/how-the-kernel-manages-your-memory/
323 * - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/
324 */
325class VMManager final {
326 using VMAMap = std::map<VAddr, VirtualMemoryArea>;
327
328public:
329 using VMAHandle = VMAMap::const_iterator;
330
331 explicit VMManager(Core::System& system);
332 ~VMManager();
333
334 /// Clears the address space map, re-initializing with a single free area.
335 void Reset(FileSys::ProgramAddressSpaceType type);
336
 337 /// Finds the VMA that contains the given address, or `vma_map.end()` if none does.
338 VMAHandle FindVMA(VAddr target) const;
339
340 /// Indicates whether or not the given handle is within the VMA map.
341 bool IsValidHandle(VMAHandle handle) const;
342
343 // TODO(yuriks): Should these functions actually return the handle?
344
345 /**
346 * Maps part of a ref-counted block of memory at a given address.
347 *
348 * @param target The guest address to start the mapping at.
349 * @param block The block to be mapped.
350 * @param offset Offset into `block` to map from.
351 * @param size Size of the mapping.
352 * @param state MemoryState tag to attach to the VMA.
353 */
354 ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<PhysicalMemory> block,
355 std::size_t offset, u64 size, MemoryState state,
356 VMAPermission perm = VMAPermission::ReadWrite);
357
358 /**
359 * Maps an unmanaged host memory pointer at a given address.
360 *
361 * @param target The guest address to start the mapping at.
362 * @param memory The memory to be mapped.
363 * @param size Size of the mapping.
364 * @param state MemoryState tag to attach to the VMA.
365 */
366 ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u64 size, MemoryState state);
367
368 /**
369 * Finds the first free memory region of the given size within
370 * the user-addressable ASLR memory region.
371 *
372 * @param size The size of the desired region in bytes.
373 *
374 * @returns If successful, the base address of the free region with
375 * the given size.
376 */
377 ResultVal<VAddr> FindFreeRegion(u64 size) const;
378
379 /**
380 * Finds the first free address range that can hold a region of the desired size
381 *
382 * @param begin The starting address of the range.
383 * This is treated as an inclusive beginning address.
384 *
385 * @param end The ending address of the range.
386 * This is treated as an exclusive ending address.
387 *
388 * @param size The size of the free region to attempt to locate,
389 * in bytes.
390 *
391 * @returns If successful, the base address of the free region with
392 * the given size.
393 *
394 * @returns If unsuccessful, a result containing an error code.
395 *
396 * @pre The starting address must be less than the ending address.
397 * @pre The size must not exceed the address range itself.
398 */
399 ResultVal<VAddr> FindFreeRegion(VAddr begin, VAddr end, u64 size) const;
400
401 /**
402 * Maps a memory-mapped IO region at a given address.
403 *
404 * @param target The guest address to start the mapping at.
405 * @param paddr The physical address where the registers are present.
406 * @param size Size of the mapping.
407 * @param state MemoryState tag to attach to the VMA.
408 * @param mmio_handler The handler that will implement read and write for this MMIO region.
409 */
410 ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state,
411 Common::MemoryHookPointer mmio_handler);
412
413 /// Unmaps a range of addresses, splitting VMAs as necessary.
414 ResultCode UnmapRange(VAddr target, u64 size);
415
416 /// Changes the permissions of the given VMA.
417 VMAHandle Reprotect(VMAHandle vma, VMAPermission new_perms);
418
419 /// Changes the permissions of a range of addresses, splitting VMAs as necessary.
420 ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
421
422 ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
423
424 /// Attempts to allocate a heap with the given size.
425 ///
426 /// @param size The size of the heap to allocate in bytes.
427 ///
428 /// @note If a heap is currently allocated, and this is called
429 /// with a size that is equal to the size of the current heap,
430 /// then this function will do nothing and return the current
431 /// heap's starting address, as there's no need to perform
432 /// any additional heap allocation work.
433 ///
434 /// @note If a heap is currently allocated, and this is called
435 /// with a size less than the current heap's size, then
436 /// this function will attempt to shrink the heap.
437 ///
438 /// @note If a heap is currently allocated, and this is called
439 /// with a size larger than the current heap's size, then
440 /// this function will attempt to extend the size of the heap.
441 ///
442 /// @returns A result indicating either success or failure.
443 /// <p>
444 /// If successful, this function will return a result
445 /// containing the starting address to the allocated heap.
446 /// <p>
447 /// If unsuccessful, this function will return a result
448 /// containing an error code.
449 ///
450 /// @pre The given size must lie within the allowable heap
451 /// memory region managed by this VMManager instance.
452 /// Failure to abide by this will result in ERR_OUT_OF_MEMORY
453 /// being returned as the result.
454 ///
455 ResultVal<VAddr> SetHeapSize(u64 size);
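The three @note cases above amount to a simple size comparison before any mapping work; a hypothetical standalone sketch of that dispatch (names are illustrative, not from this header):

#include <cstdint>

enum class HeapOp { None, Shrink, Extend };

constexpr HeapOp ClassifyHeapResize(std::uint64_t current, std::uint64_t requested) {
    if (requested == current) {
        return HeapOp::None; // reuse the existing heap and return its base
    }
    return requested < current ? HeapOp::Shrink : HeapOp::Extend;
}

static_assert(ClassifyHeapResize(0x1000, 0x1000) == HeapOp::None);
static_assert(ClassifyHeapResize(0x2000, 0x1000) == HeapOp::Shrink);
static_assert(ClassifyHeapResize(0x1000, 0x2000) == HeapOp::Extend);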
456
457 /// Maps memory at a given address.
458 ///
459 /// @param target The virtual address to map memory at.
460 /// @param size The amount of memory to map.
461 ///
462 /// @note The destination address must lie within the Map region.
463 ///
 464 /// @note This function requires that SystemResourceSize be non-zero;
 465 /// this is only because, if it were not, the resulting
 466 /// page tables could be exploited on hardware by
 467 /// a malicious program. SystemResource usage does not need
 468 /// to be explicitly checked or updated here.
469 ResultCode MapPhysicalMemory(VAddr target, u64 size);
470
471 /// Unmaps memory at a given address.
472 ///
473 /// @param target The virtual address to unmap memory at.
474 /// @param size The amount of memory to unmap.
475 ///
476 /// @note The destination address must lie within the Map region.
477 ///
 478 /// @note This function requires that SystemResourceSize be non-zero;
 479 /// this is only because, if it were not, the resulting
 480 /// page tables could be exploited on hardware by
 481 /// a malicious program. SystemResource usage does not need
 482 /// to be explicitly checked or updated here.
483 ResultCode UnmapPhysicalMemory(VAddr target, u64 size);
484
485 /// Maps a region of memory as code memory.
486 ///
487 /// @param dst_address The base address of the region to create the aliasing memory region.
488 /// @param src_address The base address of the region to be aliased.
489 /// @param size The total amount of memory to map in bytes.
490 ///
491 /// @pre Both memory regions lie within the actual addressable address space.
492 ///
493 /// @post After this function finishes execution, assuming success, then the address range
494 /// [dst_address, dst_address+size) will alias the memory region,
495 /// [src_address, src_address+size).
496 /// <p>
497 /// What this also entails is as follows:
498 /// 1. The aliased region gains the Locked memory attribute.
499 /// 2. The aliased region becomes read-only.
500 /// 3. The aliasing region becomes read-only.
 501 /// 4. The aliasing region is created with a memory state of MemoryState::ModuleCode.
502 ///
503 ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size);
504
505 /// Unmaps a region of memory designated as code module memory.
506 ///
507 /// @param dst_address The base address of the memory region aliasing the source memory region.
508 /// @param src_address The base address of the memory region being aliased.
509 /// @param size The size of the memory region to unmap in bytes.
510 ///
511 /// @pre Both memory ranges lie within the actual addressable address space.
512 ///
 513 /// @pre The memory region being unmapped has previously been mapped
514 /// by a call to MapCodeMemory.
515 ///
 516 /// @post After execution of the function, if successful, the aliasing memory region
 517 /// will be unmapped and the aliased region will have its traits
 518 /// restored to what they were prior to the original mapping call preceding
 519 /// this function call.
520 /// <p>
521 /// What this also entails is as follows:
522 /// 1. The state of the memory region will now indicate a general heap region.
523 /// 2. All memory attributes for the memory region are cleared.
524 /// 3. Memory permissions for the region are restored to user read/write.
525 ///
526 ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, u64 size);
527
528 /// Queries the memory manager for information about the given address.
529 ///
530 /// @param address The address to query the memory manager about for information.
531 ///
532 /// @return A MemoryInfo instance containing information about the given address.
533 ///
534 MemoryInfo QueryMemory(VAddr address) const;
535
536 /// Sets an attribute across the given address range.
537 ///
538 /// @param address The starting address
539 /// @param size The size of the range to set the attribute on.
540 /// @param mask The attribute mask
541 /// @param attribute The attribute to set across the given address range
542 ///
543 /// @returns RESULT_SUCCESS if successful
544 /// @returns ERR_INVALID_ADDRESS_STATE if the attribute could not be set.
545 ///
546 ResultCode SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask,
547 MemoryAttribute attribute);
548
549 /**
550 * Scans all VMAs and updates the page table range of any that use the given vector as backing
551 * memory. This should be called after any operation that causes reallocation of the vector.
552 */
553 void RefreshMemoryBlockMappings(const PhysicalMemory* block);
554
555 /// Dumps the address space layout to the log, for debugging
556 void LogLayout() const;
557
558 /// Gets the total memory usage, used by svcGetInfo
559 u64 GetTotalPhysicalMemoryAvailable() const;
560
561 /// Gets the address space base address
562 VAddr GetAddressSpaceBaseAddress() const;
563
564 /// Gets the address space end address
565 VAddr GetAddressSpaceEndAddress() const;
566
567 /// Gets the total address space address size in bytes
568 u64 GetAddressSpaceSize() const;
569
570 /// Gets the address space width in bits.
571 u64 GetAddressSpaceWidth() const;
572
573 /// Determines whether or not the given address range lies within the address space.
574 bool IsWithinAddressSpace(VAddr address, u64 size) const;
575
576 /// Gets the base address of the ASLR region.
577 VAddr GetASLRRegionBaseAddress() const;
578
579 /// Gets the end address of the ASLR region.
580 VAddr GetASLRRegionEndAddress() const;
581
582 /// Gets the size of the ASLR region
583 u64 GetASLRRegionSize() const;
584
585 /// Determines whether or not the specified address range is within the ASLR region.
586 bool IsWithinASLRRegion(VAddr address, u64 size) const;
587
588 /// Gets the base address of the code region.
589 VAddr GetCodeRegionBaseAddress() const;
590
591 /// Gets the end address of the code region.
592 VAddr GetCodeRegionEndAddress() const;
593
594 /// Gets the total size of the code region in bytes.
595 u64 GetCodeRegionSize() const;
596
597 /// Determines whether or not the specified range is within the code region.
598 bool IsWithinCodeRegion(VAddr address, u64 size) const;
599
600 /// Gets the base address of the heap region.
601 VAddr GetHeapRegionBaseAddress() const;
602
 603 /// Gets the end address of the heap region.
604 VAddr GetHeapRegionEndAddress() const;
605
606 /// Gets the total size of the heap region in bytes.
607 u64 GetHeapRegionSize() const;
608
609 /// Gets the total size of the current heap in bytes.
610 ///
611 /// @note This is the current allocated heap size, not the size
612 /// of the region it's allowed to exist within.
613 ///
614 u64 GetCurrentHeapSize() const;
615
616 /// Determines whether or not the specified range is within the heap region.
617 bool IsWithinHeapRegion(VAddr address, u64 size) const;
618
619 /// Gets the base address of the map region.
620 VAddr GetMapRegionBaseAddress() const;
621
622 /// Gets the end address of the map region.
623 VAddr GetMapRegionEndAddress() const;
624
625 /// Gets the total size of the map region in bytes.
626 u64 GetMapRegionSize() const;
627
628 /// Determines whether or not the specified range is within the map region.
629 bool IsWithinMapRegion(VAddr address, u64 size) const;
630
631 /// Gets the base address of the stack region.
632 VAddr GetStackRegionBaseAddress() const;
633
634 /// Gets the end address of the stack region.
635 VAddr GetStackRegionEndAddress() const;
636
637 /// Gets the total size of the stack region in bytes.
638 u64 GetStackRegionSize() const;
639
640 /// Determines whether or not the given address range is within the stack region
641 bool IsWithinStackRegion(VAddr address, u64 size) const;
642
643 /// Gets the base address of the TLS IO region.
644 VAddr GetTLSIORegionBaseAddress() const;
645
646 /// Gets the end address of the TLS IO region.
647 VAddr GetTLSIORegionEndAddress() const;
648
649 /// Gets the total size of the TLS IO region in bytes.
650 u64 GetTLSIORegionSize() const;
651
652 /// Determines if the given address range is within the TLS IO region.
653 bool IsWithinTLSIORegion(VAddr address, u64 size) const;
654
655 /// Each VMManager has its own page table, which is set as the main one when the owning process
656 /// is scheduled.
657 Common::PageTable page_table{Memory::PAGE_BITS};
658
659 using CheckResults = ResultVal<std::tuple<MemoryState, VMAPermission, MemoryAttribute>>;
660
661 /// Checks if an address range adheres to the specified states provided.
662 ///
663 /// @param address The starting address of the address range.
664 /// @param size The size of the address range.
665 /// @param state_mask The memory state mask.
666 /// @param state The state to compare the individual VMA states against,
667 /// which is done in the form of: (vma.state & state_mask) != state.
668 /// @param permission_mask The memory permissions mask.
669 /// @param permissions The permission to compare the individual VMA permissions against,
670 /// which is done in the form of:
671 /// (vma.permission & permission_mask) != permission.
672 /// @param attribute_mask The memory attribute mask.
673 /// @param attribute The memory attributes to compare the individual VMA attributes
674 /// against, which is done in the form of:
675 /// (vma.attributes & attribute_mask) != attribute.
676 /// @param ignore_mask The memory attributes to ignore during the check.
677 ///
 678 /// @returns If successful, returns a tuple containing the memory state,
 679 /// memory permissions, and memory attributes (with the bits specified
 680 /// by ignore_mask cleared) across the memory range.
681 /// @returns If not successful, returns ERR_INVALID_ADDRESS_STATE.
682 ///
683 CheckResults CheckRangeState(VAddr address, u64 size, MemoryState state_mask, MemoryState state,
684 VMAPermission permission_mask, VMAPermission permissions,
685 MemoryAttribute attribute_mask, MemoryAttribute attribute,
686 MemoryAttribute ignore_mask) const;
687
688private:
689 using VMAIter = VMAMap::iterator;
690
691 /// Converts a VMAHandle to a mutable VMAIter.
692 VMAIter StripIterConstness(const VMAHandle& iter);
693
694 /// Unmaps the given VMA.
695 VMAIter Unmap(VMAIter vma);
696
697 /**
698 * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
699 * the appropriate error checking.
700 */
701 ResultVal<VMAIter> CarveVMA(VAddr base, u64 size);
702
703 /**
704 * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each
705 * end of the range.
706 */
707 ResultVal<VMAIter> CarveVMARange(VAddr base, u64 size);
708
709 /**
710 * Splits a VMA in two, at the specified offset.
711 * @returns the right side of the split, with the original iterator becoming the left side.
712 */
713 VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma);
714
715 /**
716 * Checks for and merges the specified VMA with adjacent ones if possible.
717 * @returns the merged VMA or the original if no merging was possible.
718 */
719 VMAIter MergeAdjacent(VMAIter vma);
720
721 /**
722 * Merges two adjacent VMAs.
723 */
724 void MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right);
725
726 /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
727 void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
728
729 /// Initializes memory region ranges to adhere to a given address space type.
730 void InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type);
731
732 /// Clears the underlying map and page table.
733 void Clear();
734
735 /// Clears out the VMA map, unmapping any previously mapped ranges.
736 void ClearVMAMap();
737
738 /// Clears out the page table
739 void ClearPageTable();
740
741 /// Gets the amount of memory currently mapped (state != Unmapped) in a range.
742 ResultVal<std::size_t> SizeOfAllocatedVMAsInRange(VAddr address, std::size_t size) const;
743
744 /// Gets the amount of memory unmappable by UnmapPhysicalMemory in a range.
745 ResultVal<std::size_t> SizeOfUnmappablePhysicalMemoryInRange(VAddr address,
746 std::size_t size) const;
747
748 /**
749 * A map covering the entirety of the managed address space, keyed by the `base` field of each
750 * VMA. It must always be modified by splitting or merging VMAs, so that the invariant
 751 * `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be
 752 * merged when possible, so that no two adjacent regions with identical attributes are
 753 * ever left unmerged.
754 */
755 VMAMap vma_map;
756
757 u32 address_space_width = 0;
758 VAddr address_space_base = 0;
759 VAddr address_space_end = 0;
760
761 VAddr aslr_region_base = 0;
762 VAddr aslr_region_end = 0;
763
764 VAddr code_region_base = 0;
765 VAddr code_region_end = 0;
766
767 VAddr heap_region_base = 0;
768 VAddr heap_region_end = 0;
769
770 VAddr map_region_base = 0;
771 VAddr map_region_end = 0;
772
773 VAddr stack_region_base = 0;
774 VAddr stack_region_end = 0;
775
776 VAddr tls_io_region_base = 0;
777 VAddr tls_io_region_end = 0;
778
779 // Memory used to back the allocations in the regular heap. A single vector is used to cover
780 // the entire virtual address space extents that bound the allocations, including any holes.
781 // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
782 // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
783 std::shared_ptr<PhysicalMemory> heap_memory;
784
785 // The end of the currently allocated heap. This is not an inclusive
786 // end of the range. This is essentially 'base_address + current_size'.
787 VAddr heap_end = 0;
788
789 // The current amount of memory mapped via MapPhysicalMemory.
790 // This is used here (and in Nintendo's kernel) only for debugging, and does not impact
791 // any behavior.
792 u64 physical_memory_mapped = 0;
793
794 Core::System& system;
795};
796} // namespace Kernel
diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp
index 4fb2cbc4b..106e89743 100644
--- a/src/core/hle/service/audio/audout_u.cpp
+++ b/src/core/hle/service/audio/audout_u.cpp
@@ -210,7 +210,7 @@ private:
210 210
211 /// This is the event handle used to check if the audio buffer was released 211 /// This is the event handle used to check if the audio buffer was released
212 Kernel::EventPair buffer_event; 212 Kernel::EventPair buffer_event;
213 Memory::Memory& main_memory; 213 Core::Memory::Memory& main_memory;
214}; 214};
215 215
216AudOutU::AudOutU(Core::System& system_) : ServiceFramework("audout:u"), system{system_} { 216AudOutU::AudOutU(Core::System& system_) : ServiceFramework("audout:u"), system{system_} {
diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp
index 82a5dbf14..175cabf45 100644
--- a/src/core/hle/service/audio/audren_u.cpp
+++ b/src/core/hle/service/audio/audren_u.cpp
@@ -129,7 +129,7 @@ private:
129 LOG_DEBUG(Service_Audio, "called. rendering_time_limit_percent={}", 129 LOG_DEBUG(Service_Audio, "called. rendering_time_limit_percent={}",
130 rendering_time_limit_percent); 130 rendering_time_limit_percent);
131 131
132 ASSERT(rendering_time_limit_percent >= 0 && rendering_time_limit_percent <= 100); 132 ASSERT(rendering_time_limit_percent <= 100);
133 133
134 IPC::ResponseBuilder rb{ctx, 2}; 134 IPC::ResponseBuilder rb{ctx, 2};
135 rb.Push(RESULT_SUCCESS); 135 rb.Push(RESULT_SUCCESS);
diff --git a/src/core/hle/service/filesystem/filesystem.cpp b/src/core/hle/service/filesystem/filesystem.cpp
index 102017d73..cadc03805 100644
--- a/src/core/hle/service/filesystem/filesystem.cpp
+++ b/src/core/hle/service/filesystem/filesystem.cpp
@@ -451,7 +451,8 @@ FileSys::SaveDataSize FileSystemController::ReadSaveDataSize(FileSys::SaveDataTy
451 451
452 if (res != Loader::ResultStatus::Success) { 452 if (res != Loader::ResultStatus::Success) {
453 FileSys::PatchManager pm{system.CurrentProcess()->GetTitleID()}; 453 FileSys::PatchManager pm{system.CurrentProcess()->GetTitleID()};
454 auto [nacp_unique, discard] = pm.GetControlMetadata(); 454 const auto metadata = pm.GetControlMetadata();
455 const auto& nacp_unique = metadata.first;
455 456
456 if (nacp_unique != nullptr) { 457 if (nacp_unique != nullptr) {
457 new_size = {nacp_unique->GetDefaultNormalSaveSize(), 458 new_size = {nacp_unique->GetDefaultNormalSaveSize(),
diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp
index e6811d5b5..61045c75c 100644
--- a/src/core/hle/service/filesystem/fsp_srv.cpp
+++ b/src/core/hle/service/filesystem/fsp_srv.cpp
@@ -575,6 +575,7 @@ private:
575 0, 575 0,
576 user_id->GetSize(), 576 user_id->GetSize(),
577 {}, 577 {},
578 {},
578 }); 579 });
579 580
580 continue; 581 continue;
@@ -595,6 +596,7 @@ private:
595 stoull_be(title_id->GetName()), 596 stoull_be(title_id->GetName()),
596 title_id->GetSize(), 597 title_id->GetSize(),
597 {}, 598 {},
599 {},
598 }); 600 });
599 } 601 }
600 } 602 }
@@ -619,6 +621,7 @@ private:
619 stoull_be(title_id->GetName()), 621 stoull_be(title_id->GetName()),
620 title_id->GetSize(), 622 title_id->GetSize(),
621 {}, 623 {},
624 {},
622 }); 625 });
623 } 626 }
624 } 627 }
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index d6ed5f304..d6031a987 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -14,6 +14,7 @@
14#include "core/hle/ipc_helpers.h" 14#include "core/hle/ipc_helpers.h"
15#include "core/hle/kernel/client_port.h" 15#include "core/hle/kernel/client_port.h"
16#include "core/hle/kernel/client_session.h" 16#include "core/hle/kernel/client_session.h"
17#include "core/hle/kernel/kernel.h"
17#include "core/hle/kernel/readable_event.h" 18#include "core/hle/kernel/readable_event.h"
18#include "core/hle/kernel/shared_memory.h" 19#include "core/hle/kernel/shared_memory.h"
19#include "core/hle/kernel/writable_event.h" 20#include "core/hle/kernel/writable_event.h"
@@ -53,9 +54,7 @@ IAppletResource::IAppletResource(Core::System& system)
53 RegisterHandlers(functions); 54 RegisterHandlers(functions);
54 55
55 auto& kernel = system.Kernel(); 56 auto& kernel = system.Kernel();
56 shared_mem = Kernel::SharedMemory::Create( 57 shared_mem = SharedFrom(&kernel.GetHidSharedMem());
57 kernel, nullptr, SHARED_MEMORY_SIZE, Kernel::MemoryPermission::ReadWrite,
58 Kernel::MemoryPermission::Read, 0, Kernel::MemoryRegion::BASE, "HID:SharedMemory");
59 58
60 MakeController<Controller_DebugPad>(HidController::DebugPad); 59 MakeController<Controller_DebugPad>(HidController::DebugPad);
61 MakeController<Controller_Touchscreen>(HidController::Touchscreen); 60 MakeController<Controller_Touchscreen>(HidController::Touchscreen);
diff --git a/src/core/hle/service/hid/irs.cpp b/src/core/hle/service/hid/irs.cpp
index 5e79e2c1a..36ed6f7da 100644
--- a/src/core/hle/service/hid/irs.cpp
+++ b/src/core/hle/service/hid/irs.cpp
@@ -6,6 +6,7 @@
6#include "core/core.h" 6#include "core/core.h"
7#include "core/core_timing.h" 7#include "core/core_timing.h"
8#include "core/hle/ipc_helpers.h" 8#include "core/hle/ipc_helpers.h"
9#include "core/hle/kernel/kernel.h"
9#include "core/hle/kernel/shared_memory.h" 10#include "core/hle/kernel/shared_memory.h"
10#include "core/hle/service/hid/irs.h" 11#include "core/hle/service/hid/irs.h"
11 12
@@ -38,9 +39,8 @@ IRS::IRS(Core::System& system) : ServiceFramework{"irs"}, system(system) {
38 RegisterHandlers(functions); 39 RegisterHandlers(functions);
39 40
40 auto& kernel = system.Kernel(); 41 auto& kernel = system.Kernel();
41 shared_mem = Kernel::SharedMemory::Create( 42
42 kernel, nullptr, 0x8000, Kernel::MemoryPermission::ReadWrite, 43 shared_mem = SharedFrom(&kernel.GetIrsSharedMem());
43 Kernel::MemoryPermission::Read, 0, Kernel::MemoryRegion::BASE, "IRS:SharedMemory");
44} 44}
45 45
46void IRS::ActivateIrsensor(Kernel::HLERequestContext& ctx) { 46void IRS::ActivateIrsensor(Kernel::HLERequestContext& ctx) {
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index 647943020..0cde7a557 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -8,14 +8,21 @@
8 8
9#include "common/alignment.h" 9#include "common/alignment.h"
10#include "common/hex_util.h" 10#include "common/hex_util.h"
11#include "common/scope_exit.h"
11#include "core/hle/ipc_helpers.h" 12#include "core/hle/ipc_helpers.h"
13#include "core/hle/kernel/errors.h"
14#include "core/hle/kernel/memory/page_table.h"
15#include "core/hle/kernel/memory/system_control.h"
12#include "core/hle/kernel/process.h" 16#include "core/hle/kernel/process.h"
13#include "core/hle/service/ldr/ldr.h" 17#include "core/hle/service/ldr/ldr.h"
14#include "core/hle/service/service.h" 18#include "core/hle/service/service.h"
15#include "core/loader/nro.h" 19#include "core/loader/nro.h"
20#include "core/memory.h"
16 21
17namespace Service::LDR { 22namespace Service::LDR {
18 23
24constexpr ResultCode ERROR_INSUFFICIENT_ADDRESS_SPACE{ErrorModule::RO, 2};
25
19constexpr ResultCode ERROR_INVALID_MEMORY_STATE{ErrorModule::Loader, 51}; 26constexpr ResultCode ERROR_INVALID_MEMORY_STATE{ErrorModule::Loader, 51};
20constexpr ResultCode ERROR_INVALID_NRO{ErrorModule::Loader, 52}; 27constexpr ResultCode ERROR_INVALID_NRO{ErrorModule::Loader, 52};
21constexpr ResultCode ERROR_INVALID_NRR{ErrorModule::Loader, 53}; 28constexpr ResultCode ERROR_INVALID_NRR{ErrorModule::Loader, 53};
@@ -29,7 +36,61 @@ constexpr ResultCode ERROR_INVALID_NRO_ADDRESS{ErrorModule::Loader, 84};
29constexpr ResultCode ERROR_INVALID_NRR_ADDRESS{ErrorModule::Loader, 85}; 36constexpr ResultCode ERROR_INVALID_NRR_ADDRESS{ErrorModule::Loader, 85};
30constexpr ResultCode ERROR_NOT_INITIALIZED{ErrorModule::Loader, 87}; 37constexpr ResultCode ERROR_NOT_INITIALIZED{ErrorModule::Loader, 87};
31 38
32constexpr u64 MAXIMUM_LOADED_RO = 0x40; 39constexpr std::size_t MAXIMUM_LOADED_RO{0x40};
40constexpr std::size_t MAXIMUM_MAP_RETRIES{0x200};
41
42struct NRRHeader {
43 u32_le magic;
44 INSERT_PADDING_BYTES(12);
45 u64_le title_id_mask;
46 u64_le title_id_pattern;
47 INSERT_PADDING_BYTES(16);
48 std::array<u8, 0x100> modulus;
49 std::array<u8, 0x100> signature_1;
50 std::array<u8, 0x100> signature_2;
51 u64_le title_id;
52 u32_le size;
53 INSERT_PADDING_BYTES(4);
54 u32_le hash_offset;
55 u32_le hash_count;
56 INSERT_PADDING_BYTES(8);
57};
58static_assert(sizeof(NRRHeader) == 0x350, "NRRHeader has incorrect size.");
59
60struct NROHeader {
61 INSERT_PADDING_WORDS(1);
62 u32_le mod_offset;
63 INSERT_PADDING_WORDS(2);
64 u32_le magic;
65 u32_le version;
66 u32_le nro_size;
67 u32_le flags;
68 u32_le text_offset;
69 u32_le text_size;
70 u32_le ro_offset;
71 u32_le ro_size;
72 u32_le rw_offset;
73 u32_le rw_size;
74 u32_le bss_size;
75 INSERT_PADDING_WORDS(1);
76 std::array<u8, 0x20> build_id;
77 INSERT_PADDING_BYTES(0x20);
78};
79static_assert(sizeof(NROHeader) == 0x80, "NROHeader has invalid size.");
80
81using SHA256Hash = std::array<u8, 0x20>;
82
83struct NROInfo {
84 SHA256Hash hash{};
85 VAddr nro_address{};
86 std::size_t nro_size{};
87 VAddr bss_address{};
88 std::size_t bss_size{};
89 std::size_t text_size{};
90 std::size_t ro_size{};
91 std::size_t data_size{};
92 VAddr src_addr{};
93};
33 94
34class DebugMonitor final : public ServiceFramework<DebugMonitor> { 95class DebugMonitor final : public ServiceFramework<DebugMonitor> {
35public: 96public:
@@ -84,7 +145,7 @@ public:
84 {0, &RelocatableObject::LoadNro, "LoadNro"}, 145 {0, &RelocatableObject::LoadNro, "LoadNro"},
85 {1, &RelocatableObject::UnloadNro, "UnloadNro"}, 146 {1, &RelocatableObject::UnloadNro, "UnloadNro"},
86 {2, &RelocatableObject::LoadNrr, "LoadNrr"}, 147 {2, &RelocatableObject::LoadNrr, "LoadNrr"},
87 {3, &RelocatableObject::UnloadNrr, "UnloadNrr"}, 148 {3, nullptr, "UnloadNrr"},
88 {4, &RelocatableObject::Initialize, "Initialize"}, 149 {4, &RelocatableObject::Initialize, "Initialize"},
89 {10, nullptr, "LoadNrrEx"}, 150 {10, nullptr, "LoadNrrEx"},
90 }; 151 };
@@ -190,46 +251,125 @@ public:
190 rb.Push(RESULT_SUCCESS); 251 rb.Push(RESULT_SUCCESS);
191 } 252 }
192 253
193 void UnloadNrr(Kernel::HLERequestContext& ctx) { 254 bool ValidateRegionForMap(Kernel::Memory::PageTable& page_table, VAddr start,
194 if (!initialized) { 255 std::size_t size) const {
195 LOG_ERROR(Service_LDR, "LDR:RO not initialized before use!"); 256 constexpr std::size_t padding_size{4 * Kernel::Memory::PageSize};
196 IPC::ResponseBuilder rb{ctx, 2}; 257 const auto start_info{page_table.QueryInfo(start - 1)};
197 rb.Push(ERROR_NOT_INITIALIZED); 258
198 return; 259 if (start_info.state != Kernel::Memory::MemoryState::Free) {
260 return {};
199 } 261 }
200 262
201 struct Parameters { 263 if (start_info.GetAddress() > (start - padding_size)) {
202 u64_le process_id; 264 return {};
203 u64_le nrr_address; 265 }
204 };
205 266
206 IPC::RequestParser rp{ctx}; 267 const auto end_info{page_table.QueryInfo(start + size)};
207 const auto [process_id, nrr_address] = rp.PopRaw<Parameters>();
208 268
209 LOG_DEBUG(Service_LDR, "called with process_id={:016X}, nrr_addr={:016X}", process_id, 269 if (end_info.state != Kernel::Memory::MemoryState::Free) {
210 nrr_address); 270 return {};
271 }
211 272
212 if (!Common::Is4KBAligned(nrr_address)) { 273 return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize());
213 LOG_ERROR(Service_LDR, "NRR Address has invalid alignment (actual {:016X})!", 274 }
214 nrr_address); 275
215 IPC::ResponseBuilder rb{ctx, 2}; 276 VAddr GetRandomMapRegion(const Kernel::Memory::PageTable& page_table, std::size_t size) const {
216 rb.Push(ERROR_INVALID_ALIGNMENT); 277 VAddr addr{};
217 return; 278 const std::size_t end_pages{(page_table.GetAliasCodeRegionSize() - size) >>
279 Kernel::Memory::PageBits};
280 do {
281 addr = page_table.GetAliasCodeRegionStart() +
282 (Kernel::Memory::SystemControl::GenerateRandomRange(0, end_pages)
283 << Kernel::Memory::PageBits);
284 } while (!page_table.IsInsideAddressSpace(addr, size) ||
285 page_table.IsInsideHeapRegion(addr, size) ||
286 page_table.IsInsideAliasRegion(addr, size));
287 return addr;
288 }
289
290 ResultVal<VAddr> MapProcessCodeMemory(Kernel::Process* process, VAddr baseAddress,
291 u64 size) const {
 292 for (std::size_t retry{}; retry < MAXIMUM_MAP_RETRIES; retry++) {
293 auto& page_table{process->PageTable()};
294 const VAddr addr{GetRandomMapRegion(page_table, size)};
295 const ResultCode result{page_table.MapProcessCodeMemory(addr, baseAddress, size)};
296
297 if (result == Kernel::ERR_INVALID_ADDRESS_STATE) {
298 continue;
299 }
300
301 CASCADE_CODE(result);
302
303 if (ValidateRegionForMap(page_table, addr, size)) {
304 return MakeResult<VAddr>(addr);
305 }
218 } 306 }
219 307
220 const auto iter = nrr.find(nrr_address); 308 return ERROR_INSUFFICIENT_ADDRESS_SPACE;
221 if (iter == nrr.end()) { 309 }
222 LOG_ERROR(Service_LDR, 310
223 "Attempting to unload NRR which has not been loaded! (addr={:016X})", 311 ResultVal<VAddr> MapNro(Kernel::Process* process, VAddr nro_addr, std::size_t nro_size,
224 nrr_address); 312 VAddr bss_addr, std::size_t bss_size, std::size_t size) const {
225 IPC::ResponseBuilder rb{ctx, 2}; 313
 226 rb.Push(ERROR_INVALID_NRR_ADDRESS); 314 for (std::size_t retry{}; retry < MAXIMUM_MAP_RETRIES; retry++) {
227 return; 315 auto& page_table{process->PageTable()};
316 VAddr addr{};
317
318 CASCADE_RESULT(addr, MapProcessCodeMemory(process, nro_addr, nro_size));
319
320 if (bss_size) {
321 auto block_guard = detail::ScopeExit([&] {
322 page_table.UnmapProcessCodeMemory(addr + nro_size, bss_addr, bss_size);
323 page_table.UnmapProcessCodeMemory(addr, nro_addr, nro_size);
324 });
325
326 const ResultCode result{
327 page_table.MapProcessCodeMemory(addr + nro_size, bss_addr, bss_size)};
328
329 if (result == Kernel::ERR_INVALID_ADDRESS_STATE) {
330 continue;
331 }
332
333 if (result.IsError()) {
334 return result;
335 }
336
337 block_guard.Cancel();
338 }
339
340 if (ValidateRegionForMap(page_table, addr, size)) {
341 return MakeResult<VAddr>(addr);
342 }
228 } 343 }
229 344
230 nrr.erase(iter); 345 return ERROR_INSUFFICIENT_ADDRESS_SPACE;
231 IPC::ResponseBuilder rb{ctx, 2}; 346 }
232 rb.Push(RESULT_SUCCESS); 347
348 ResultCode LoadNro(Kernel::Process* process, const NROHeader& nro_header, VAddr nro_addr,
349 VAddr start) const {
350 const VAddr text_start{start + nro_header.text_offset};
351 const VAddr ro_start{start + nro_header.ro_offset};
352 const VAddr data_start{start + nro_header.rw_offset};
353 const VAddr bss_start{data_start + nro_header.rw_size};
354 const VAddr bss_end_addr{
355 Common::AlignUp(bss_start + nro_header.bss_size, Kernel::Memory::PageSize)};
356
357 auto CopyCode{[&](VAddr src_addr, VAddr dst_addr, u64 size) {
358 std::vector<u8> source_data(size);
359 system.Memory().ReadBlock(src_addr, source_data.data(), source_data.size());
360 system.Memory().WriteBlock(dst_addr, source_data.data(), source_data.size());
361 }};
362 CopyCode(nro_addr + nro_header.text_offset, text_start, nro_header.text_size);
363 CopyCode(nro_addr + nro_header.ro_offset, ro_start, nro_header.ro_size);
364 CopyCode(nro_addr + nro_header.rw_offset, data_start, nro_header.rw_size);
365
366 CASCADE_CODE(process->PageTable().SetCodeMemoryPermission(
367 text_start, ro_start - text_start, Kernel::Memory::MemoryPermission::ReadAndExecute));
368 CASCADE_CODE(process->PageTable().SetCodeMemoryPermission(
369 ro_start, data_start - ro_start, Kernel::Memory::MemoryPermission::Read));
370
371 return process->PageTable().SetCodeMemoryPermission(
372 data_start, bss_end_addr - data_start, Kernel::Memory::MemoryPermission::ReadAndWrite);
233 } 373 }
234 374
235 void LoadNro(Kernel::HLERequestContext& ctx) { 375 void LoadNro(Kernel::HLERequestContext& ctx) {
@@ -317,9 +457,9 @@ public:
317 return; 457 return;
318 } 458 }
319 459
320 NROHeader header; 460 // Load and validate the NRO header
461 NROHeader header{};
321 std::memcpy(&header, nro_data.data(), sizeof(NROHeader)); 462 std::memcpy(&header, nro_data.data(), sizeof(NROHeader));
322
323 if (!IsValidNRO(header, nro_size, bss_size)) { 463 if (!IsValidNRO(header, nro_size, bss_size)) {
324 LOG_ERROR(Service_LDR, "NRO was invalid!"); 464 LOG_ERROR(Service_LDR, "NRO was invalid!");
325 IPC::ResponseBuilder rb{ctx, 2}; 465 IPC::ResponseBuilder rb{ctx, 2};
@@ -327,62 +467,48 @@ public:
327 return; 467 return;
328 } 468 }
329 469
330 // Load NRO as new executable module 470 // Map memory for the NRO
331 auto* process = system.CurrentProcess(); 471 const auto map_result{MapNro(system.CurrentProcess(), nro_address, nro_size, bss_address,
332 auto& vm_manager = process->VMManager(); 472 bss_size, nro_size + bss_size)};
333 auto map_address = vm_manager.FindFreeRegion(nro_size + bss_size); 473 if (map_result.Failed()) {
334
335 if (!map_address.Succeeded() ||
336 *map_address + nro_size + bss_size > vm_manager.GetAddressSpaceEndAddress()) {
337
338 LOG_ERROR(Service_LDR,
339 "General error while allocation memory or no available memory to allocate!");
340 IPC::ResponseBuilder rb{ctx, 2}; 474 IPC::ResponseBuilder rb{ctx, 2};
341 rb.Push(ERROR_INVALID_MEMORY_STATE); 475 rb.Push(map_result.Code());
342 return;
343 } 476 }
344 477
345 // Mark text and read-only region as ModuleCode 478 // Load the NRO into the mapped memory
346 ASSERT(vm_manager 479 if (const auto result{LoadNro(system.CurrentProcess(), header, nro_address, *map_result)};
347 .MirrorMemory(*map_address, nro_address, header.text_size + header.ro_size, 480 result.IsError()) {
348 Kernel::MemoryState::ModuleCode) 481 IPC::ResponseBuilder rb{ctx, 2};
349 .IsSuccess()); 482 rb.Push(map_result.Code());
 350 // Mark read/write region as ModuleCodeData, which is necessary if this region is used for 483 rb.Push(result);
351 // TransferMemory (e.g. Final Fantasy VIII Remastered does this)
352 ASSERT(vm_manager
353 .MirrorMemory(*map_address + header.rw_offset, nro_address + header.rw_offset,
354 header.rw_size, Kernel::MemoryState::ModuleCodeData)
355 .IsSuccess());
356 // Revoke permissions from the old memory region
357 ASSERT(vm_manager.ReprotectRange(nro_address, nro_size, Kernel::VMAPermission::None)
358 .IsSuccess());
359
360 if (bss_size > 0) {
361 // Mark BSS region as ModuleCodeData, which is necessary if this region is used for
362 // TransferMemory (e.g. Final Fantasy VIII Remastered does this)
363 ASSERT(vm_manager
364 .MirrorMemory(*map_address + nro_size, bss_address, bss_size,
365 Kernel::MemoryState::ModuleCodeData)
366 .IsSuccess());
367 ASSERT(vm_manager.ReprotectRange(bss_address, bss_size, Kernel::VMAPermission::None)
368 .IsSuccess());
369 } 483 }
370 484
371 vm_manager.ReprotectRange(*map_address, header.text_size, 485 // Track the loaded NRO
372 Kernel::VMAPermission::ReadExecute); 486 nro.insert_or_assign(*map_result, NROInfo{hash, *map_result, nro_size, bss_address,
373 vm_manager.ReprotectRange(*map_address + header.ro_offset, header.ro_size, 487 bss_size, header.text_size, header.ro_size,
374 Kernel::VMAPermission::Read); 488 header.rw_size, nro_address});
375 vm_manager.ReprotectRange(*map_address + header.rw_offset, header.rw_size,
376 Kernel::VMAPermission::ReadWrite);
377 489
490 // Invalidate JIT caches for the newly mapped process code
378 system.InvalidateCpuInstructionCaches(); 491 system.InvalidateCpuInstructionCaches();
379 492
380 nro.insert_or_assign(*map_address,
381 NROInfo{hash, nro_address, nro_size, bss_address, bss_size});
382
383 IPC::ResponseBuilder rb{ctx, 4}; 493 IPC::ResponseBuilder rb{ctx, 4};
384 rb.Push(RESULT_SUCCESS); 494 rb.Push(RESULT_SUCCESS);
385 rb.Push(*map_address); 495 rb.Push(*map_result);
496 }
497
498 ResultCode UnmapNro(const NROInfo& info) {
499 // Each region must be unmapped separately to validate memory state
500 auto& page_table{system.CurrentProcess()->PageTable()};
501 CASCADE_CODE(page_table.UnmapProcessCodeMemory(info.nro_address + info.text_size +
502 info.ro_size + info.data_size,
503 info.bss_address, info.bss_size));
504 CASCADE_CODE(page_table.UnmapProcessCodeMemory(
505 info.nro_address + info.text_size + info.ro_size,
506 info.src_addr + info.text_size + info.ro_size, info.data_size));
507 CASCADE_CODE(page_table.UnmapProcessCodeMemory(
508 info.nro_address + info.text_size, info.src_addr + info.text_size, info.ro_size));
509 CASCADE_CODE(
510 page_table.UnmapProcessCodeMemory(info.nro_address, info.src_addr, info.text_size));
511 return RESULT_SUCCESS;
386 } 512 }
387 513
388 void UnloadNro(Kernel::HLERequestContext& ctx) { 514 void UnloadNro(Kernel::HLERequestContext& ctx) {
@@ -422,30 +548,15 @@ public:
422 return; 548 return;
423 } 549 }
424 550
425 auto& vm_manager = system.CurrentProcess()->VMManager(); 551 const auto result{UnmapNro(iter->second)};
426 const auto& nro_info = iter->second;
427
428 // Unmap the mirrored memory
429 ASSERT(
430 vm_manager.UnmapRange(nro_address, nro_info.nro_size + nro_info.bss_size).IsSuccess());
431
432 // Reprotect the source memory
433 ASSERT(vm_manager
434 .ReprotectRange(nro_info.nro_address, nro_info.nro_size,
435 Kernel::VMAPermission::ReadWrite)
436 .IsSuccess());
437 if (nro_info.bss_size > 0) {
438 ASSERT(vm_manager
439 .ReprotectRange(nro_info.bss_address, nro_info.bss_size,
440 Kernel::VMAPermission::ReadWrite)
441 .IsSuccess());
442 }
443 552
444 system.InvalidateCpuInstructionCaches(); 553 system.InvalidateCpuInstructionCaches();
445 554
446 nro.erase(iter); 555 nro.erase(iter);
556
447 IPC::ResponseBuilder rb{ctx, 2}; 557 IPC::ResponseBuilder rb{ctx, 2};
448 rb.Push(RESULT_SUCCESS); 558
559 rb.Push(result);
449 } 560 }
450 561
451 void Initialize(Kernel::HLERequestContext& ctx) { 562 void Initialize(Kernel::HLERequestContext& ctx) {
@@ -458,56 +569,7 @@ public:
458 } 569 }
459 570
460private: 571private:
461 using SHA256Hash = std::array<u8, 0x20>; 572 bool initialized{};
462
463 struct NROHeader {
464 INSERT_PADDING_WORDS(1);
465 u32_le mod_offset;
466 INSERT_PADDING_WORDS(2);
467 u32_le magic;
468 u32_le version;
469 u32_le nro_size;
470 u32_le flags;
471 u32_le text_offset;
472 u32_le text_size;
473 u32_le ro_offset;
474 u32_le ro_size;
475 u32_le rw_offset;
476 u32_le rw_size;
477 u32_le bss_size;
478 INSERT_PADDING_WORDS(1);
479 std::array<u8, 0x20> build_id;
480 INSERT_PADDING_BYTES(0x20);
481 };
482 static_assert(sizeof(NROHeader) == 0x80, "NROHeader has invalid size.");
483
484 struct NRRHeader {
485 u32_le magic;
486 INSERT_PADDING_BYTES(12);
487 u64_le title_id_mask;
488 u64_le title_id_pattern;
489 INSERT_PADDING_BYTES(16);
490 std::array<u8, 0x100> modulus;
491 std::array<u8, 0x100> signature_1;
492 std::array<u8, 0x100> signature_2;
493 u64_le title_id;
494 u32_le size;
495 INSERT_PADDING_BYTES(4);
496 u32_le hash_offset;
497 u32_le hash_count;
498 INSERT_PADDING_BYTES(8);
499 };
500 static_assert(sizeof(NRRHeader) == 0x350, "NRRHeader has incorrect size.");
501
502 struct NROInfo {
503 SHA256Hash hash;
504 VAddr nro_address;
505 u64 nro_size;
506 VAddr bss_address;
507 u64 bss_size;
508 };
509
510 bool initialized = false;
511 573
512 std::map<VAddr, NROInfo> nro; 574 std::map<VAddr, NROInfo> nro;
513 std::map<VAddr, std::vector<SHA256Hash>> nrr; 575 std::map<VAddr, std::vector<SHA256Hash>> nrr;
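The two maps above carry the service's bookkeeping; a small sketch of the contract (mapped_base is a hypothetical address, not a value from this diff):

    // LoadNro registers an NRO under the base address it was mapped at:
    nro.insert_or_assign(mapped_base, info);
    // UnloadNro must be handed that same base to find and undo the mapping:
    if (const auto iter{nro.find(mapped_base)}; iter != nro.end()) {
        const auto result{UnmapNro(iter->second)}; // reported back over IPC
        nro.erase(iter);
    }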
diff --git a/src/core/hle/service/lm/lm.cpp b/src/core/hle/service/lm/lm.cpp
index 346c8f899..dec96b771 100644
--- a/src/core/hle/service/lm/lm.cpp
+++ b/src/core/hle/service/lm/lm.cpp
@@ -17,7 +17,7 @@ namespace Service::LM {
17 17
18class ILogger final : public ServiceFramework<ILogger> { 18class ILogger final : public ServiceFramework<ILogger> {
19public: 19public:
20 explicit ILogger(Manager& manager_, Memory::Memory& memory_) 20 explicit ILogger(Manager& manager_, Core::Memory::Memory& memory_)
21 : ServiceFramework("ILogger"), manager{manager_}, memory{memory_} { 21 : ServiceFramework("ILogger"), manager{manager_}, memory{memory_} {
22 static const FunctionInfo functions[] = { 22 static const FunctionInfo functions[] = {
23 {0, &ILogger::Log, "Log"}, 23 {0, &ILogger::Log, "Log"},
@@ -75,12 +75,12 @@ private:
75 } 75 }
76 76
77 Manager& manager; 77 Manager& manager;
78 Memory::Memory& memory; 78 Core::Memory::Memory& memory;
79}; 79};
80 80
81class LM final : public ServiceFramework<LM> { 81class LM final : public ServiceFramework<LM> {
82public: 82public:
83 explicit LM(Manager& manager_, Memory::Memory& memory_) 83 explicit LM(Manager& manager_, Core::Memory::Memory& memory_)
84 : ServiceFramework{"lm"}, manager{manager_}, memory{memory_} { 84 : ServiceFramework{"lm"}, manager{manager_}, memory{memory_} {
85 // clang-format off 85 // clang-format off
86 static const FunctionInfo functions[] = { 86 static const FunctionInfo functions[] = {
@@ -101,7 +101,7 @@ private:
101 } 101 }
102 102
103 Manager& manager; 103 Manager& manager;
104 Memory::Memory& memory; 104 Core::Memory::Memory& memory;
105}; 105};
106 106
107void InstallInterfaces(Core::System& system) { 107void InstallInterfaces(Core::System& system) {
diff --git a/src/core/hle/service/ns/pl_u.cpp b/src/core/hle/service/ns/pl_u.cpp
index 8da4e52c5..ab1746d28 100644
--- a/src/core/hle/service/ns/pl_u.cpp
+++ b/src/core/hle/service/ns/pl_u.cpp
@@ -19,6 +19,7 @@
19#include "core/file_sys/romfs.h" 19#include "core/file_sys/romfs.h"
20#include "core/file_sys/system_archive/system_archive.h" 20#include "core/file_sys/system_archive/system_archive.h"
21#include "core/hle/ipc_helpers.h" 21#include "core/hle/ipc_helpers.h"
22#include "core/hle/kernel/kernel.h"
22#include "core/hle/kernel/physical_memory.h" 23#include "core/hle/kernel/physical_memory.h"
23#include "core/hle/kernel/shared_memory.h" 24#include "core/hle/kernel/shared_memory.h"
24#include "core/hle/service/filesystem/filesystem.h" 25#include "core/hle/service/filesystem/filesystem.h"
@@ -265,16 +266,13 @@ void PL_U::GetSharedMemoryAddressOffset(Kernel::HLERequestContext& ctx) {
265void PL_U::GetSharedMemoryNativeHandle(Kernel::HLERequestContext& ctx) { 266void PL_U::GetSharedMemoryNativeHandle(Kernel::HLERequestContext& ctx) {
266 // Map backing memory for the font data 267 // Map backing memory for the font data
267 LOG_DEBUG(Service_NS, "called"); 268 LOG_DEBUG(Service_NS, "called");
268 system.CurrentProcess()->VMManager().MapMemoryBlock(SHARED_FONT_MEM_VADDR, impl->shared_font, 0,
269 SHARED_FONT_MEM_SIZE,
270 Kernel::MemoryState::Shared);
271 269
272 // Create shared font memory object 270 // Create shared font memory object
273 auto& kernel = system.Kernel(); 271 auto& kernel = system.Kernel();
274 impl->shared_font_mem = Kernel::SharedMemory::Create( 272 impl->shared_font_mem = SharedFrom(&kernel.GetFontSharedMem());
275 kernel, system.CurrentProcess(), SHARED_FONT_MEM_SIZE, Kernel::MemoryPermission::ReadWrite, 273
276 Kernel::MemoryPermission::Read, SHARED_FONT_MEM_VADDR, Kernel::MemoryRegion::BASE, 274 std::memcpy(impl->shared_font_mem->GetPointer(), impl->shared_font->data(),
277 "PL_U:shared_font_mem"); 275 impl->shared_font->size());
278 276
279 IPC::ResponseBuilder rb{ctx, 2, 1}; 277 IPC::ResponseBuilder rb{ctx, 2, 1};
280 rb.Push(RESULT_SUCCESS); 278 rb.Push(RESULT_SUCCESS);
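The handler now publishes the font through kernel-owned shared memory rather than creating and mapping its own block. A minimal sketch of that pattern, assuming GetPointer returns the host-visible backing of the region (font_data stands in for the decrypted font image):

    auto shared_font = SharedFrom(&system.Kernel().GetFontSharedMem()); // kernel owns the object
    std::memcpy(shared_font->GetPointer(), font_data.data(), font_data.size());
    // Guests observe the copy through the native handle returned below.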
diff --git a/src/core/hle/service/time/interface.cpp b/src/core/hle/service/time/interface.cpp
index f509653a3..ba8fd6152 100644
--- a/src/core/hle/service/time/interface.cpp
+++ b/src/core/hle/service/time/interface.cpp
@@ -29,7 +29,7 @@ Time::Time(std::shared_ptr<Module> module, Core::System& system, const char* nam
29 {300, &Time::CalculateMonotonicSystemClockBaseTimePoint, "CalculateMonotonicSystemClockBaseTimePoint"}, 29 {300, &Time::CalculateMonotonicSystemClockBaseTimePoint, "CalculateMonotonicSystemClockBaseTimePoint"},
30 {400, &Time::GetClockSnapshot, "GetClockSnapshot"}, 30 {400, &Time::GetClockSnapshot, "GetClockSnapshot"},
31 {401, &Time::GetClockSnapshotFromSystemClockContext, "GetClockSnapshotFromSystemClockContext"}, 31 {401, &Time::GetClockSnapshotFromSystemClockContext, "GetClockSnapshotFromSystemClockContext"},
32 {500, nullptr, "CalculateStandardUserSystemClockDifferenceByUser"}, 32 {500, &Time::CalculateStandardUserSystemClockDifferenceByUser, "CalculateStandardUserSystemClockDifferenceByUser"},
33 {501, &Time::CalculateSpanBetween, "CalculateSpanBetween"}, 33 {501, &Time::CalculateSpanBetween, "CalculateSpanBetween"},
34 }; 34 };
35 // clang-format on 35 // clang-format on
diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp
index ce859f18d..e722886de 100644
--- a/src/core/hle/service/time/time.cpp
+++ b/src/core/hle/service/time/time.cpp
@@ -308,6 +308,29 @@ void Module::Interface::GetClockSnapshotFromSystemClockContext(Kernel::HLEReques
308 ctx.WriteBuffer(&clock_snapshot, sizeof(Clock::ClockSnapshot)); 308 ctx.WriteBuffer(&clock_snapshot, sizeof(Clock::ClockSnapshot));
309} 309}
310 310
311void Module::Interface::CalculateStandardUserSystemClockDifferenceByUser(
312 Kernel::HLERequestContext& ctx) {
313 LOG_DEBUG(Service_Time, "called");
314
315 IPC::RequestParser rp{ctx};
316 const auto snapshot_a = rp.PopRaw<Clock::ClockSnapshot>();
317 const auto snapshot_b = rp.PopRaw<Clock::ClockSnapshot>();
318
319 auto time_span_type{Clock::TimeSpanType::FromSeconds(snapshot_b.user_context.offset -
320 snapshot_a.user_context.offset)};
321
322 if ((snapshot_b.user_context.steady_time_point.clock_source_id !=
323 snapshot_a.user_context.steady_time_point.clock_source_id) ||
324 (snapshot_b.is_automatic_correction_enabled &&
325 snapshot_a.is_automatic_correction_enabled)) {
326 time_span_type.nanoseconds = 0;
327 }
328
329 IPC::ResponseBuilder rb{ctx, (sizeof(s64) / 4) + 2};
330 rb.Push(RESULT_SUCCESS);
331 rb.PushRaw(time_span_type.nanoseconds);
332}
333
311void Module::Interface::CalculateSpanBetween(Kernel::HLERequestContext& ctx) { 334void Module::Interface::CalculateSpanBetween(Kernel::HLERequestContext& ctx) {
312 LOG_DEBUG(Service_Time, "called"); 335 LOG_DEBUG(Service_Time, "called");
313 336
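A worked example of the span computation above, assuming offset is in seconds and FromSeconds scales to nanoseconds as the name implies:

    // User-clock offsets of 30 s and 120 s:
    //   FromSeconds(120 - 30).nanoseconds == 90'000'000'000
    // The span is forced to zero when the snapshots' steady clocks report
    // different source ids, or when both snapshots have automatic correction
    // enabled, since the raw offsets are not comparable in those cases.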
diff --git a/src/core/hle/service/time/time.h b/src/core/hle/service/time/time.h
index 351988468..41f3002e9 100644
--- a/src/core/hle/service/time/time.h
+++ b/src/core/hle/service/time/time.h
@@ -32,6 +32,7 @@ public:
32 void CalculateMonotonicSystemClockBaseTimePoint(Kernel::HLERequestContext& ctx); 32 void CalculateMonotonicSystemClockBaseTimePoint(Kernel::HLERequestContext& ctx);
33 void GetClockSnapshot(Kernel::HLERequestContext& ctx); 33 void GetClockSnapshot(Kernel::HLERequestContext& ctx);
34 void GetClockSnapshotFromSystemClockContext(Kernel::HLERequestContext& ctx); 34 void GetClockSnapshotFromSystemClockContext(Kernel::HLERequestContext& ctx);
35 void CalculateStandardUserSystemClockDifferenceByUser(Kernel::HLERequestContext& ctx);
35 void CalculateSpanBetween(Kernel::HLERequestContext& ctx); 36 void CalculateSpanBetween(Kernel::HLERequestContext& ctx);
36 void GetSharedMemoryNativeHandle(Kernel::HLERequestContext& ctx); 37 void GetSharedMemoryNativeHandle(Kernel::HLERequestContext& ctx);
37 38
diff --git a/src/core/hle/service/time/time_sharedmemory.cpp b/src/core/hle/service/time/time_sharedmemory.cpp
index fdaef233f..999ec1e51 100644
--- a/src/core/hle/service/time/time_sharedmemory.cpp
+++ b/src/core/hle/service/time/time_sharedmemory.cpp
@@ -6,6 +6,7 @@
6#include "core/core_timing.h" 6#include "core/core_timing.h"
7#include "core/core_timing_util.h" 7#include "core/core_timing_util.h"
8#include "core/hardware_properties.h" 8#include "core/hardware_properties.h"
9#include "core/hle/kernel/kernel.h"
9#include "core/hle/service/time/clock_types.h" 10#include "core/hle/service/time/clock_types.h"
10#include "core/hle/service/time/steady_clock_core.h" 11#include "core/hle/service/time/steady_clock_core.h"
11#include "core/hle/service/time/time_sharedmemory.h" 12#include "core/hle/service/time/time_sharedmemory.h"
@@ -15,9 +16,7 @@ namespace Service::Time {
15static constexpr std::size_t SHARED_MEMORY_SIZE{0x1000}; 16static constexpr std::size_t SHARED_MEMORY_SIZE{0x1000};
16 17
17SharedMemory::SharedMemory(Core::System& system) : system(system) { 18SharedMemory::SharedMemory(Core::System& system) : system(system) {
18 shared_memory_holder = Kernel::SharedMemory::Create( 19 shared_memory_holder = SharedFrom(&system.Kernel().GetTimeSharedMem());
19 system.Kernel(), nullptr, SHARED_MEMORY_SIZE, Kernel::MemoryPermission::ReadWrite,
20 Kernel::MemoryPermission::Read, 0, Kernel::MemoryRegion::BASE, "Time:SharedMemory");
21 std::memset(shared_memory_holder->GetPointer(), 0, SHARED_MEMORY_SIZE); 20 std::memset(shared_memory_holder->GetPointer(), 0, SHARED_MEMORY_SIZE);
22} 21}
23 22
diff --git a/src/core/hle/service/time/time_zone_manager.cpp b/src/core/hle/service/time/time_zone_manager.cpp
index 07b553a43..c8159bcd5 100644
--- a/src/core/hle/service/time/time_zone_manager.cpp
+++ b/src/core/hle/service/time/time_zone_manager.cpp
@@ -309,7 +309,7 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) {
309 offset = GetTZName(name, offset); 309 offset = GetTZName(name, offset);
310 std_len = offset; 310 std_len = offset;
311 } 311 }
312 if (!std_len) { 312 if (std_len == 0) {
313 return {}; 313 return {};
314 } 314 }
315 if (!GetOffset(name, offset, std_offset)) { 315 if (!GetOffset(name, offset, std_offset)) {
@@ -320,7 +320,7 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) {
320 int dest_len{}; 320 int dest_len{};
321 int dest_offset{}; 321 int dest_offset{};
322 const char* dest_name{name + offset}; 322 const char* dest_name{name + offset};
323 if (rule.chars.size() < char_count) { 323 if (rule.chars.size() < std::size_t(char_count)) {
324 return {}; 324 return {};
325 } 325 }
326 326
@@ -343,7 +343,7 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) {
343 return {}; 343 return {};
344 } 344 }
345 char_count += dest_len + 1; 345 char_count += dest_len + 1;
346 if (rule.chars.size() < char_count) { 346 if (rule.chars.size() < std::size_t(char_count)) {
347 return {}; 347 return {};
348 } 348 }
349 if (name[offset] != '\0' && name[offset] != ',' && name[offset] != ';') { 349 if (name[offset] != '\0' && name[offset] != ',' && name[offset] != ';') {
@@ -414,7 +414,7 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) {
414 if (is_reversed || 414 if (is_reversed ||
415 (start_time < end_time && 415 (start_time < end_time &&
416 (end_time - start_time < (year_seconds + (std_offset - dest_offset))))) { 416 (end_time - start_time < (year_seconds + (std_offset - dest_offset))))) {
417 if (rule.ats.size() - 2 < time_count) { 417 if (rule.ats.size() - 2 < std::size_t(time_count)) {
418 break; 418 break;
419 } 419 }
420 420
@@ -609,7 +609,7 @@ static bool ParseTimeZoneBinary(TimeZoneRule& time_zone_rule, FileSys::VirtualFi
609 } 609 }
610 610
611 const u64 position{(read_offset - sizeof(TzifHeader))}; 611 const u64 position{(read_offset - sizeof(TzifHeader))};
612 const std::size_t bytes_read{vfs_file->GetSize() - sizeof(TzifHeader) - position}; 612 const s64 bytes_read = s64(vfs_file->GetSize() - sizeof(TzifHeader) - position);
613 if (bytes_read < 0) { 613 if (bytes_read < 0) {
614 return {}; 614 return {};
615 } 615 }
@@ -621,11 +621,11 @@ static bool ParseTimeZoneBinary(TimeZoneRule& time_zone_rule, FileSys::VirtualFi
621 std::array<char, time_zone_name_max + 1> temp_name{}; 621 std::array<char, time_zone_name_max + 1> temp_name{};
622 vfs_file->ReadArray(temp_name.data(), bytes_read, read_offset); 622 vfs_file->ReadArray(temp_name.data(), bytes_read, read_offset);
623 if (bytes_read > 2 && temp_name[0] == '\n' && temp_name[bytes_read - 1] == '\n' && 623 if (bytes_read > 2 && temp_name[0] == '\n' && temp_name[bytes_read - 1] == '\n' &&
624 time_zone_rule.type_count + 2 <= time_zone_rule.ttis.size()) { 624 std::size_t(time_zone_rule.type_count) + 2 <= time_zone_rule.ttis.size()) {
625 temp_name[bytes_read - 1] = '\0'; 625 temp_name[bytes_read - 1] = '\0';
626 626
627 std::array<char, time_zone_name_max> name{}; 627 std::array<char, time_zone_name_max> name{};
628 std::memcpy(name.data(), temp_name.data() + 1, bytes_read - 1); 628 std::memcpy(name.data(), temp_name.data() + 1, std::size_t(bytes_read - 1));
629 629
630 TimeZoneRule temp_rule; 630 TimeZoneRule temp_rule;
631 if (ParsePosixName(name.data(), temp_rule)) { 631 if (ParsePosixName(name.data(), temp_rule)) {
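The std::size_t casts in the hunks above make the signed-to-unsigned conversion in these comparisons explicit (and silence sign-compare warnings); the conversion itself already happens implicitly. A standalone illustration with hypothetical values:

    #include <cstddef>
    #include <cstdio>

    int main() {
        const std::size_t capacity = 4;
        const int count = -1;
        // count converts to std::size_t and wraps to SIZE_MAX, so the
        // comparison is true and the caller's early-out path is taken:
        std::printf("%d\n", capacity < static_cast<std::size_t>(count)); // prints 1
    }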
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index fdc62d05b..7f109f4eb 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -101,8 +101,8 @@ public:
101 } 101 }
102 102
103 std::u16string ReadInterfaceToken() { 103 std::u16string ReadInterfaceToken() {
104 u32 unknown = Read<u32_le>(); 104 [[maybe_unused]] const u32 unknown = Read<u32_le>();
105 u32 length = Read<u32_le>(); 105 const u32 length = Read<u32_le>();
106 106
107 std::u16string token{}; 107 std::u16string token{};
108 108
diff --git a/src/core/loader/deconstructed_rom_directory.cpp b/src/core/loader/deconstructed_rom_directory.cpp
index 53559e8b1..134e83412 100644
--- a/src/core/loader/deconstructed_rom_directory.cpp
+++ b/src/core/loader/deconstructed_rom_directory.cpp
@@ -14,6 +14,7 @@
14#include "core/file_sys/romfs_factory.h" 14#include "core/file_sys/romfs_factory.h"
15#include "core/gdbstub/gdbstub.h" 15#include "core/gdbstub/gdbstub.h"
16#include "core/hle/kernel/kernel.h" 16#include "core/hle/kernel/kernel.h"
17#include "core/hle/kernel/memory/page_table.h"
17#include "core/hle/kernel/process.h" 18#include "core/hle/kernel/process.h"
18#include "core/hle/service/filesystem/filesystem.h" 19#include "core/hle/service/filesystem/filesystem.h"
19#include "core/loader/deconstructed_rom_directory.h" 20#include "core/loader/deconstructed_rom_directory.h"
@@ -129,27 +130,47 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
129 } 130 }
130 metadata.Print(); 131 metadata.Print();
131 132
132 if (process.LoadFromMetadata(metadata).IsError()) { 133 const auto static_modules = {"rtld", "main", "subsdk0", "subsdk1", "subsdk2", "subsdk3",
133 return {ResultStatus::ErrorUnableToParseKernelMetadata, {}}; 134 "subsdk4", "subsdk5", "subsdk6", "subsdk7", "sdk"};
135
136 // Use the NSO module loader to figure out the code layout
137 std::size_t code_size{};
138 for (const auto& module : static_modules) {
139 const FileSys::VirtualFile module_file{dir->GetFile(module)};
140 if (!module_file) {
141 continue;
142 }
143
144 const bool should_pass_arguments{std::strcmp(module, "rtld") == 0};
145 const auto tentative_next_load_addr{AppLoader_NSO::LoadModule(
146 process, *module_file, code_size, should_pass_arguments, false)};
147 if (!tentative_next_load_addr) {
148 return {ResultStatus::ErrorLoadingNSO, {}};
149 }
150
151 code_size = *tentative_next_load_addr;
134 } 152 }
135 153
136 const FileSys::PatchManager pm(metadata.GetTitleID()); 154 // Set up the process code layout
155 if (process.LoadFromMetadata(metadata, code_size).IsError()) {
156 return {ResultStatus::ErrorUnableToParseKernelMetadata, {}};
157 }
137 158
138 // Load NSO modules 159 // Load NSO modules
139 modules.clear(); 160 modules.clear();
140 const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); 161 const VAddr base_address{process.PageTable().GetCodeRegionStart()};
141 VAddr next_load_addr = base_address; 162 VAddr next_load_addr{base_address};
142 for (const auto& module : {"rtld", "main", "subsdk0", "subsdk1", "subsdk2", "subsdk3", 163 const FileSys::PatchManager pm{metadata.GetTitleID()};
143 "subsdk4", "subsdk5", "subsdk6", "subsdk7", "sdk"}) { 164 for (const auto& module : static_modules) {
144 const FileSys::VirtualFile module_file = dir->GetFile(module); 165 const FileSys::VirtualFile module_file{dir->GetFile(module)};
145 if (module_file == nullptr) { 166 if (!module_file) {
146 continue; 167 continue;
147 } 168 }
148 169
149 const VAddr load_addr = next_load_addr; 170 const VAddr load_addr{next_load_addr};
150 const bool should_pass_arguments = std::strcmp(module, "rtld") == 0; 171 const bool should_pass_arguments{std::strcmp(module, "rtld") == 0};
151 const auto tentative_next_load_addr = 172 const auto tentative_next_load_addr{AppLoader_NSO::LoadModule(
152 AppLoader_NSO::LoadModule(process, *module_file, load_addr, should_pass_arguments, pm); 173 process, *module_file, load_addr, should_pass_arguments, true, pm)};
153 if (!tentative_next_load_addr) { 174 if (!tentative_next_load_addr) {
154 return {ResultStatus::ErrorLoadingNSO, {}}; 175 return {ResultStatus::ErrorLoadingNSO, {}};
155 } 176 }
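Condensed, the two-pass scheme introduced here: a dry pass with load_into_process = false measures the total code footprint, LoadFromMetadata reserves it, then the real pass maps each module. A sketch using the surrounding names (error handling elided):

    std::size_t code_size{};
    for (const auto& module : static_modules) {
        if (const auto file{dir->GetFile(module)}) {
            // Pass 1: LoadModule only returns the next load address here.
            code_size = *AppLoader_NSO::LoadModule(process, *file, code_size,
                                                   std::strcmp(module, "rtld") == 0, false);
        }
    }
    process.LoadFromMetadata(metadata, code_size); // reserve the measured layout
    // Pass 2 (as above): the same loop with load_into_process = true, starting
    // from process.PageTable().GetCodeRegionStart().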
diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp
index 8908e5328..1e9ed2837 100644
--- a/src/core/loader/elf.cpp
+++ b/src/core/loader/elf.cpp
@@ -10,8 +10,8 @@
10#include "common/file_util.h" 10#include "common/file_util.h"
11#include "common/logging/log.h" 11#include "common/logging/log.h"
12#include "core/hle/kernel/code_set.h" 12#include "core/hle/kernel/code_set.h"
13#include "core/hle/kernel/memory/page_table.h"
13#include "core/hle/kernel/process.h" 14#include "core/hle/kernel/process.h"
14#include "core/hle/kernel/vm_manager.h"
15#include "core/loader/elf.h" 15#include "core/loader/elf.h"
16#include "core/memory.h" 16#include "core/memory.h"
17 17
@@ -393,7 +393,7 @@ AppLoader_ELF::LoadResult AppLoader_ELF::Load(Kernel::Process& process) {
393 return {ResultStatus::ErrorIncorrectELFFileSize, {}}; 393 return {ResultStatus::ErrorIncorrectELFFileSize, {}};
394 } 394 }
395 395
396 const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); 396 const VAddr base_address = process.PageTable().GetCodeRegionStart();
397 ElfReader elf_reader(&buffer[0]); 397 ElfReader elf_reader(&buffer[0]);
398 Kernel::CodeSet codeset = elf_reader.LoadInto(base_address); 398 Kernel::CodeSet codeset = elf_reader.LoadInto(base_address);
399 const VAddr entry_point = codeset.entrypoint; 399 const VAddr entry_point = codeset.entrypoint;
@@ -401,7 +401,7 @@ AppLoader_ELF::LoadResult AppLoader_ELF::Load(Kernel::Process& process) {
401 process.LoadModule(std::move(codeset), entry_point); 401 process.LoadModule(std::move(codeset), entry_point);
402 402
403 is_loaded = true; 403 is_loaded = true;
404 return {ResultStatus::Success, LoadParameters{48, Memory::DEFAULT_STACK_SIZE}}; 404 return {ResultStatus::Success, LoadParameters{48, Core::Memory::DEFAULT_STACK_SIZE}};
405} 405}
406 406
407} // namespace Loader 407} // namespace Loader
diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp
index 092103abe..40fa03ad1 100644
--- a/src/core/loader/kip.cpp
+++ b/src/core/loader/kip.cpp
@@ -7,14 +7,16 @@
7#include "core/file_sys/program_metadata.h" 7#include "core/file_sys/program_metadata.h"
8#include "core/gdbstub/gdbstub.h" 8#include "core/gdbstub/gdbstub.h"
9#include "core/hle/kernel/code_set.h" 9#include "core/hle/kernel/code_set.h"
10#include "core/hle/kernel/memory/page_table.h"
10#include "core/hle/kernel/process.h" 11#include "core/hle/kernel/process.h"
11#include "core/loader/kip.h" 12#include "core/loader/kip.h"
13#include "core/memory.h"
12 14
13namespace Loader { 15namespace Loader {
14 16
15namespace { 17namespace {
16constexpr u32 PageAlignSize(u32 size) { 18constexpr u32 PageAlignSize(u32 size) {
17 return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK; 19 return (size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK;
18} 20}
19} // Anonymous namespace 21} // Anonymous namespace
20 22
@@ -68,7 +70,7 @@ AppLoader::LoadResult AppLoader_KIP::Load(Kernel::Process& process) {
68 kip->GetMainThreadCpuCore(), kip->GetMainThreadStackSize(), 70 kip->GetMainThreadCpuCore(), kip->GetMainThreadStackSize(),
69 kip->GetTitleID(), 0xFFFFFFFFFFFFFFFF, kip->GetKernelCapabilities()); 71 kip->GetTitleID(), 0xFFFFFFFFFFFFFFFF, kip->GetKernelCapabilities());
70 72
71 const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); 73 const VAddr base_address = process.PageTable().GetCodeRegionStart();
72 Kernel::CodeSet codeset; 74 Kernel::CodeSet codeset;
73 Kernel::PhysicalMemory program_image; 75 Kernel::PhysicalMemory program_image;
74 76
diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp
index 175898b91..5d7e8136e 100644
--- a/src/core/loader/nro.cpp
+++ b/src/core/loader/nro.cpp
@@ -16,8 +16,8 @@
16#include "core/file_sys/vfs_offset.h" 16#include "core/file_sys/vfs_offset.h"
17#include "core/gdbstub/gdbstub.h" 17#include "core/gdbstub/gdbstub.h"
18#include "core/hle/kernel/code_set.h" 18#include "core/hle/kernel/code_set.h"
19#include "core/hle/kernel/memory/page_table.h"
19#include "core/hle/kernel/process.h" 20#include "core/hle/kernel/process.h"
20#include "core/hle/kernel/vm_manager.h"
21#include "core/hle/service/filesystem/filesystem.h" 21#include "core/hle/service/filesystem/filesystem.h"
22#include "core/loader/nro.h" 22#include "core/loader/nro.h"
23#include "core/loader/nso.h" 23#include "core/loader/nso.h"
@@ -127,7 +127,7 @@ FileType AppLoader_NRO::IdentifyType(const FileSys::VirtualFile& file) {
127} 127}
128 128
129static constexpr u32 PageAlignSize(u32 size) { 129static constexpr u32 PageAlignSize(u32 size) {
130 return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK; 130 return (size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK;
131} 131}
132 132
133static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data, 133static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data,
@@ -208,7 +208,7 @@ AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::Process& process) {
208 } 208 }
209 209
210 // Load NRO 210 // Load NRO
211 const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); 211 const VAddr base_address = process.PageTable().GetCodeRegionStart();
212 212
213 if (!LoadNro(process, *file, base_address)) { 213 if (!LoadNro(process, *file, base_address)) {
214 return {ResultStatus::ErrorLoadingNRO, {}}; 214 return {ResultStatus::ErrorLoadingNRO, {}};
@@ -221,7 +221,7 @@ AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::Process& process) {
221 221
222 is_loaded = true; 222 is_loaded = true;
223 return {ResultStatus::Success, 223 return {ResultStatus::Success,
224 LoadParameters{Kernel::THREADPRIO_DEFAULT, Memory::DEFAULT_STACK_SIZE}}; 224 LoadParameters{Kernel::THREADPRIO_DEFAULT, Core::Memory::DEFAULT_STACK_SIZE}};
225} 225}
226 226
227ResultStatus AppLoader_NRO::ReadIcon(std::vector<u8>& buffer) { 227ResultStatus AppLoader_NRO::ReadIcon(std::vector<u8>& buffer) {
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index 044067a5b..612ff9bf6 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -16,8 +16,8 @@
16#include "core/file_sys/patch_manager.h" 16#include "core/file_sys/patch_manager.h"
17#include "core/gdbstub/gdbstub.h" 17#include "core/gdbstub/gdbstub.h"
18#include "core/hle/kernel/code_set.h" 18#include "core/hle/kernel/code_set.h"
19#include "core/hle/kernel/memory/page_table.h"
19#include "core/hle/kernel/process.h" 20#include "core/hle/kernel/process.h"
20#include "core/hle/kernel/vm_manager.h"
21#include "core/loader/nso.h" 21#include "core/loader/nso.h"
22#include "core/memory.h" 22#include "core/memory.h"
23#include "core/settings.h" 23#include "core/settings.h"
@@ -47,7 +47,7 @@ std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data,
47} 47}
48 48
49constexpr u32 PageAlignSize(u32 size) { 49constexpr u32 PageAlignSize(u32 size) {
50 return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK; 50 return (size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK;
51} 51}
52} // Anonymous namespace 52} // Anonymous namespace
53 53
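PageAlignSize rounds a size up to the next page boundary; with PAGE_MASK == 0xFFF (4 KiB pages):

    // (size + PAGE_MASK) & ~PAGE_MASK:
    //   PageAlignSize(0x0000) == 0x0000
    //   PageAlignSize(0x0001) == 0x1000
    //   PageAlignSize(0x1000) == 0x1000
    //   PageAlignSize(0x1234) == 0x2000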
@@ -73,7 +73,7 @@ FileType AppLoader_NSO::IdentifyType(const FileSys::VirtualFile& file) {
73 73
74std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, 74std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
75 const FileSys::VfsFile& file, VAddr load_base, 75 const FileSys::VfsFile& file, VAddr load_base,
76 bool should_pass_arguments, 76 bool should_pass_arguments, bool load_into_process,
77 std::optional<FileSys::PatchManager> pm) { 77 std::optional<FileSys::PatchManager> pm) {
78 if (file.GetSize() < sizeof(NSOHeader)) { 78 if (file.GetSize() < sizeof(NSOHeader)) {
79 return {}; 79 return {};
@@ -97,21 +97,17 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
97 if (nso_header.IsSegmentCompressed(i)) { 97 if (nso_header.IsSegmentCompressed(i)) {
98 data = DecompressSegment(data, nso_header.segments[i]); 98 data = DecompressSegment(data, nso_header.segments[i]);
99 } 99 }
100 program_image.resize(nso_header.segments[i].location + 100 program_image.resize(nso_header.segments[i].location + static_cast<u32>(data.size()));
101 PageAlignSize(static_cast<u32>(data.size())));
102 std::memcpy(program_image.data() + nso_header.segments[i].location, data.data(), 101 std::memcpy(program_image.data() + nso_header.segments[i].location, data.data(),
103 data.size()); 102 data.size());
104 codeset.segments[i].addr = nso_header.segments[i].location; 103 codeset.segments[i].addr = nso_header.segments[i].location;
105 codeset.segments[i].offset = nso_header.segments[i].location; 104 codeset.segments[i].offset = nso_header.segments[i].location;
106 codeset.segments[i].size = PageAlignSize(static_cast<u32>(data.size())); 105 codeset.segments[i].size = nso_header.segments[i].size;
107 } 106 }
108 107
109 if (should_pass_arguments) { 108 if (should_pass_arguments && !Settings::values.program_args.empty()) {
110 std::vector<u8> arg_data{Settings::values.program_args.begin(), 109 const auto arg_data{Settings::values.program_args};
111 Settings::values.program_args.end()}; 110
112 if (arg_data.empty()) {
113 arg_data.resize(NSO_ARGUMENT_DEFAULT_SIZE);
114 }
115 codeset.DataSegment().size += NSO_ARGUMENT_DATA_ALLOCATION_SIZE; 111 codeset.DataSegment().size += NSO_ARGUMENT_DATA_ALLOCATION_SIZE;
116 NSOArgumentHeader args_header{ 112 NSOArgumentHeader args_header{
117 NSO_ARGUMENT_DATA_ALLOCATION_SIZE, static_cast<u32_le>(arg_data.size()), {}}; 113 NSO_ARGUMENT_DATA_ALLOCATION_SIZE, static_cast<u32_le>(arg_data.size()), {}};
@@ -123,24 +119,15 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
123 arg_data.size()); 119 arg_data.size());
124 } 120 }
125 121
126 // MOD header pointer is at .text offset + 4 122 codeset.DataSegment().size += nso_header.segments[2].bss_size;
127 u32 module_offset; 123 const u32 image_size{
128 std::memcpy(&module_offset, program_image.data() + 4, sizeof(u32)); 124 PageAlignSize(static_cast<u32>(program_image.size()) + nso_header.segments[2].bss_size)};
129
130 // Read MOD header
131 MODHeader mod_header{};
132 // Default .bss to size in segment header if MOD0 section doesn't exist
133 u32 bss_size{PageAlignSize(nso_header.segments[2].bss_size)};
134 std::memcpy(&mod_header, program_image.data() + module_offset, sizeof(MODHeader));
135 const bool has_mod_header{mod_header.magic == Common::MakeMagic('M', 'O', 'D', '0')};
136 if (has_mod_header) {
137 // Resize program image to include .bss section and page align each section
138 bss_size = PageAlignSize(mod_header.bss_end_offset - mod_header.bss_start_offset);
139 }
140 codeset.DataSegment().size += bss_size;
141 const u32 image_size{PageAlignSize(static_cast<u32>(program_image.size()) + bss_size)};
142 program_image.resize(image_size); 125 program_image.resize(image_size);
143 126
127 for (std::size_t i = 0; i < nso_header.segments.size(); ++i) {
128 codeset.segments[i].size = PageAlignSize(codeset.segments[i].size);
129 }
130
144 // Apply patches if necessary 131 // Apply patches if necessary
145 if (pm && (pm->HasNSOPatch(nso_header.build_id) || Settings::values.dump_nso)) { 132 if (pm && (pm->HasNSOPatch(nso_header.build_id) || Settings::values.dump_nso)) {
146 std::vector<u8> pi_header; 133 std::vector<u8> pi_header;
@@ -154,6 +141,11 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
154 std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), program_image.data()); 141 std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), program_image.data());
155 } 142 }
156 143
144 // If we aren't actually loading (i.e. just computing the process code layout), we are done
145 if (!load_into_process) {
146 return load_base + image_size;
147 }
148
157 // Apply cheats if they exist and the program has a valid title ID 149 // Apply cheats if they exist and the program has a valid title ID
158 if (pm) { 150 if (pm) {
159 auto& system = Core::System::GetInstance(); 151 auto& system = Core::System::GetInstance();
@@ -182,8 +174,8 @@ AppLoader_NSO::LoadResult AppLoader_NSO::Load(Kernel::Process& process) {
182 modules.clear(); 174 modules.clear();
183 175
184 // Load module 176 // Load module
185 const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); 177 const VAddr base_address = process.PageTable().GetCodeRegionStart();
186 if (!LoadModule(process, *file, base_address, true)) { 178 if (!LoadModule(process, *file, base_address, true, true)) {
187 return {ResultStatus::ErrorLoadingNSO, {}}; 179 return {ResultStatus::ErrorLoadingNSO, {}};
188 } 180 }
189 181
@@ -192,7 +184,7 @@ AppLoader_NSO::LoadResult AppLoader_NSO::Load(Kernel::Process& process) {
192 184
193 is_loaded = true; 185 is_loaded = true;
194 return {ResultStatus::Success, 186 return {ResultStatus::Success,
195 LoadParameters{Kernel::THREADPRIO_DEFAULT, Memory::DEFAULT_STACK_SIZE}}; 187 LoadParameters{Kernel::THREADPRIO_DEFAULT, Core::Memory::DEFAULT_STACK_SIZE}};
196} 188}
197 189
198ResultStatus AppLoader_NSO::ReadNSOModules(Modules& modules) { 190ResultStatus AppLoader_NSO::ReadNSOModules(Modules& modules) {
diff --git a/src/core/loader/nso.h b/src/core/loader/nso.h
index d2d600cd9..b210830f0 100644
--- a/src/core/loader/nso.h
+++ b/src/core/loader/nso.h
@@ -56,8 +56,6 @@ static_assert(sizeof(NSOHeader) == 0x100, "NSOHeader has incorrect size.");
56static_assert(std::is_trivially_copyable_v<NSOHeader>, "NSOHeader must be trivially copyable."); 56static_assert(std::is_trivially_copyable_v<NSOHeader>, "NSOHeader must be trivially copyable.");
57 57
58constexpr u64 NSO_ARGUMENT_DATA_ALLOCATION_SIZE = 0x9000; 58constexpr u64 NSO_ARGUMENT_DATA_ALLOCATION_SIZE = 0x9000;
59// NOTE: Official software default argument state is unverified.
60constexpr u64 NSO_ARGUMENT_DEFAULT_SIZE = 1;
61 59
62struct NSOArgumentHeader { 60struct NSOArgumentHeader {
63 u32_le allocated_size; 61 u32_le allocated_size;
@@ -84,6 +82,7 @@ public:
84 82
85 static std::optional<VAddr> LoadModule(Kernel::Process& process, const FileSys::VfsFile& file, 83 static std::optional<VAddr> LoadModule(Kernel::Process& process, const FileSys::VfsFile& file,
86 VAddr load_base, bool should_pass_arguments, 84 VAddr load_base, bool should_pass_arguments,
85 bool load_into_process,
87 std::optional<FileSys::PatchManager> pm = {}); 86 std::optional<FileSys::PatchManager> pm = {});
88 87
89 LoadResult Load(Kernel::Process& process) override; 88 LoadResult Load(Kernel::Process& process) override;
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 6061d37ae..9d87045a0 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -14,13 +14,14 @@
14#include "common/swap.h" 14#include "common/swap.h"
15#include "core/arm/arm_interface.h" 15#include "core/arm/arm_interface.h"
16#include "core/core.h" 16#include "core/core.h"
17#include "core/device_memory.h"
18#include "core/hle/kernel/memory/page_table.h"
17#include "core/hle/kernel/physical_memory.h" 19#include "core/hle/kernel/physical_memory.h"
18#include "core/hle/kernel/process.h" 20#include "core/hle/kernel/process.h"
19#include "core/hle/kernel/vm_manager.h"
20#include "core/memory.h" 21#include "core/memory.h"
21#include "video_core/gpu.h" 22#include "video_core/gpu.h"
22 23
23namespace Memory { 24namespace Core::Memory {
24 25
25// Implementation class used to keep the specifics of the memory subsystem hidden 26// Implementation class used to keep the specifics of the memory subsystem hidden
26// from outside classes. This also allows modification to the internals of the memory 27// from outside classes. This also allows modification to the internals of the memory
@@ -29,9 +30,9 @@ struct Memory::Impl {
29 explicit Impl(Core::System& system_) : system{system_} {} 30 explicit Impl(Core::System& system_) : system{system_} {}
30 31
31 void SetCurrentPageTable(Kernel::Process& process) { 32 void SetCurrentPageTable(Kernel::Process& process) {
32 current_page_table = &process.VMManager().page_table; 33 current_page_table = &process.PageTable().PageTableImpl();
33 34
34 const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth(); 35 const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth();
35 36
36 system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width); 37 system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width);
37 system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width); 38 system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width);
@@ -39,12 +40,7 @@ struct Memory::Impl {
39 system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width); 40 system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
40 } 41 }
41 42
42 void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, 43 void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
43 Kernel::PhysicalMemory& memory, VAddr offset) {
44 MapMemoryRegion(page_table, base, size, memory.data() + offset);
45 }
46
47 void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
48 ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); 44 ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
49 ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); 45 ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
50 MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); 46 MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
@@ -52,46 +48,27 @@ struct Memory::Impl {
52 48
53 void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, 49 void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
54 Common::MemoryHookPointer mmio_handler) { 50 Common::MemoryHookPointer mmio_handler) {
55 ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); 51 UNIMPLEMENTED();
56 ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
57 MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr,
58 Common::PageType::Special);
59
60 const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
61 const Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice,
62 std::move(mmio_handler)};
63 page_table.special_regions.add(
64 std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
65 } 52 }
66 53
67 void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { 54 void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
68 ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); 55 ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
69 ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); 56 ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
70 MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, 57 MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
71 Common::PageType::Unmapped);
72
73 const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
74 page_table.special_regions.erase(interval);
75 } 58 }
76 59
77 void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, 60 void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
78 Common::MemoryHookPointer hook) { 61 Common::MemoryHookPointer hook) {
79 const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); 62 UNIMPLEMENTED();
80 const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
81 page_table.special_regions.add(
82 std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
83 } 63 }
84 64
85 void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, 65 void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
86 Common::MemoryHookPointer hook) { 66 Common::MemoryHookPointer hook) {
87 const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); 67 UNIMPLEMENTED();
88 const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
89 page_table.special_regions.subtract(
90 std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
91 } 68 }
92 69
93 bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const { 70 bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const {
94 const auto& page_table = process.VMManager().page_table; 71 const auto& page_table = process.PageTable().PageTableImpl();
95 72
96 const u8* const page_pointer = page_table.pointers[vaddr >> PAGE_BITS]; 73 const u8* const page_pointer = page_table.pointers[vaddr >> PAGE_BITS];
97 if (page_pointer != nullptr) { 74 if (page_pointer != nullptr) {
@@ -113,55 +90,28 @@ struct Memory::Impl {
113 return IsValidVirtualAddress(*system.CurrentProcess(), vaddr); 90 return IsValidVirtualAddress(*system.CurrentProcess(), vaddr);
114 } 91 }
115 92
116 /** 93 u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
117 * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) 94 const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
118 * using a VMA from the current process
119 */
120 u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) {
121 const auto& vm_manager = process.VMManager();
122 95
123 const auto it = vm_manager.FindVMA(vaddr); 96 if (!paddr) {
124 DEBUG_ASSERT(vm_manager.IsValidHandle(it)); 97 return {};
125
126 u8* direct_pointer = nullptr;
127 const auto& vma = it->second;
128 switch (vma.type) {
129 case Kernel::VMAType::AllocatedMemoryBlock:
130 direct_pointer = vma.backing_block->data() + vma.offset;
131 break;
132 case Kernel::VMAType::BackingMemory:
133 direct_pointer = vma.backing_memory;
134 break;
135 case Kernel::VMAType::Free:
136 return nullptr;
137 default:
138 UNREACHABLE();
139 } 98 }
140 99
141 return direct_pointer + (vaddr - vma.base); 100 return system.DeviceMemory().GetPointer(paddr) + vaddr;
142 } 101 }
143 102
144 /** 103 u8* GetPointer(const VAddr vaddr) const {
145 * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) 104 u8* const page_pointer{current_page_table->pointers[vaddr >> PAGE_BITS]};
146 * using a VMA from the current process. 105 if (page_pointer) {
147 */
148 u8* GetPointerFromVMA(VAddr vaddr) {
149 return GetPointerFromVMA(*system.CurrentProcess(), vaddr);
150 }
151
152 u8* GetPointer(const VAddr vaddr) {
153 u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
154 if (page_pointer != nullptr) {
155 return page_pointer + vaddr; 106 return page_pointer + vaddr;
156 } 107 }
157 108
158 if (current_page_table->attributes[vaddr >> PAGE_BITS] == 109 if (current_page_table->attributes[vaddr >> PAGE_BITS] ==
159 Common::PageType::RasterizerCachedMemory) { 110 Common::PageType::RasterizerCachedMemory) {
160 return GetPointerFromVMA(vaddr); 111 return GetPointerFromRasterizerCachedMemory(vaddr);
161 } 112 }
162 113
163 LOG_ERROR(HW_Memory, "Unknown GetPointer @ 0x{:016X}", vaddr); 114 return {};
164 return nullptr;
165 } 115 }
166 116
167 u8 Read8(const VAddr addr) { 117 u8 Read8(const VAddr addr) {
@@ -169,15 +119,33 @@ struct Memory::Impl {
169 } 119 }
170 120
171 u16 Read16(const VAddr addr) { 121 u16 Read16(const VAddr addr) {
172 return Read<u16_le>(addr); 122 if ((addr & 1) == 0) {
123 return Read<u16_le>(addr);
124 } else {
125 const u8 a{Read<u8>(addr)};
126 const u8 b{Read<u8>(addr + sizeof(u8))};
127 return (static_cast<u16>(b) << 8) | a;
128 }
173 } 129 }
174 130
175 u32 Read32(const VAddr addr) { 131 u32 Read32(const VAddr addr) {
176 return Read<u32_le>(addr); 132 if ((addr & 3) == 0) {
133 return Read<u32_le>(addr);
134 } else {
135 const u16 a{Read16(addr)};
136 const u16 b{Read16(addr + sizeof(u16))};
137 return (static_cast<u32>(b) << 16) | a;
138 }
177 } 139 }
178 140
179 u64 Read64(const VAddr addr) { 141 u64 Read64(const VAddr addr) {
180 return Read<u64_le>(addr); 142 if ((addr & 7) == 0) {
143 return Read<u64_le>(addr);
144 } else {
145 const u32 a{Read32(addr)};
146 const u32 b{Read32(addr + sizeof(u32))};
147 return (static_cast<u64>(b) << 32) | a;
148 }
181 } 149 }
182 150
183 void Write8(const VAddr addr, const u8 data) { 151 void Write8(const VAddr addr, const u8 data) {
@@ -185,15 +153,30 @@ struct Memory::Impl {
185 } 153 }
186 154
187 void Write16(const VAddr addr, const u16 data) { 155 void Write16(const VAddr addr, const u16 data) {
188 Write<u16_le>(addr, data); 156 if ((addr & 1) == 0) {
157 Write<u16_le>(addr, data);
158 } else {
159 Write<u8>(addr, static_cast<u8>(data));
160 Write<u8>(addr + sizeof(u8), static_cast<u8>(data >> 8));
161 }
189 } 162 }
190 163
191 void Write32(const VAddr addr, const u32 data) { 164 void Write32(const VAddr addr, const u32 data) {
192 Write<u32_le>(addr, data); 165 if ((addr & 3) == 0) {
166 Write<u32_le>(addr, data);
167 } else {
168 Write16(addr, static_cast<u16>(data));
169 Write16(addr + sizeof(u16), static_cast<u16>(data >> 16));
170 }
193 } 171 }
194 172
195 void Write64(const VAddr addr, const u64 data) { 173 void Write64(const VAddr addr, const u64 data) {
196 Write<u64_le>(addr, data); 174 if ((addr & 7) == 0) {
175 Write<u64_le>(addr, data);
176 } else {
177 Write32(addr, static_cast<u32>(data));
178 Write32(addr + sizeof(u32), static_cast<u32>(data >> 32));
179 }
197 } 180 }
198 181
199 std::string ReadCString(VAddr vaddr, std::size_t max_length) { 182 std::string ReadCString(VAddr vaddr, std::size_t max_length) {
@@ -213,7 +196,7 @@ struct Memory::Impl {
213 196
214 void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, 197 void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer,
215 const std::size_t size) { 198 const std::size_t size) {
216 const auto& page_table = process.VMManager().page_table; 199 const auto& page_table = process.PageTable().PageTableImpl();
217 200
218 std::size_t remaining_size = size; 201 std::size_t remaining_size = size;
219 std::size_t page_index = src_addr >> PAGE_BITS; 202 std::size_t page_index = src_addr >> PAGE_BITS;
@@ -241,7 +224,7 @@ struct Memory::Impl {
241 break; 224 break;
242 } 225 }
243 case Common::PageType::RasterizerCachedMemory: { 226 case Common::PageType::RasterizerCachedMemory: {
244 const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); 227 const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
245 system.GPU().FlushRegion(current_vaddr, copy_amount); 228 system.GPU().FlushRegion(current_vaddr, copy_amount);
246 std::memcpy(dest_buffer, host_ptr, copy_amount); 229 std::memcpy(dest_buffer, host_ptr, copy_amount);
247 break; 230 break;
@@ -259,7 +242,7 @@ struct Memory::Impl {
259 242
260 void ReadBlockUnsafe(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, 243 void ReadBlockUnsafe(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer,
261 const std::size_t size) { 244 const std::size_t size) {
262 const auto& page_table = process.VMManager().page_table; 245 const auto& page_table = process.PageTable().PageTableImpl();
263 246
264 std::size_t remaining_size = size; 247 std::size_t remaining_size = size;
265 std::size_t page_index = src_addr >> PAGE_BITS; 248 std::size_t page_index = src_addr >> PAGE_BITS;
@@ -287,7 +270,7 @@ struct Memory::Impl {
287 break; 270 break;
288 } 271 }
289 case Common::PageType::RasterizerCachedMemory: { 272 case Common::PageType::RasterizerCachedMemory: {
290 const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); 273 const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
291 std::memcpy(dest_buffer, host_ptr, copy_amount); 274 std::memcpy(dest_buffer, host_ptr, copy_amount);
292 break; 275 break;
293 } 276 }
@@ -312,7 +295,7 @@ struct Memory::Impl {
312 295
313 void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer, 296 void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer,
314 const std::size_t size) { 297 const std::size_t size) {
315 const auto& page_table = process.VMManager().page_table; 298 const auto& page_table = process.PageTable().PageTableImpl();
316 std::size_t remaining_size = size; 299 std::size_t remaining_size = size;
317 std::size_t page_index = dest_addr >> PAGE_BITS; 300 std::size_t page_index = dest_addr >> PAGE_BITS;
318 std::size_t page_offset = dest_addr & PAGE_MASK; 301 std::size_t page_offset = dest_addr & PAGE_MASK;
@@ -338,7 +321,7 @@ struct Memory::Impl {
338 break; 321 break;
339 } 322 }
340 case Common::PageType::RasterizerCachedMemory: { 323 case Common::PageType::RasterizerCachedMemory: {
341 u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); 324 u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
342 system.GPU().InvalidateRegion(current_vaddr, copy_amount); 325 system.GPU().InvalidateRegion(current_vaddr, copy_amount);
343 std::memcpy(host_ptr, src_buffer, copy_amount); 326 std::memcpy(host_ptr, src_buffer, copy_amount);
344 break; 327 break;
@@ -356,7 +339,7 @@ struct Memory::Impl {
356 339
357 void WriteBlockUnsafe(const Kernel::Process& process, const VAddr dest_addr, 340 void WriteBlockUnsafe(const Kernel::Process& process, const VAddr dest_addr,
358 const void* src_buffer, const std::size_t size) { 341 const void* src_buffer, const std::size_t size) {
359 const auto& page_table = process.VMManager().page_table; 342 const auto& page_table = process.PageTable().PageTableImpl();
360 std::size_t remaining_size = size; 343 std::size_t remaining_size = size;
361 std::size_t page_index = dest_addr >> PAGE_BITS; 344 std::size_t page_index = dest_addr >> PAGE_BITS;
362 std::size_t page_offset = dest_addr & PAGE_MASK; 345 std::size_t page_offset = dest_addr & PAGE_MASK;
@@ -382,7 +365,7 @@ struct Memory::Impl {
382 break; 365 break;
383 } 366 }
384 case Common::PageType::RasterizerCachedMemory: { 367 case Common::PageType::RasterizerCachedMemory: {
385 u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); 368 u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
386 std::memcpy(host_ptr, src_buffer, copy_amount); 369 std::memcpy(host_ptr, src_buffer, copy_amount);
387 break; 370 break;
388 } 371 }
@@ -406,7 +389,7 @@ struct Memory::Impl {
406 } 389 }
407 390
408 void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) { 391 void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) {
409 const auto& page_table = process.VMManager().page_table; 392 const auto& page_table = process.PageTable().PageTableImpl();
410 std::size_t remaining_size = size; 393 std::size_t remaining_size = size;
411 std::size_t page_index = dest_addr >> PAGE_BITS; 394 std::size_t page_index = dest_addr >> PAGE_BITS;
412 std::size_t page_offset = dest_addr & PAGE_MASK; 395 std::size_t page_offset = dest_addr & PAGE_MASK;
@@ -432,7 +415,7 @@ struct Memory::Impl {
432 break; 415 break;
433 } 416 }
434 case Common::PageType::RasterizerCachedMemory: { 417 case Common::PageType::RasterizerCachedMemory: {
435 u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); 418 u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
436 system.GPU().InvalidateRegion(current_vaddr, copy_amount); 419 system.GPU().InvalidateRegion(current_vaddr, copy_amount);
437 std::memset(host_ptr, 0, copy_amount); 420 std::memset(host_ptr, 0, copy_amount);
438 break; 421 break;
@@ -453,7 +436,7 @@ struct Memory::Impl {
453 436
454 void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, 437 void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr,
455 const std::size_t size) { 438 const std::size_t size) {
456 const auto& page_table = process.VMManager().page_table; 439 const auto& page_table = process.PageTable().PageTableImpl();
457 std::size_t remaining_size = size; 440 std::size_t remaining_size = size;
458 std::size_t page_index = src_addr >> PAGE_BITS; 441 std::size_t page_index = src_addr >> PAGE_BITS;
459 std::size_t page_offset = src_addr & PAGE_MASK; 442 std::size_t page_offset = src_addr & PAGE_MASK;
@@ -479,7 +462,7 @@ struct Memory::Impl {
479 break; 462 break;
480 } 463 }
481 case Common::PageType::RasterizerCachedMemory: { 464 case Common::PageType::RasterizerCachedMemory: {
-                const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
+                const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
                 system.GPU().FlushRegion(current_vaddr, copy_amount);
                 WriteBlock(process, dest_addr, host_ptr, copy_amount);
                 break;
@@ -512,7 +495,7 @@ struct Memory::Impl {
 
         u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
         for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
-            Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
+            Common::PageType& page_type{current_page_table->attributes[vaddr >> PAGE_BITS]};
 
             if (cached) {
                 // Switch page type to cached if now cached
@@ -544,7 +527,7 @@ struct Memory::Impl {
                 // that this area is already unmarked as cached.
                 break;
             case Common::PageType::RasterizerCachedMemory: {
-                u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
+                u8* pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
                 if (pointer == nullptr) {
                     // It's possible that this function has been called while updating the
                     // pagetable after unmapping a VMA. In that case the underlying VMA will no
@@ -573,9 +556,9 @@ struct Memory::Impl {
     * @param memory The memory to map.
     * @param type The page type to map the memory as.
     */
-    void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory,
+    void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
                   Common::PageType type) {
-        LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE,
+        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE,
                   (base + size) * PAGE_SIZE);
 
         // During boot, current_page_table might not be set yet, in which case we need not flush
@@ -593,19 +576,26 @@ struct Memory::Impl {
         ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
                    base + page_table.pointers.size());
 
-        std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type);
-
-        if (memory == nullptr) {
-            std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end,
-                      memory);
+        if (!target) {
+            while (base != end) {
+                page_table.pointers[base] = nullptr;
+                page_table.attributes[base] = type;
+                page_table.backing_addr[base] = 0;
+
+                base += 1;
+            }
         } else {
             while (base != end) {
-                page_table.pointers[base] = memory - (base << PAGE_BITS);
+                page_table.pointers[base] =
+                    system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS);
+                page_table.attributes[base] = type;
+                page_table.backing_addr[base] = target - (base << PAGE_BITS);
+
                 ASSERT_MSG(page_table.pointers[base],
                            "memory mapping base yield a nullptr within the table");
 
                 base += 1;
-                memory += PAGE_SIZE;
+                target += PAGE_SIZE;
             }
         }
     }
@@ -640,7 +630,7 @@ struct Memory::Impl {
             ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
             break;
         case Common::PageType::RasterizerCachedMemory: {
-            const u8* const host_ptr = GetPointerFromVMA(vaddr);
+            const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
             system.GPU().FlushRegion(vaddr, sizeof(T));
             T value;
             std::memcpy(&value, host_ptr, sizeof(T));
@@ -682,7 +672,7 @@ struct Memory::Impl {
             ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
             break;
         case Common::PageType::RasterizerCachedMemory: {
-            u8* const host_ptr{GetPointerFromVMA(vaddr)};
+            u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
             system.GPU().InvalidateRegion(vaddr, sizeof(T));
             std::memcpy(host_ptr, &data, sizeof(T));
             break;
@@ -703,12 +693,7 @@ void Memory::SetCurrentPageTable(Kernel::Process& process) {
     impl->SetCurrentPageTable(process);
 }
 
-void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size,
-                             Kernel::PhysicalMemory& memory, VAddr offset) {
-    impl->MapMemoryRegion(page_table, base, size, memory, offset);
-}
-
-void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
+void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
     impl->MapMemoryRegion(page_table, base, size, target);
 }
 
@@ -845,4 +830,4 @@ bool IsKernelVirtualAddress(const VAddr vaddr) {
    return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END;
}

-} // namespace Memory
+} // namespace Core::Memory
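Note on the MapPages() hunk above: the single std::fill over the attribute array is replaced with per-page loops so that pointers, attributes, and backing_addr are updated together for every entry. A minimal sketch of the invariant the new loop establishes (helper names hypothetical, not from the tree):

    // For each mapped page index p backed by physical address target_p:
    //   pointers[p]     = host(target_p) - (p << PAGE_BITS)
    //   backing_addr[p] = target_p - (p << PAGE_BITS)
    // so for any virtual address v inside a mapped page:
    u8* HostPointerSketch(const Common::PageTable& pt, VAddr v) {
        return pt.pointers[v >> PAGE_BITS] + v; // host pointer for v
    }
    PAddr PhysicalAddressSketch(const Common::PageTable& pt, VAddr v) {
        return pt.backing_addr[v >> PAGE_BITS] + v; // physical address for v
    }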
diff --git a/src/core/memory.h b/src/core/memory.h
index b92d678a4..9292f3b0a 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -23,7 +23,7 @@ class PhysicalMemory;
 class Process;
 } // namespace Kernel
 
-namespace Memory {
+namespace Core::Memory {
 
 /**
  * Page size used by the ARM architecture. This is the smallest granularity with which memory can
@@ -67,19 +67,6 @@ public:
     void SetCurrentPageTable(Kernel::Process& process);
 
     /**
-     * Maps an physical buffer onto a region of the emulated process address space.
-     *
-     * @param page_table The page table of the emulated process.
-     * @param base The address to start mapping at. Must be page-aligned.
-     * @param size The amount of bytes to map. Must be page-aligned.
-     * @param memory Physical buffer with the memory backing the mapping. Must be of length
-     *               at least `size + offset`.
-     * @param offset The offset within the physical memory. Must be page-aligned.
-     */
-    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size,
-                         Kernel::PhysicalMemory& memory, VAddr offset);
-
-    /**
      * Maps an allocated buffer onto a region of the emulated process address space.
      *
      * @param page_table The page table of the emulated process.
@@ -88,7 +75,7 @@ public:
     * @param target Buffer with the memory backing the mapping. Must be of length at least
     *               `size`.
     */
-    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target);
+    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target);
 
     /**
      * Maps a region of the emulated process address space as a IO region.
@@ -503,4 +490,4 @@ private:
 /// Determines if the given VAddr is a kernel address
 bool IsKernelVirtualAddress(VAddr vaddr);
 
-} // namespace Memory
+} // namespace Core::Memory
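With the Kernel::PhysicalMemory overload removed, callers map by physical address and the backing host pointer is resolved internally through DeviceMemory. A hypothetical call site under the new signature (all values illustrative; assumes paddr is a page-aligned address owned by the system's DeviceMemory):

    void MapExample(Core::Memory::Memory& memory, Common::PageTable& page_table) {
        const VAddr base = 0x10000000;  // page-aligned guest address (example value)
        const u64 size = 0x2000;        // two 4 KiB pages
        const PAddr paddr = 0x80000000; // hypothetical DeviceMemory-backed address
        memory.MapMemoryRegion(page_table, base, size, paddr);
    }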
diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp
index 4472500d2..b139e8465 100644
--- a/src/core/memory/cheat_engine.cpp
+++ b/src/core/memory/cheat_engine.cpp
@@ -10,13 +10,15 @@
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
 #include "core/hardware_properties.h"
+#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/service/hid/controllers/npad.h"
 #include "core/hle/service/hid/hid.h"
 #include "core/hle/service/sm/sm.h"
+#include "core/memory.h"
 #include "core/memory/cheat_engine.h"
 
-namespace Memory {
+namespace Core::Memory {
 
 constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 12);
 constexpr u32 KEYPAD_BITMASK = 0x3FFFFFF;
@@ -194,11 +196,12 @@ void CheatEngine::Initialize() {
     metadata.process_id = system.CurrentProcess()->GetProcessID();
     metadata.title_id = system.CurrentProcess()->GetTitleID();
 
-    const auto& vm_manager = system.CurrentProcess()->VMManager();
-    metadata.heap_extents = {vm_manager.GetHeapRegionBaseAddress(), vm_manager.GetHeapRegionSize()};
-    metadata.address_space_extents = {vm_manager.GetAddressSpaceBaseAddress(),
-                                      vm_manager.GetAddressSpaceSize()};
-    metadata.alias_extents = {vm_manager.GetMapRegionBaseAddress(), vm_manager.GetMapRegionSize()};
+    const auto& page_table = system.CurrentProcess()->PageTable();
+    metadata.heap_extents = {page_table.GetHeapRegionStart(), page_table.GetHeapRegionSize()};
+    metadata.address_space_extents = {page_table.GetAddressSpaceStart(),
+                                      page_table.GetAddressSpaceSize()};
+    metadata.alias_extents = {page_table.GetAliasCodeRegionStart(),
+                              page_table.GetAliasCodeRegionSize()};
 
     is_pending_reload.exchange(true);
 }
@@ -230,4 +233,4 @@ void CheatEngine::FrameCallback(u64 userdata, s64 cycles_late) {
     core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS - cycles_late, event);
 }
 
-} // namespace Memory
+} // namespace Core::Memory
diff --git a/src/core/memory/cheat_engine.h b/src/core/memory/cheat_engine.h
index 3d6b2298a..2649423f8 100644
--- a/src/core/memory/cheat_engine.h
+++ b/src/core/memory/cheat_engine.h
@@ -20,7 +20,7 @@ class CoreTiming;
 struct EventType;
 } // namespace Core::Timing
 
-namespace Memory {
+namespace Core::Memory {
 
 class StandardVmCallbacks : public DmntCheatVm::Callbacks {
 public:
@@ -84,4 +84,4 @@ private:
     Core::System& system;
 };
 
-} // namespace Memory
+} // namespace Core::Memory
diff --git a/src/core/memory/dmnt_cheat_types.h b/src/core/memory/dmnt_cheat_types.h
index bf68fa0fe..5e60733dc 100644
--- a/src/core/memory/dmnt_cheat_types.h
+++ b/src/core/memory/dmnt_cheat_types.h
@@ -26,7 +26,7 @@
 
 #include "common/common_types.h"
 
-namespace Memory {
+namespace Core::Memory {
 
 struct MemoryRegionExtents {
     u64 base{};
@@ -55,4 +55,4 @@ struct CheatEntry {
     CheatDefinition definition{};
 };
 
-} // namespace Memory
+} // namespace Core::Memory
diff --git a/src/core/memory/dmnt_cheat_vm.cpp b/src/core/memory/dmnt_cheat_vm.cpp
index 4f4fa5099..fb9f36bfd 100644
--- a/src/core/memory/dmnt_cheat_vm.cpp
+++ b/src/core/memory/dmnt_cheat_vm.cpp
@@ -27,7 +27,7 @@
 #include "core/memory/dmnt_cheat_types.h"
 #include "core/memory/dmnt_cheat_vm.h"
 
-namespace Memory {
+namespace Core::Memory {
 
 DmntCheatVm::DmntCheatVm(std::unique_ptr<Callbacks> callbacks) : callbacks(std::move(callbacks)) {}
 
@@ -55,7 +55,7 @@ void DmntCheatVm::LogOpcode(const CheatVmOpcode& opcode) {
             fmt::format("Cond Type: {:X}", static_cast<u32>(begin_cond->cond_type)));
         callbacks->CommandLog(fmt::format("Rel Addr: {:X}", begin_cond->rel_address));
         callbacks->CommandLog(fmt::format("Value: {:X}", begin_cond->value.bit64));
-    } else if (auto end_cond = std::get_if<EndConditionalOpcode>(&opcode.opcode)) {
+    } else if (std::holds_alternative<EndConditionalOpcode>(opcode.opcode)) {
         callbacks->CommandLog("Opcode: End Conditional");
     } else if (auto ctrl_loop = std::get_if<ControlLoopOpcode>(&opcode.opcode)) {
         if (ctrl_loop->start_loop) {
@@ -399,6 +399,7 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
        // 8kkkkkkk
        // Just parse the mask.
        begin_keypress_cond.key_mask = first_dword & 0x0FFFFFFF;
+       opcode.opcode = begin_keypress_cond;
    } break;
    case CheatVmOpcodeType::PerformArithmeticRegister: {
        PerformArithmeticRegisterOpcode perform_math_reg{};
@@ -779,7 +780,7 @@ void DmntCheatVm::Execute(const CheatProcessMetadata& metadata) {
             if (!cond_met) {
                 SkipConditionalBlock();
             }
-        } else if (auto end_cond = std::get_if<EndConditionalOpcode>(&cur_opcode.opcode)) {
+        } else if (std::holds_alternative<EndConditionalOpcode>(cur_opcode.opcode)) {
             // Decrement the condition depth.
             // We will assume, graciously, that mismatched conditional block ends are a nop.
             if (condition_depth > 0) {
@@ -1209,4 +1210,4 @@ void DmntCheatVm::Execute(const CheatProcessMetadata& metadata) {
     }
 }
 
-} // namespace Memory
+} // namespace Core::Memory
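Two fixes ride along with the namespace change in this file: the std::get_if results that were never read become std::holds_alternative checks, and DecodeNextOpcode now stores the decoded keypress condition into opcode.opcode, which the old code forgot to do. A self-contained illustration of the holds_alternative pattern (types invented for the example):

    #include <variant>

    struct EndConditionalOpcode {};
    struct ControlLoopOpcode { bool start_loop; };
    using Opcode = std::variant<EndConditionalOpcode, ControlLoopOpcode>;

    bool IsEndConditional(const Opcode& op) {
        // Idiomatic when only the active alternative matters, not its value;
        // std::get_if would leave an unused binding behind.
        return std::holds_alternative<EndConditionalOpcode>(op);
    }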
diff --git a/src/core/memory/dmnt_cheat_vm.h b/src/core/memory/dmnt_cheat_vm.h
index c36212cf1..8351fd798 100644
--- a/src/core/memory/dmnt_cheat_vm.h
+++ b/src/core/memory/dmnt_cheat_vm.h
@@ -30,7 +30,7 @@
 #include "common/common_types.h"
 #include "core/memory/dmnt_cheat_types.h"
 
-namespace Memory {
+namespace Core::Memory {
 
 enum class CheatVmOpcodeType : u32 {
     StoreStatic = 0,
@@ -318,4 +318,4 @@ private:
                           MemoryAccessType mem_type, u64 rel_address);
 };
 
-}; // namespace Memory
+}; // namespace Core::Memory
diff --git a/src/core/reporter.cpp b/src/core/reporter.cpp
index 85ac81ef7..558cbe6d7 100644
--- a/src/core/reporter.cpp
+++ b/src/core/reporter.cpp
@@ -16,9 +16,11 @@
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
 #include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/result.h"
 #include "core/hle/service/lm/manager.h"
+#include "core/memory.h"
 #include "core/reporter.h"
 #include "core/settings.h"
 
@@ -108,14 +110,13 @@ json GetProcessorStateData(const std::string& architecture, u64 entry_point, u64
 
 json GetProcessorStateDataAuto(Core::System& system) {
     const auto* process{system.CurrentProcess()};
-    const auto& vm_manager{process->VMManager()};
     auto& arm{system.CurrentArmInterface()};
 
     Core::ARM_Interface::ThreadContext64 context{};
     arm.SaveContext(context);
 
     return GetProcessorStateData(process->Is64BitProcess() ? "AArch64" : "AArch32",
-                                 vm_manager.GetCodeRegionBaseAddress(), context.sp, context.pc,
+                                 process->PageTable().GetCodeRegionStart(), context.sp, context.pc,
                                  context.pstate, context.cpu_registers);
 }
 
@@ -147,7 +148,8 @@ json GetFullDataAuto(const std::string& timestamp, u64 title_id, Core::System& s
 }
 
 template <bool read_value, typename DescriptorType>
-json GetHLEBufferDescriptorData(const std::vector<DescriptorType>& buffer, Memory::Memory& memory) {
+json GetHLEBufferDescriptorData(const std::vector<DescriptorType>& buffer,
+                                Core::Memory::Memory& memory) {
     auto buffer_out = json::array();
     for (const auto& desc : buffer) {
         auto entry = json{
@@ -167,7 +169,7 @@ json GetHLEBufferDescriptorData(const std::vector<DescriptorType>& buffer, Memor
     return buffer_out;
 }
 
-json GetHLERequestContextData(Kernel::HLERequestContext& ctx, Memory::Memory& memory) {
+json GetHLERequestContextData(Kernel::HLERequestContext& ctx, Core::Memory::Memory& memory) {
     json out;
 
     auto cmd_buf = json::array();
diff --git a/src/core/telemetry_session.cpp b/src/core/telemetry_session.cpp
index 0f3685d1c..fd5a3ee9f 100644
--- a/src/core/telemetry_session.cpp
+++ b/src/core/telemetry_session.cpp
@@ -153,9 +153,9 @@ void TelemetrySession::AddInitialInfo(Loader::AppLoader& app_loader) {
     app_loader.ReadTitle(name);
 
     if (name.empty()) {
-        auto [nacp, icon_file] = FileSys::PatchManager(program_id).GetControlMetadata();
-        if (nacp != nullptr) {
-            name = nacp->GetApplicationName();
+        const auto metadata = FileSys::PatchManager(program_id).GetControlMetadata();
+        if (metadata.first != nullptr) {
+            name = metadata.first->GetApplicationName();
         }
     }
 
diff --git a/src/core/tools/freezer.cpp b/src/core/tools/freezer.cpp
index 1e060f009..b2c6c537e 100644
--- a/src/core/tools/freezer.cpp
+++ b/src/core/tools/freezer.cpp
@@ -16,7 +16,7 @@ namespace {
 
 constexpr s64 MEMORY_FREEZER_TICKS = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 60);
 
-u64 MemoryReadWidth(Memory::Memory& memory, u32 width, VAddr addr) {
+u64 MemoryReadWidth(Core::Memory::Memory& memory, u32 width, VAddr addr) {
     switch (width) {
     case 1:
         return memory.Read8(addr);
@@ -32,7 +32,7 @@ u64 MemoryReadWidth(Memory::Memory& memory, u32 width, VAddr addr) {
     }
 }
 
-void MemoryWriteWidth(Memory::Memory& memory, u32 width, VAddr addr, u64 value) {
+void MemoryWriteWidth(Core::Memory::Memory& memory, u32 width, VAddr addr, u64 value) {
     switch (width) {
     case 1:
         memory.Write8(addr, static_cast<u8>(value));
@@ -53,7 +53,7 @@ void MemoryWriteWidth(Memory::Memory& memory, u32 width, VAddr addr, u64 value)
 
 } // Anonymous namespace
 
-Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Memory::Memory& memory_)
+Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_)
     : core_timing{core_timing_}, memory{memory_} {
     event = Core::Timing::CreateEvent(
         "MemoryFreezer::FrameCallback",
diff --git a/src/core/tools/freezer.h b/src/core/tools/freezer.h
index 916339c6c..62fc6aa6c 100644
--- a/src/core/tools/freezer.h
+++ b/src/core/tools/freezer.h
@@ -16,7 +16,7 @@ class CoreTiming;
 struct EventType;
 } // namespace Core::Timing
 
-namespace Memory {
+namespace Core::Memory {
 class Memory;
 }
 
@@ -38,7 +38,7 @@ public:
         u64 value;
     };
 
-    explicit Freezer(Core::Timing::CoreTiming& core_timing_, Memory::Memory& memory_);
+    explicit Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_);
     ~Freezer();
 
     // Enables or disables the entire memory freezer.
@@ -82,7 +82,7 @@ private:
 
     std::shared_ptr<Core::Timing::EventType> event;
     Core::Timing::CoreTiming& core_timing;
-    Memory::Memory& memory;
+    Core::Memory::Memory& memory;
 };
 
 } // namespace Tools
diff --git a/src/input_common/CMakeLists.txt b/src/input_common/CMakeLists.txt
index 2520ba321..a9c2392b1 100644
--- a/src/input_common/CMakeLists.txt
+++ b/src/input_common/CMakeLists.txt
@@ -27,4 +27,4 @@ if(SDL2_FOUND)
 endif()
 
 create_target_directory_groups(input_common)
-target_link_libraries(input_common PUBLIC core PRIVATE common ${Boost_LIBRARIES})
+target_link_libraries(input_common PUBLIC core PRIVATE common Boost::boost)
diff --git a/src/input_common/sdl/sdl_impl.cpp b/src/input_common/sdl/sdl_impl.cpp
index a2e0c0bd2..675b477fa 100644
--- a/src/input_common/sdl/sdl_impl.cpp
+++ b/src/input_common/sdl/sdl_impl.cpp
@@ -603,6 +603,7 @@ public:
         if (std::abs(event.jaxis.value / 32767.0) < 0.5) {
             break;
         }
+        [[fallthrough]];
     case SDL_JOYBUTTONUP:
     case SDL_JOYHATMOTION:
         return SDLEventToButtonParamPackage(state, event);
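The added [[fallthrough]] documents that a joystick axis event which passes the dead-zone check is deliberately handled by the button/hat path below it, and silences -Wimplicit-fallthrough. Reduced sketch of the pattern (SDL2 event names as in the hunk):

    switch (event.type) {
    case SDL_JOYAXISMOTION:
        if (std::abs(event.jaxis.value / 32767.0) < 0.5) {
            break; // inside the dead zone: ignore the axis event
        }
        [[fallthrough]]; // strong axis motion is treated like a button press
    case SDL_JOYBUTTONUP:
    case SDL_JOYHATMOTION:
        return SDLEventToButtonParamPackage(state, event);
    }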
diff --git a/src/tests/core/arm/arm_test_common.cpp b/src/tests/core/arm/arm_test_common.cpp
index 17043346b..e54674d11 100644
--- a/src/tests/core/arm/arm_test_common.cpp
+++ b/src/tests/core/arm/arm_test_common.cpp
@@ -6,6 +6,7 @@
 
 #include "common/page_table.h"
 #include "core/core.h"
+#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/memory.h"
 #include "tests/core/arm/arm_test_common.h"
@@ -18,12 +19,7 @@ TestEnvironment::TestEnvironment(bool mutable_memory_)
     auto& system = Core::System::GetInstance();
 
     auto process = Kernel::Process::Create(system, "", Kernel::Process::ProcessType::Userland);
-    page_table = &process->VMManager().page_table;
-
-    std::fill(page_table->pointers.begin(), page_table->pointers.end(), nullptr);
-    page_table->special_regions.clear();
-    std::fill(page_table->attributes.begin(), page_table->attributes.end(),
-              Common::PageType::Unmapped);
+    page_table = &process->PageTable().PageTableImpl();
 
     system.Memory().MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory);
     system.Memory().MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory);
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index b57c0d4d4..83e7a1cde 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -29,10 +29,10 @@ namespace VideoCommon {
 
 using MapInterval = std::shared_ptr<MapIntervalBase>;
 
-template <typename TBuffer, typename TBufferType, typename StreamBuffer>
+template <typename OwnerBuffer, typename BufferType, typename StreamBuffer>
 class BufferCache {
 public:
-    using BufferInfo = std::pair<const TBufferType*, u64>;
+    using BufferInfo = std::pair<BufferType, u64>;
 
     BufferInfo UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment = 4,
                             bool is_written = false, bool use_fast_cbuf = false) {
@@ -89,9 +89,7 @@ public:
             }
         }
 
-        const u64 offset = static_cast<u64>(block->GetOffset(cpu_addr));
-
-        return {ToHandle(block), offset};
+        return {ToHandle(block), static_cast<u64>(block->GetOffset(cpu_addr))};
     }
 
     /// Uploads from a host memory. Returns the OpenGL buffer where it's located and its offset.
@@ -156,7 +154,7 @@ public:
         }
     }
 
-    virtual const TBufferType* GetEmptyBuffer(std::size_t size) = 0;
+    virtual BufferType GetEmptyBuffer(std::size_t size) = 0;
 
 protected:
     explicit BufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
@@ -166,19 +164,19 @@ protected:
 
     ~BufferCache() = default;
 
-    virtual const TBufferType* ToHandle(const TBuffer& storage) = 0;
+    virtual BufferType ToHandle(const OwnerBuffer& storage) = 0;
 
     virtual void WriteBarrier() = 0;
 
-    virtual TBuffer CreateBlock(VAddr cpu_addr, std::size_t size) = 0;
+    virtual OwnerBuffer CreateBlock(VAddr cpu_addr, std::size_t size) = 0;
 
-    virtual void UploadBlockData(const TBuffer& buffer, std::size_t offset, std::size_t size,
+    virtual void UploadBlockData(const OwnerBuffer& buffer, std::size_t offset, std::size_t size,
                                  const u8* data) = 0;
 
-    virtual void DownloadBlockData(const TBuffer& buffer, std::size_t offset, std::size_t size,
+    virtual void DownloadBlockData(const OwnerBuffer& buffer, std::size_t offset, std::size_t size,
                                    u8* data) = 0;
 
-    virtual void CopyBlock(const TBuffer& src, const TBuffer& dst, std::size_t src_offset,
+    virtual void CopyBlock(const OwnerBuffer& src, const OwnerBuffer& dst, std::size_t src_offset,
                            std::size_t dst_offset, std::size_t size) = 0;
 
     virtual BufferInfo ConstBufferUpload(const void* raw_pointer, std::size_t size) {
@@ -221,9 +219,8 @@ private:
         return std::make_shared<MapIntervalBase>(start, end, gpu_addr);
     }
 
-    MapInterval MapAddress(const TBuffer& block, const GPUVAddr gpu_addr, const VAddr cpu_addr,
+    MapInterval MapAddress(const OwnerBuffer& block, const GPUVAddr gpu_addr, const VAddr cpu_addr,
                            const std::size_t size) {
-
         std::vector<MapInterval> overlaps = GetMapsInRange(cpu_addr, size);
         if (overlaps.empty()) {
             auto& memory_manager = system.GPU().MemoryManager();
@@ -272,7 +269,7 @@ private:
         return new_map;
     }
 
-    void UpdateBlock(const TBuffer& block, VAddr start, VAddr end,
+    void UpdateBlock(const OwnerBuffer& block, VAddr start, VAddr end,
                      std::vector<MapInterval>& overlaps) {
         const IntervalType base_interval{start, end};
         IntervalSet interval_set{};
@@ -313,7 +310,7 @@ private:
 
     void FlushMap(MapInterval map) {
         std::size_t size = map->GetEnd() - map->GetStart();
-        TBuffer block = blocks[map->GetStart() >> block_page_bits];
+        OwnerBuffer block = blocks[map->GetStart() >> block_page_bits];
         staging_buffer.resize(size);
         DownloadBlockData(block, block->GetOffset(map->GetStart()), size, staging_buffer.data());
         system.Memory().WriteBlockUnsafe(map->GetStart(), staging_buffer.data(), size);
@@ -328,7 +325,7 @@ private:
 
         buffer_ptr += size;
         buffer_offset += size;
-        return {&stream_buffer_handle, uploaded_offset};
+        return {stream_buffer_handle, uploaded_offset};
     }
 
     void AlignBuffer(std::size_t alignment) {
@@ -338,11 +335,11 @@ private:
         buffer_offset = offset_aligned;
     }
 
-    TBuffer EnlargeBlock(TBuffer buffer) {
+    OwnerBuffer EnlargeBlock(OwnerBuffer buffer) {
         const std::size_t old_size = buffer->GetSize();
         const std::size_t new_size = old_size + block_page_size;
         const VAddr cpu_addr = buffer->GetCpuAddr();
-        TBuffer new_buffer = CreateBlock(cpu_addr, new_size);
+        OwnerBuffer new_buffer = CreateBlock(cpu_addr, new_size);
         CopyBlock(buffer, new_buffer, 0, 0, old_size);
         buffer->SetEpoch(epoch);
         pending_destruction.push_back(buffer);
@@ -356,14 +353,14 @@ private:
         return new_buffer;
     }
 
-    TBuffer MergeBlocks(TBuffer first, TBuffer second) {
+    OwnerBuffer MergeBlocks(OwnerBuffer first, OwnerBuffer second) {
         const std::size_t size_1 = first->GetSize();
         const std::size_t size_2 = second->GetSize();
         const VAddr first_addr = first->GetCpuAddr();
         const VAddr second_addr = second->GetCpuAddr();
         const VAddr new_addr = std::min(first_addr, second_addr);
         const std::size_t new_size = size_1 + size_2;
-        TBuffer new_buffer = CreateBlock(new_addr, new_size);
+        OwnerBuffer new_buffer = CreateBlock(new_addr, new_size);
         CopyBlock(first, new_buffer, 0, new_buffer->GetOffset(first_addr), size_1);
         CopyBlock(second, new_buffer, 0, new_buffer->GetOffset(second_addr), size_2);
         first->SetEpoch(epoch);
@@ -380,8 +377,8 @@ private:
         return new_buffer;
     }
 
-    TBuffer GetBlock(const VAddr cpu_addr, const std::size_t size) {
-        TBuffer found{};
+    OwnerBuffer GetBlock(const VAddr cpu_addr, const std::size_t size) {
+        OwnerBuffer found;
         const VAddr cpu_addr_end = cpu_addr + size - 1;
         u64 page_start = cpu_addr >> block_page_bits;
         const u64 page_end = cpu_addr_end >> block_page_bits;
@@ -457,7 +454,7 @@ private:
     Core::System& system;
 
     std::unique_ptr<StreamBuffer> stream_buffer;
-    TBufferType stream_buffer_handle{};
+    BufferType stream_buffer_handle{};
 
     bool invalidated = false;
 
@@ -475,9 +472,9 @@ private:
 
     static constexpr u64 block_page_bits = 21;
    static constexpr u64 block_page_size = 1ULL << block_page_bits;
-    std::unordered_map<u64, TBuffer> blocks;
+    std::unordered_map<u64, OwnerBuffer> blocks;
 
-    std::list<TBuffer> pending_destruction;
+    std::list<OwnerBuffer> pending_destruction;
     u64 epoch = 0;
     u64 modified_ticks = 0;
 
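The renames above split the two roles the cache juggles: OwnerBuffer is the owning block type and BufferType is the cheap API handle, now returned by value (a plain GLuint for OpenGL) instead of through a const pointer. A minimal sketch of the resulting interface shape (simplified; u64 as in common/common_types.h; not the full class):

    #include <cstddef>
    #include <utility>

    template <typename OwnerBuffer, typename BufferType>
    class BufferCacheSketch {
    public:
        // Handle by value plus offset; copying a handle is trivial.
        using BufferInfo = std::pair<BufferType, u64>;

    protected:
        virtual BufferType ToHandle(const OwnerBuffer& storage) = 0;
        virtual BufferType GetEmptyBuffer(std::size_t size) = 0;
    };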
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index 430ee86ba..7231597d4 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -1006,6 +1006,12 @@ union Instruction {
     } stg;
 
     union {
+        BitField<23, 3, AtomicOp> operation;
+        BitField<48, 1, u64> extended;
+        BitField<20, 3, GlobalAtomicType> type;
+    } red;
+
+    union {
         BitField<52, 4, AtomicOp> operation;
         BitField<49, 3, GlobalAtomicType> type;
         BitField<28, 20, s64> offset;
@@ -1501,7 +1507,7 @@
 
     TextureType GetTextureType() const {
         // The TLDS instruction has a weird encoding for the texture type.
-        if (texture_info >= 0 && texture_info <= 1) {
+        if (texture_info <= 1) {
             return TextureType::Texture1D;
         }
         if (texture_info == 2 || texture_info == 8 || texture_info == 12 ||
@@ -1787,6 +1793,7 @@ public:
         ST_S,
         ST,    // Store in generic memory
         STG,   // Store in global memory
+        RED,   // Reduction operation
         ATOM,  // Atomic operation on global memory
         ATOMS, // Atomic operation on shared memory
         AL2P,  // Transforms attribute memory into physical memory
@@ -2097,6 +2104,7 @@ private:
            INST("1110111101010---", Id::ST_L, Type::Memory, "ST_L"),
            INST("101-------------", Id::ST, Type::Memory, "ST"),
            INST("1110111011011---", Id::STG, Type::Memory, "STG"),
+           INST("1110101111111---", Id::RED, Type::Memory, "RED"),
            INST("11101101--------", Id::ATOM, Type::Memory, "ATOM"),
            INST("11101100--------", Id::ATOMS, Type::Memory, "ATOMS"),
            INST("1110111110100---", Id::AL2P, Type::Memory, "AL2P"),
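RED (reduction) gets its own field layout rather than reusing the ATOM union, since the reduction encoding keeps its operation and type at lower bit offsets. Equivalent manual extraction of the new red fields, for illustration only (offsets taken from the BitFields above):

    const u64 insn = /* raw 64-bit instruction word */ 0;
    const u64 red_type      = (insn >> 20) & 0x7; // BitField<20, 3, GlobalAtomicType>
    const u64 red_operation = (insn >> 23) & 0x7; // BitField<23, 3, AtomicOp>
    const u64 red_extended  = (insn >> 48) & 0x1; // BitField<48, 1, u64>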
diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp
index cc434faf7..20e73a37e 100644
--- a/src/video_core/gpu_asynch.cpp
+++ b/src/video_core/gpu_asynch.cpp
@@ -12,8 +12,9 @@ namespace VideoCommon {
 
 GPUAsynch::GPUAsynch(Core::System& system, std::unique_ptr<VideoCore::RendererBase>&& renderer_,
                      std::unique_ptr<Core::Frontend::GraphicsContext>&& context)
-    : GPU(system, std::move(renderer_), true), gpu_thread{system}, gpu_context(std::move(context)),
-      cpu_context(renderer->GetRenderWindow().CreateSharedContext()) {}
+    : GPU(system, std::move(renderer_), true), gpu_thread{system},
+      cpu_context(renderer->GetRenderWindow().CreateSharedContext()),
+      gpu_context(std::move(context)) {}
 
 GPUAsynch::~GPUAsynch() = default;
 
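The initializer list is reordered to match member declaration order: C++ constructs members in the order they are declared, not the order they appear in the list, so listing gpu_context before cpu_context (which is presumably declared first) earned a -Wreorder warning and misrepresented the real construction sequence. Generic illustration:

    struct Widget {
        int first;
        int second;
        // Members are initialized in declaration order (first, then second)
        // regardless of the list below; compilers flag the mismatch.
        Widget() : second(2), first(1) {} // runs first(1), then second(2)
    };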
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index a3389d0d2..fd49bc2a9 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -6,8 +6,8 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/vm_manager.h"
 #include "core/memory.h"
 #include "video_core/gpu.h"
 #include "video_core/memory_manager.h"
@@ -17,10 +17,7 @@ namespace Tegra {
 
 MemoryManager::MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
     : rasterizer{rasterizer}, system{system} {
-    std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
-    std::fill(page_table.attributes.begin(), page_table.attributes.end(),
-              Common::PageType::Unmapped);
-    page_table.Resize(address_space_width);
+    page_table.Resize(address_space_width, page_bits, false);
 
     // Initialize the map with a single free region covering the entire managed space.
     VirtualMemoryArea initial_vma;
@@ -55,9 +52,9 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
 
     MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
     ASSERT(system.CurrentProcess()
-               ->VMManager()
-               .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped,
-                                   Kernel::MemoryAttribute::DeviceMapped)
+               ->PageTable()
+               .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared,
+                                   Kernel::Memory::MemoryAttribute::DeviceShared)
               .IsSuccess());
 
     return gpu_addr;
@@ -70,9 +67,9 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size)
 
     MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
     ASSERT(system.CurrentProcess()
-               ->VMManager()
-               .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped,
-                                   Kernel::MemoryAttribute::DeviceMapped)
+               ->PageTable()
+               .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared,
+                                   Kernel::Memory::MemoryAttribute::DeviceShared)
              .IsSuccess());
     return gpu_addr;
 }
@@ -89,9 +86,10 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {
 
     UnmapRange(gpu_addr, aligned_size);
     ASSERT(system.CurrentProcess()
-               ->VMManager()
-               .SetMemoryAttribute(cpu_addr.value(), size, Kernel::MemoryAttribute::DeviceMapped,
-                                   Kernel::MemoryAttribute::None)
+               ->PageTable()
+               .SetMemoryAttribute(cpu_addr.value(), size,
+                                   Kernel::Memory::MemoryAttribute::DeviceShared,
+                                   Kernel::Memory::MemoryAttribute::None)
              .IsSuccess());
 
     return gpu_addr;
@@ -147,16 +145,8 @@ T MemoryManager::Read(GPUVAddr addr) const {
         return value;
     }
 
-    switch (page_table.attributes[addr >> page_bits]) {
-    case Common::PageType::Unmapped:
-        LOG_ERROR(HW_GPU, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, addr);
-        return 0;
-    case Common::PageType::Memory:
-        ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", addr);
-        break;
-    default:
-        UNREACHABLE();
-    }
+    UNREACHABLE();
+
     return {};
 }
 
@@ -173,17 +163,7 @@ void MemoryManager::Write(GPUVAddr addr, T data) {
         return;
     }
 
-    switch (page_table.attributes[addr >> page_bits]) {
-    case Common::PageType::Unmapped:
-        LOG_ERROR(HW_GPU, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
-                  static_cast<u32>(data), addr);
-        return;
-    case Common::PageType::Memory:
-        ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", addr);
-        break;
-    default:
-        UNREACHABLE();
-    }
+    UNREACHABLE();
 }
 
 template u8 MemoryManager::Read<u8>(GPUVAddr addr) const;
@@ -249,18 +229,11 @@ void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::s
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
 
-        switch (page_table.attributes[page_index]) {
-        case Common::PageType::Memory: {
-            const VAddr src_addr{page_table.backing_addr[page_index] + page_offset};
-            // Flush must happen on the rasterizer interface, such that memory is always synchronous
-            // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
-            rasterizer.FlushRegion(src_addr, copy_amount);
-            memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
-            break;
-        }
-        default:
-            UNREACHABLE();
-        }
+        const VAddr src_addr{page_table.backing_addr[page_index] + page_offset};
+        // Flush must happen on the rasterizer interface, such that memory is always synchronous
+        // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
+        rasterizer.FlushRegion(src_addr, copy_amount);
+        memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
 
         page_index++;
         page_offset = 0;
@@ -305,18 +278,11 @@ void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
 
-        switch (page_table.attributes[page_index]) {
-        case Common::PageType::Memory: {
-            const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset};
-            // Invalidate must happen on the rasterizer interface, such that memory is always
-            // synchronous when it is written (even when in asynchronous GPU mode).
-            rasterizer.InvalidateRegion(dest_addr, copy_amount);
-            memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
-            break;
-        }
-        default:
-            UNREACHABLE();
-        }
+        const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset};
+        // Invalidate must happen on the rasterizer interface, such that memory is always
+        // synchronous when it is written (even when in asynchronous GPU mode).
+        rasterizer.InvalidateRegion(dest_addr, copy_amount);
+        memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
 
         page_index++;
         page_offset = 0;
@@ -362,8 +328,8 @@ void MemoryManager::CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, const
 
 bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) {
     const VAddr addr = page_table.backing_addr[gpu_addr >> page_bits];
-    const std::size_t page = (addr & Memory::PAGE_MASK) + size;
-    return page <= Memory::PAGE_SIZE;
+    const std::size_t page = (addr & Core::Memory::PAGE_MASK) + size;
+    return page <= Core::Memory::PAGE_SIZE;
 }
 
 void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
@@ -375,12 +341,13 @@ void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageTy
     ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
               base + page_table.pointers.size());
 
-    std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type);
-
     if (memory == nullptr) {
-        std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, memory);
-        std::fill(page_table.backing_addr.begin() + base, page_table.backing_addr.begin() + end,
-                  backing_addr);
+        while (base != end) {
+            page_table.pointers[base] = nullptr;
+            page_table.backing_addr[base] = 0;
+
+            base += 1;
+        }
     } else {
         while (base != end) {
            page_table.pointers[base] = memory;
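With VMManager gone, the GPU MemoryManager leans on two assumptions: Common::PageTable::Resize now takes the page bits directly (so the constructor no longer hand-fills pointers and attributes), and every page touched by the block paths is expected to be mapped, collapsing the old attribute switch to UNREACHABLE(). A sketch of the read slow path that remains, under that mapped-page assumption (condensed from the hunk above):

    // Per page: translate through backing_addr, flush, then copy.
    const VAddr src_addr = page_table.backing_addr[page_index] + page_offset;
    rasterizer.FlushRegion(src_addr, copy_amount); // keep GPU/CPU memory coherent
    memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);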
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 0d9468535..0ddd52d5a 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -179,7 +179,7 @@ private:
     /// End of address space, based on address space in bits.
     static constexpr GPUVAddr address_space_end{1ULL << address_space_width};
 
-    Common::BackingPageTable page_table{page_bits};
+    Common::PageTable page_table;
     VMAMap vma_map;
     VideoCore::RasterizerInterface& rasterizer;
 
diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp
index d01db97da..53622ca05 100644
--- a/src/video_core/rasterizer_accelerated.cpp
+++ b/src/video_core/rasterizer_accelerated.cpp
@@ -23,15 +23,15 @@ constexpr auto RangeFromInterval(Map& map, const Interval& interval) {
 
 } // Anonymous namespace
 
-RasterizerAccelerated::RasterizerAccelerated(Memory::Memory& cpu_memory_)
+RasterizerAccelerated::RasterizerAccelerated(Core::Memory::Memory& cpu_memory_)
     : cpu_memory{cpu_memory_} {}
 
 RasterizerAccelerated::~RasterizerAccelerated() = default;
 
 void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
     std::lock_guard lock{pages_mutex};
-    const u64 page_start{addr >> Memory::PAGE_BITS};
-    const u64 page_end{(addr + size + Memory::PAGE_SIZE - 1) >> Memory::PAGE_BITS};
+    const u64 page_start{addr >> Core::Memory::PAGE_BITS};
+    const u64 page_end{(addr + size + Core::Memory::PAGE_SIZE - 1) >> Core::Memory::PAGE_BITS};
 
     // Interval maps will erase segments if count reaches 0, so if delta is negative we have to
     // subtract after iterating
@@ -44,8 +44,8 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del
         const auto interval = pair.first & pages_interval;
         const int count = pair.second;
 
-        const VAddr interval_start_addr = boost::icl::first(interval) << Memory::PAGE_BITS;
-        const VAddr interval_end_addr = boost::icl::last_next(interval) << Memory::PAGE_BITS;
+        const VAddr interval_start_addr = boost::icl::first(interval) << Core::Memory::PAGE_BITS;
+        const VAddr interval_end_addr = boost::icl::last_next(interval) << Core::Memory::PAGE_BITS;
         const u64 interval_size = interval_end_addr - interval_start_addr;
 
         if (delta > 0 && count == delta) {
diff --git a/src/video_core/rasterizer_accelerated.h b/src/video_core/rasterizer_accelerated.h
index 315798e7c..91866d7dd 100644
--- a/src/video_core/rasterizer_accelerated.h
+++ b/src/video_core/rasterizer_accelerated.h
@@ -11,7 +11,7 @@
 #include "common/common_types.h"
 #include "video_core/rasterizer_interface.h"
 
-namespace Memory {
+namespace Core::Memory {
 class Memory;
 }
 
@@ -20,7 +20,7 @@ namespace VideoCore {
 /// Implements the shared part in GPU accelerated rasterizers in RasterizerInterface.
 class RasterizerAccelerated : public RasterizerInterface {
 public:
-    explicit RasterizerAccelerated(Memory::Memory& cpu_memory_);
+    explicit RasterizerAccelerated(Core::Memory::Memory& cpu_memory_);
     ~RasterizerAccelerated() override;
 
     void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override;
@@ -30,7 +30,7 @@ private:
     CachedPageMap cached_pages;
     std::mutex pages_mutex;
 
-    Memory::Memory& cpu_memory;
+    Core::Memory::Memory& cpu_memory;
 };
 
 } // namespace VideoCore
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 4eb37a96c..cb5792407 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -55,33 +55,31 @@ void OGLBufferCache::WriteBarrier() {
     glMemoryBarrier(GL_ALL_BARRIER_BITS);
 }
 
-const GLuint* OGLBufferCache::ToHandle(const Buffer& buffer) {
+GLuint OGLBufferCache::ToHandle(const Buffer& buffer) {
     return buffer->GetHandle();
 }
 
-const GLuint* OGLBufferCache::GetEmptyBuffer(std::size_t) {
-    static const GLuint null_buffer = 0;
-    return &null_buffer;
+GLuint OGLBufferCache::GetEmptyBuffer(std::size_t) {
+    return 0;
 }
 
 void OGLBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
                                      const u8* data) {
-    glNamedBufferSubData(*buffer->GetHandle(), static_cast<GLintptr>(offset),
+    glNamedBufferSubData(buffer->GetHandle(), static_cast<GLintptr>(offset),
                          static_cast<GLsizeiptr>(size), data);
 }
 
 void OGLBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
                                        u8* data) {
     MICROPROFILE_SCOPE(OpenGL_Buffer_Download);
-    glGetNamedBufferSubData(*buffer->GetHandle(), static_cast<GLintptr>(offset),
+    glGetNamedBufferSubData(buffer->GetHandle(), static_cast<GLintptr>(offset),
                             static_cast<GLsizeiptr>(size), data);
 }
 
 void OGLBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset,
                                std::size_t dst_offset, std::size_t size) {
-    glCopyNamedBufferSubData(*src->GetHandle(), *dst->GetHandle(),
-                             static_cast<GLintptr>(src_offset), static_cast<GLintptr>(dst_offset),
-                             static_cast<GLsizeiptr>(size));
+    glCopyNamedBufferSubData(src->GetHandle(), dst->GetHandle(), static_cast<GLintptr>(src_offset),
+                             static_cast<GLintptr>(dst_offset), static_cast<GLsizeiptr>(size));
 }
 
 OGLBufferCache::BufferInfo OGLBufferCache::ConstBufferUpload(const void* raw_pointer,
@@ -89,7 +87,7 @@ OGLBufferCache::BufferInfo OGLBufferCache::ConstBufferUpload(const void* raw_poi
     DEBUG_ASSERT(cbuf_cursor < std::size(cbufs));
     const GLuint& cbuf = cbufs[cbuf_cursor++];
     glNamedBufferSubData(cbuf, 0, static_cast<GLsizeiptr>(size), raw_pointer);
-    return {&cbuf, 0};
+    return {cbuf, 0};
 }
 
 } // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index d94a11252..a74817857 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -34,12 +34,12 @@ public:
     explicit CachedBufferBlock(VAddr cpu_addr, const std::size_t size);
     ~CachedBufferBlock();
 
-    const GLuint* GetHandle() const {
-        return &gl_buffer.handle;
+    GLuint GetHandle() const {
+        return gl_buffer.handle;
     }
 
 private:
-    OGLBuffer gl_buffer{};
+    OGLBuffer gl_buffer;
 };
 
 class OGLBufferCache final : public GenericBufferCache {
@@ -48,7 +48,7 @@ public:
                            const Device& device, std::size_t stream_size);
     ~OGLBufferCache();
 
-    const GLuint* GetEmptyBuffer(std::size_t) override;
+    GLuint GetEmptyBuffer(std::size_t) override;
 
     void Acquire() noexcept {
         cbuf_cursor = 0;
@@ -57,9 +57,9 @@ public:
 protected:
     Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
 
-    void WriteBarrier() override;
+    GLuint ToHandle(const Buffer& buffer) override;
 
-    const GLuint* ToHandle(const Buffer& buffer) override;
+    void WriteBarrier() override;
 
     void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
                          const u8* data) override;
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index c286502ba..d83dca25a 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -87,7 +87,7 @@ u32 Extract(u32& base, u32& num, u32 amount, std::optional<GLenum> limit = {}) {
 std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> BuildBaseBindings() noexcept {
     std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> bindings;
 
-    static std::array<std::size_t, 5> stage_swizzle = {0, 1, 2, 3, 4};
+    static constexpr std::array<std::size_t, 5> stage_swizzle{0, 1, 2, 3, 4};
     const u32 total_ubos = GetInteger<u32>(GL_MAX_UNIFORM_BUFFER_BINDINGS);
     const u32 total_ssbos = GetInteger<u32>(GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS);
     const u32 total_samplers = GetInteger<u32>(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS);
diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp
index f12e9f55f..d7ba57aca 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_query_cache.cpp
@@ -94,9 +94,9 @@ CachedQuery::CachedQuery(CachedQuery&& rhs) noexcept
     : VideoCommon::CachedQueryBase<HostCounter>(std::move(rhs)), cache{rhs.cache}, type{rhs.type} {}
 
 CachedQuery& CachedQuery::operator=(CachedQuery&& rhs) noexcept {
-    VideoCommon::CachedQueryBase<HostCounter>::operator=(std::move(rhs));
     cache = rhs.cache;
     type = rhs.type;
+    CachedQueryBase<HostCounter>::operator=(std::move(rhs));
     return *this;
 }
 
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index f4598fbf7..175374f0d 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -188,10 +188,8 @@ void RasterizerOpenGL::SetupVertexBuffer() {
         ASSERT(end > start);
         const u64 size = end - start + 1;
         const auto [vertex_buffer, vertex_buffer_offset] = buffer_cache.UploadMemory(start, size);
-
-        // Bind the vertex array to the buffer at the current offset.
-        vertex_array_pushbuffer.SetVertexBuffer(static_cast<GLuint>(index), vertex_buffer,
-                                                vertex_buffer_offset, vertex_array.stride);
+        glBindVertexBuffer(static_cast<GLuint>(index), vertex_buffer, vertex_buffer_offset,
+                           vertex_array.stride);
     }
 }
 
@@ -222,7 +220,7 @@ GLintptr RasterizerOpenGL::SetupIndexBuffer() {
     const auto& regs = system.GPU().Maxwell3D().regs;
     const std::size_t size = CalculateIndexBufferSize();
     const auto [buffer, offset] = buffer_cache.UploadMemory(regs.index_array.IndexStart(), size);
-    vertex_array_pushbuffer.SetIndexBuffer(buffer);
+    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer);
     return offset;
 }
 
@@ -524,7 +522,6 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
 
     // Prepare vertex array format.
     SetupVertexFormat();
-    vertex_array_pushbuffer.Setup();
 
     // Upload vertex and index data.
     SetupVertexBuffer();
@@ -534,17 +531,13 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
         index_buffer_offset = SetupIndexBuffer();
     }
 
-    // Prepare packed bindings.
-    bind_ubo_pushbuffer.Setup();
-    bind_ssbo_pushbuffer.Setup();
-
     // Setup emulation uniform buffer.
     GLShader::MaxwellUniformData ubo;
     ubo.SetFromRegs(gpu);
     const auto [buffer, offset] =
         buffer_cache.UploadHostMemory(&ubo, sizeof(ubo), device.GetUniformBufferAlignment());
-    bind_ubo_pushbuffer.Push(EmulationUniformBlockBinding, buffer, offset,
-                             static_cast<GLsizeiptr>(sizeof(ubo)));
+    glBindBufferRange(GL_UNIFORM_BUFFER, EmulationUniformBlockBinding, buffer, offset,
+                      static_cast<GLsizeiptr>(sizeof(ubo)));
 
     // Setup shaders and their used resources.
     texture_cache.GuardSamplers(true);
@@ -557,11 +550,6 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
     // Signal the buffer cache that we are not going to upload more things.
     buffer_cache.Unmap();
 
-    // Now that we are no longer uploading data, we can safely bind the buffers to OpenGL.
-    vertex_array_pushbuffer.Bind();
-    bind_ubo_pushbuffer.Bind();
-    bind_ssbo_pushbuffer.Bind();
-
     program_manager.BindGraphicsPipeline();
 
     if (texture_cache.TextureBarrier()) {
@@ -630,17 +618,11 @@ void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
         (Maxwell::MaxConstBufferSize + device.GetUniformBufferAlignment());
     buffer_cache.Map(buffer_size);
 
-    bind_ubo_pushbuffer.Setup();
-    bind_ssbo_pushbuffer.Setup();
-
     SetupComputeConstBuffers(kernel);
     SetupComputeGlobalMemory(kernel);
 
     buffer_cache.Unmap();
 
-    bind_ubo_pushbuffer.Bind();
-    bind_ssbo_pushbuffer.Bind();
-
     const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
     glDispatchCompute(launch_desc.grid_dim_x, launch_desc.grid_dim_y, launch_desc.grid_dim_z);
     ++num_queued_commands;
@@ -771,8 +753,8 @@ void RasterizerOpenGL::SetupConstBuffer(u32 binding, const Tegra::Engines::Const
                                         const ConstBufferEntry& entry) {
     if (!buffer.enabled) {
         // Set values to zero to unbind buffers
-        bind_ubo_pushbuffer.Push(binding, buffer_cache.GetEmptyBuffer(sizeof(float)), 0,
-                                 sizeof(float));
+        glBindBufferRange(GL_UNIFORM_BUFFER, binding, buffer_cache.GetEmptyBuffer(sizeof(float)), 0,
+                          sizeof(float));
         return;
     }
 
@@ -783,7 +765,7 @@ void RasterizerOpenGL::SetupConstBuffer(u32 binding, const Tegra::Engines::Const
     const auto alignment = device.GetUniformBufferAlignment();
     const auto [cbuf, offset] = buffer_cache.UploadMemory(buffer.address, size, alignment, false,
                                                           device.HasFastBufferSubData());
-    bind_ubo_pushbuffer.Push(binding, cbuf, offset, size);
+    glBindBufferRange(GL_UNIFORM_BUFFER, binding, cbuf, offset, size);
 }
 
 void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, const Shader& shader) {
@@ -819,7 +801,8 @@ void RasterizerOpenGL::SetupGlobalMemory(u32 binding, const GlobalMemoryEntry& e
     const auto alignment{device.GetShaderStorageBufferAlignment()};
     const auto [ssbo, buffer_offset] =
         buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.IsWritten());
-    bind_ssbo_pushbuffer.Push(binding, ssbo, buffer_offset, static_cast<GLsizeiptr>(size));
+    glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding, ssbo, buffer_offset,
+                      static_cast<GLsizeiptr>(size));
 }
 
 void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, const Shader& shader) {
@@ -1432,7 +1415,7 @@ void RasterizerOpenGL::EndTransformFeedback() {
         const GPUVAddr gpu_addr = binding.Address();
         const std::size_t size = binding.buffer_size;
         const auto [dest_buffer, offset] = buffer_cache.UploadMemory(gpu_addr, size, 4, true);
-        glCopyNamedBufferSubData(handle, *dest_buffer, 0, offset, static_cast<GLsizeiptr>(size));
+        glCopyNamedBufferSubData(handle, dest_buffer, 0, offset, static_cast<GLsizeiptr>(size));
     }
 }
 
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 435da4425..caea174d2 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -231,9 +231,7 @@ private:
     static constexpr std::size_t STREAM_BUFFER_SIZE = 128 * 1024 * 1024;
     OGLBufferCache buffer_cache;
 
-    VertexArrayPushBuffer vertex_array_pushbuffer{state_tracker};
-    BindBuffersRangePushBuffer bind_ubo_pushbuffer{GL_UNIFORM_BUFFER};
-    BindBuffersRangePushBuffer bind_ssbo_pushbuffer{GL_SHADER_STORAGE_BUFFER};
+    GLint vertex_binding = 0;
 
     std::array<OGLBuffer, Tegra::Engines::Maxwell3D::Regs::NumTransformFeedbackBuffers>
         transform_feedback_buffers;
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 59bbd1211..9495f48a2 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -835,7 +835,8 @@ private:
 
     void DeclareConstantBuffers() {
         u32 binding = device.GetBaseBindings(stage).uniform_buffer;
-        for (const auto& [index, cbuf] : ir.GetConstantBuffers()) {
+        for (const auto& buffers : ir.GetConstantBuffers()) {
+            const auto index = buffers.first;
             code.AddLine("layout (std140, binding = {}) uniform {} {{", binding++,
                          GetConstBufferBlock(index));
             code.AddLine("    uvec4 {}[{}];", GetConstBuffer(index), MAX_CONSTBUFFER_ELEMENTS);
@@ -2119,8 +2120,14 @@ private:
             return {};
         }
         return {fmt::format("atomic{}({}, {})", opname, Visit(operation[0]).GetCode(),
-                            Visit(operation[1]).As(type)),
-                type};
+                            Visit(operation[1]).AsUint()),
+                Type::Uint};
+    }
+
+    template <const std::string_view& opname, Type type>
+    Expression Reduce(Operation operation) {
+        code.AddLine("{};", Atomic<opname, type>(operation).GetCode());
+        return {};
     }
 
     Expression Branch(Operation operation) {
@@ -2479,6 +2486,20 @@ private:
             &GLSLDecompiler::Atomic<Func::Or, Type::Int>,
             &GLSLDecompiler::Atomic<Func::Xor, Type::Int>,
 
+            &GLSLDecompiler::Reduce<Func::Add, Type::Uint>,
+            &GLSLDecompiler::Reduce<Func::Min, Type::Uint>,
+            &GLSLDecompiler::Reduce<Func::Max, Type::Uint>,
+            &GLSLDecompiler::Reduce<Func::And, Type::Uint>,
+            &GLSLDecompiler::Reduce<Func::Or, Type::Uint>,
+            &GLSLDecompiler::Reduce<Func::Xor, Type::Uint>,
+
+            &GLSLDecompiler::Reduce<Func::Add, Type::Int>,
+            &GLSLDecompiler::Reduce<Func::Min, Type::Int>,
+            &GLSLDecompiler::Reduce<Func::Max, Type::Int>,
+            &GLSLDecompiler::Reduce<Func::And, Type::Int>,
+            &GLSLDecompiler::Reduce<Func::Or, Type::Int>,
+            &GLSLDecompiler::Reduce<Func::Xor, Type::Int>,
+
             &GLSLDecompiler::Branch,
             &GLSLDecompiler::BranchIndirect,
             &GLSLDecompiler::PushFlowStack,
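
The Reduce handlers registered above reuse Atomic to build the GLSL call and then emit it as a bare statement, so the shader discards the result. A self-contained sketch of the string construction, with illustrative names (MakeAtomicExpr is not part of this patch):

    #include <string>
    #include <string_view>

    #include <fmt/format.h>

    // Atomic<opname, type> produces an expression such as "atomicAdd(gmem_0[0], value)".
    std::string MakeAtomicExpr(std::string_view opname, std::string_view memory,
                               std::string_view value) {
        return fmt::format("atomic{}({}, {})", opname, memory, value);
    }

    // Reduce<opname, type> then emits AddLine("{};", expr), producing the statement
    // "atomicAdd(gmem_0[0], value);" whose return value is never read.
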
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index f1a28cc21..b2a179746 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -315,8 +315,8 @@ public:
 
 RendererOpenGL::RendererOpenGL(Core::Frontend::EmuWindow& emu_window, Core::System& system,
                                Core::Frontend::GraphicsContext& context)
-    : VideoCore::RendererBase{emu_window}, emu_window{emu_window}, system{system},
-      frame_mailbox{}, context{context}, has_debug_tool{HasDebugTool()} {}
+    : RendererBase{emu_window}, emu_window{emu_window}, system{system}, context{context},
+      has_debug_tool{HasDebugTool()} {}
 
 RendererOpenGL::~RendererOpenGL() = default;
 
diff --git a/src/video_core/renderer_opengl/utils.cpp b/src/video_core/renderer_opengl/utils.cpp
index b751086fa..6d7bb16b2 100644
--- a/src/video_core/renderer_opengl/utils.cpp
+++ b/src/video_core/renderer_opengl/utils.cpp
@@ -14,68 +14,6 @@
 
 namespace OpenGL {
 
-struct VertexArrayPushBuffer::Entry {
-    GLuint binding_index{};
-    const GLuint* buffer{};
-    GLintptr offset{};
-    GLsizei stride{};
-};
-
-VertexArrayPushBuffer::VertexArrayPushBuffer(StateTracker& state_tracker)
-    : state_tracker{state_tracker} {}
-
-VertexArrayPushBuffer::~VertexArrayPushBuffer() = default;
-
-void VertexArrayPushBuffer::Setup() {
-    index_buffer = nullptr;
-    vertex_buffers.clear();
-}
-
-void VertexArrayPushBuffer::SetIndexBuffer(const GLuint* buffer) {
-    index_buffer = buffer;
-}
-
-void VertexArrayPushBuffer::SetVertexBuffer(GLuint binding_index, const GLuint* buffer,
-                                            GLintptr offset, GLsizei stride) {
-    vertex_buffers.push_back(Entry{binding_index, buffer, offset, stride});
-}
-
-void VertexArrayPushBuffer::Bind() {
-    if (index_buffer) {
-        state_tracker.BindIndexBuffer(*index_buffer);
-    }
-
-    for (const auto& entry : vertex_buffers) {
-        glBindVertexBuffer(entry.binding_index, *entry.buffer, entry.offset, entry.stride);
-    }
-}
-
-struct BindBuffersRangePushBuffer::Entry {
-    GLuint binding;
-    const GLuint* buffer;
-    GLintptr offset;
-    GLsizeiptr size;
-};
-
-BindBuffersRangePushBuffer::BindBuffersRangePushBuffer(GLenum target) : target{target} {}
-
-BindBuffersRangePushBuffer::~BindBuffersRangePushBuffer() = default;
-
-void BindBuffersRangePushBuffer::Setup() {
-    entries.clear();
-}
-
-void BindBuffersRangePushBuffer::Push(GLuint binding, const GLuint* buffer, GLintptr offset,
-                                      GLsizeiptr size) {
-    entries.push_back(Entry{binding, buffer, offset, size});
-}
-
-void BindBuffersRangePushBuffer::Bind() {
-    for (const Entry& entry : entries) {
-        glBindBufferRange(target, entry.binding, *entry.buffer, entry.offset, entry.size);
-    }
-}
-
 void LabelGLObject(GLenum identifier, GLuint handle, VAddr addr, std::string_view extra_info) {
     if (!GLAD_GL_KHR_debug) {
         // We don't need to throw an error as this is just for debugging
diff --git a/src/video_core/renderer_opengl/utils.h b/src/video_core/renderer_opengl/utils.h
index 47ee3177b..9c09ee12c 100644
--- a/src/video_core/renderer_opengl/utils.h
+++ b/src/video_core/renderer_opengl/utils.h
@@ -11,49 +11,6 @@
 
 namespace OpenGL {
 
-class StateTracker;
-
-class VertexArrayPushBuffer final {
-public:
-    explicit VertexArrayPushBuffer(StateTracker& state_tracker);
-    ~VertexArrayPushBuffer();
-
-    void Setup();
-
-    void SetIndexBuffer(const GLuint* buffer);
-
-    void SetVertexBuffer(GLuint binding_index, const GLuint* buffer, GLintptr offset,
-                         GLsizei stride);
-
-    void Bind();
-
-private:
-    struct Entry;
-
-    StateTracker& state_tracker;
-
-    const GLuint* index_buffer{};
-    std::vector<Entry> vertex_buffers;
-};
-
-class BindBuffersRangePushBuffer final {
-public:
-    explicit BindBuffersRangePushBuffer(GLenum target);
-    ~BindBuffersRangePushBuffer();
-
-    void Setup();
-
-    void Push(GLuint binding, const GLuint* buffer, GLintptr offset, GLsizeiptr size);
-
-    void Bind();
-
-private:
-    struct Entry;
-
-    GLenum target;
-    std::vector<Entry> entries;
-};
-
 void LabelGLObject(GLenum identifier, GLuint handle, VAddr addr, std::string_view extra_info = {});
 
 } // namespace OpenGL
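
Both classes removed above existed only to defer binding: while the buffer cache was still uploading, a block's GLuint could change, so callers recorded const GLuint* entries and Bind() dereferenced them afterwards. With handles now stable values, the rasterizer calls glBindVertexBuffer and glBindBufferRange directly at record time. A condensed sketch of the retired pattern, under the same assumption the old code made (handles only safe to read at bind time):

    struct Entry {
        GLuint binding;
        const GLuint* buffer; // handle not yet stable, so store a pointer
        GLintptr offset;
        GLsizeiptr size;
    };

    std::vector<Entry> entries;
    // ... entries pushed while uploads are still in flight ...
    for (const Entry& entry : entries) {
        glBindBufferRange(GL_SHADER_STORAGE_BUFFER, entry.binding, *entry.buffer,
                          entry.offset, entry.size);
    }
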
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 0d167afbd..81e1de2be 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -74,18 +74,18 @@ Buffer VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
     return std::make_shared<CachedBufferBlock>(device, memory_manager, cpu_addr, size);
 }
 
-const VkBuffer* VKBufferCache::ToHandle(const Buffer& buffer) {
+VkBuffer VKBufferCache::ToHandle(const Buffer& buffer) {
     return buffer->GetHandle();
 }
 
-const VkBuffer* VKBufferCache::GetEmptyBuffer(std::size_t size) {
+VkBuffer VKBufferCache::GetEmptyBuffer(std::size_t size) {
     size = std::max(size, std::size_t(4));
     const auto& empty = staging_pool.GetUnusedBuffer(size, false);
     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf) {
         cmdbuf.FillBuffer(buffer, 0, size, 0);
     });
-    return empty.handle.address();
+    return *empty.handle;
 }
 
 void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
@@ -94,7 +94,7 @@ void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, st
     std::memcpy(staging.commit->Map(size), data, size);
 
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset,
+    scheduler.Record([staging = *staging.handle, buffer = buffer->GetHandle(), offset,
                       size](vk::CommandBuffer cmdbuf) {
         cmdbuf.CopyBuffer(staging, buffer, VkBufferCopy{0, offset, size});
 
@@ -117,7 +117,7 @@ void VKBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset,
                                       u8* data) {
     const auto& staging = staging_pool.GetUnusedBuffer(size, true);
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset,
+    scheduler.Record([staging = *staging.handle, buffer = buffer->GetHandle(), offset,
                       size](vk::CommandBuffer cmdbuf) {
         VkBufferMemoryBarrier barrier;
         barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
@@ -144,7 +144,7 @@ void VKBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset,
 void VKBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset,
                               std::size_t dst_offset, std::size_t size) {
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([src_buffer = *src->GetHandle(), dst_buffer = *dst->GetHandle(), src_offset,
+    scheduler.Record([src_buffer = src->GetHandle(), dst_buffer = dst->GetHandle(), src_offset,
                       dst_offset, size](vk::CommandBuffer cmdbuf) {
         cmdbuf.CopyBuffer(src_buffer, dst_buffer, VkBufferCopy{src_offset, dst_offset, size});
 
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index d3c23da98..3cd2e2774 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -33,8 +33,8 @@ public:
                       VAddr cpu_addr, std::size_t size);
     ~CachedBufferBlock();
 
-    const VkBuffer* GetHandle() const {
-        return buffer.handle.address();
+    VkBuffer GetHandle() const {
+        return *buffer.handle;
     }
 
 private:
@@ -50,15 +50,15 @@ public:
                   VKScheduler& scheduler, VKStagingBufferPool& staging_pool);
     ~VKBufferCache();
 
-    const VkBuffer* GetEmptyBuffer(std::size_t size) override;
+    VkBuffer GetEmptyBuffer(std::size_t size) override;
 
 protected:
+    VkBuffer ToHandle(const Buffer& buffer) override;
+
     void WriteBarrier() override {}
 
     Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
 
-    const VkBuffer* ToHandle(const Buffer& buffer) override;
-
     void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
                          const u8* data) override;
 
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 9d92305f4..878a78755 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -343,13 +343,13 @@ QuadArrayPass::QuadArrayPass(const VKDevice& device, VKScheduler& scheduler,
 
 QuadArrayPass::~QuadArrayPass() = default;
 
-std::pair<const VkBuffer*, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
+std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
     const u32 num_triangle_vertices = num_vertices * 6 / 4;
     const std::size_t staging_size = num_triangle_vertices * sizeof(u32);
     auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
 
     update_descriptor_queue.Acquire();
-    update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size);
+    update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
     const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());
 
     scheduler.RequestOutsideRenderPassOperationContext();
@@ -377,7 +377,7 @@ std::pair<const VkBuffer*, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertice
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {});
     });
-    return {buffer.handle.address(), 0};
+    return {*buffer.handle, 0};
 }
 
 Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler,
@@ -391,14 +391,14 @@ Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler,
 
 Uint8Pass::~Uint8Pass() = default;
 
-std::pair<const VkBuffer*, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
-                                                    u64 src_offset) {
+std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
+                                             u64 src_offset) {
     const auto staging_size = static_cast<u32>(num_vertices * sizeof(u16));
     auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
 
     update_descriptor_queue.Acquire();
-    update_descriptor_queue.AddBuffer(&src_buffer, src_offset, num_vertices);
-    update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size);
+    update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
+    update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
     const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());
 
     scheduler.RequestOutsideRenderPassOperationContext();
@@ -422,7 +422,7 @@ std::pair<const VkBuffer*, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer s
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {});
     });
-    return {buffer.handle.address(), 0};
+    return {*buffer.handle, 0};
 }
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index c62516bff..ec80c8683 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -50,7 +50,7 @@ public:
                   VKUpdateDescriptorQueue& update_descriptor_queue);
     ~QuadArrayPass();
 
-    std::pair<const VkBuffer*, VkDeviceSize> Assemble(u32 num_vertices, u32 first);
+    std::pair<VkBuffer, VkDeviceSize> Assemble(u32 num_vertices, u32 first);
 
 private:
     VKScheduler& scheduler;
@@ -65,7 +65,7 @@ public:
              VKUpdateDescriptorQueue& update_descriptor_queue);
     ~Uint8Pass();
 
-    std::pair<const VkBuffer*, u64> Assemble(u32 num_vertices, VkBuffer src_buffer, u64 src_offset);
+    std::pair<VkBuffer, u64> Assemble(u32 num_vertices, VkBuffer src_buffer, u64 src_offset);
 
 private:
     VKScheduler& scheduler;
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h
index 35ee54d30..5b6858e9b 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.h
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.h
@@ -32,7 +32,7 @@ public:
      * memory. When passing false, it will try to allocate device local memory.
      * @returns A memory commit.
      */
-    VKMemoryCommit Commit(const VkMemoryRequirements& reqs, bool host_visible);
+    VKMemoryCommit Commit(const VkMemoryRequirements& requirements, bool host_visible);
 
     /// Commits memory required by the buffer and binds it.
     VKMemoryCommit Commit(const vk::Buffer& buffer, bool host_visible);
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 774ba1f26..4ca0febb8 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -137,13 +137,13 @@ Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry
 
 class BufferBindings final {
 public:
-    void AddVertexBinding(const VkBuffer* buffer, VkDeviceSize offset) {
-        vertex.buffer_ptrs[vertex.num_buffers] = buffer;
+    void AddVertexBinding(VkBuffer buffer, VkDeviceSize offset) {
+        vertex.buffers[vertex.num_buffers] = buffer;
         vertex.offsets[vertex.num_buffers] = offset;
         ++vertex.num_buffers;
     }
 
-    void SetIndexBinding(const VkBuffer* buffer, VkDeviceSize offset, VkIndexType type) {
+    void SetIndexBinding(VkBuffer buffer, VkDeviceSize offset, VkIndexType type) {
         index.buffer = buffer;
         index.offset = offset;
         index.type = type;
@@ -227,19 +227,19 @@ private:
     // Some of these fields are intentionally left uninitialized to avoid initializing them twice.
     struct {
         std::size_t num_buffers = 0;
-        std::array<const VkBuffer*, Maxwell::NumVertexArrays> buffer_ptrs;
+        std::array<VkBuffer, Maxwell::NumVertexArrays> buffers;
         std::array<VkDeviceSize, Maxwell::NumVertexArrays> offsets;
     } vertex;
 
     struct {
-        const VkBuffer* buffer = nullptr;
+        VkBuffer buffer = nullptr;
         VkDeviceSize offset;
         VkIndexType type;
     } index;
 
     template <std::size_t N>
     void BindStatic(VKScheduler& scheduler) const {
-        if (index.buffer != nullptr) {
+        if (index.buffer) {
             BindStatic<N, true>(scheduler);
         } else {
             BindStatic<N, false>(scheduler);
@@ -254,18 +254,14 @@ private:
         }
 
         std::array<VkBuffer, N> buffers;
-        std::transform(vertex.buffer_ptrs.begin(), vertex.buffer_ptrs.begin() + N, buffers.begin(),
-                       [](const auto ptr) { return *ptr; });
-
         std::array<VkDeviceSize, N> offsets;
+        std::copy(vertex.buffers.begin(), vertex.buffers.begin() + N, buffers.begin());
         std::copy(vertex.offsets.begin(), vertex.offsets.begin() + N, offsets.begin());
 
         if constexpr (is_indexed) {
             // Indexed draw
-            scheduler.Record([buffers, offsets, index_buffer = *index.buffer,
-                              index_offset = index.offset,
-                              index_type = index.type](vk::CommandBuffer cmdbuf) {
-                cmdbuf.BindIndexBuffer(index_buffer, index_offset, index_type);
+            scheduler.Record([buffers, offsets, index = index](vk::CommandBuffer cmdbuf) {
+                cmdbuf.BindIndexBuffer(index.buffer, index.offset, index.type);
                 cmdbuf.BindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data());
             });
         } else {
@@ -790,7 +786,7 @@ void RasterizerVulkan::BeginTransformFeedback() {
         const std::size_t size = binding.buffer_size;
         const auto [buffer, offset] = buffer_cache.UploadMemory(gpu_addr, size, 4, true);
 
-        scheduler.Record([buffer = *buffer, offset = offset, size](vk::CommandBuffer cmdbuf) {
+        scheduler.Record([buffer = buffer, offset = offset, size](vk::CommandBuffer cmdbuf) {
             cmdbuf.BindTransformFeedbackBuffersEXT(0, 1, &buffer, &offset, &size);
             cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr);
         });
@@ -870,7 +866,7 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar
     auto format = regs.index_array.format;
     const bool is_uint8 = format == Maxwell::IndexFormat::UnsignedByte;
     if (is_uint8 && !device.IsExtIndexTypeUint8Supported()) {
-        std::tie(buffer, offset) = uint8_pass.Assemble(params.num_vertices, *buffer, offset);
+        std::tie(buffer, offset) = uint8_pass.Assemble(params.num_vertices, buffer, offset);
         format = Maxwell::IndexFormat::UnsignedShort;
     }
 
@@ -1007,8 +1003,8 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAdd
     const auto size = memory_manager.Read<u32>(address + 8);
 
     if (size == 0) {
-        // Sometimes global memory pointers don't have a proper size. Upload a dummy entry because
-        // Vulkan doesn't like empty buffers.
+        // Sometimes global memory pointers don't have a proper size. Upload a dummy entry
+        // because Vulkan doesn't like empty buffers.
         constexpr std::size_t dummy_size = 4;
         const auto buffer = buffer_cache.GetEmptyBuffer(dummy_size);
         update_descriptor_queue.AddBuffer(buffer, 0, dummy_size);
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index 62e4ca488..aaa138f52 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -1938,11 +1938,8 @@ private:
         return {};
     }
 
-    template <Id (Module::*func)(Id, Id, Id, Id, Id), Type result_type,
-              Type value_type = result_type>
+    template <Id (Module::*func)(Id, Id, Id, Id, Id)>
     Expression Atomic(Operation operation) {
-        const Id type_def = GetTypeDefinition(result_type);
-
         Id pointer;
         if (const auto smem = std::get_if<SmemNode>(&*operation[0])) {
             pointer = GetSharedMemoryPointer(*smem);
@@ -1950,15 +1947,19 @@ private:
             pointer = GetGlobalMemoryPointer(*gmem);
         } else {
             UNREACHABLE();
-            return {Constant(type_def, 0), result_type};
+            return {v_float_zero, Type::Float};
         }
-
-        const Id value = As(Visit(operation[1]), value_type);
-
         const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
-        const Id semantics = Constant(type_def, 0);
+        const Id semantics = Constant(t_uint, 0);
+        const Id value = AsUint(Visit(operation[1]));
+
+        return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
+    }
 
-        return {(this->*func)(type_def, pointer, scope, semantics, value), result_type};
+    template <Id (Module::*func)(Id, Id, Id, Id, Id)>
+    Expression Reduce(Operation operation) {
+        Atomic<func>(operation);
+        return {};
     }
 
     Expression Branch(Operation operation) {
1963 1964
1964 Expression Branch(Operation operation) { 1965 Expression Branch(Operation operation) {
@@ -2547,21 +2548,35 @@ private:
             &SPIRVDecompiler::AtomicImageXor,
             &SPIRVDecompiler::AtomicImageExchange,
 
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange, Type::Uint>,
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd, Type::Uint>,
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicUMin, Type::Uint>,
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicUMax, Type::Uint>,
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicAnd, Type::Uint>,
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicOr, Type::Uint>,
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicXor, Type::Uint>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicUMin>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicUMax>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicAnd>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicOr>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicXor>,
 
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange, Type::Int>,
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd, Type::Int>,
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicSMin, Type::Int>,
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicSMax, Type::Int>,
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicAnd, Type::Int>,
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicOr, Type::Int>,
-            &SPIRVDecompiler::Atomic<&Module::OpAtomicXor, Type::Int>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicSMin>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicSMax>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicAnd>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicOr>,
+            &SPIRVDecompiler::Atomic<&Module::OpAtomicXor>,
+
+            &SPIRVDecompiler::Reduce<&Module::OpAtomicIAdd>,
+            &SPIRVDecompiler::Reduce<&Module::OpAtomicUMin>,
+            &SPIRVDecompiler::Reduce<&Module::OpAtomicUMax>,
+            &SPIRVDecompiler::Reduce<&Module::OpAtomicAnd>,
+            &SPIRVDecompiler::Reduce<&Module::OpAtomicOr>,
+            &SPIRVDecompiler::Reduce<&Module::OpAtomicXor>,
+
+            &SPIRVDecompiler::Reduce<&Module::OpAtomicIAdd>,
+            &SPIRVDecompiler::Reduce<&Module::OpAtomicSMin>,
+            &SPIRVDecompiler::Reduce<&Module::OpAtomicSMax>,
+            &SPIRVDecompiler::Reduce<&Module::OpAtomicAnd>,
+            &SPIRVDecompiler::Reduce<&Module::OpAtomicOr>,
+            &SPIRVDecompiler::Reduce<&Module::OpAtomicXor>,
 
             &SPIRVDecompiler::Branch,
             &SPIRVDecompiler::BranchIndirect,
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
index 4bfec0077..681ecde98 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
@@ -35,12 +35,13 @@ void VKUpdateDescriptorQueue::Send(VkDescriptorUpdateTemplateKHR update_template
         payload.clear();
     }
 
+    // TODO(Rodrigo): Rework to write the payload directly
     const auto payload_start = payload.data() + payload.size();
     for (const auto& entry : entries) {
         if (const auto image = std::get_if<VkDescriptorImageInfo>(&entry)) {
             payload.push_back(*image);
-        } else if (const auto buffer = std::get_if<Buffer>(&entry)) {
-            payload.emplace_back(*buffer->buffer, buffer->offset, buffer->size);
+        } else if (const auto buffer = std::get_if<VkDescriptorBufferInfo>(&entry)) {
+            payload.push_back(*buffer);
         } else if (const auto texel = std::get_if<VkBufferView>(&entry)) {
             payload.push_back(*texel);
         } else {
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index a9e3d5dba..6ba2c9997 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -18,12 +18,11 @@ class VKScheduler;
 
 class DescriptorUpdateEntry {
 public:
-    explicit DescriptorUpdateEntry() : image{} {}
+    explicit DescriptorUpdateEntry() {}
 
     DescriptorUpdateEntry(VkDescriptorImageInfo image) : image{image} {}
 
-    DescriptorUpdateEntry(VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size)
-        : buffer{buffer, offset, size} {}
+    DescriptorUpdateEntry(VkDescriptorBufferInfo buffer) : buffer{buffer} {}
 
     DescriptorUpdateEntry(VkBufferView texel_buffer) : texel_buffer{texel_buffer} {}
 
@@ -54,8 +53,8 @@ public:
         entries.emplace_back(VkDescriptorImageInfo{{}, image_view, {}});
     }
 
-    void AddBuffer(const VkBuffer* buffer, u64 offset, std::size_t size) {
-        entries.push_back(Buffer{buffer, offset, size});
+    void AddBuffer(VkBuffer buffer, u64 offset, std::size_t size) {
+        entries.emplace_back(VkDescriptorBufferInfo{buffer, offset, size});
     }
 
     void AddTexelBuffer(VkBufferView texel_buffer) {
@@ -67,12 +66,7 @@ public:
     }
 
 private:
-    struct Buffer {
-        const VkBuffer* buffer = nullptr;
-        u64 offset = 0;
-        std::size_t size = 0;
-    };
-    using Variant = std::variant<VkDescriptorImageInfo, Buffer, VkBufferView>;
+    using Variant = std::variant<VkDescriptorImageInfo, VkDescriptorBufferInfo, VkBufferView>;
 
     const VKDevice& device;
     VKScheduler& scheduler;
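
Storing VkDescriptorBufferInfo in the variant, instead of a private Buffer struct holding a const VkBuffer*, lets Send copy entries into the payload without a translation step; the payload element is already the struct the descriptor update template consumes. A minimal sketch of the simplified forwarding, assuming only the standard headers and vulkan.h (Forward is illustrative, not part of this patch):

    #include <variant>
    #include <vector>

    #include <vulkan/vulkan.h>

    using Variant = std::variant<VkDescriptorImageInfo, VkDescriptorBufferInfo, VkBufferView>;

    // Previously, buffer entries were repacked field by field at Send() time;
    // now a buffer entry is forwarded with a plain copy.
    void Forward(const Variant& entry, std::vector<VkDescriptorBufferInfo>& payload) {
        if (const auto buffer = std::get_if<VkDescriptorBufferInfo>(&entry)) {
            payload.push_back(*buffer);
        }
    }
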
diff --git a/src/video_core/shader/control_flow.cpp b/src/video_core/shader/control_flow.cpp
index 2e2711350..6d313963a 100644
--- a/src/video_core/shader/control_flow.cpp
+++ b/src/video_core/shader/control_flow.cpp
@@ -484,17 +484,17 @@ bool TryInspectAddress(CFGRebuildState& state) {
     }
     case BlockCollision::Inside: {
         // This case is the tricky one:
-        // We need to Split the block in 2 sepparate blocks
+        // We need to split the block into 2 separate blocks
         const u32 end = state.block_info[block_index].end;
         BlockInfo& new_block = CreateBlockInfo(state, address, end);
         BlockInfo& current_block = state.block_info[block_index];
         current_block.end = address - 1;
-        new_block.branch = current_block.branch;
+        new_block.branch = std::move(current_block.branch);
         BlockBranchInfo forward_branch = MakeBranchInfo<SingleBranch>();
         const auto branch = std::get_if<SingleBranch>(forward_branch.get());
         branch->address = address;
         branch->ignore = true;
-        current_block.branch = forward_branch;
+        current_block.branch = std::move(forward_branch);
         return true;
     }
     default:
diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp
index 0dd7a1196..85ee9aa5e 100644
--- a/src/video_core/shader/decode/image.cpp
+++ b/src/video_core/shader/decode/image.cpp
@@ -352,8 +352,10 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
         registry.ObtainBoundSampler(static_cast<u32>(instr.image.index.Value()));
     } else {
         const Node image_register = GetRegister(instr.gpr39);
-        const auto [base_image, buffer, offset] = TrackCbuf(
-            image_register, global_code, static_cast<s64>(global_code.size()));
+        const auto result = TrackCbuf(image_register, global_code,
+                                      static_cast<s64>(global_code.size()));
+        const auto buffer = std::get<1>(result);
+        const auto offset = std::get<2>(result);
         descriptor = registry.ObtainBindlessSampler(buffer, offset);
     }
     if (!descriptor) {
@@ -497,9 +499,12 @@ Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType t
 
 Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type) {
     const Node image_register = GetRegister(reg);
-    const auto [base_image, buffer, offset] =
+    const auto result =
         TrackCbuf(image_register, global_code, static_cast<s64>(global_code.size()));
 
+    const auto buffer = std::get<1>(result);
+    const auto offset = std::get<2>(result);
+
     const auto it =
         std::find_if(std::begin(used_images), std::end(used_images),
                      [buffer = buffer, offset = offset](const Image& entry) {
diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp
index b8f63922f..8112ead3e 100644
--- a/src/video_core/shader/decode/memory.cpp
+++ b/src/video_core/shader/decode/memory.cpp
@@ -3,7 +3,9 @@
 // Refer to the license.txt file included.
 
 #include <algorithm>
+#include <utility>
 #include <vector>
+
 #include <fmt/format.h>
 
 #include "common/alignment.h"
@@ -16,6 +18,7 @@
 
 namespace VideoCommon::Shader {
 
+using std::move;
 using Tegra::Shader::AtomicOp;
 using Tegra::Shader::AtomicType;
 using Tegra::Shader::Attribute;
@@ -27,29 +30,26 @@ using Tegra::Shader::StoreType;
 
 namespace {
 
-Node GetAtomOperation(AtomicOp op, bool is_signed, Node memory, Node data) {
-    const OperationCode operation_code = [op] {
-        switch (op) {
-        case AtomicOp::Add:
-            return OperationCode::AtomicIAdd;
-        case AtomicOp::Min:
-            return OperationCode::AtomicIMin;
-        case AtomicOp::Max:
-            return OperationCode::AtomicIMax;
-        case AtomicOp::And:
-            return OperationCode::AtomicIAnd;
-        case AtomicOp::Or:
-            return OperationCode::AtomicIOr;
-        case AtomicOp::Xor:
-            return OperationCode::AtomicIXor;
-        case AtomicOp::Exch:
-            return OperationCode::AtomicIExchange;
-        default:
-            UNIMPLEMENTED_MSG("op={}", static_cast<int>(op));
-            return OperationCode::AtomicIAdd;
-        }
-    }();
-    return SignedOperation(operation_code, is_signed, std::move(memory), std::move(data));
+OperationCode GetAtomOperation(AtomicOp op) {
+    switch (op) {
+    case AtomicOp::Add:
+        return OperationCode::AtomicIAdd;
+    case AtomicOp::Min:
+        return OperationCode::AtomicIMin;
+    case AtomicOp::Max:
+        return OperationCode::AtomicIMax;
+    case AtomicOp::And:
+        return OperationCode::AtomicIAnd;
+    case AtomicOp::Or:
+        return OperationCode::AtomicIOr;
+    case AtomicOp::Xor:
+        return OperationCode::AtomicIXor;
+    case AtomicOp::Exch:
+        return OperationCode::AtomicIExchange;
+    default:
+        UNIMPLEMENTED_MSG("op={}", static_cast<int>(op));
+        return OperationCode::AtomicIAdd;
+    }
 }
 
 bool IsUnaligned(Tegra::Shader::UniformType uniform_type) {
@@ -90,23 +90,22 @@ u32 GetMemorySize(Tegra::Shader::UniformType uniform_type) {
 
 Node ExtractUnaligned(Node value, Node address, u32 mask, u32 size) {
     Node offset = Operation(OperationCode::UBitwiseAnd, address, Immediate(mask));
-    offset = Operation(OperationCode::ULogicalShiftLeft, std::move(offset), Immediate(3));
-    return Operation(OperationCode::UBitfieldExtract, std::move(value), std::move(offset),
-                     Immediate(size));
+    offset = Operation(OperationCode::ULogicalShiftLeft, move(offset), Immediate(3));
+    return Operation(OperationCode::UBitfieldExtract, move(value), move(offset), Immediate(size));
 }
 
 Node InsertUnaligned(Node dest, Node value, Node address, u32 mask, u32 size) {
-    Node offset = Operation(OperationCode::UBitwiseAnd, std::move(address), Immediate(mask));
-    offset = Operation(OperationCode::ULogicalShiftLeft, std::move(offset), Immediate(3));
-    return Operation(OperationCode::UBitfieldInsert, std::move(dest), std::move(value),
-                     std::move(offset), Immediate(size));
+    Node offset = Operation(OperationCode::UBitwiseAnd, move(address), Immediate(mask));
+    offset = Operation(OperationCode::ULogicalShiftLeft, move(offset), Immediate(3));
+    return Operation(OperationCode::UBitfieldInsert, move(dest), move(value), move(offset),
+                     Immediate(size));
 }
 
 Node Sign16Extend(Node value) {
     Node sign = Operation(OperationCode::UBitwiseAnd, value, Immediate(1U << 15));
-    Node is_sign = Operation(OperationCode::LogicalUEqual, std::move(sign), Immediate(1U << 15));
+    Node is_sign = Operation(OperationCode::LogicalUEqual, move(sign), Immediate(1U << 15));
     Node extend = Operation(OperationCode::Select, is_sign, Immediate(0xFFFF0000), Immediate(0));
-    return Operation(OperationCode::UBitwiseOr, std::move(value), std::move(extend));
+    return Operation(OperationCode::UBitwiseOr, move(value), move(extend));
 }
 
 } // Anonymous namespace
@@ -379,20 +378,36 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
 
             if (IsUnaligned(type)) {
                 const u32 mask = GetUnalignedMask(type);
-                value = InsertUnaligned(gmem, std::move(value), real_address, mask, size);
+                value = InsertUnaligned(gmem, move(value), real_address, mask, size);
             }
 
             bb.push_back(Operation(OperationCode::Assign, gmem, value));
         }
         break;
     }
+    case OpCode::Id::RED: {
+        UNIMPLEMENTED_IF_MSG(instr.red.type != GlobalAtomicType::U32);
+        UNIMPLEMENTED_IF_MSG(instr.red.operation != AtomicOp::Add);
+        const auto [real_address, base_address, descriptor] =
+            TrackGlobalMemory(bb, instr, true, true);
+        if (!real_address || !base_address) {
+            // Tracking failed, skip atomic.
+            break;
+        }
+        Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor);
+        Node value = GetRegister(instr.gpr0);
+        bb.push_back(Operation(OperationCode::ReduceIAdd, move(gmem), move(value)));
+        break;
+    }
     case OpCode::Id::ATOM: {
         UNIMPLEMENTED_IF_MSG(instr.atom.operation == AtomicOp::Inc ||
                                  instr.atom.operation == AtomicOp::Dec ||
                                  instr.atom.operation == AtomicOp::SafeAdd,
                              "operation={}", static_cast<int>(instr.atom.operation.Value()));
         UNIMPLEMENTED_IF_MSG(instr.atom.type == GlobalAtomicType::S64 ||
-                                 instr.atom.type == GlobalAtomicType::U64,
+                                 instr.atom.type == GlobalAtomicType::U64 ||
+                                 instr.atom.type == GlobalAtomicType::F16x2_FTZ_RN ||
+                                 instr.atom.type == GlobalAtomicType::F32_FTZ_RN,
                              "type={}", static_cast<int>(instr.atom.type.Value()));
 
         const auto [real_address, base_address, descriptor] =
@@ -403,11 +418,11 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
         }
 
         const bool is_signed =
-            instr.atoms.type == AtomicType::S32 || instr.atoms.type == AtomicType::S64;
+            instr.atom.type == GlobalAtomicType::S32 || instr.atom.type == GlobalAtomicType::S64;
         Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor);
-        Node value = GetAtomOperation(static_cast<AtomicOp>(instr.atom.operation), is_signed, gmem,
-                                      GetRegister(instr.gpr20));
-        SetRegister(bb, instr.gpr0, std::move(value));
+        SetRegister(bb, instr.gpr0,
+                    SignedOperation(GetAtomOperation(instr.atom.operation), is_signed, gmem,
+                                    GetRegister(instr.gpr20)));
         break;
     }
     case OpCode::Id::ATOMS: {
@@ -421,11 +436,10 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
             instr.atoms.type == AtomicType::S32 || instr.atoms.type == AtomicType::S64;
         const s32 offset = instr.atoms.GetImmediateOffset();
         Node address = GetRegister(instr.gpr8);
-        address = Operation(OperationCode::IAdd, std::move(address), Immediate(offset));
-        Node value =
-            GetAtomOperation(static_cast<AtomicOp>(instr.atoms.operation), is_signed,
-                             GetSharedMemory(std::move(address)), GetRegister(instr.gpr20));
-        SetRegister(bb, instr.gpr0, std::move(value));
+        address = Operation(OperationCode::IAdd, move(address), Immediate(offset));
+        SetRegister(bb, instr.gpr0,
+                    SignedOperation(GetAtomOperation(instr.atoms.operation), is_signed,
+                                    GetSharedMemory(move(address)), GetRegister(instr.gpr20)));
         break;
     }
     case OpCode::Id::AL2P: {
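
The new RED case above differs from ATOM only in what happens to the previous memory value: ATOM writes it back to a register, RED discards it, which is why RED lowers to the void-returning ReduceIAdd operation instead of an atomic one. A standard-library sketch of the two semantics, as an illustration rather than emulator code:

    #include <atomic>

    // ATOM: read-modify-write that yields the old value to a register.
    unsigned AtomAdd(std::atomic<unsigned>& memory, unsigned value) {
        return memory.fetch_add(value);
    }

    // RED: the same read-modify-write with the result discarded, a pure reduction.
    void RedAdd(std::atomic<unsigned>& memory, unsigned value) {
        memory.fetch_add(value);
    }
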
diff --git a/src/video_core/shader/decode/shift.cpp b/src/video_core/shader/decode/shift.cpp
index 3b391d3e6..d4ffa8014 100644
--- a/src/video_core/shader/decode/shift.cpp
+++ b/src/video_core/shader/decode/shift.cpp
@@ -23,7 +23,6 @@ Node IsFull(Node shift) {
 }
 
 Node Shift(OperationCode opcode, Node value, Node shift) {
-    Node is_full = Operation(OperationCode::LogicalIEqual, shift, Immediate(32));
     Node shifted = Operation(opcode, move(value), shift);
     return Operation(OperationCode::Select, IsFull(move(shift)), Immediate(0), move(shifted));
 }
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h
index 5fcc9da60..3eee961f5 100644
--- a/src/video_core/shader/node.h
+++ b/src/video_core/shader/node.h
@@ -178,6 +178,20 @@ enum class OperationCode {
     AtomicIOr,  /// (memory, int) -> int
     AtomicIXor, /// (memory, int) -> int
 
+    ReduceUAdd, /// (memory, uint) -> void
+    ReduceUMin, /// (memory, uint) -> void
+    ReduceUMax, /// (memory, uint) -> void
+    ReduceUAnd, /// (memory, uint) -> void
+    ReduceUOr,  /// (memory, uint) -> void
+    ReduceUXor, /// (memory, uint) -> void
+
+    ReduceIAdd, /// (memory, int) -> void
+    ReduceIMin, /// (memory, int) -> void
+    ReduceIMax, /// (memory, int) -> void
+    ReduceIAnd, /// (memory, int) -> void
+    ReduceIOr,  /// (memory, int) -> void
+    ReduceIXor, /// (memory, int) -> void
+
     Branch,         /// (uint branch_target) -> void
     BranchIndirect, /// (uint branch_target) -> void
     PushFlowStack,  /// (uint branch_target) -> void
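Note: the new Reduce* opcodes mirror the Atomic* family but return void, per the signatures above. An illustrative C++ contrast of the two semantics (not the emulator's code):

    #include <atomic>

    // An Atomic* op yields the previous value; a Reduce* op performs the same
    // read-modify-write and discards the result, so no register write-back is
    // needed.
    int AtomicIAdd(std::atomic<int>& memory, int value) {
        return memory.fetch_add(value); // (memory, int) -> int
    }

    void ReduceIAdd(std::atomic<int>& memory, int value) {
        memory.fetch_add(value); // (memory, int) -> void
    }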
diff --git a/src/video_core/shader/shader_ir.cpp b/src/video_core/shader/shader_ir.cpp
index 8852c8a1b..822674926 100644
--- a/src/video_core/shader/shader_ir.cpp
+++ b/src/video_core/shader/shader_ir.cpp
@@ -56,8 +56,7 @@ Node ShaderIR::GetConstBuffer(u64 index_, u64 offset_) {
     const auto index = static_cast<u32>(index_);
     const auto offset = static_cast<u32>(offset_);
 
-    const auto [entry, is_new] = used_cbufs.try_emplace(index);
-    entry->second.MarkAsUsed(offset);
+    used_cbufs.try_emplace(index).first->second.MarkAsUsed(offset);
 
     return MakeNode<CbufNode>(index, Immediate(offset));
 }
@@ -66,8 +65,7 @@ Node ShaderIR::GetConstBufferIndirect(u64 index_, u64 offset_, Node node) {
     const auto index = static_cast<u32>(index_);
     const auto offset = static_cast<u32>(offset_);
 
-    const auto [entry, is_new] = used_cbufs.try_emplace(index);
-    entry->second.MarkAsUsedIndirect();
+    used_cbufs.try_emplace(index).first->second.MarkAsUsedIndirect();
 
     Node final_offset = [&] {
         // Attempt to inline constant buffer without a variable offset. This is done to allow
@@ -166,6 +164,7 @@ Node ShaderIR::ConvertIntegerSize(Node value, Register::Size size, bool is_signe
                                 std::move(value), Immediate(16));
         value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE,
                                 std::move(value), Immediate(16));
+        return value;
     case Register::Size::Word:
         // Default - do nothing
         return value;
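Note: two small fixes here. The added `return value;` closes a switch fall-through in ConvertIntegerSize, and the try_emplace chains exploit its {iterator, inserted} return value to collapse a bind-then-use pair into one expression. A self-contained sketch of the map idiom (ConstBuffer is a stand-in type):

    #include <algorithm>
    #include <map>

    // Stand-in for the shader IR's per-index constant-buffer bookkeeping.
    struct ConstBuffer {
        void MarkAsUsed(unsigned offset) {
            max_offset = std::max(max_offset, offset);
        }
        unsigned max_offset = 0;
    };

    // try_emplace returns {iterator, inserted}; .first->second reaches the
    // mapped value whether the entry was just default-constructed or existed.
    void MarkCbufUsed(std::map<unsigned, ConstBuffer>& used_cbufs, unsigned index,
                      unsigned offset) {
        used_cbufs.try_emplace(index).first->second.MarkAsUsed(offset);
    }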
diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp
index 10739b37d..224943ad9 100644
--- a/src/video_core/shader/track.cpp
+++ b/src/video_core/shader/track.cpp
@@ -27,8 +27,9 @@ std::pair<Node, s64> FindOperation(const NodeBlock& code, s64 cursor,
 
         if (const auto conditional = std::get_if<ConditionalNode>(&*node)) {
             const auto& conditional_code = conditional->GetCode();
-            auto [found, internal_cursor] = FindOperation(
+            auto result = FindOperation(
                 conditional_code, static_cast<s64>(conditional_code.size() - 1), operation_code);
+            auto& found = result.first;
             if (found) {
                 return {std::move(found), cursor};
             }
@@ -186,8 +187,8 @@ std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& co
 std::optional<u32> ShaderIR::TrackImmediate(Node tracked, const NodeBlock& code, s64 cursor) const {
     // Reduce the cursor in one to avoid infinite loops when the instruction sets the same register
     // that it uses as operand
-    const auto [found, found_cursor] =
-        TrackRegister(&std::get<GprNode>(*tracked), code, cursor - 1);
+    const auto result = TrackRegister(&std::get<GprNode>(*tracked), code, cursor - 1);
+    const auto& found = result.first;
     if (!found) {
         return {};
     }
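Note: both track.cpp changes trade a structured binding for explicit pair access. One plausible motivation, assumed here rather than stated in the diff: the second binding (internal_cursor, found_cursor) is unused, and a structured binding cannot suppress that name individually, while pair access names only the member that is needed. The two spellings, compilable:

    #include <utility>

    std::pair<const char*, long> FindSomething() {
        return {"node", 42};
    }

    int main() {
        // Structured binding: both names exist even if only one is used,
        // which can trip unused-variable warnings on some compilers.
        // auto [found, cursor] = FindSomething();

        // Pair access touches only what is needed:
        const auto result = FindSomething();
        const auto& found = result.first;
        return found != nullptr ? 0 : 1;
    }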
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
index 7af0e792c..715f39d0d 100644
--- a/src/video_core/texture_cache/surface_base.cpp
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -248,8 +248,14 @@ void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
 
     // Use an extra temporal buffer
     auto& tmp_buffer = staging_cache.GetBuffer(1);
+    // Special case for 3D Texture Segments
+    const bool must_read_current_data =
+        params.block_depth > 0 && params.target == VideoCore::Surface::SurfaceTarget::Texture2D;
     tmp_buffer.resize(guest_memory_size);
     host_ptr = tmp_buffer.data();
+    if (must_read_current_data) {
+        memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
+    }
 
     if (params.is_tiled) {
         ASSERT_MSG(params.block_width == 0, "Block width is defined as {}", params.block_width);
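Note: the pre-read added above is a read-modify-write. When a 2D slice of a block-linear 3D texture is flushed, bytes outside the slice must first be pulled from guest memory so the partial write-back does not clobber them. A generic sketch of the pattern (names hypothetical):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Seed the staging buffer with the destination's current contents,
    // overwrite only the owned sub-range, then write everything back so
    // untouched bytes survive.
    void FlushSlice(std::vector<unsigned char>& guest,
                    const std::vector<unsigned char>& slice, std::size_t offset) {
        std::vector<unsigned char> staging = guest;                      // read current data
        std::copy(slice.begin(), slice.end(), staging.begin() + offset); // modify the slice
        guest = staging;                                                 // write back merged
    }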
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h
index a39a8661b..c5ab21f56 100644
--- a/src/video_core/texture_cache/surface_base.h
+++ b/src/video_core/texture_cache/surface_base.h
@@ -72,9 +72,9 @@ public:
         return (cpu_addr < end) && (cpu_addr_end > start);
     }
 
-    bool IsInside(const GPUVAddr other_start, const GPUVAddr other_end) {
+    bool IsInside(const GPUVAddr other_start, const GPUVAddr other_end) const {
         const GPUVAddr gpu_addr_end = gpu_addr + guest_memory_size;
-        return (gpu_addr <= other_start && other_end <= gpu_addr_end);
+        return gpu_addr <= other_start && other_end <= gpu_addr_end;
     }
 
     // Use only when recycling a surface
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
index 6f3ef45be..0de499946 100644
--- a/src/video_core/texture_cache/surface_params.cpp
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -167,7 +167,6 @@ SurfaceParams SurfaceParams::CreateForImage(const FormatLookupTable& lookup_tabl
 
 SurfaceParams SurfaceParams::CreateForDepthBuffer(Core::System& system) {
     const auto& regs = system.GPU().Maxwell3D().regs;
-    regs.zeta_width, regs.zeta_height, regs.zeta.format, regs.zeta.memory_layout.type;
     SurfaceParams params;
     params.is_tiled = regs.zeta.memory_layout.type ==
                       Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
diff --git a/src/video_core/texture_cache/surface_view.cpp b/src/video_core/texture_cache/surface_view.cpp
index 57a1f5803..6b5f5984b 100644
--- a/src/video_core/texture_cache/surface_view.cpp
+++ b/src/video_core/texture_cache/surface_view.cpp
@@ -20,4 +20,8 @@ bool ViewParams::operator==(const ViewParams& rhs) const {
            std::tie(rhs.base_layer, rhs.num_layers, rhs.base_level, rhs.num_levels, rhs.target);
 }
 
+bool ViewParams::operator!=(const ViewParams& rhs) const {
+    return !operator==(rhs);
+}
+
 } // namespace VideoCommon
diff --git a/src/video_core/texture_cache/surface_view.h b/src/video_core/texture_cache/surface_view.h
index b17fd11a9..90a8bb0ae 100644
--- a/src/video_core/texture_cache/surface_view.h
+++ b/src/video_core/texture_cache/surface_view.h
@@ -21,6 +21,7 @@ struct ViewParams {
     std::size_t Hash() const;
 
     bool operator==(const ViewParams& rhs) const;
+    bool operator!=(const ViewParams& rhs) const;
 
     bool IsLayered() const {
         switch (target) {
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 4edd4313b..69ca08fd1 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -614,10 +614,10 @@ private:
      * textures within the GPU if possible. Falls back to LLE when it isn't possible to use any of
      * the HLE methods.
      *
      * @param overlaps The overlapping surfaces registered in the cache.
      * @param params The parameters on the new surface.
      * @param gpu_addr The starting address of the new surface.
-     * @param cache_addr The starting address of the new surface on physical memory.
+     * @param cpu_addr The starting address of the new surface on physical memory.
      */
     std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps,
                                                                const SurfaceParams& params,
@@ -647,7 +647,8 @@ private:
                     break;
                 }
                 const u32 offset = static_cast<u32>(surface->GetCpuAddr() - cpu_addr);
-                const auto [x, y, z] = params.GetBlockOffsetXYZ(offset);
+                const auto offsets = params.GetBlockOffsetXYZ(offset);
+                const auto z = std::get<2>(offsets);
                 modified |= surface->IsModified();
                 const CopyParams copy_params(0, 0, 0, 0, 0, z, 0, 0, params.width, params.height,
                                              1);
diff --git a/src/web_service/CMakeLists.txt b/src/web_service/CMakeLists.txt
index 01f2d129d..0c9bb0d55 100644
--- a/src/web_service/CMakeLists.txt
+++ b/src/web_service/CMakeLists.txt
@@ -8,9 +8,4 @@ add_library(web_service STATIC
 )
 
 create_target_directory_groups(web_service)
-
-get_directory_property(OPENSSL_LIBS
-    DIRECTORY ${PROJECT_SOURCE_DIR}/externals/libressl
-    DEFINITION OPENSSL_LIBS)
-target_compile_definitions(web_service PRIVATE -DCPPHTTPLIB_OPENSSL_SUPPORT)
-target_link_libraries(web_service PRIVATE common json-headers ${OPENSSL_LIBS} httplib lurlparser)
+target_link_libraries(web_service PRIVATE common json-headers httplib lurlparser)
diff --git a/src/web_service/web_backend.cpp b/src/web_service/web_backend.cpp
index 737ffe409..09d1651ac 100644
--- a/src/web_service/web_backend.cpp
+++ b/src/web_service/web_backend.cpp
@@ -43,7 +43,7 @@ struct Client::Impl {
         if (jwt.empty() && !allow_anonymous) {
             LOG_ERROR(WebService, "Credentials must be provided for authenticated requests");
             return Common::WebResult{Common::WebResult::Code::CredentialsMissing,
-                                     "Credentials needed"};
+                                     "Credentials needed", ""};
         }
 
         auto result = GenericRequest(method, path, data, accept, jwt);
@@ -81,12 +81,12 @@ struct Client::Impl {
             cli = std::make_unique<httplib::SSLClient>(parsedUrl.m_Host.c_str(), port);
         } else {
             LOG_ERROR(WebService, "Bad URL scheme {}", parsedUrl.m_Scheme);
-            return Common::WebResult{Common::WebResult::Code::InvalidURL, "Bad URL scheme"};
+            return Common::WebResult{Common::WebResult::Code::InvalidURL, "Bad URL scheme", ""};
         }
     }
     if (cli == nullptr) {
         LOG_ERROR(WebService, "Invalid URL {}", host + path);
-        return Common::WebResult{Common::WebResult::Code::InvalidURL, "Invalid URL"};
+        return Common::WebResult{Common::WebResult::Code::InvalidURL, "Invalid URL", ""};
     }
     cli->set_timeout_sec(TIMEOUT_SECONDS);
 
@@ -118,27 +118,27 @@ struct Client::Impl {
 
         if (!cli->send(request, response)) {
             LOG_ERROR(WebService, "{} to {} returned null", method, host + path);
-            return Common::WebResult{Common::WebResult::Code::LibError, "Null response"};
+            return Common::WebResult{Common::WebResult::Code::LibError, "Null response", ""};
         }
 
         if (response.status >= 400) {
             LOG_ERROR(WebService, "{} to {} returned error status code: {}", method, host + path,
                       response.status);
             return Common::WebResult{Common::WebResult::Code::HttpError,
-                                     std::to_string(response.status)};
+                                     std::to_string(response.status), ""};
         }
 
         auto content_type = response.headers.find("content-type");
 
         if (content_type == response.headers.end()) {
             LOG_ERROR(WebService, "{} to {} returned no content", method, host + path);
-            return Common::WebResult{Common::WebResult::Code::WrongContent, ""};
+            return Common::WebResult{Common::WebResult::Code::WrongContent, "", ""};
         }
 
         if (content_type->second.find(accept) == std::string::npos) {
             LOG_ERROR(WebService, "{} to {} returned wrong content: {}", method, host + path,
                       content_type->second);
-            return Common::WebResult{Common::WebResult::Code::WrongContent, "Wrong content"};
+            return Common::WebResult{Common::WebResult::Code::WrongContent, "Wrong content", ""};
         }
         return Common::WebResult{Common::WebResult::Code::Success, "", response.body};
     }
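Note: every error return in web_backend.cpp gains a trailing "" because Common::WebResult now carries the response body as a third member, and the codebase spells all aggregate members out explicitly. Roughly, with the struct layout inferred from the usage above:

    #include <string>

    // Inferred shape of Common::WebResult after this change; the third member
    // holds the response body, so every brace-init must now account for it.
    struct WebResult {
        enum class Code { Success, CredentialsMissing, InvalidURL, LibError, HttpError,
                          WrongContent };
        Code result_code;
        std::string result_string;
        std::string returned_data;
    };

    WebResult BadScheme() {
        return WebResult{WebResult::Code::InvalidURL, "Bad URL scheme", ""};
    }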
diff --git a/src/yuzu/debugger/profiler.cpp b/src/yuzu/debugger/profiler.cpp
index f594ef076..53049ffd6 100644
--- a/src/yuzu/debugger/profiler.cpp
+++ b/src/yuzu/debugger/profiler.cpp
@@ -51,7 +51,8 @@ MicroProfileDialog::MicroProfileDialog(QWidget* parent) : QWidget(parent, Qt::Di
     setWindowTitle(tr("MicroProfile"));
     resize(1000, 600);
     // Remove the "?" button from the titlebar and enable the maximize button
-    setWindowFlags(windowFlags() & ~Qt::WindowContextHelpButtonHint | Qt::WindowMaximizeButtonHint);
+    setWindowFlags((windowFlags() & ~Qt::WindowContextHelpButtonHint) |
+                   Qt::WindowMaximizeButtonHint);
 
 #if MICROPROFILE_ENABLED
 
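Note: since & binds tighter than | in C++, the added parentheses do not change the computed flag mask; they document the grouping and bring the line under the length limit. A quick check of the equivalence:

    #include <cassert>

    int main() {
        const unsigned flags = 0b0110;
        const unsigned help = 0b0010;
        const unsigned maximize = 0b1000;
        // & has higher precedence than |, so both spellings compute the same
        // mask (compilers may still warn about the unparenthesized form).
        assert((flags & ~help | maximize) == ((flags & ~help) | maximize));
        return 0;
    }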
diff --git a/src/yuzu/game_list_worker.cpp b/src/yuzu/game_list_worker.cpp
index da2c27aa2..2018150db 100644
--- a/src/yuzu/game_list_worker.cpp
+++ b/src/yuzu/game_list_worker.cpp
@@ -91,7 +91,8 @@ std::pair<std::vector<u8>, std::string> GetGameListCachedObject(
         return generator();
     }
 
-    if (file1.write(reinterpret_cast<const char*>(icon.data()), icon.size()) != icon.size()) {
+    if (file1.write(reinterpret_cast<const char*>(icon.data()), icon.size()) !=
+        s64(icon.size())) {
         LOG_ERROR(Frontend, "Failed to write data to cache file.");
         return generator();
     }
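Note: QFile::write returns a signed 64-bit byte count while vector::size() is unsigned, so the unadorned comparison triggers a sign-compare warning; casting the size to s64 makes both operands the same type. In miniature:

    #include <cstdint>
    #include <vector>

    using s64 = std::int64_t; // matches the codebase's fixed-width alias

    // write()-style APIs report a signed byte count; compare against a signed
    // copy of the unsigned container size to avoid -Wsign-compare.
    bool WroteAll(s64 written, const std::vector<unsigned char>& icon) {
        return written == s64(icon.size());
    }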
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 1717e06f9..2c8eb481d 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -1019,9 +1019,9 @@ void GMainWindow::BootGame(const QString& filename) {
         std::string title_name;
         const auto res = Core::System::GetInstance().GetGameName(title_name);
         if (res != Loader::ResultStatus::Success) {
-            const auto [nacp, icon_file] = FileSys::PatchManager(title_id).GetControlMetadata();
-            if (nacp != nullptr)
-                title_name = nacp->GetApplicationName();
+            const auto metadata = FileSys::PatchManager(title_id).GetControlMetadata();
+            if (metadata.first != nullptr)
+                title_name = metadata.first->GetApplicationName();
 
             if (title_name.empty())
                 title_name = FileUtil::GetFilename(filename.toStdString());
@@ -1628,7 +1628,7 @@ void GMainWindow::OnMenuInstallToNAND() {
     }
 
     FileSys::InstallResult res;
-    if (index >= static_cast<size_t>(FileSys::TitleType::Application)) {
+    if (index >= static_cast<s32>(FileSys::TitleType::Application)) {
         res = Core::System::GetInstance()
                   .GetFileSystemController()
                   .GetUserNANDContents()