Diffstat (limited to '')
-rw-r--r--  src/core/hle/kernel/svc.cpp        |  51
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp | 103
-rw-r--r--  src/core/hle/kernel/vm_manager.h   | 139
3 files changed, 279 insertions(+), 14 deletions(-)
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 348a22904..c826dfd96 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -254,11 +254,52 @@ static ResultCode SetMemoryPermission(VAddr addr, u64 size, u32 prot) {
     return vm_manager.ReprotectRange(addr, size, converted_permissions);
 }
 
-static ResultCode SetMemoryAttribute(VAddr addr, u64 size, u32 state0, u32 state1) {
-    LOG_WARNING(Kernel_SVC,
-                "(STUBBED) called, addr=0x{:X}, size=0x{:X}, state0=0x{:X}, state1=0x{:X}", addr,
-                size, state0, state1);
-    return RESULT_SUCCESS;
+static ResultCode SetMemoryAttribute(VAddr address, u64 size, u32 mask, u32 attribute) {
+    LOG_DEBUG(Kernel_SVC,
+              "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
+              size, mask, attribute);
+
+    if (!Common::Is4KBAligned(address)) {
+        LOG_ERROR(Kernel_SVC, "Address not page aligned (0x{:016X})", address);
+        return ERR_INVALID_ADDRESS;
+    }
+
+    if (size == 0 || !Common::Is4KBAligned(size)) {
+        LOG_ERROR(Kernel_SVC, "Invalid size (0x{:X}). Size must be non-zero and page aligned.",
+                  size);
+        return ERR_INVALID_ADDRESS;
+    }
+
+    if (!IsValidAddressRange(address, size)) {
+        LOG_ERROR(Kernel_SVC, "Address range overflowed (Address: 0x{:016X}, Size: 0x{:016X})",
+                  address, size);
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    const auto mem_attribute = static_cast<MemoryAttribute>(attribute);
+    const auto mem_mask = static_cast<MemoryAttribute>(mask);
+    const auto attribute_with_mask = mem_attribute | mem_mask;
+
+    if (attribute_with_mask != mem_mask) {
+        LOG_ERROR(Kernel_SVC,
+                  "Memory attribute doesn't match the given mask (Attribute: 0x{:X}, Mask: 0x{:X})",
+                  attribute, mask);
+        return ERR_INVALID_COMBINATION;
+    }
+
+    if ((attribute_with_mask | MemoryAttribute::Uncached) != MemoryAttribute::Uncached) {
+        LOG_ERROR(Kernel_SVC, "Specified attribute isn't equal to MemoryAttributeUncached (8).");
+        return ERR_INVALID_COMBINATION;
+    }
+
+    auto& vm_manager = Core::CurrentProcess()->VMManager();
+    if (!IsInsideAddressSpace(vm_manager, address, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Given address (0x{:016X}) is outside the bounds of the address space.", address);
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    return vm_manager.SetMemoryAttribute(address, size, mem_mask, mem_attribute);
 }
 
 /// Maps a memory range into a different range.
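
Note on the checks above: the new SVC wrapper only accepts attribute bits that fall inside the given mask, and only the Uncached bit (8) may actually be changed. A minimal standalone sketch of that acceptance rule follows; the helper name IsAcceptableAttributePair and the constant kUncached are illustrative only and are not part of this change.

// Illustrative sketch of the mask/attribute validation performed by SetMemoryAttribute above.
#include <cstdint>

constexpr std::uint32_t kUncached = 8; // corresponds to MemoryAttribute::Uncached

constexpr bool IsAcceptableAttributePair(std::uint32_t mask, std::uint32_t attribute) {
    const std::uint32_t attribute_with_mask = attribute | mask;
    if (attribute_with_mask != mask) {
        return false; // attribute sets bits that the mask does not cover
    }
    // Only the Uncached bit may be toggled via this SVC.
    return (attribute_with_mask | kUncached) == kUncached;
}

static_assert(IsAcceptableAttributePair(8, 8));  // set Uncached: accepted
static_assert(IsAcceptableAttributePair(8, 0));  // clear Uncached: accepted
static_assert(!IsAcceptableAttributePair(0, 8)); // attribute outside the mask: rejected
static_assert(!IsAcceptableAttributePair(2, 2)); // Locked cannot be changed here: rejected
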
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index d3b55a51e..f39e096ca 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -37,7 +37,7 @@ static const char* GetMemoryStateName(MemoryState state) {
 
 bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
     ASSERT(base + size == next.base);
-    if (permissions != next.permissions || meminfo_state != next.meminfo_state ||
+    if (permissions != next.permissions || state != next.state || attribute != next.attribute ||
         type != next.type) {
         return false;
     }
@@ -115,7 +115,7 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
 
     final_vma.type = VMAType::AllocatedMemoryBlock;
     final_vma.permissions = VMAPermission::ReadWrite;
-    final_vma.meminfo_state = state;
+    final_vma.state = state;
     final_vma.backing_block = std::move(block);
     final_vma.offset = offset;
     UpdatePageTableForVMA(final_vma);
@@ -140,7 +140,7 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* me
 
     final_vma.type = VMAType::BackingMemory;
     final_vma.permissions = VMAPermission::ReadWrite;
-    final_vma.meminfo_state = state;
+    final_vma.state = state;
     final_vma.backing_memory = memory;
     UpdatePageTableForVMA(final_vma);
 
@@ -177,7 +177,7 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u6
 
     final_vma.type = VMAType::MMIO;
     final_vma.permissions = VMAPermission::ReadWrite;
-    final_vma.meminfo_state = state;
+    final_vma.state = state;
     final_vma.paddr = paddr;
     final_vma.mmio_handler = std::move(mmio_handler);
     UpdatePageTableForVMA(final_vma);
@@ -189,7 +189,7 @@ VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
     VirtualMemoryArea& vma = vma_handle->second;
     vma.type = VMAType::Free;
     vma.permissions = VMAPermission::None;
-    vma.meminfo_state = MemoryState::Unmapped;
+    vma.state = MemoryState::Unmapped;
 
     vma.backing_block = nullptr;
     vma.offset = 0;
@@ -308,9 +308,10 @@ MemoryInfo VMManager::QueryMemory(VAddr address) const {
 
     if (IsValidHandle(vma)) {
         memory_info.base_address = vma->second.base;
+        memory_info.attributes = ToSvcMemoryAttribute(vma->second.attribute);
         memory_info.permission = static_cast<u32>(vma->second.permissions);
         memory_info.size = vma->second.size;
-        memory_info.state = ToSvcMemoryState(vma->second.meminfo_state);
+        memory_info.state = ToSvcMemoryState(vma->second.state);
     } else {
         memory_info.base_address = address_space_end;
         memory_info.permission = static_cast<u32>(VMAPermission::None);
@@ -321,6 +322,34 @@ MemoryInfo VMManager::QueryMemory(VAddr address) const {
     return memory_info;
 }
 
+ResultCode VMManager::SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask,
+                                         MemoryAttribute attribute) {
+    constexpr auto ignore_mask = MemoryAttribute::Uncached | MemoryAttribute::DeviceMapped;
+    constexpr auto attribute_mask = ~ignore_mask;
+
+    const auto result = CheckRangeState(
+        address, size, MemoryState::FlagUncached, MemoryState::FlagUncached, VMAPermission::None,
+        VMAPermission::None, attribute_mask, MemoryAttribute::None, ignore_mask);
+
+    if (result.Failed()) {
+        return result.Code();
+    }
+
+    const auto [prev_state, prev_permissions, prev_attributes] = *result;
+    const auto new_attribute = (prev_attributes & ~mask) | (mask & attribute);
+
+    const auto carve_result = CarveVMARange(address, size);
+    if (carve_result.Failed()) {
+        return carve_result.Code();
+    }
+
+    auto vma_iter = *carve_result;
+    vma_iter->second.attribute = new_attribute;
+
+    MergeAdjacent(vma_iter);
+    return RESULT_SUCCESS;
+}
+
 ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state) {
     const auto vma = FindVMA(src_addr);
 
@@ -364,7 +393,7 @@ void VMManager::LogLayout() const {
                   (u8)vma.permissions & (u8)VMAPermission::Read ? 'R' : '-',
                   (u8)vma.permissions & (u8)VMAPermission::Write ? 'W' : '-',
                   (u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-',
-                  GetMemoryStateName(vma.meminfo_state));
+                  GetMemoryStateName(vma.state));
     }
 }
 
@@ -591,6 +620,66 @@ void VMManager::ClearPageTable() {
               Memory::PageType::Unmapped);
 }
 
+VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask,
+                                                   MemoryState state, VMAPermission permission_mask,
+                                                   VMAPermission permissions,
+                                                   MemoryAttribute attribute_mask,
+                                                   MemoryAttribute attribute,
+                                                   MemoryAttribute ignore_mask) const {
+    auto iter = FindVMA(address);
+
+    // If we don't have a valid VMA handle at this point, then it means this is
+    // being called with an address outside of the address space, which is definitely
+    // indicative of a bug, as this function only operates on mapped memory regions.
+    DEBUG_ASSERT(IsValidHandle(iter));
+
+    const VAddr end_address = address + size - 1;
+    const MemoryAttribute initial_attributes = iter->second.attribute;
+    const VMAPermission initial_permissions = iter->second.permissions;
+    const MemoryState initial_state = iter->second.state;
+
+    while (true) {
+        // The iterator should be valid throughout the traversal. Hitting the end of
+        // the mapped VMA regions is unquestionably indicative of a bug.
+        DEBUG_ASSERT(IsValidHandle(iter));
+
+        const auto& vma = iter->second;
+
+        if (vma.state != initial_state) {
+            return ERR_INVALID_ADDRESS_STATE;
+        }
+
+        if ((vma.state & state_mask) != state) {
+            return ERR_INVALID_ADDRESS_STATE;
+        }
+
+        if (vma.permissions != initial_permissions) {
+            return ERR_INVALID_ADDRESS_STATE;
+        }
+
+        if ((vma.permissions & permission_mask) != permissions) {
+            return ERR_INVALID_ADDRESS_STATE;
+        }
+
+        if ((vma.attribute | ignore_mask) != (initial_attributes | ignore_mask)) {
+            return ERR_INVALID_ADDRESS_STATE;
+        }
+
+        if ((vma.attribute & attribute_mask) != attribute) {
+            return ERR_INVALID_ADDRESS_STATE;
+        }
+
+        if (end_address <= vma.EndAddress()) {
+            break;
+        }
+
+        ++iter;
+    }
+
+    return MakeResult(
+        std::make_tuple(initial_state, initial_permissions, initial_attributes & ~ignore_mask));
+}
+
 u64 VMManager::GetTotalMemoryUsage() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0xF8000000;
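
For reference, VMManager::SetMemoryAttribute above updates the stored attribute with a read-modify-write rule, new = (prev & ~mask) | (mask & attribute), so bits outside the mask are preserved. A standalone sketch of just that rule follows; the helper name ApplyAttribute and the sample values are hypothetical and not part of the patch.

// Illustrative sketch of the attribute update rule used by VMManager::SetMemoryAttribute above.
#include <cstdint>

constexpr std::uint32_t ApplyAttribute(std::uint32_t prev, std::uint32_t mask,
                                       std::uint32_t attribute) {
    return (prev & ~mask) | (mask & attribute);
}

static_assert(ApplyAttribute(/*prev=*/0, /*mask=*/8, /*attribute=*/8) == 8); // set Uncached
static_assert(ApplyAttribute(/*prev=*/8, /*mask=*/8, /*attribute=*/0) == 0); // clear Uncached
static_assert(ApplyAttribute(/*prev=*/1, /*mask=*/8, /*attribute=*/8) == 9); // unmasked bits kept
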
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 10bacac3e..6091533bc 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -6,6 +6,7 @@
 
 #include <map>
 #include <memory>
+#include <tuple>
 #include <vector>
 #include "common/common_types.h"
 #include "core/hle/result.h"
@@ -43,6 +44,88 @@ enum class VMAPermission : u8 {
     ReadWriteExecute = Read | Write | Execute,
 };
 
+constexpr VMAPermission operator|(VMAPermission lhs, VMAPermission rhs) {
+    return static_cast<VMAPermission>(u32(lhs) | u32(rhs));
+}
+
+constexpr VMAPermission operator&(VMAPermission lhs, VMAPermission rhs) {
+    return static_cast<VMAPermission>(u32(lhs) & u32(rhs));
+}
+
+constexpr VMAPermission operator^(VMAPermission lhs, VMAPermission rhs) {
+    return static_cast<VMAPermission>(u32(lhs) ^ u32(rhs));
+}
+
+constexpr VMAPermission operator~(VMAPermission permission) {
+    return static_cast<VMAPermission>(~u32(permission));
+}
+
+constexpr VMAPermission& operator|=(VMAPermission& lhs, VMAPermission rhs) {
+    lhs = lhs | rhs;
+    return lhs;
+}
+
+constexpr VMAPermission& operator&=(VMAPermission& lhs, VMAPermission rhs) {
+    lhs = lhs & rhs;
+    return lhs;
+}
+
+constexpr VMAPermission& operator^=(VMAPermission& lhs, VMAPermission rhs) {
+    lhs = lhs ^ rhs;
+    return lhs;
+}
+
+/// Attribute flags that can be applied to a VMA
+enum class MemoryAttribute : u32 {
+    Mask = 0xFF,
+
+    /// No particular qualities
+    None = 0,
+    /// Memory locked/borrowed for use. e.g. This would be used by transfer memory.
+    Locked = 1,
+    /// Memory locked for use by IPC-related internals.
+    LockedForIPC = 2,
+    /// Mapped as part of the device address space.
+    DeviceMapped = 4,
+    /// Uncached memory
+    Uncached = 8,
+};
+
+constexpr MemoryAttribute operator|(MemoryAttribute lhs, MemoryAttribute rhs) {
+    return static_cast<MemoryAttribute>(u32(lhs) | u32(rhs));
+}
+
+constexpr MemoryAttribute operator&(MemoryAttribute lhs, MemoryAttribute rhs) {
+    return static_cast<MemoryAttribute>(u32(lhs) & u32(rhs));
+}
+
+constexpr MemoryAttribute operator^(MemoryAttribute lhs, MemoryAttribute rhs) {
+    return static_cast<MemoryAttribute>(u32(lhs) ^ u32(rhs));
+}
+
+constexpr MemoryAttribute operator~(MemoryAttribute attribute) {
+    return static_cast<MemoryAttribute>(~u32(attribute));
+}
+
+constexpr MemoryAttribute& operator|=(MemoryAttribute& lhs, MemoryAttribute rhs) {
+    lhs = lhs | rhs;
+    return lhs;
+}
+
+constexpr MemoryAttribute& operator&=(MemoryAttribute& lhs, MemoryAttribute rhs) {
+    lhs = lhs & rhs;
+    return lhs;
+}
+
+constexpr MemoryAttribute& operator^=(MemoryAttribute& lhs, MemoryAttribute rhs) {
+    lhs = lhs ^ rhs;
+    return lhs;
+}
+
+constexpr u32 ToSvcMemoryAttribute(MemoryAttribute attribute) {
+    return static_cast<u32>(attribute & MemoryAttribute::Mask);
+}
+
 // clang-format off
 /// Represents memory states and any relevant flags, as used by the kernel.
 /// svcQueryMemory interprets these by masking away all but the first eight
@@ -174,6 +257,16 @@ struct PageInfo {
  * also backed by a single host memory allocation.
  */
 struct VirtualMemoryArea {
+    /// Gets the starting (base) address of this VMA.
+    VAddr StartAddress() const {
+        return base;
+    }
+
+    /// Gets the ending address of this VMA.
+    VAddr EndAddress() const {
+        return base + size - 1;
+    }
+
     /// Virtual base address of the region.
     VAddr base = 0;
     /// Size of the region.
@@ -181,8 +274,8 @@ struct VirtualMemoryArea {
 
     VMAType type = VMAType::Free;
     VMAPermission permissions = VMAPermission::None;
-    /// Tag returned by svcQueryMemory. Not otherwise used.
-    MemoryState meminfo_state = MemoryState::Unmapped;
+    MemoryState state = MemoryState::Unmapped;
+    MemoryAttribute attribute = MemoryAttribute::None;
 
     // Settings for type = AllocatedMemoryBlock
     /// Memory block backing this VMA.
@@ -299,6 +392,19 @@ public:
     ///
     MemoryInfo QueryMemory(VAddr address) const;
 
+    /// Sets an attribute across the given address range.
+    ///
+    /// @param address   The starting address
+    /// @param size      The size of the range to set the attribute on.
+    /// @param mask      The attribute mask
+    /// @param attribute The attribute to set across the given address range
+    ///
+    /// @returns RESULT_SUCCESS if successful
+    /// @returns ERR_INVALID_ADDRESS_STATE if the attribute could not be set.
+    ///
+    ResultCode SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask,
+                                  MemoryAttribute attribute);
+
     /**
      * Scans all VMAs and updates the page table range of any that use the given vector as backing
      * memory. This should be called after any operation that causes reallocation of the vector.
@@ -435,6 +541,35 @@ private:
     /// Clears out the page table
     void ClearPageTable();
 
+    using CheckResults = ResultVal<std::tuple<MemoryState, VMAPermission, MemoryAttribute>>;
+
+    /// Checks if an address range adheres to the specified states provided.
+    ///
+    /// @param address         The starting address of the address range.
+    /// @param size            The size of the address range.
+    /// @param state_mask      The memory state mask.
+    /// @param state           The state to compare the individual VMA states against,
+    ///                        which is done in the form of: (vma.state & state_mask) != state.
+    /// @param permission_mask The memory permissions mask.
+    /// @param permissions     The permission to compare the individual VMA permissions against,
+    ///                        which is done in the form of:
+    ///                        (vma.permission & permission_mask) != permission.
+    /// @param attribute_mask  The memory attribute mask.
+    /// @param attribute       The memory attributes to compare the individual VMA attributes
+    ///                        against, which is done in the form of:
+    ///                        (vma.attributes & attribute_mask) != attribute.
+    /// @param ignore_mask     The memory attributes to ignore during the check.
+    ///
+    /// @returns If successful, returns a tuple containing the memory attributes
+    ///          (with ignored bits specified by ignore_mask unset), memory permissions, and
+    ///          memory state across the memory range.
+    /// @returns If not successful, returns ERR_INVALID_ADDRESS_STATE.
+    ///
+    CheckResults CheckRangeState(VAddr address, u64 size, MemoryState state_mask, MemoryState state,
+                                 VMAPermission permission_mask, VMAPermission permissions,
+                                 MemoryAttribute attribute_mask, MemoryAttribute attribute,
+                                 MemoryAttribute ignore_mask) const;
+
     /**
      * A map covering the entirety of the managed address space, keyed by the `base` field of each
      * VMA. It must always be modified by splitting or merging VMAs, so that the invariant