summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/core/hle/kernel/k_page_table.cpp105
-rw-r--r--src/core/hle/kernel/k_page_table.h19
2 files changed, 124 insertions, 0 deletions
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 0a932334f..e47d9ce29 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -2068,4 +2068,109 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
2068 return ResultSuccess; 2068 return ResultSuccess;
2069} 2069}
2070 2070
2071ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr,
2072 size_t size, KMemoryState state_mask, KMemoryState state,
2073 KMemoryPermission perm_mask, KMemoryPermission perm,
2074 KMemoryAttribute attr_mask, KMemoryAttribute attr,
2075 KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
2076 // Validate basic preconditions.
2077 ASSERT((lock_attr & attr) == KMemoryAttribute::None);
2078 ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
2079 KMemoryAttribute::None);
2080
2081 // Validate the lock request.
2082 const size_t num_pages = size / PageSize;
2083 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
2084
2085 // Lock the table.
2086 KScopedLightLock lk(general_lock);
2087
2088 // Check that the output page group is empty, if it exists.
2089 if (out_pg) {
2090 ASSERT(out_pg->GetNumPages() == 0);
2091 }
2092
2093 // Check the state.
2094 KMemoryState old_state{};
2095 KMemoryPermission old_perm{};
2096 KMemoryAttribute old_attr{};
2097 size_t num_allocator_blocks{};
2098 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
2099 std::addressof(old_attr), std::addressof(num_allocator_blocks),
2100 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
2101 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
2102 attr_mask, attr));
2103
2104 // Get the physical address, if we're supposed to.
2105 if (out_paddr != nullptr) {
2106 ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
2107 }
2108
2109 // Make the page group, if we're supposed to.
2110 if (out_pg != nullptr) {
2111 R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
2112 }
2113
2114 // Decide on new perm and attr.
2115 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
2116 KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
2117
2118 // Update permission, if we need to.
2119 if (new_perm != old_perm) {
2120 R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
2121 }
2122
2123 // Apply the memory block updates.
2124 block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
2125
2126 return ResultSuccess;
2127}
2128
2129ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
2130 KMemoryState state, KMemoryPermission perm_mask,
2131 KMemoryPermission perm, KMemoryAttribute attr_mask,
2132 KMemoryAttribute attr, KMemoryPermission new_perm,
2133 KMemoryAttribute lock_attr, const KPageLinkedList* pg) {
2134 // Validate basic preconditions.
2135 ASSERT((attr_mask & lock_attr) == lock_attr);
2136 ASSERT((attr & lock_attr) == lock_attr);
2137
2138 // Validate the unlock request.
2139 const size_t num_pages = size / PageSize;
2140 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
2141
2142 // Lock the table.
2143 KScopedLightLock lk(general_lock);
2144
2145 // Check the state.
2146 KMemoryState old_state{};
2147 KMemoryPermission old_perm{};
2148 KMemoryAttribute old_attr{};
2149 size_t num_allocator_blocks{};
2150 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
2151 std::addressof(old_attr), std::addressof(num_allocator_blocks),
2152 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
2153 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
2154 attr_mask, attr));
2155
2156 // Check the page group.
2157 if (pg != nullptr) {
2158 UNIMPLEMENTED_MSG("PageGroup support is unimplemented!");
2159 }
2160
2161 // Decide on new perm and attr.
2162 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
2163 KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
2164
2165 // Update permission, if we need to.
2166 if (new_perm != old_perm) {
2167 R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
2168 }
2169
2170 // Apply the memory block updates.
2171 block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
2172
2173 return ResultSuccess;
2174}
2175
2071} // namespace Kernel 2176} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index b61a39145..bfabdf38c 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -164,6 +164,17 @@ private:
164 attr_mask, attr, ignore_attr); 164 attr_mask, attr, ignore_attr);
165 } 165 }
166 166
    // Locks [addr, addr+size) if it matches the given state/perm/attr masks,
    // applying new_perm (None = keep current) and setting lock_attr on the range.
    // Optionally returns the backing physical address and a page group.
167 ResultCode LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
168                              KMemoryState state_mask, KMemoryState state,
169                              KMemoryPermission perm_mask, KMemoryPermission perm,
170                              KMemoryAttribute attr_mask, KMemoryAttribute attr,
171                              KMemoryPermission new_perm, KMemoryAttribute lock_attr);
    // Reverses LockMemoryAndOpen: clears lock_attr from the range and restores
    // (or applies) new_perm. `pg` optionally identifies the page group to verify
    // against; verification is currently unimplemented in the definition.
172 ResultCode UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
173                         KMemoryPermission perm_mask, KMemoryPermission perm,
174                         KMemoryAttribute attr_mask, KMemoryAttribute attr,
175                         KMemoryPermission new_perm, KMemoryAttribute lock_attr,
176                         const KPageLinkedList* pg);
177
167 ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages); 178 ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
168 179
169 bool IsLockedByCurrentThread() const { 180 bool IsLockedByCurrentThread() const {
@@ -176,6 +187,14 @@ private:
176 return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr); 187 return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
177 } 188 }
178 189
190 bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
191 ASSERT(this->IsLockedByCurrentThread());
192
193 *out = GetPhysicalAddr(virt_addr);
194
195 return *out != 0;
196 }
197
179 mutable KLightLock general_lock; 198 mutable KLightLock general_lock;
180 mutable KLightLock map_physical_memory_lock; 199 mutable KLightLock map_physical_memory_lock;
181 200