path: root/src/core/memory.cpp
Diffstat (limited to 'src/core/memory.cpp')
 src/core/memory.cpp | 18 ++++++++++++++++++++
 1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 9857278f6..f285c6f63 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -12,6 +12,7 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "common/page_table.h"
+#include "common/settings.h"
 #include "common/swap.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
@@ -32,6 +33,7 @@ struct Memory::Impl {
 
     void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
         current_page_table = &process.PageTable().PageTableImpl();
+        current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
 
         const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth();
 
@@ -41,13 +43,23 @@ struct Memory::Impl {
     void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
         ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        ASSERT_MSG(target >= DramMemoryMap::Base && target < DramMemoryMap::End,
+                   "Out of bounds target: {:016X}", target);
         MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
+
+        if (Settings::IsFastmemEnabled()) {
+            system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
+        }
     }
 
     void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
         ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
         MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
+
+        if (Settings::IsFastmemEnabled()) {
+            system.DeviceMemory().buffer.Unmap(base, size);
+        }
     }
 
     bool IsValidVirtualAddress(const Kernel::KProcess& process, const VAddr vaddr) const {
@@ -466,6 +478,12 @@ struct Memory::Impl {
         if (vaddr == 0) {
             return;
         }
+
+        if (Settings::IsFastmemEnabled()) {
+            const bool is_read_enable = Settings::IsGPULevelHigh() || !cached;
+            system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
+        }
+
         // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
         // address space, marking the region as un/cached. The region is marked un/cached at a
         // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
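
Background note (not part of the commit): the diff wires guest memory mapping into a "fastmem" arena, where all emulated DRAM lives in one host allocation and each guest virtual range is an alias of a slice of that allocation, so JIT-emitted code can touch guest memory with a plain host load/store instead of a software page-table walk, and Protect() downgrades permissions on rasterizer-cached pages so accesses fault back into the slow path. Below is a minimal, Linux-only sketch of that idea under stated assumptions: it uses memfd_create/mmap directly, omits error handling, and the names (FastmemArena, kBacking, kArena) are illustrative, not yuzu's actual DeviceMemory/HostMemory API.

// Hypothetical sketch of a fastmem arena; not yuzu code.
#include <cstdint>
#include <cstring>
#include <cstdio>
#include <sys/mman.h>
#include <unistd.h>

class FastmemArena {
public:
    FastmemArena(std::size_t backing_size, std::size_t virtual_size) {
        // One file descriptor backs all of emulated DRAM, so several guest
        // ranges can alias the same backing pages.
        fd_ = memfd_create("fastmem", 0);
        ftruncate(fd_, static_cast<off_t>(backing_size));
        // Reserve a large, initially inaccessible virtual arena; guest
        // mappings are later placed inside it with MAP_FIXED.
        arena_ = static_cast<std::uint8_t*>(
            mmap(nullptr, virtual_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
    }

    std::uint8_t* VirtualBasePointer() const { return arena_; }

    // Alias guest virtual address 'vaddr' to 'offset' bytes into backing DRAM.
    void Map(std::uint64_t vaddr, std::uint64_t offset, std::size_t size) {
        mmap(arena_ + vaddr, size, PROT_READ | PROT_WRITE,
             MAP_SHARED | MAP_FIXED, fd_, static_cast<off_t>(offset));
    }

    // Replace the alias with an inaccessible reservation again.
    void Unmap(std::uint64_t vaddr, std::size_t size) {
        mmap(arena_ + vaddr, size, PROT_NONE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    }

    // Mirrors the Protect() call in the diff: removing write (and optionally
    // read) access makes touches of cached pages fault into the slow path.
    void Protect(std::uint64_t vaddr, std::size_t size, bool read, bool write) {
        int prot = 0;
        if (read)  prot |= PROT_READ;
        if (write) prot |= PROT_WRITE;
        mprotect(arena_ + vaddr, size, prot);
    }

private:
    int fd_ = -1;
    std::uint8_t* arena_ = nullptr;
};

int main() {
    constexpr std::size_t kBacking = 4ULL << 20; // 4 MiB of "DRAM"
    constexpr std::size_t kArena = 1ULL << 30;   // 1 GiB of guest address space
    FastmemArena arena(kBacking, kArena);

    arena.Map(0x10000, 0, 0x1000); // guest 0x10000 -> DRAM offset 0
    std::strcpy(reinterpret_cast<char*>(arena.VirtualBasePointer() + 0x10000), "hi");
    std::puts(reinterpret_cast<char*>(arena.VirtualBasePointer() + 0x10000));

    arena.Protect(0x10000, 0x1000, true, false); // emulate a rasterizer-cached page
    arena.Unmap(0x10000, 0x1000);
    return 0;
}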