author    ReinUsesLisp    2020-06-09 18:27:59 -0300
committer ReinUsesLisp    2020-06-09 18:30:49 -0300
commit    6508cdd00351e51c7d5867c00da60781c133ade8 (patch)
tree      4d86ffb3c43a37418235df2d980252bc9b3660b3 /src/video_core/buffer_cache
parent    Merge pull request #4040 from ReinUsesLisp/nv-transform-feedback (diff)
buffer_cache: Avoid passing references of shared pointers and misc style changes
Instead of using a shared pointer as a template argument, use the underlying type and manage shared pointers explicitly. This makes it easier to remove shared pointers from the cache later. While we are at it, make some misc style changes and general improvements (like using insert_or_assign instead of operator[] + operator=).
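
To make the two idioms concrete, here is a minimal, hypothetical sketch; the Buffer struct and BlockMap alias are illustrative stand-ins, not the cache's real types:

    #include <cstdint>
    #include <memory>
    #include <unordered_map>

    struct Buffer {}; // hypothetical stand-in for a backend buffer block

    // Before: the cache was templated on the owning handle itself
    // (OwnerBuffer = std::shared_ptr<...>). After: it is templated on the
    // underlying type, and ownership is spelled out where it lives.
    using BlockMap = std::unordered_map<std::uint64_t, std::shared_ptr<Buffer>>;

    // operator[] + operator=: a missing key is first default-constructed
    // (an empty shared_ptr), then assigned over.
    void AssignOld(BlockMap& blocks, std::uint64_t page, std::shared_ptr<Buffer> buf) {
        blocks[page] = std::move(buf);
    }

    // insert_or_assign: the entry is written in one step, with no
    // default-constructed intermediate value.
    void AssignNew(BlockMap& blocks, std::uint64_t page, std::shared_ptr<Buffer> buf) {
        blocks.insert_or_assign(page, std::move(buf));
    }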
Diffstat (limited to 'src/video_core/buffer_cache')
-rw-r--r--  src/video_core/buffer_cache/buffer_block.h    27
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h   199
2 files changed, 112 insertions, 114 deletions
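
The TickFrame() hunks in buffer_cache.h below also switch pending_destruction from std::list to std::queue. A minimal sketch of that epoch-based deferred destruction, assuming a hypothetical Block type in place of the cache's real buffer blocks:

    #include <cstdint>
    #include <memory>
    #include <queue>

    struct Block {
        std::uint64_t epoch = 0;
    };

    class DeferredDeleter {
    public:
        // Called when a block is retired (cf. QueueDestruction in the diff).
        void Queue(std::shared_ptr<Block> block) {
            block->epoch = epoch;
            pending.push(std::move(block));
        }

        // Called once per frame (cf. TickFrame in the diff).
        void Tick() {
            ++epoch;
            while (!pending.empty()) {
                // Delay at least 4 frames so in-flight GPU work (triple
                // buffering on some drivers) can no longer touch the block.
                static constexpr std::uint64_t epochs_to_destroy = 5;
                if (pending.front()->epoch + epochs_to_destroy > epoch) {
                    break; // the queue is FIFO by epoch; nothing newer is ready
                }
                pending.pop(); // dropping the last shared_ptr frees the block
            }
        }

    private:
        std::queue<std::shared_ptr<Block>> pending;
        std::uint64_t epoch = 0;
    };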
diff --git a/src/video_core/buffer_cache/buffer_block.h b/src/video_core/buffer_cache/buffer_block.h
index e35ee0b67..e64170e66 100644
--- a/src/video_core/buffer_cache/buffer_block.h
+++ b/src/video_core/buffer_cache/buffer_block.h
@@ -15,48 +15,47 @@ namespace VideoCommon {
 
 class BufferBlock {
 public:
-    bool Overlaps(const VAddr start, const VAddr end) const {
+    bool Overlaps(VAddr start, VAddr end) const {
         return (cpu_addr < end) && (cpu_addr_end > start);
     }
 
-    bool IsInside(const VAddr other_start, const VAddr other_end) const {
+    bool IsInside(VAddr other_start, VAddr other_end) const {
         return cpu_addr <= other_start && other_end <= cpu_addr_end;
     }
 
-    std::size_t GetOffset(const VAddr in_addr) {
+    std::size_t Offset(VAddr in_addr) const {
         return static_cast<std::size_t>(in_addr - cpu_addr);
     }
 
-    VAddr GetCpuAddr() const {
+    VAddr CpuAddr() const {
         return cpu_addr;
     }
 
-    VAddr GetCpuAddrEnd() const {
+    VAddr CpuAddrEnd() const {
         return cpu_addr_end;
     }
 
-    void SetCpuAddr(const VAddr new_addr) {
+    void SetCpuAddr(VAddr new_addr) {
         cpu_addr = new_addr;
         cpu_addr_end = new_addr + size;
     }
 
-    std::size_t GetSize() const {
+    std::size_t Size() const {
         return size;
     }
 
-    void SetEpoch(u64 new_epoch) {
-        epoch = new_epoch;
+    u64 Epoch() const {
+        return epoch;
     }
 
-    u64 GetEpoch() {
-        return epoch;
+    void SetEpoch(u64 new_epoch) {
+        epoch = new_epoch;
     }
 
 protected:
-    explicit BufferBlock(VAddr cpu_addr, const std::size_t size) : size{size} {
-        SetCpuAddr(cpu_addr);
+    explicit BufferBlock(VAddr cpu_addr_, std::size_t size_) : size{size_} {
+        SetCpuAddr(cpu_addr_);
     }
-    ~BufferBlock() = default;
 
 private:
     VAddr cpu_addr{};
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index b88fce2cd..efc480d08 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -30,12 +30,16 @@
 
 namespace VideoCommon {
 
-template <typename OwnerBuffer, typename BufferType, typename StreamBuffer>
+template <typename Buffer, typename BufferType, typename StreamBuffer>
 class BufferCache {
     using IntervalSet = boost::icl::interval_set<VAddr>;
     using IntervalType = typename IntervalSet::interval_type;
     using VectorMapInterval = boost::container::small_vector<MapInterval*, 1>;
 
+    static constexpr u64 WRITE_PAGE_BIT = 11;
+    static constexpr u64 BLOCK_PAGE_BITS = 21;
+    static constexpr u64 BLOCK_PAGE_SIZE = 1ULL << BLOCK_PAGE_BITS;
+
 public:
     using BufferInfo = std::pair<BufferType, u64>;
 
@@ -82,7 +86,7 @@ public:
             }
         }
 
-        OwnerBuffer block = GetBlock(cpu_addr, size);
+        Buffer* const block = GetBlock(cpu_addr, size);
         MapInterval* const map = MapAddress(block, gpu_addr, cpu_addr, size);
         if (!map) {
             return {GetEmptyBuffer(size), 0};
@@ -98,7 +102,7 @@ public:
             }
         }
 
-        return {ToHandle(block), static_cast<u64>(block->GetOffset(cpu_addr))};
+        return {block->Handle(), static_cast<u64>(block->Offset(cpu_addr))};
     }
 
     /// Uploads from a host memory. Returns the OpenGL buffer where it's located and its offset.
@@ -125,16 +129,18 @@ public:
         return std::exchange(invalidated, false);
     }
 
+    /// Function called at the end of each frame, intended for deferred operations
     void TickFrame() {
         ++epoch;
+
         while (!pending_destruction.empty()) {
             // Delay at least 4 frames before destruction.
             // This is due to triple buffering happening on some drivers.
             static constexpr u64 epochs_to_destroy = 5;
-            if (pending_destruction.front()->GetEpoch() + epochs_to_destroy > epoch) {
+            if (pending_destruction.front()->Epoch() + epochs_to_destroy > epoch) {
                 break;
             }
-            pending_destruction.pop_front();
+            pending_destruction.pop();
         }
     }
 
@@ -249,23 +255,21 @@ public:
 
 protected:
     explicit BufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
-                         std::unique_ptr<StreamBuffer> stream_buffer)
-        : rasterizer{rasterizer}, system{system}, stream_buffer{std::move(stream_buffer)},
-          stream_buffer_handle{this->stream_buffer->GetHandle()} {}
+                         std::unique_ptr<StreamBuffer> stream_buffer_)
+        : rasterizer{rasterizer}, system{system}, stream_buffer{std::move(stream_buffer_)},
+          stream_buffer_handle{stream_buffer->Handle()} {}
 
     ~BufferCache() = default;
 
-    virtual BufferType ToHandle(const OwnerBuffer& storage) = 0;
-
-    virtual OwnerBuffer CreateBlock(VAddr cpu_addr, std::size_t size) = 0;
+    virtual std::shared_ptr<Buffer> CreateBlock(VAddr cpu_addr, std::size_t size) = 0;
 
-    virtual void UploadBlockData(const OwnerBuffer& buffer, std::size_t offset, std::size_t size,
+    virtual void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
                                  const u8* data) = 0;
 
-    virtual void DownloadBlockData(const OwnerBuffer& buffer, std::size_t offset, std::size_t size,
+    virtual void DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
                                    u8* data) = 0;
 
-    virtual void CopyBlock(const OwnerBuffer& src, const OwnerBuffer& dst, std::size_t src_offset,
+    virtual void CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset,
                            std::size_t dst_offset, std::size_t size) = 0;
 
     virtual BufferInfo ConstBufferUpload(const void* raw_pointer, std::size_t size) {
@@ -321,7 +325,7 @@ protected:
     }
 
 private:
-    MapInterval* MapAddress(const OwnerBuffer& block, GPUVAddr gpu_addr, VAddr cpu_addr,
+    MapInterval* MapAddress(const Buffer* block, GPUVAddr gpu_addr, VAddr cpu_addr,
                             std::size_t size) {
         const VectorMapInterval overlaps = GetMapsInRange(cpu_addr, size);
         if (overlaps.empty()) {
@@ -329,11 +333,11 @@ private:
             const VAddr cpu_addr_end = cpu_addr + size;
             if (memory_manager.IsGranularRange(gpu_addr, size)) {
                 u8* host_ptr = memory_manager.GetPointer(gpu_addr);
-                UploadBlockData(block, block->GetOffset(cpu_addr), size, host_ptr);
+                UploadBlockData(*block, block->Offset(cpu_addr), size, host_ptr);
             } else {
                 staging_buffer.resize(size);
                 memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
-                UploadBlockData(block, block->GetOffset(cpu_addr), size, staging_buffer.data());
+                UploadBlockData(*block, block->Offset(cpu_addr), size, staging_buffer.data());
             }
             return Register(MapInterval(cpu_addr, cpu_addr_end, gpu_addr));
         }
@@ -376,7 +380,7 @@ private:
         return map;
     }
 
-    void UpdateBlock(const OwnerBuffer& block, VAddr start, VAddr end,
+    void UpdateBlock(const Buffer* block, VAddr start, VAddr end,
                      const VectorMapInterval& overlaps) {
         const IntervalType base_interval{start, end};
         IntervalSet interval_set{};
@@ -386,13 +390,13 @@ private:
             interval_set.subtract(subtract);
         }
         for (auto& interval : interval_set) {
-            std::size_t size = interval.upper() - interval.lower();
-            if (size > 0) {
-                staging_buffer.resize(size);
-                system.Memory().ReadBlockUnsafe(interval.lower(), staging_buffer.data(), size);
-                UploadBlockData(block, block->GetOffset(interval.lower()), size,
-                                staging_buffer.data());
+            const std::size_t size = interval.upper() - interval.lower();
+            if (size == 0) {
+                continue;
             }
+            staging_buffer.resize(size);
+            system.Memory().ReadBlockUnsafe(interval.lower(), staging_buffer.data(), size);
+            UploadBlockData(*block, block->Offset(interval.lower()), size, staging_buffer.data());
         }
     }
 
@@ -422,10 +426,14 @@ private:
     }
 
     void FlushMap(MapInterval* map) {
+        const auto it = blocks.find(map->start >> BLOCK_PAGE_BITS);
+        ASSERT_OR_EXECUTE(it != blocks.end(), return;);
+
+        std::shared_ptr<Buffer> block = it->second;
+
         const std::size_t size = map->end - map->start;
-        OwnerBuffer block = blocks[map->start >> block_page_bits];
         staging_buffer.resize(size);
-        DownloadBlockData(block, block->GetOffset(map->start), size, staging_buffer.data());
+        DownloadBlockData(*block, block->Offset(map->start), size, staging_buffer.data());
         system.Memory().WriteBlockUnsafe(map->start, staging_buffer.data(), size);
         map->MarkAsModified(false, 0);
     }
@@ -448,97 +456,89 @@ private:
         buffer_offset = offset_aligned;
     }
 
-    OwnerBuffer EnlargeBlock(OwnerBuffer buffer) {
-        const std::size_t old_size = buffer->GetSize();
-        const std::size_t new_size = old_size + block_page_size;
-        const VAddr cpu_addr = buffer->GetCpuAddr();
-        OwnerBuffer new_buffer = CreateBlock(cpu_addr, new_size);
-        CopyBlock(buffer, new_buffer, 0, 0, old_size);
-        buffer->SetEpoch(epoch);
-        pending_destruction.push_back(buffer);
+    std::shared_ptr<Buffer> EnlargeBlock(std::shared_ptr<Buffer> buffer) {
+        const std::size_t old_size = buffer->Size();
+        const std::size_t new_size = old_size + BLOCK_PAGE_SIZE;
+        const VAddr cpu_addr = buffer->CpuAddr();
+        std::shared_ptr<Buffer> new_buffer = CreateBlock(cpu_addr, new_size);
+        CopyBlock(*buffer, *new_buffer, 0, 0, old_size);
+        QueueDestruction(std::move(buffer));
+
         const VAddr cpu_addr_end = cpu_addr + new_size - 1;
-        u64 page_start = cpu_addr >> block_page_bits;
-        const u64 page_end = cpu_addr_end >> block_page_bits;
-        while (page_start <= page_end) {
-            blocks[page_start] = new_buffer;
-            ++page_start;
+        const u64 page_end = cpu_addr_end >> BLOCK_PAGE_BITS;
+        for (u64 page_start = cpu_addr >> BLOCK_PAGE_BITS; page_start <= page_end; ++page_start) {
+            blocks.insert_or_assign(page_start, new_buffer);
         }
+
         return new_buffer;
     }
 
-    OwnerBuffer MergeBlocks(OwnerBuffer first, OwnerBuffer second) {
-        const std::size_t size_1 = first->GetSize();
-        const std::size_t size_2 = second->GetSize();
-        const VAddr first_addr = first->GetCpuAddr();
-        const VAddr second_addr = second->GetCpuAddr();
+    std::shared_ptr<Buffer> MergeBlocks(std::shared_ptr<Buffer> first,
+                                        std::shared_ptr<Buffer> second) {
+        const std::size_t size_1 = first->Size();
+        const std::size_t size_2 = second->Size();
+        const VAddr first_addr = first->CpuAddr();
+        const VAddr second_addr = second->CpuAddr();
         const VAddr new_addr = std::min(first_addr, second_addr);
         const std::size_t new_size = size_1 + size_2;
-        OwnerBuffer new_buffer = CreateBlock(new_addr, new_size);
-        CopyBlock(first, new_buffer, 0, new_buffer->GetOffset(first_addr), size_1);
-        CopyBlock(second, new_buffer, 0, new_buffer->GetOffset(second_addr), size_2);
-        first->SetEpoch(epoch);
-        second->SetEpoch(epoch);
-        pending_destruction.push_back(first);
-        pending_destruction.push_back(second);
+
+        std::shared_ptr<Buffer> new_buffer = CreateBlock(new_addr, new_size);
+        CopyBlock(*first, *new_buffer, 0, new_buffer->Offset(first_addr), size_1);
+        CopyBlock(*second, *new_buffer, 0, new_buffer->Offset(second_addr), size_2);
+        QueueDestruction(std::move(first));
+        QueueDestruction(std::move(second));
+
         const VAddr cpu_addr_end = new_addr + new_size - 1;
-        u64 page_start = new_addr >> block_page_bits;
-        const u64 page_end = cpu_addr_end >> block_page_bits;
-        while (page_start <= page_end) {
-            blocks[page_start] = new_buffer;
-            ++page_start;
+        const u64 page_end = cpu_addr_end >> BLOCK_PAGE_BITS;
+        for (u64 page_start = new_addr >> BLOCK_PAGE_BITS; page_start <= page_end; ++page_start) {
+            blocks.insert_or_assign(page_start, new_buffer);
         }
         return new_buffer;
     }
 
-    OwnerBuffer GetBlock(const VAddr cpu_addr, const std::size_t size) {
-        OwnerBuffer found;
+    Buffer* GetBlock(VAddr cpu_addr, std::size_t size) {
+        std::shared_ptr<Buffer> found;
+
         const VAddr cpu_addr_end = cpu_addr + size - 1;
-        u64 page_start = cpu_addr >> block_page_bits;
-        const u64 page_end = cpu_addr_end >> block_page_bits;
-        while (page_start <= page_end) {
+        const u64 page_end = cpu_addr_end >> BLOCK_PAGE_BITS;
+        for (u64 page_start = cpu_addr >> BLOCK_PAGE_BITS; page_start <= page_end; ++page_start) {
             auto it = blocks.find(page_start);
             if (it == blocks.end()) {
                 if (found) {
                     found = EnlargeBlock(found);
-                } else {
-                    const VAddr start_addr = (page_start << block_page_bits);
-                    found = CreateBlock(start_addr, block_page_size);
-                    blocks[page_start] = found;
-                }
-            } else {
-                if (found) {
-                    if (found == it->second) {
-                        ++page_start;
-                        continue;
-                    }
-                    found = MergeBlocks(found, it->second);
-                } else {
-                    found = it->second;
+                    continue;
                 }
+                const VAddr start_addr = page_start << BLOCK_PAGE_BITS;
+                found = CreateBlock(start_addr, BLOCK_PAGE_SIZE);
+                blocks.insert_or_assign(page_start, found);
+                continue;
+            }
+            if (!found) {
+                found = it->second;
+                continue;
+            }
+            if (found != it->second) {
+                found = MergeBlocks(std::move(found), it->second);
             }
-            ++page_start;
         }
-        return found;
+        return found.get();
     }
 
-    void MarkRegionAsWritten(const VAddr start, const VAddr end) {
-        u64 page_start = start >> write_page_bit;
-        const u64 page_end = end >> write_page_bit;
-        while (page_start <= page_end) {
+    void MarkRegionAsWritten(VAddr start, VAddr end) {
+        const u64 page_end = end >> WRITE_PAGE_BIT;
+        for (u64 page_start = start >> WRITE_PAGE_BIT; page_start <= page_end; ++page_start) {
             auto it = written_pages.find(page_start);
             if (it != written_pages.end()) {
                 it->second = it->second + 1;
             } else {
-                written_pages[page_start] = 1;
+                written_pages.insert_or_assign(page_start, 1);
             }
-            ++page_start;
         }
     }
 
-    void UnmarkRegionAsWritten(const VAddr start, const VAddr end) {
-        u64 page_start = start >> write_page_bit;
-        const u64 page_end = end >> write_page_bit;
-        while (page_start <= page_end) {
+    void UnmarkRegionAsWritten(VAddr start, VAddr end) {
+        const u64 page_end = end >> WRITE_PAGE_BIT;
+        for (u64 page_start = start >> WRITE_PAGE_BIT; page_start <= page_end; ++page_start) {
             auto it = written_pages.find(page_start);
             if (it != written_pages.end()) {
                 if (it->second > 1) {
@@ -547,22 +547,24 @@ private:
                     written_pages.erase(it);
                 }
             }
-            ++page_start;
         }
     }
 
-    bool IsRegionWritten(const VAddr start, const VAddr end) const {
-        u64 page_start = start >> write_page_bit;
-        const u64 page_end = end >> write_page_bit;
-        while (page_start <= page_end) {
+    bool IsRegionWritten(VAddr start, VAddr end) const {
+        const u64 page_end = end >> WRITE_PAGE_BIT;
+        for (u64 page_start = start >> WRITE_PAGE_BIT; page_start <= page_end; ++page_start) {
             if (written_pages.count(page_start) > 0) {
                 return true;
             }
-            ++page_start;
         }
         return false;
     }
 
+    void QueueDestruction(std::shared_ptr<Buffer> buffer) {
+        buffer->SetEpoch(epoch);
+        pending_destruction.push(std::move(buffer));
+    }
+
     void MarkForAsyncFlush(MapInterval* map) {
         if (!uncommitted_flushes) {
             uncommitted_flushes = std::make_shared<std::unordered_set<MapInterval*>>();
@@ -574,7 +576,7 @@ private:
     Core::System& system;
 
     std::unique_ptr<StreamBuffer> stream_buffer;
-    BufferType stream_buffer_handle{};
+    BufferType stream_buffer_handle;
 
     bool invalidated = false;
 
@@ -586,18 +588,15 @@ private:
     boost::intrusive::set<MapInterval, boost::intrusive::compare<MapIntervalCompare>>
         mapped_addresses;
 
-    static constexpr u64 write_page_bit = 11;
     std::unordered_map<u64, u32> written_pages;
+    std::unordered_map<u64, std::shared_ptr<Buffer>> blocks;
 
-    static constexpr u64 block_page_bits = 21;
-    static constexpr u64 block_page_size = 1ULL << block_page_bits;
-    std::unordered_map<u64, OwnerBuffer> blocks;
-
-    std::list<OwnerBuffer> pending_destruction;
+    std::queue<std::shared_ptr<Buffer>> pending_destruction;
     u64 epoch = 0;
     u64 modified_ticks = 0;
 
     std::vector<u8> staging_buffer;
+
     std::list<MapInterval*> marked_for_unregister;
 
     std::shared_ptr<std::unordered_set<MapInterval*>> uncommitted_flushes;