Diffstat (limited to 'src/common/ring_buffer.h')
-rw-r--r--  src/common/ring_buffer.h  111
1 file changed, 111 insertions, 0 deletions
diff --git a/src/common/ring_buffer.h b/src/common/ring_buffer.h
new file mode 100644
index 000000000..30d934a38
--- /dev/null
+++ b/src/common/ring_buffer.h
@@ -0,0 +1,111 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <algorithm>
#include <array>
#include <atomic>
#include <cstddef>
#include <cstring>
#include <limits>
#include <type_traits>
#include <vector>
#include "common/common_types.h"

namespace Common {

/// SPSC ring buffer
/// @tparam T Element type
/// @tparam capacity Number of slots in ring buffer
/// @tparam granularity Slot size in terms of number of elements
template <typename T, size_t capacity, size_t granularity = 1>
class RingBuffer {
    /// A "slot" is made of `granularity` elements of `T`.
    static constexpr size_t slot_size = granularity * sizeof(T);
    // T must be safely memcpy-able and have a trivial default constructor.
    static_assert(std::is_trivial_v<T>);
    // Ensure capacity is sensible.
    static_assert(capacity < std::numeric_limits<size_t>::max() / 2 / granularity);
    static_assert((capacity & (capacity - 1)) == 0, "capacity must be a power of two");
    // Ensure lock-free.
    static_assert(std::atomic<size_t>::is_always_lock_free);
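    // The read/write indices are free-running: they only ever increase, and are
    // reduced modulo `capacity` only when indexing into the backing array. Because
    // `capacity` is a power of two, the size and free-slot computations stay
    // correct even after the unsigned counters wrap around.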

public:
    /// Pushes slots into the ring buffer
    /// @param new_slots Pointer to the slots to push
    /// @param slot_count Number of slots to push
    /// @returns The number of slots actually pushed
    size_t Push(const void* new_slots, size_t slot_count) {
        const size_t write_index = m_write_index.load();
        const size_t slots_free = capacity + m_read_index.load() - write_index;
        const size_t push_count = std::min(slot_count, slots_free);

        const size_t pos = write_index % capacity;
        const size_t first_copy = std::min(capacity - pos, push_count);
        const size_t second_copy = push_count - first_copy;

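        // Copy as much as fits before the end of the backing array, then wrap
        // around and copy any remainder to the front.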
        const char* in = static_cast<const char*>(new_slots);
        std::memcpy(m_data.data() + pos * granularity, in, first_copy * slot_size);
        in += first_copy * slot_size;
        std::memcpy(m_data.data(), in, second_copy * slot_size);

        m_write_index.store(write_index + push_count);

        return push_count;
    }

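    /// Pushes the contents of a vector, treating every `granularity` elements as one slot.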
    size_t Push(const std::vector<T>& input) {
        // Convert the element count into a slot count, mirroring the vector Pop overload.
        return Push(input.data(), input.size() / granularity);
    }

    /// Pops slots from the ring buffer
    /// @param output Where to store the popped slots
    /// @param max_slots Maximum number of slots to pop
    /// @returns The number of slots actually popped
    size_t Pop(void* output, size_t max_slots = ~size_t(0)) {
        const size_t read_index = m_read_index.load();
        const size_t slots_filled = m_write_index.load() - read_index;
        const size_t pop_count = std::min(slots_filled, max_slots);

        const size_t pos = read_index % capacity;
        const size_t first_copy = std::min(capacity - pos, pop_count);
        const size_t second_copy = pop_count - first_copy;

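        // Mirror of Push: read up to the end of the backing array first, then wrap
        // around to the front for any remainder.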
        char* out = static_cast<char*>(output);
        std::memcpy(out, m_data.data() + pos * granularity, first_copy * slot_size);
        out += first_copy * slot_size;
        std::memcpy(out, m_data.data(), second_copy * slot_size);

        m_read_index.store(read_index + pop_count);

        return pop_count;
    }

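    /// Pops up to `max_slots` slots and returns them as a vector of elements.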
    std::vector<T> Pop(size_t max_slots = ~size_t(0)) {
        std::vector<T> out(std::min(max_slots, capacity) * granularity);
        const size_t count = Pop(out.data(), out.size() / granularity);
        out.resize(count * granularity);
        return out;
    }

    /// @returns Number of slots used
    size_t Size() const {
        return m_write_index.load() - m_read_index.load();
    }

    /// @returns Maximum number of slots the ring buffer can hold
    constexpr size_t Capacity() const {
        return capacity;
    }

private:
    // It is important to align the below variables for performance reasons:
    // having them on the same cache line would result in false sharing between them.
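    // The 128-byte alignment (two typical 64-byte cache lines) is presumably chosen
    // so that adjacent-line prefetching cannot pull both indices into one line pair.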
    alignas(128) std::atomic<size_t> m_read_index{0};
    alignas(128) std::atomic<size_t> m_write_index{0};

    std::array<T, granularity * capacity> m_data;
};

} // namespace Common
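
A minimal usage sketch follows. It is not part of the patch, and the producer/consumer split, the s16 sample type, and the buffer dimensions are illustrative assumptions:

    #include <cstddef>
    #include <vector>
    #include "common/ring_buffer.h"

    // One thread may call Producer() while another calls Consumer(); the class is
    // only safe for this single-producer/single-consumer pattern.
    static Common::RingBuffer<s16, 0x1000, 2> g_queue; // 4096 slots of 2 samples each

    void Producer(const std::vector<s16>& samples) {
        // Push pairs of samples; any slots that do not fit are dropped here.
        g_queue.Push(samples.data(), samples.size() / 2);
    }

    void Consumer() {
        // Pop at most 512 slots (1024 samples); fewer are returned if the buffer holds less.
        const std::vector<s16> samples = g_queue.Pop(512);
        // ... hand `samples` to the audio backend ...
    }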