author     Lioncash    2022-11-28 16:05:59 -0500
committer  Lioncash    2022-11-28 16:06:01 -0500
commit     d0883027d60c0d6d289e23900972cfa067fc9c54
tree       d8d6fef49b726db0bbb0e60f1f3eb1843f7311c3 /src
parent     Merge pull request #9325 from german77/default_by_default
syncpoint_manager: Reduce number of bounds checks
The only time we need to check bounds is on the first access.
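In practice that means calling std::array::at() once per function and reusing the returned reference, instead of repeating the checked lookup on every access. The standalone sketch below is not part of this commit; the Syncpoint and Manager types and the array size are illustrative stand-ins for the real syncpoint table, and only show the before/after shape of the pattern:

// Standalone sketch (not yuzu code): types and array size are illustrative.
#include <array>
#include <cstdint>

struct Syncpoint {
    bool reserved{};
    bool interface_managed{};
};

struct Manager {
    std::array<Syncpoint, 192> syncpoints{};

    // Before: each syncpoints.at(id) repeats the bounds check.
    void ReserveRepeated(std::uint32_t id) {
        syncpoints.at(id).reserved = true;
        syncpoints.at(id).interface_managed = true;
    }

    // After: one .at(id) call validates the index; later accesses reuse the
    // cached reference and skip the redundant checks.
    void ReserveCached(std::uint32_t id) {
        auto& syncpoint = syncpoints.at(id); // throws std::out_of_range if id is invalid
        syncpoint.reserved = true;
        syncpoint.interface_managed = true;
    }
};

int main() {
    Manager manager;
    manager.ReserveCached(3);
}

Behaviour on an out-of-range id is unchanged in this pattern: the single .at() call still performs the check.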
Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/service/nvdrv/core/syncpoint_manager.cpp  42
1 file changed, 28 insertions(+), 14 deletions(-)
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
index eda2041a0..8d14116d5 100644
--- a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
@@ -28,13 +28,15 @@ SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host
 SyncpointManager::~SyncpointManager() = default;
 
 u32 SyncpointManager::ReserveSyncpoint(u32 id, bool client_managed) {
-    if (syncpoints.at(id).reserved) {
+    auto& syncpoint = syncpoints.at(id);
+
+    if (syncpoint.reserved) {
         ASSERT_MSG(false, "Requested syncpoint is in use");
         return 0;
     }
 
-    syncpoints.at(id).reserved = true;
-    syncpoints.at(id).interface_managed = client_managed;
+    syncpoint.reserved = true;
+    syncpoint.interface_managed = client_managed;
 
     return id;
 }
@@ -56,8 +58,9 @@ u32 SyncpointManager::AllocateSyncpoint(bool client_managed) {
 
 void SyncpointManager::FreeSyncpoint(u32 id) {
     std::lock_guard lock(reservation_lock);
-    ASSERT(syncpoints.at(id).reserved);
-    syncpoints.at(id).reserved = false;
+    auto& syncpoint = syncpoints.at(id);
+    ASSERT(syncpoint.reserved);
+    syncpoint.reserved = false;
 }
 
 bool SyncpointManager::IsSyncpointAllocated(u32 id) {
@@ -82,40 +85,51 @@ bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) const {
 }
 
 u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) {
-    if (!syncpoints.at(id).reserved) {
+    auto& syncpoint = syncpoints.at(id);
+
+    if (!syncpoint.reserved) {
         ASSERT(false);
         return 0;
     }
 
-    return syncpoints.at(id).counter_max += amount;
+    return syncpoint.counter_max += amount;
 }
 
 u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {
-    if (!syncpoints.at(id).reserved) {
+    auto& syncpoint = syncpoints.at(id);
+
+    if (!syncpoint.reserved) {
         ASSERT(false);
         return 0;
     }
 
-    return syncpoints.at(id).counter_min;
+    return syncpoint.counter_min;
 }
 
 u32 SyncpointManager::UpdateMin(u32 id) {
-    if (!syncpoints.at(id).reserved) {
+    auto& syncpoint = syncpoints.at(id);
+
+    if (!syncpoint.reserved) {
         ASSERT(false);
         return 0;
     }
 
-    syncpoints.at(id).counter_min = host1x.GetSyncpointManager().GetHostSyncpointValue(id);
-    return syncpoints.at(id).counter_min;
+    syncpoint.counter_min = host1x.GetSyncpointManager().GetHostSyncpointValue(id);
+    return syncpoint.counter_min;
 }
 
 NvFence SyncpointManager::GetSyncpointFence(u32 id) {
-    if (!syncpoints.at(id).reserved) {
+    auto& syncpoint = syncpoints.at(id);
+
+    if (!syncpoint.reserved) {
         ASSERT(false);
         return NvFence{};
     }
 
-    return {.id = static_cast<s32>(id), .value = syncpoints.at(id).counter_max};
+    return {
+        .id = static_cast<s32>(id),
+        .value = syncpoint.counter_max,
+    };
 }
 
 } // namespace Service::Nvidia::NvCore