-rw-r--r--  src/common/address_space.h                            | 10
-rw-r--r--  src/common/algorithm.h                                |  2
-rw-r--r--  src/common/bit_field.h                                | 13
-rw-r--r--  src/common/multi_level_page_table.cpp                 |  4
-rw-r--r--  src/common/multi_level_page_table.inc                 |  2
-rw-r--r--  src/core/hle/service/nvdrv/core/nvmap.cpp             | 38
-rw-r--r--  src/core/hle/service/nvdrv/core/nvmap.h               |  1
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp  |  3
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp    |  8
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp   |  4
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_vic.cpp     |  4
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.cpp          |  2
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv.h                    |  1
-rw-r--r--  src/core/hle/service/vi/vi.cpp                        |  1
-rw-r--r--  src/shader_recompiler/ir_opt/texture_pass.cpp         |  2
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h            |  3
16 files changed, 56 insertions(+), 42 deletions(-)
diff --git a/src/common/address_space.h b/src/common/address_space.h
index fd2f32b7d..8e13935af 100644
--- a/src/common/address_space.h
+++ b/src/common/address_space.h
@@ -22,7 +22,8 @@ struct EmptyStruct {};
  */
 template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,
           bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct>
-requires AddressSpaceValid<VaType, AddressSpaceBits> class FlatAddressSpaceMap {
+requires AddressSpaceValid<VaType, AddressSpaceBits>
+class FlatAddressSpaceMap {
 private:
     std::function<void(VaType, VaType)>
         unmapCallback{}; //!< Callback called when the mappings in an region have changed
@@ -40,8 +41,8 @@ protected:
 
     Block() = default;
 
-    Block(VaType virt, PaType phys, ExtraBlockInfo extraInfo)
-        : virt(virt), phys(phys), extraInfo(extraInfo) {}
+    Block(VaType virt_, PaType phys_, ExtraBlockInfo extraInfo_)
+        : virt(virt_), phys(phys_), extraInfo(extraInfo_) {}
 
     constexpr bool Valid() {
         return virt != UnmappedVa;
@@ -102,7 +103,8 @@ public:
  * initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block
  */
 template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>
-requires AddressSpaceValid<VaType, AddressSpaceBits> class FlatAllocator
+requires AddressSpaceValid<VaType, AddressSpaceBits>
+class FlatAllocator
     : public FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits> {
 private:
     using Base = FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits>;
diff --git a/src/common/algorithm.h b/src/common/algorithm.h
index 055dca142..c27c9241d 100644
--- a/src/common/algorithm.h
+++ b/src/common/algorithm.h
@@ -27,7 +27,7 @@ template <class ForwardIt, class T, class Compare = std::less<>>
 template <typename T, typename Func, typename... Args>
 T FoldRight(T initial_value, Func&& func, Args&&... args) {
     T value{initial_value};
-    const auto high_func = [&value, &func]<typename T>(T x) { value = func(value, x); };
+    const auto high_func = [&value, &func]<typename U>(U x) { value = func(value, x); };
     (std::invoke(high_func, std::forward<Args>(args)), ...);
     return value;
 }
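Note on the change above: giving the lambda's explicit template parameter the name T again would shadow the enclosing function template's T, which standard C++ disallows and GCC rejects outright, so it is renamed to U. A self-contained sketch of the resulting FoldRight, restated here only for illustration:

#include <functional>
#include <utility>

template <typename T, typename Func, typename... Args>
T FoldRight(T initial_value, Func&& func, Args&&... args) {
    T value{initial_value};
    // The inner parameter is named U so it does not shadow the outer T.
    const auto high_func = [&value, &func]<typename U>(U x) { value = func(value, x); };
    (std::invoke(high_func, std::forward<Args>(args)), ...);
    return value;
}

// Usage: FoldRight(0, [](int a, int b) { return a + b; }, 1, 2, 3) yields 6.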
diff --git a/src/common/bit_field.h b/src/common/bit_field.h
index 368b7b98c..7e1df62b1 100644
--- a/src/common/bit_field.h
+++ b/src/common/bit_field.h
@@ -127,14 +127,11 @@ public:
     }
     }
 
-    BitField(T val) {
-        Assign(val);
-    }
-
-    BitField& operator=(T val) {
-        Assign(val);
-        return *this;
-    }
+    // This constructor and assignment operator might be considered ambiguous:
+    // Would they initialize the storage or just the bitfield?
+    // Hence, delete them. Use the Assign method to set bitfield values!
+    BitField(T val) = delete;
+    BitField& operator=(T val) = delete;
 
     constexpr BitField() noexcept = default;
 
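With the implicit constructor and operator= deleted, bitfield members can only be written through Assign, while whole-union writes still go through the raw storage member. A minimal usage sketch against Common::BitField; the union layout below is hypothetical and only illustrates the API:

#include "common/bit_field.h"
#include "common/common_types.h"

union ExampleFlags {
    u32 raw;
    BitField<0, 1, u32> map_uncached;             // hypothetical bit positions
    BitField<2, 1, u32> keep_uncached_after_free;
};

void SetFlags() {
    ExampleFlags flags{};
    flags.raw = 0;                     // clearing the whole storage is unchanged
    // flags.map_uncached = 1;         // no longer compiles: operator= is deleted
    flags.map_uncached.Assign(1);      // explicit field write
    flags.keep_uncached_after_free.Assign(0);
}

The nvmap changes further down in this diff (flags.keep_uncached_after_free.Assign(0) and params.flags.map_uncached.Assign(...)) are exactly this migration.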
diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp
index aed04d0b5..3a7a75aa7 100644
--- a/src/common/multi_level_page_table.cpp
+++ b/src/common/multi_level_page_table.cpp
@@ -1,8 +1,6 @@
 #include "common/multi_level_page_table.inc"
 
 namespace Common {
-template class Common::MultiLevelPageTable<GPUVAddr>;
-template class Common::MultiLevelPageTable<VAddr>;
-template class Common::MultiLevelPageTable<PAddr>;
+template class Common::MultiLevelPageTable<u64>;
 template class Common::MultiLevelPageTable<u32>;
 } // namespace Common
diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc
index 9a68cad93..4def6dba8 100644
--- a/src/common/multi_level_page_table.inc
+++ b/src/common/multi_level_page_table.inc
@@ -30,7 +30,7 @@ MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bit
 #ifdef _WIN32
     void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)};
 #else
-    void* base{mmap(nullptr, alloc_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
+    void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
 
     if (base == MAP_FAILED) {
         base = nullptr;
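For context on the mmap change above: PROT_NONE only reserves address space (the counterpart of the MEM_RESERVE-only VirtualAlloc on the Windows path) and every access faults until a later mprotect commits a range, whereas PROT_READ | PROT_WRITE yields an immediately usable mapping whose anonymous pages are still demand-zeroed on first touch. A rough POSIX sketch of the two strategies, not taken from the patch:

#include <sys/mman.h>
#include <cstddef>

// Reserve-then-commit: accesses fault until mprotect() opens up a sub-range.
void* ReserveThenCommit(std::size_t total, std::size_t committed) {
    void* base = mmap(nullptr, total, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
        return nullptr;
    }
    if (mprotect(base, committed, PROT_READ | PROT_WRITE) != 0) {
        munmap(base, total);
        return nullptr;
    }
    return base;
}

// Map read/write up front, as the patched line now does; physical pages are
// still allocated lazily by the kernel when they are first written.
void* MapUpFront(std::size_t total) {
    void* base = mmap(nullptr, total, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return base == MAP_FAILED ? nullptr : base;
}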
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
index 86d825af9..b02dbb9c9 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -13,7 +13,8 @@
 using Core::Memory::YUZU_PAGESIZE;
 
 namespace Service::Nvidia::NvCore {
-NvMap::Handle::Handle(u64 size, Id id) : size(size), aligned_size(size), orig_size(size), id(id) {
+NvMap::Handle::Handle(u64 size_, Id id_)
+    : size(size_), aligned_size(size), orig_size(size), id(id_) {
     flags.raw = 0;
 }
 
@@ -21,19 +22,21 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress)
     std::scoped_lock lock(mutex);
 
     // Handles cannot be allocated twice
-    if (allocated)
+    if (allocated) {
         return NvResult::AccessDenied;
+    }
 
     flags = pFlags;
     kind = pKind;
     align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
 
     // This flag is only applicable for handles with an address passed
-    if (pAddress)
-        flags.keep_uncached_after_free = 0;
-    else
+    if (pAddress) {
+        flags.keep_uncached_after_free.Assign(0);
+    } else {
         LOG_CRITICAL(Service_NVDRV,
                      "Mapping nvmap handles without a CPU side address is unimplemented!");
+    }
 
     size = Common::AlignUp(size, YUZU_PAGESIZE);
     aligned_size = Common::AlignUp(size, align);
@@ -48,17 +51,19 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress)
 
 NvResult NvMap::Handle::Duplicate(bool internal_session) {
     // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS)
-    if (!allocated) [[unlikely]]
+    if (!allocated) [[unlikely]] {
         return NvResult::BadValue;
+    }
 
     std::scoped_lock lock(mutex);
 
     // If we internally use FromId the duplication tracking of handles won't work accurately due to
     // us not implementing per-process handle refs.
-    if (internal_session)
+    if (internal_session) {
         internal_dupes++;
-    else
+    } else {
         dupes++;
+    }
 
     return NvResult::Success;
 }
@@ -92,8 +97,9 @@ bool NvMap::TryRemoveHandle(const Handle& handle_description) {
         std::scoped_lock lock(handles_lock);
 
         auto it{handles.find(handle_description.id)};
-        if (it != handles.end())
+        if (it != handles.end()) {
             handles.erase(it);
+        }
 
         return true;
     } else {
@@ -102,8 +108,9 @@ bool NvMap::TryRemoveHandle(const Handle& handle_description) {
 }
 
 NvResult NvMap::CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out) {
-    if (!size) [[unlikely]]
+    if (!size) [[unlikely]] {
         return NvResult::BadValue;
+    }
 
     u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)};
     auto handle_description{std::make_shared<Handle>(size, id)};
@@ -133,8 +140,9 @@ VAddr NvMap::GetHandleAddress(Handle::Id handle) {
 
 u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
     auto handle_description{GetHandle(handle)};
-    if (!handle_description) [[unlikely]]
+    if (!handle_description) [[unlikely]] {
         return 0;
+    }
 
     std::scoped_lock lock(handle_description->mutex);
     if (!handle_description->pins) {
@@ -183,8 +191,9 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
 
 void NvMap::UnpinHandle(Handle::Id handle) {
     auto handle_description{GetHandle(handle)};
-    if (!handle_description)
+    if (!handle_description) {
         return;
+    }
 
     std::scoped_lock lock(handle_description->mutex);
     if (--handle_description->pins < 0) {
@@ -226,12 +235,13 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
 
         // Try to remove the shared ptr to the handle from the map, if nothing else is using the
         // handle then it will now be freed when `handle_description` goes out of scope
-        if (TryRemoveHandle(*handle_description))
+        if (TryRemoveHandle(*handle_description)) {
             LOG_DEBUG(Service_NVDRV, "Removed nvmap handle: {}", handle);
-        else
+        } else {
             LOG_DEBUG(Service_NVDRV,
                       "Tried to free nvmap handle: {} but didn't as it still has duplicates",
                       handle);
+        }
 
         freeInfo = {
             .address = handle_description->address,
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h
index 4f37dcf43..1082bb58d 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.h
+++ b/src/core/hle/service/nvdrv/core/nvmap.h
@@ -5,6 +5,7 @@
 
 #pragma once
 
+#include <atomic>
 #include <list>
 #include <memory>
 #include <mutex>
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index d95a88393..d1beefba6 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -188,6 +188,7 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
 
     allocation_map[params.offset] = {
         .size = size,
+        .mappings{},
         .page_size = params.page_size,
         .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None,
         .big_pages = params.page_size != VM::YUZU_PAGESIZE,
@@ -474,11 +475,13 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
         VaRegion{
             .offset = vm.small_page_allocator->vaStart << VM::PAGE_SIZE_BITS,
             .page_size = VM::YUZU_PAGESIZE,
+            ._pad0_{},
             .pages = vm.small_page_allocator->vaLimit - vm.small_page_allocator->vaStart,
         },
         VaRegion{
             .offset = vm.big_page_allocator->vaStart << vm.big_page_size_bits,
             .page_size = vm.big_page_size,
+            ._pad0_{},
             .pages = vm.big_page_allocator->vaLimit - vm.big_page_allocator->vaStart,
         },
     };
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index a84e4d425..7fffb8e48 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -204,12 +204,12 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
 
     event.wait_handle =
         host1x_syncpoint_manager.RegisterHostAction(fence_id, target_value, [this, slot]() {
-            auto& event = events[slot];
-            if (event.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
+            auto& event_ = events[slot];
+            if (event_.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
                 EventState::Waiting) {
-                event.kevent->GetWritableEvent().Signal();
+                event_.kevent->GetWritableEvent().Signal();
             }
-            event.status.store(EventState::Signalled, std::memory_order_release);
+            event_.status.store(EventState::Signalled, std::memory_order_release);
         });
     return NvResult::Timeout;
 }
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
index 5e3820085..fed537039 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -12,8 +12,8 @@ namespace Service::Nvidia::Devices {
 
 u32 nvhost_nvdec::next_id{};
 
-nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core)
-    : nvhost_nvdec_common{system_, core, NvCore::ChannelType::NvDec} {}
+nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_)
+    : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {}
 nvhost_nvdec::~nvhost_nvdec() = default;
 
 NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index 490e399f4..2e4ff988c 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -11,8 +11,8 @@ namespace Service::Nvidia::Devices {
 
 u32 nvhost_vic::next_id{};
 
-nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core)
-    : nvhost_nvdec_common{system_, core, NvCore::ChannelType::VIC} {}
+nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_)
+    : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {}
 
 nvhost_vic::~nvhost_vic() = default;
 
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 992c117f1..f84fc8c37 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -269,7 +269,7 @@ NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
         params.address = freeInfo->address;
         params.size = static_cast<u32>(freeInfo->size);
         params.flags.raw = 0;
-        params.flags.map_uncached = freeInfo->was_uncached;
+        params.flags.map_uncached.Assign(freeInfo->was_uncached);
     } else {
         // This is possible when there's internel dups or other duplicates.
     }
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h
index 31c45236e..b26254753 100644
--- a/src/core/hle/service/nvdrv/nvdrv.h
+++ b/src/core/hle/service/nvdrv/nvdrv.h
@@ -6,6 +6,7 @@
 #pragma once
 
 #include <functional>
+#include <list>
 #include <memory>
 #include <string>
 #include <unordered_map>
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index f083811ec..9c917cacf 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -58,6 +58,7 @@ static_assert(sizeof(DisplayInfo) == 0x60, "DisplayInfo has wrong size");
 class NativeWindow final {
 public:
     constexpr explicit NativeWindow(u32 id_) : id{id_} {}
+    constexpr explicit NativeWindow(const NativeWindow& other) = default;
 
 private:
     const u32 magic = 2;
diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp
index 0726d4d21..e8be58357 100644
--- a/src/shader_recompiler/ir_opt/texture_pass.cpp
+++ b/src/shader_recompiler/ir_opt/texture_pass.cpp
@@ -269,7 +269,7 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environme
         }
         std::optional lhs{Track(op1, env)};
         if (lhs) {
-            lhs->shift_left = std::countr_zero(op2.U32());
+            lhs->shift_left = static_cast<u32>(std::countr_zero(op2.U32()));
         }
         return lhs;
         break;
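The cast above is needed because std::countr_zero returns int while shift_left is a u32, and the implicit int-to-unsigned conversion trips -Wconversion/-Wsign-conversion in builds that treat warnings as errors; the explicit cast states the intent. A stand-alone illustration with a simplified struct (not the real ConstBufferAddr):

#include <bit>
#include <cstdint>

struct ShiftInfo {
    std::uint32_t shift_left; // stand-in for the shift_left field in the pass
};

int main() {
    ShiftInfo info{};
    const std::uint32_t stride = 16;              // example value, a power of two
    // info.shift_left = std::countr_zero(stride);   // warns: int converted to u32
    info.shift_left = static_cast<std::uint32_t>(std::countr_zero(stride));
    return static_cast<int>(info.shift_left);     // 4
}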
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 2e616cee4..8e26b3f95 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -1323,7 +1323,8 @@ void BufferCache<P>::UpdateVertexBuffer(u32 index) {
         return;
     }
     if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
-        address_size = gpu_memory->MaxContinousRange(gpu_addr_begin, address_size);
+        address_size =
+            static_cast<u32>(gpu_memory->MaxContinousRange(gpu_addr_begin, address_size));
     }
     const u32 size = address_size; // TODO: Analyze stride and number of vertices
     vertex_buffers[index] = Binding{