Diffstat (limited to 'src')
| -rw-r--r-- | src/common/x64/xbyak_abi.h | 95 |
| -rw-r--r-- | src/core/arm/dynarmic/arm_dynarmic_32.cpp | 21 |
| -rw-r--r-- | src/core/arm/dynarmic/arm_dynarmic_32.h | 5 |
| -rw-r--r-- | src/core/arm/dynarmic/arm_dynarmic_cp15.cpp | 81 |
| -rw-r--r-- | src/core/arm/dynarmic/arm_dynarmic_cp15.h | 126 |
| -rw-r--r-- | src/video_core/macro/macro_jit_x64.cpp | 12 |
| -rw-r--r-- | src/video_core/renderer_opengl/gl_device.cpp | 20 |
| -rw-r--r-- | src/video_core/renderer_opengl/maxwell_to_gl.h | 82 |
| -rw-r--r-- | src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 32 |
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_sampler_cache.cpp | 6 |
10 files changed, 191 insertions(+), 289 deletions(-)
diff --git a/src/common/x64/xbyak_abi.h b/src/common/x64/xbyak_abi.h
index 794da8a52..a5f5d4fc1 100644
--- a/src/common/x64/xbyak_abi.h
+++ b/src/common/x64/xbyak_abi.h
| @@ -11,7 +11,7 @@ | |||
| 11 | 11 | ||
| 12 | namespace Common::X64 { | 12 | namespace Common::X64 { |
| 13 | 13 | ||
| 14 | inline int RegToIndex(const Xbyak::Reg& reg) { | 14 | inline std::size_t RegToIndex(const Xbyak::Reg& reg) { |
| 15 | using Kind = Xbyak::Reg::Kind; | 15 | using Kind = Xbyak::Reg::Kind; |
| 16 | ASSERT_MSG((reg.getKind() & (Kind::REG | Kind::XMM)) != 0, | 16 | ASSERT_MSG((reg.getKind() & (Kind::REG | Kind::XMM)) != 0, |
| 17 | "RegSet only support GPRs and XMM registers."); | 17 | "RegSet only support GPRs and XMM registers."); |
| @@ -19,17 +19,17 @@ inline int RegToIndex(const Xbyak::Reg& reg) { | |||
| 19 | return reg.getIdx() + (reg.getKind() == Kind::REG ? 0 : 16); | 19 | return reg.getIdx() + (reg.getKind() == Kind::REG ? 0 : 16); |
| 20 | } | 20 | } |
| 21 | 21 | ||
| 22 | inline Xbyak::Reg64 IndexToReg64(int reg_index) { | 22 | inline Xbyak::Reg64 IndexToReg64(std::size_t reg_index) { |
| 23 | ASSERT(reg_index < 16); | 23 | ASSERT(reg_index < 16); |
| 24 | return Xbyak::Reg64(reg_index); | 24 | return Xbyak::Reg64(static_cast<int>(reg_index)); |
| 25 | } | 25 | } |
| 26 | 26 | ||
| 27 | inline Xbyak::Xmm IndexToXmm(int reg_index) { | 27 | inline Xbyak::Xmm IndexToXmm(std::size_t reg_index) { |
| 28 | ASSERT(reg_index >= 16 && reg_index < 32); | 28 | ASSERT(reg_index >= 16 && reg_index < 32); |
| 29 | return Xbyak::Xmm(reg_index - 16); | 29 | return Xbyak::Xmm(static_cast<int>(reg_index - 16)); |
| 30 | } | 30 | } |
| 31 | 31 | ||
| 32 | inline Xbyak::Reg IndexToReg(int reg_index) { | 32 | inline Xbyak::Reg IndexToReg(std::size_t reg_index) { |
| 33 | if (reg_index < 16) { | 33 | if (reg_index < 16) { |
| 34 | return IndexToReg64(reg_index); | 34 | return IndexToReg64(reg_index); |
| 35 | } else { | 35 | } else { |
| @@ -151,9 +151,13 @@ constexpr size_t ABI_SHADOW_SPACE = 0; | |||
| 151 | 151 | ||
| 152 | #endif | 152 | #endif |
| 153 | 153 | ||
| 154 | inline void ABI_CalculateFrameSize(std::bitset<32> regs, size_t rsp_alignment, | 154 | struct ABIFrameInfo { |
| 155 | size_t needed_frame_size, s32* out_subtraction, | 155 | s32 subtraction; |
| 156 | s32* out_xmm_offset) { | 156 | s32 xmm_offset; |
| 157 | }; | ||
| 158 | |||
| 159 | inline ABIFrameInfo ABI_CalculateFrameSize(std::bitset<32> regs, size_t rsp_alignment, | ||
| 160 | size_t needed_frame_size) { | ||
| 157 | const auto count = (regs & ABI_ALL_GPRS).count(); | 161 | const auto count = (regs & ABI_ALL_GPRS).count(); |
| 158 | rsp_alignment -= count * 8; | 162 | rsp_alignment -= count * 8; |
| 159 | size_t subtraction = 0; | 163 | size_t subtraction = 0; |
| @@ -170,33 +174,28 @@ inline void ABI_CalculateFrameSize(std::bitset<32> regs, size_t rsp_alignment, | |||
| 170 | rsp_alignment -= subtraction; | 174 | rsp_alignment -= subtraction; |
| 171 | subtraction += rsp_alignment & 0xF; | 175 | subtraction += rsp_alignment & 0xF; |
| 172 | 176 | ||
| 173 | *out_subtraction = (s32)subtraction; | 177 | return ABIFrameInfo{static_cast<s32>(subtraction), |
| 174 | *out_xmm_offset = (s32)(subtraction - xmm_base_subtraction); | 178 | static_cast<s32>(subtraction - xmm_base_subtraction)}; |
| 175 | } | 179 | } |
| 176 | 180 | ||
| 177 | inline size_t ABI_PushRegistersAndAdjustStack(Xbyak::CodeGenerator& code, std::bitset<32> regs, | 181 | inline size_t ABI_PushRegistersAndAdjustStack(Xbyak::CodeGenerator& code, std::bitset<32> regs, |
| 178 | size_t rsp_alignment, size_t needed_frame_size = 0) { | 182 | size_t rsp_alignment, size_t needed_frame_size = 0) { |
| 179 | s32 subtraction, xmm_offset; | 183 | auto frame_info = ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size); |
| 180 | ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size, &subtraction, &xmm_offset); | 184 | |
| 181 | for (std::size_t i = 0; i < regs.size(); ++i) { | 185 | for (std::size_t i = 0; i < regs.size(); ++i) { |
| 182 | if (regs[i] && ABI_ALL_GPRS[i]) { | 186 | if (regs[i] && ABI_ALL_GPRS[i]) { |
| 183 | code.push(IndexToReg64(static_cast<int>(i))); | 187 | code.push(IndexToReg64(i)); |
| 184 | } | 188 | } |
| 185 | } | 189 | } |
| 186 | if (subtraction != 0) { | ||
| 187 | code.sub(code.rsp, subtraction); | ||
| 188 | } | ||
| 189 | 190 | ||
| 190 | for (int i = 0; i < regs.count(); i++) { | 191 | if (frame_info.subtraction != 0) { |
| 191 | if (regs.test(i) & ABI_ALL_GPRS.test(i)) { | 192 | code.sub(code.rsp, frame_info.subtraction); |
| 192 | code.push(IndexToReg64(i)); | ||
| 193 | } | ||
| 194 | } | 193 | } |
| 195 | 194 | ||
| 196 | for (std::size_t i = 0; i < regs.size(); ++i) { | 195 | for (std::size_t i = 0; i < regs.size(); ++i) { |
| 197 | if (regs[i] && ABI_ALL_XMMS[i]) { | 196 | if (regs[i] && ABI_ALL_XMMS[i]) { |
| 198 | code.movaps(code.xword[code.rsp + xmm_offset], IndexToXmm(static_cast<int>(i))); | 197 | code.movaps(code.xword[code.rsp + frame_info.xmm_offset], IndexToXmm(i)); |
| 199 | xmm_offset += 0x10; | 198 | frame_info.xmm_offset += 0x10; |
| 200 | } | 199 | } |
| 201 | } | 200 | } |
| 202 | 201 | ||
| @@ -205,59 +204,23 @@ inline size_t ABI_PushRegistersAndAdjustStack(Xbyak::CodeGenerator& code, std::b | |||
| 205 | 204 | ||
| 206 | inline void ABI_PopRegistersAndAdjustStack(Xbyak::CodeGenerator& code, std::bitset<32> regs, | 205 | inline void ABI_PopRegistersAndAdjustStack(Xbyak::CodeGenerator& code, std::bitset<32> regs, |
| 207 | size_t rsp_alignment, size_t needed_frame_size = 0) { | 206 | size_t rsp_alignment, size_t needed_frame_size = 0) { |
| 208 | s32 subtraction, xmm_offset; | 207 | auto frame_info = ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size); |
| 209 | ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size, &subtraction, &xmm_offset); | ||
| 210 | 208 | ||
| 211 | for (std::size_t i = 0; i < regs.size(); ++i) { | 209 | for (std::size_t i = 0; i < regs.size(); ++i) { |
| 212 | if (regs[i] && ABI_ALL_XMMS[i]) { | 210 | if (regs[i] && ABI_ALL_XMMS[i]) { |
| 213 | code.movaps(IndexToXmm(static_cast<int>(i)), code.xword[code.rsp + xmm_offset]); | 211 | code.movaps(IndexToXmm(i), code.xword[code.rsp + frame_info.xmm_offset]); |
| 214 | xmm_offset += 0x10; | 212 | frame_info.xmm_offset += 0x10; |
| 215 | } | 213 | } |
| 216 | } | 214 | } |
| 217 | 215 | ||
| 218 | if (subtraction != 0) { | 216 | if (frame_info.subtraction != 0) { |
| 219 | code.add(code.rsp, subtraction); | 217 | code.add(code.rsp, frame_info.subtraction); |
| 220 | } | 218 | } |
| 221 | 219 | ||
| 222 | // GPRs need to be popped in reverse order | 220 | // GPRs need to be popped in reverse order |
| 223 | for (int i = 15; i >= 0; i--) { | 221 | for (std::size_t j = 0; j < regs.size(); ++j) { |
| 224 | if (regs[i]) { | 222 | const std::size_t i = regs.size() - j - 1; |
| 225 | code.pop(IndexToReg64(i)); | ||
| 226 | } | ||
| 227 | } | ||
| 228 | } | ||
| 229 | |||
| 230 | inline size_t ABI_PushRegistersAndAdjustStackGPS(Xbyak::CodeGenerator& code, std::bitset<32> regs, | ||
| 231 | size_t rsp_alignment, | ||
| 232 | size_t needed_frame_size = 0) { | ||
| 233 | s32 subtraction, xmm_offset; | ||
| 234 | ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size, &subtraction, &xmm_offset); | ||
| 235 | |||
| 236 | for (std::size_t i = 0; i < regs.size(); ++i) { | ||
| 237 | if (regs[i] && ABI_ALL_GPRS[i]) { | 223 | if (regs[i] && ABI_ALL_GPRS[i]) { |
| 238 | code.push(IndexToReg64(static_cast<int>(i))); | ||
| 239 | } | ||
| 240 | } | ||
| 241 | |||
| 242 | if (subtraction != 0) { | ||
| 243 | code.sub(code.rsp, subtraction); | ||
| 244 | } | ||
| 245 | |||
| 246 | return ABI_SHADOW_SPACE; | ||
| 247 | } | ||
| 248 | |||
| 249 | inline void ABI_PopRegistersAndAdjustStackGPS(Xbyak::CodeGenerator& code, std::bitset<32> regs, | ||
| 250 | size_t rsp_alignment, size_t needed_frame_size = 0) { | ||
| 251 | s32 subtraction, xmm_offset; | ||
| 252 | ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size, &subtraction, &xmm_offset); | ||
| 253 | |||
| 254 | if (subtraction != 0) { | ||
| 255 | code.add(code.rsp, subtraction); | ||
| 256 | } | ||
| 257 | |||
| 258 | // GPRs need to be popped in reverse order | ||
| 259 | for (int i = 15; i >= 0; i--) { | ||
| 260 | if (regs[i]) { | ||
| 261 | code.pop(IndexToReg64(i)); | 224 | code.pop(IndexToReg64(i)); |
| 262 | } | 225 | } |
| 263 | } | 226 | } |
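The xbyak_abi.h change replaces the out-parameter pair of ABI_CalculateFrameSize with a returned ABIFrameInfo and folds the former *GPS helpers into the general ABI_PushRegistersAndAdjustStack / ABI_PopRegistersAndAdjustStack pair. A minimal caller sketch of the reworked helpers (not from this commit; the function name, alignment value, and emitted body are illustrative):

```cpp
#include <bitset>
#include <cstddef>
#include "common/x64/xbyak_abi.h"

// Hypothetical emitter; mirrors how macro_jit_x64.cpp uses the merged helpers below.
void EmitThunk(Xbyak::CodeGenerator& code) {
    const std::bitset<32> to_save = Common::X64::ABI_ALL_CALLEE_SAVED;
    // One call now spills both GPRs and XMMs and realigns rsp; it returns the shadow space.
    [[maybe_unused]] const std::size_t shadow =
        Common::X64::ABI_PushRegistersAndAdjustStack(code, to_save, 8);
    // ... emit the body here; `shadow` bytes above rsp are reserved for callees ...
    Common::X64::ABI_PopRegistersAndAdjustStack(code, to_save, 8);
    code.ret();
}
```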
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 9bc86e3b9..19d798dc7 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
| @@ -50,7 +50,8 @@ public: | |||
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | void InterpreterFallback(u32 pc, std::size_t num_instructions) override { | 52 | void InterpreterFallback(u32 pc, std::size_t num_instructions) override { |
| 53 | UNIMPLEMENTED(); | 53 | UNIMPLEMENTED_MSG("This should never happen, pc = {:08X}, code = {:08X}", pc, |
| 54 | MemoryReadCode(pc)); | ||
| 54 | } | 55 | } |
| 55 | 56 | ||
| 56 | void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override { | 57 | void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override { |
| @@ -89,8 +90,6 @@ public: | |||
| 89 | 90 | ||
| 90 | ARM_Dynarmic_32& parent; | 91 | ARM_Dynarmic_32& parent; |
| 91 | std::size_t num_interpreted_instructions{}; | 92 | std::size_t num_interpreted_instructions{}; |
| 92 | u64 tpidrro_el0{}; | ||
| 93 | u64 tpidr_el0{}; | ||
| 94 | }; | 93 | }; |
| 95 | 94 | ||
| 96 | std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable& page_table, | 95 | std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable& page_table, |
| @@ -99,7 +98,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable& | |||
| 99 | config.callbacks = cb.get(); | 98 | config.callbacks = cb.get(); |
| 100 | // TODO(bunnei): Implement page table for 32-bit | 99 | // TODO(bunnei): Implement page table for 32-bit |
| 101 | // config.page_table = &page_table.pointers; | 100 | // config.page_table = &page_table.pointers; |
| 102 | config.coprocessors[15] = std::make_shared<DynarmicCP15>((u32*)&CP15_regs[0]); | 101 | config.coprocessors[15] = cp15; |
| 103 | config.define_unpredictable_behaviour = true; | 102 | config.define_unpredictable_behaviour = true; |
| 104 | return std::make_unique<Dynarmic::A32::Jit>(config); | 103 | return std::make_unique<Dynarmic::A32::Jit>(config); |
| 105 | } | 104 | } |
| @@ -112,13 +111,13 @@ void ARM_Dynarmic_32::Run() { | |||
| 112 | } | 111 | } |
| 113 | 112 | ||
| 114 | void ARM_Dynarmic_32::Step() { | 113 | void ARM_Dynarmic_32::Step() { |
| 115 | cb->InterpreterFallback(jit->Regs()[15], 1); | 114 | jit->Step(); |
| 116 | } | 115 | } |
| 117 | 116 | ||
| 118 | ARM_Dynarmic_32::ARM_Dynarmic_32(System& system, ExclusiveMonitor& exclusive_monitor, | 117 | ARM_Dynarmic_32::ARM_Dynarmic_32(System& system, ExclusiveMonitor& exclusive_monitor, |
| 119 | std::size_t core_index) | 118 | std::size_t core_index) |
| 120 | : ARM_Interface{system}, | 119 | : ARM_Interface{system}, cb(std::make_unique<DynarmicCallbacks32>(*this)), |
| 121 | cb(std::make_unique<DynarmicCallbacks32>(*this)), core_index{core_index}, | 120 | cp15(std::make_shared<DynarmicCP15>(*this)), core_index{core_index}, |
| 122 | exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {} | 121 | exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {} |
| 123 | 122 | ||
| 124 | ARM_Dynarmic_32::~ARM_Dynarmic_32() = default; | 123 | ARM_Dynarmic_32::~ARM_Dynarmic_32() = default; |
| @@ -154,19 +153,19 @@ void ARM_Dynarmic_32::SetPSTATE(u32 cpsr) { | |||
| 154 | } | 153 | } |
| 155 | 154 | ||
| 156 | u64 ARM_Dynarmic_32::GetTlsAddress() const { | 155 | u64 ARM_Dynarmic_32::GetTlsAddress() const { |
| 157 | return CP15_regs[static_cast<std::size_t>(CP15Register::CP15_THREAD_URO)]; | 156 | return cp15->uro; |
| 158 | } | 157 | } |
| 159 | 158 | ||
| 160 | void ARM_Dynarmic_32::SetTlsAddress(VAddr address) { | 159 | void ARM_Dynarmic_32::SetTlsAddress(VAddr address) { |
| 161 | CP15_regs[static_cast<std::size_t>(CP15Register::CP15_THREAD_URO)] = static_cast<u32>(address); | 160 | cp15->uro = static_cast<u32>(address); |
| 162 | } | 161 | } |
| 163 | 162 | ||
| 164 | u64 ARM_Dynarmic_32::GetTPIDR_EL0() const { | 163 | u64 ARM_Dynarmic_32::GetTPIDR_EL0() const { |
| 165 | return cb->tpidr_el0; | 164 | return cp15->uprw; |
| 166 | } | 165 | } |
| 167 | 166 | ||
| 168 | void ARM_Dynarmic_32::SetTPIDR_EL0(u64 value) { | 167 | void ARM_Dynarmic_32::SetTPIDR_EL0(u64 value) { |
| 169 | cb->tpidr_el0 = value; | 168 | cp15->uprw = static_cast<u32>(value); |
| 170 | } | 169 | } |
| 171 | 170 | ||
| 172 | void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) { | 171 | void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) { |
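For context on the TLS plumbing above: the values stored in cp15->uro and cp15->uprw back the standard ARM user thread ID registers (TPIDRURO and TPIDRURW, i.e. c13/c0/3 and c13/c0/2). A hedged sketch of the host-side flow (the helper name and variables are illustrative, not part of the commit):

```cpp
#include "core/arm/dynarmic/arm_dynarmic_32.h"

// Guest 32-bit code reads these back with:
//   MRC p15, 0, <Rt>, c13, c0, 3   -> TPIDRURO, backed by DynarmicCP15::uro
//   MRC p15, 0, <Rt>, c13, c0, 2   -> TPIDRURW, backed by DynarmicCP15::uprw
void SetupGuestThreadPointers(Core::ARM_Dynarmic_32& arm, VAddr tls, u64 tpidr) {
    arm.SetTlsAddress(tls);   // stored in cp15->uro; read-only to the guest
    arm.SetTPIDR_EL0(tpidr);  // stored in cp15->uprw; guest read/write
}
```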
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h
index 8ba9cea8f..e5b92d7bb 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.h
| @@ -22,6 +22,7 @@ class Memory; | |||
| 22 | namespace Core { | 22 | namespace Core { |
| 23 | 23 | ||
| 24 | class DynarmicCallbacks32; | 24 | class DynarmicCallbacks32; |
| 25 | class DynarmicCP15; | ||
| 25 | class DynarmicExclusiveMonitor; | 26 | class DynarmicExclusiveMonitor; |
| 26 | class System; | 27 | class System; |
| 27 | 28 | ||
| @@ -66,12 +67,14 @@ private: | |||
| 66 | std::unordered_map<JitCacheKey, std::shared_ptr<Dynarmic::A32::Jit>, Common::PairHash>; | 67 | std::unordered_map<JitCacheKey, std::shared_ptr<Dynarmic::A32::Jit>, Common::PairHash>; |
| 67 | 68 | ||
| 68 | friend class DynarmicCallbacks32; | 69 | friend class DynarmicCallbacks32; |
| 70 | friend class DynarmicCP15; | ||
| 71 | |||
| 69 | std::unique_ptr<DynarmicCallbacks32> cb; | 72 | std::unique_ptr<DynarmicCallbacks32> cb; |
| 70 | JitCacheType jit_cache; | 73 | JitCacheType jit_cache; |
| 71 | std::shared_ptr<Dynarmic::A32::Jit> jit; | 74 | std::shared_ptr<Dynarmic::A32::Jit> jit; |
| 75 | std::shared_ptr<DynarmicCP15> cp15; | ||
| 72 | std::size_t core_index; | 76 | std::size_t core_index; |
| 73 | DynarmicExclusiveMonitor& exclusive_monitor; | 77 | DynarmicExclusiveMonitor& exclusive_monitor; |
| 74 | std::array<u32, 84> CP15_regs{}; | ||
| 75 | }; | 78 | }; |
| 76 | 79 | ||
| 77 | } // namespace Core | 80 | } // namespace Core |
diff --git a/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp b/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
index 3fdcdebde..d43e4dd70 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
| @@ -2,79 +2,132 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <fmt/format.h> | ||
| 6 | #include "common/logging/log.h" | ||
| 7 | #include "core/arm/dynarmic/arm_dynarmic_32.h" | ||
| 5 | #include "core/arm/dynarmic/arm_dynarmic_cp15.h" | 8 | #include "core/arm/dynarmic/arm_dynarmic_cp15.h" |
| 9 | #include "core/core.h" | ||
| 10 | #include "core/core_timing.h" | ||
| 11 | #include "core/core_timing_util.h" | ||
| 6 | 12 | ||
| 7 | using Callback = Dynarmic::A32::Coprocessor::Callback; | 13 | using Callback = Dynarmic::A32::Coprocessor::Callback; |
| 8 | using CallbackOrAccessOneWord = Dynarmic::A32::Coprocessor::CallbackOrAccessOneWord; | 14 | using CallbackOrAccessOneWord = Dynarmic::A32::Coprocessor::CallbackOrAccessOneWord; |
| 9 | using CallbackOrAccessTwoWords = Dynarmic::A32::Coprocessor::CallbackOrAccessTwoWords; | 15 | using CallbackOrAccessTwoWords = Dynarmic::A32::Coprocessor::CallbackOrAccessTwoWords; |
| 10 | 16 | ||
| 17 | template <> | ||
| 18 | struct fmt::formatter<Dynarmic::A32::CoprocReg> { | ||
| 19 | constexpr auto parse(format_parse_context& ctx) { | ||
| 20 | return ctx.begin(); | ||
| 21 | } | ||
| 22 | template <typename FormatContext> | ||
| 23 | auto format(const Dynarmic::A32::CoprocReg& reg, FormatContext& ctx) { | ||
| 24 | return format_to(ctx.out(), "cp{}", static_cast<size_t>(reg)); | ||
| 25 | } | ||
| 26 | }; | ||
| 27 | |||
| 28 | namespace Core { | ||
| 29 | |||
| 30 | static u32 dummy_value; | ||
| 31 | |||
| 11 | std::optional<Callback> DynarmicCP15::CompileInternalOperation(bool two, unsigned opc1, | 32 | std::optional<Callback> DynarmicCP15::CompileInternalOperation(bool two, unsigned opc1, |
| 12 | CoprocReg CRd, CoprocReg CRn, | 33 | CoprocReg CRd, CoprocReg CRn, |
| 13 | CoprocReg CRm, unsigned opc2) { | 34 | CoprocReg CRm, unsigned opc2) { |
| 35 | LOG_CRITICAL(Core_ARM, "CP15: cdp{} p15, {}, {}, {}, {}, {}", two ? "2" : "", opc1, CRd, CRn, | ||
| 36 | CRm, opc2); | ||
| 14 | return {}; | 37 | return {}; |
| 15 | } | 38 | } |
| 16 | 39 | ||
| 17 | CallbackOrAccessOneWord DynarmicCP15::CompileSendOneWord(bool two, unsigned opc1, CoprocReg CRn, | 40 | CallbackOrAccessOneWord DynarmicCP15::CompileSendOneWord(bool two, unsigned opc1, CoprocReg CRn, |
| 18 | CoprocReg CRm, unsigned opc2) { | 41 | CoprocReg CRm, unsigned opc2) { |
| 19 | // TODO(merry): Privileged CP15 registers | ||
| 20 | |||
| 21 | if (!two && CRn == CoprocReg::C7 && opc1 == 0 && CRm == CoprocReg::C5 && opc2 == 4) { | 42 | if (!two && CRn == CoprocReg::C7 && opc1 == 0 && CRm == CoprocReg::C5 && opc2 == 4) { |
| 43 | // CP15_FLUSH_PREFETCH_BUFFER | ||
| 22 | // This is a dummy write, we ignore the value written here. | 44 | // This is a dummy write, we ignore the value written here. |
| 23 | return &CP15[static_cast<std::size_t>(CP15Register::CP15_FLUSH_PREFETCH_BUFFER)]; | 45 | return &dummy_value; |
| 24 | } | 46 | } |
| 25 | 47 | ||
| 26 | if (!two && CRn == CoprocReg::C7 && opc1 == 0 && CRm == CoprocReg::C10) { | 48 | if (!two && CRn == CoprocReg::C7 && opc1 == 0 && CRm == CoprocReg::C10) { |
| 27 | switch (opc2) { | 49 | switch (opc2) { |
| 28 | case 4: | 50 | case 4: |
| 51 | // CP15_DATA_SYNC_BARRIER | ||
| 29 | // This is a dummy write, we ignore the value written here. | 52 | // This is a dummy write, we ignore the value written here. |
| 30 | return &CP15[static_cast<std::size_t>(CP15Register::CP15_DATA_SYNC_BARRIER)]; | 53 | return &dummy_value; |
| 31 | case 5: | 54 | case 5: |
| 55 | // CP15_DATA_MEMORY_BARRIER | ||
| 32 | // This is a dummy write, we ignore the value written here. | 56 | // This is a dummy write, we ignore the value written here. |
| 33 | return &CP15[static_cast<std::size_t>(CP15Register::CP15_DATA_MEMORY_BARRIER)]; | 57 | return &dummy_value; |
| 34 | default: | ||
| 35 | return {}; | ||
| 36 | } | 58 | } |
| 37 | } | 59 | } |
| 38 | 60 | ||
| 39 | if (!two && CRn == CoprocReg::C13 && opc1 == 0 && CRm == CoprocReg::C0 && opc2 == 2) { | 61 | if (!two && CRn == CoprocReg::C13 && opc1 == 0 && CRm == CoprocReg::C0 && opc2 == 2) { |
| 40 | return &CP15[static_cast<std::size_t>(CP15Register::CP15_THREAD_UPRW)]; | 62 | // CP15_THREAD_UPRW |
| 63 | return &uprw; | ||
| 41 | } | 64 | } |
| 42 | 65 | ||
| 66 | LOG_CRITICAL(Core_ARM, "CP15: mcr{} p15, {}, <Rt>, {}, {}, {}", two ? "2" : "", opc1, CRn, CRm, | ||
| 67 | opc2); | ||
| 43 | return {}; | 68 | return {}; |
| 44 | } | 69 | } |
| 45 | 70 | ||
| 46 | CallbackOrAccessTwoWords DynarmicCP15::CompileSendTwoWords(bool two, unsigned opc, CoprocReg CRm) { | 71 | CallbackOrAccessTwoWords DynarmicCP15::CompileSendTwoWords(bool two, unsigned opc, CoprocReg CRm) { |
| 72 | LOG_CRITICAL(Core_ARM, "CP15: mcrr{} p15, {}, <Rt>, <Rt2>, {}", two ? "2" : "", opc, CRm); | ||
| 47 | return {}; | 73 | return {}; |
| 48 | } | 74 | } |
| 49 | 75 | ||
| 50 | CallbackOrAccessOneWord DynarmicCP15::CompileGetOneWord(bool two, unsigned opc1, CoprocReg CRn, | 76 | CallbackOrAccessOneWord DynarmicCP15::CompileGetOneWord(bool two, unsigned opc1, CoprocReg CRn, |
| 51 | CoprocReg CRm, unsigned opc2) { | 77 | CoprocReg CRm, unsigned opc2) { |
| 52 | // TODO(merry): Privileged CP15 registers | ||
| 53 | |||
| 54 | if (!two && CRn == CoprocReg::C13 && opc1 == 0 && CRm == CoprocReg::C0) { | 78 | if (!two && CRn == CoprocReg::C13 && opc1 == 0 && CRm == CoprocReg::C0) { |
| 55 | switch (opc2) { | 79 | switch (opc2) { |
| 56 | case 2: | 80 | case 2: |
| 57 | return &CP15[static_cast<std::size_t>(CP15Register::CP15_THREAD_UPRW)]; | 81 | // CP15_THREAD_UPRW |
| 82 | return &uprw; | ||
| 58 | case 3: | 83 | case 3: |
| 59 | return &CP15[static_cast<std::size_t>(CP15Register::CP15_THREAD_URO)]; | 84 | // CP15_THREAD_URO |
| 60 | default: | 85 | return &uro; |
| 61 | return {}; | ||
| 62 | } | 86 | } |
| 63 | } | 87 | } |
| 64 | 88 | ||
| 89 | LOG_CRITICAL(Core_ARM, "CP15: mrc{} p15, {}, <Rt>, {}, {}, {}", two ? "2" : "", opc1, CRn, CRm, | ||
| 90 | opc2); | ||
| 65 | return {}; | 91 | return {}; |
| 66 | } | 92 | } |
| 67 | 93 | ||
| 68 | CallbackOrAccessTwoWords DynarmicCP15::CompileGetTwoWords(bool two, unsigned opc, CoprocReg CRm) { | 94 | CallbackOrAccessTwoWords DynarmicCP15::CompileGetTwoWords(bool two, unsigned opc, CoprocReg CRm) { |
| 95 | if (!two && opc == 0 && CRm == CoprocReg::C14) { | ||
| 96 | // CNTPCT | ||
| 97 | const auto callback = static_cast<u64 (*)(Dynarmic::A32::Jit*, void*, u32, u32)>( | ||
| 98 | [](Dynarmic::A32::Jit*, void* arg, u32, u32) -> u64 { | ||
| 99 | ARM_Dynarmic_32& parent = *(ARM_Dynarmic_32*)arg; | ||
| 100 | return Timing::CpuCyclesToClockCycles(parent.system.CoreTiming().GetTicks()); | ||
| 101 | }); | ||
| 102 | return Dynarmic::A32::Coprocessor::Callback{callback, (void*)&parent}; | ||
| 103 | } | ||
| 104 | |||
| 105 | LOG_CRITICAL(Core_ARM, "CP15: mrrc{} p15, {}, <Rt>, <Rt2>, {}", two ? "2" : "", opc, CRm); | ||
| 69 | return {}; | 106 | return {}; |
| 70 | } | 107 | } |
| 71 | 108 | ||
| 72 | std::optional<Callback> DynarmicCP15::CompileLoadWords(bool two, bool long_transfer, CoprocReg CRd, | 109 | std::optional<Callback> DynarmicCP15::CompileLoadWords(bool two, bool long_transfer, CoprocReg CRd, |
| 73 | std::optional<u8> option) { | 110 | std::optional<u8> option) { |
| 111 | if (option) { | ||
| 112 | LOG_CRITICAL(Core_ARM, "CP15: mrrc{}{} p15, {}, [...], {}", two ? "2" : "", | ||
| 113 | long_transfer ? "l" : "", CRd, *option); | ||
| 114 | } else { | ||
| 115 | LOG_CRITICAL(Core_ARM, "CP15: mrrc{}{} p15, {}, [...]", two ? "2" : "", | ||
| 116 | long_transfer ? "l" : "", CRd); | ||
| 117 | } | ||
| 74 | return {}; | 118 | return {}; |
| 75 | } | 119 | } |
| 76 | 120 | ||
| 77 | std::optional<Callback> DynarmicCP15::CompileStoreWords(bool two, bool long_transfer, CoprocReg CRd, | 121 | std::optional<Callback> DynarmicCP15::CompileStoreWords(bool two, bool long_transfer, CoprocReg CRd, |
| 78 | std::optional<u8> option) { | 122 | std::optional<u8> option) { |
| 123 | if (option) { | ||
| 124 | LOG_CRITICAL(Core_ARM, "CP15: mrrc{}{} p15, {}, [...], {}", two ? "2" : "", | ||
| 125 | long_transfer ? "l" : "", CRd, *option); | ||
| 126 | } else { | ||
| 127 | LOG_CRITICAL(Core_ARM, "CP15: mrrc{}{} p15, {}, [...]", two ? "2" : "", | ||
| 128 | long_transfer ? "l" : "", CRd); | ||
| 129 | } | ||
| 79 | return {}; | 130 | return {}; |
| 80 | } | 131 | } |
| 132 | |||
| 133 | } // namespace Core | ||
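The fmt::formatter specialization added at the top of this file is what lets the new LOG_CRITICAL calls pass CoprocReg values directly. A small illustration (not in the commit) of what it produces, assuming the specialization from the diff is visible in the translation unit:

```cpp
#include <string>
#include <fmt/format.h>
#include <dynarmic/A32/coprocessor.h>  // brings in Dynarmic::A32::CoprocReg

// With the fmt::formatter<Dynarmic::A32::CoprocReg> specialization in scope,
// a coprocessor register formats as "cp<N>".
std::string DescribeReg(Dynarmic::A32::CoprocReg reg) {
    return fmt::format("{}", reg);  // e.g. CoprocReg::C13 -> "cp13"
}
```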
diff --git a/src/core/arm/dynarmic/arm_dynarmic_cp15.h b/src/core/arm/dynarmic/arm_dynarmic_cp15.h
index 07bcde5f9..7356d252e 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_cp15.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_cp15.h
| @@ -10,128 +10,15 @@ | |||
| 10 | #include <dynarmic/A32/coprocessor.h> | 10 | #include <dynarmic/A32/coprocessor.h> |
| 11 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 12 | 12 | ||
| 13 | enum class CP15Register { | 13 | namespace Core { |
| 14 | // c0 - Information registers | ||
| 15 | CP15_MAIN_ID, | ||
| 16 | CP15_CACHE_TYPE, | ||
| 17 | CP15_TCM_STATUS, | ||
| 18 | CP15_TLB_TYPE, | ||
| 19 | CP15_CPU_ID, | ||
| 20 | CP15_PROCESSOR_FEATURE_0, | ||
| 21 | CP15_PROCESSOR_FEATURE_1, | ||
| 22 | CP15_DEBUG_FEATURE_0, | ||
| 23 | CP15_AUXILIARY_FEATURE_0, | ||
| 24 | CP15_MEMORY_MODEL_FEATURE_0, | ||
| 25 | CP15_MEMORY_MODEL_FEATURE_1, | ||
| 26 | CP15_MEMORY_MODEL_FEATURE_2, | ||
| 27 | CP15_MEMORY_MODEL_FEATURE_3, | ||
| 28 | CP15_ISA_FEATURE_0, | ||
| 29 | CP15_ISA_FEATURE_1, | ||
| 30 | CP15_ISA_FEATURE_2, | ||
| 31 | CP15_ISA_FEATURE_3, | ||
| 32 | CP15_ISA_FEATURE_4, | ||
| 33 | 14 | ||
| 34 | // c1 - Control registers | 15 | class ARM_Dynarmic_32; |
| 35 | CP15_CONTROL, | ||
| 36 | CP15_AUXILIARY_CONTROL, | ||
| 37 | CP15_COPROCESSOR_ACCESS_CONTROL, | ||
| 38 | |||
| 39 | // c2 - Translation table registers | ||
| 40 | CP15_TRANSLATION_BASE_TABLE_0, | ||
| 41 | CP15_TRANSLATION_BASE_TABLE_1, | ||
| 42 | CP15_TRANSLATION_BASE_CONTROL, | ||
| 43 | CP15_DOMAIN_ACCESS_CONTROL, | ||
| 44 | CP15_RESERVED, | ||
| 45 | |||
| 46 | // c5 - Fault status registers | ||
| 47 | CP15_FAULT_STATUS, | ||
| 48 | CP15_INSTR_FAULT_STATUS, | ||
| 49 | CP15_COMBINED_DATA_FSR = CP15_FAULT_STATUS, | ||
| 50 | CP15_INST_FSR, | ||
| 51 | |||
| 52 | // c6 - Fault Address registers | ||
| 53 | CP15_FAULT_ADDRESS, | ||
| 54 | CP15_COMBINED_DATA_FAR = CP15_FAULT_ADDRESS, | ||
| 55 | CP15_WFAR, | ||
| 56 | CP15_IFAR, | ||
| 57 | |||
| 58 | // c7 - Cache operation registers | ||
| 59 | CP15_WAIT_FOR_INTERRUPT, | ||
| 60 | CP15_PHYS_ADDRESS, | ||
| 61 | CP15_INVALIDATE_INSTR_CACHE, | ||
| 62 | CP15_INVALIDATE_INSTR_CACHE_USING_MVA, | ||
| 63 | CP15_INVALIDATE_INSTR_CACHE_USING_INDEX, | ||
| 64 | CP15_FLUSH_PREFETCH_BUFFER, | ||
| 65 | CP15_FLUSH_BRANCH_TARGET_CACHE, | ||
| 66 | CP15_FLUSH_BRANCH_TARGET_CACHE_ENTRY, | ||
| 67 | CP15_INVALIDATE_DATA_CACHE, | ||
| 68 | CP15_INVALIDATE_DATA_CACHE_LINE_USING_MVA, | ||
| 69 | CP15_INVALIDATE_DATA_CACHE_LINE_USING_INDEX, | ||
| 70 | CP15_INVALIDATE_DATA_AND_INSTR_CACHE, | ||
| 71 | CP15_CLEAN_DATA_CACHE, | ||
| 72 | CP15_CLEAN_DATA_CACHE_LINE_USING_MVA, | ||
| 73 | CP15_CLEAN_DATA_CACHE_LINE_USING_INDEX, | ||
| 74 | CP15_DATA_SYNC_BARRIER, | ||
| 75 | CP15_DATA_MEMORY_BARRIER, | ||
| 76 | CP15_CLEAN_AND_INVALIDATE_DATA_CACHE, | ||
| 77 | CP15_CLEAN_AND_INVALIDATE_DATA_CACHE_LINE_USING_MVA, | ||
| 78 | CP15_CLEAN_AND_INVALIDATE_DATA_CACHE_LINE_USING_INDEX, | ||
| 79 | |||
| 80 | // c8 - TLB operations | ||
| 81 | CP15_INVALIDATE_ITLB, | ||
| 82 | CP15_INVALIDATE_ITLB_SINGLE_ENTRY, | ||
| 83 | CP15_INVALIDATE_ITLB_ENTRY_ON_ASID_MATCH, | ||
| 84 | CP15_INVALIDATE_ITLB_ENTRY_ON_MVA, | ||
| 85 | CP15_INVALIDATE_DTLB, | ||
| 86 | CP15_INVALIDATE_DTLB_SINGLE_ENTRY, | ||
| 87 | CP15_INVALIDATE_DTLB_ENTRY_ON_ASID_MATCH, | ||
| 88 | CP15_INVALIDATE_DTLB_ENTRY_ON_MVA, | ||
| 89 | CP15_INVALIDATE_UTLB, | ||
| 90 | CP15_INVALIDATE_UTLB_SINGLE_ENTRY, | ||
| 91 | CP15_INVALIDATE_UTLB_ENTRY_ON_ASID_MATCH, | ||
| 92 | CP15_INVALIDATE_UTLB_ENTRY_ON_MVA, | ||
| 93 | |||
| 94 | // c9 - Data cache lockdown register | ||
| 95 | CP15_DATA_CACHE_LOCKDOWN, | ||
| 96 | |||
| 97 | // c10 - TLB/Memory map registers | ||
| 98 | CP15_TLB_LOCKDOWN, | ||
| 99 | CP15_PRIMARY_REGION_REMAP, | ||
| 100 | CP15_NORMAL_REGION_REMAP, | ||
| 101 | |||
| 102 | // c13 - Thread related registers | ||
| 103 | CP15_PID, | ||
| 104 | CP15_CONTEXT_ID, | ||
| 105 | CP15_THREAD_UPRW, // Thread ID register - User/Privileged Read/Write | ||
| 106 | CP15_THREAD_URO, // Thread ID register - User Read Only (Privileged R/W) | ||
| 107 | CP15_THREAD_PRW, // Thread ID register - Privileged R/W only. | ||
| 108 | |||
| 109 | // c15 - Performance and TLB lockdown registers | ||
| 110 | CP15_PERFORMANCE_MONITOR_CONTROL, | ||
| 111 | CP15_CYCLE_COUNTER, | ||
| 112 | CP15_COUNT_0, | ||
| 113 | CP15_COUNT_1, | ||
| 114 | CP15_READ_MAIN_TLB_LOCKDOWN_ENTRY, | ||
| 115 | CP15_WRITE_MAIN_TLB_LOCKDOWN_ENTRY, | ||
| 116 | CP15_MAIN_TLB_LOCKDOWN_VIRT_ADDRESS, | ||
| 117 | CP15_MAIN_TLB_LOCKDOWN_PHYS_ADDRESS, | ||
| 118 | CP15_MAIN_TLB_LOCKDOWN_ATTRIBUTE, | ||
| 119 | CP15_TLB_DEBUG_CONTROL, | ||
| 120 | |||
| 121 | // Skyeye defined | ||
| 122 | CP15_TLB_FAULT_ADDR, | ||
| 123 | CP15_TLB_FAULT_STATUS, | ||
| 124 | |||
| 125 | // Not an actual register. | ||
| 126 | // All registers should be defined above this. | ||
| 127 | CP15_REGISTER_COUNT, | ||
| 128 | }; | ||
| 129 | 16 | ||
| 130 | class DynarmicCP15 final : public Dynarmic::A32::Coprocessor { | 17 | class DynarmicCP15 final : public Dynarmic::A32::Coprocessor { |
| 131 | public: | 18 | public: |
| 132 | using CoprocReg = Dynarmic::A32::CoprocReg; | 19 | using CoprocReg = Dynarmic::A32::CoprocReg; |
| 133 | 20 | ||
| 134 | explicit DynarmicCP15(u32* cp15) : CP15(cp15){}; | 21 | explicit DynarmicCP15(ARM_Dynarmic_32& parent) : parent(parent) {} |
| 135 | 22 | ||
| 136 | std::optional<Callback> CompileInternalOperation(bool two, unsigned opc1, CoprocReg CRd, | 23 | std::optional<Callback> CompileInternalOperation(bool two, unsigned opc1, CoprocReg CRd, |
| 137 | CoprocReg CRn, CoprocReg CRm, | 24 | CoprocReg CRn, CoprocReg CRm, |
| @@ -147,6 +34,9 @@ public: | |||
| 147 | std::optional<Callback> CompileStoreWords(bool two, bool long_transfer, CoprocReg CRd, | 34 | std::optional<Callback> CompileStoreWords(bool two, bool long_transfer, CoprocReg CRd, |
| 148 | std::optional<u8> option) override; | 35 | std::optional<u8> option) override; |
| 149 | 36 | ||
| 150 | private: | 37 | ARM_Dynarmic_32& parent; |
| 151 | u32* CP15{}; | 38 | u32 uprw; |
| 39 | u32 uro; | ||
| 152 | }; | 40 | }; |
| 41 | |||
| 42 | } // namespace Core | ||
diff --git a/src/video_core/macro/macro_jit_x64.cpp b/src/video_core/macro/macro_jit_x64.cpp
index 30a7e1fe9..bee34a7c0 100644
--- a/src/video_core/macro/macro_jit_x64.cpp
+++ b/src/video_core/macro/macro_jit_x64.cpp
| @@ -298,22 +298,22 @@ void MacroJITx64Impl::Compile_Read(Macro::Opcode opcode) { | |||
| 298 | sub(result, opcode.immediate * -1); | 298 | sub(result, opcode.immediate * -1); |
| 299 | } | 299 | } |
| 300 | } | 300 | } |
| 301 | Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0); | 301 | Common::X64::ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0); |
| 302 | mov(Common::X64::ABI_PARAM1, qword[STATE]); | 302 | mov(Common::X64::ABI_PARAM1, qword[STATE]); |
| 303 | mov(Common::X64::ABI_PARAM2, RESULT); | 303 | mov(Common::X64::ABI_PARAM2, RESULT); |
| 304 | Common::X64::CallFarFunction(*this, &Read); | 304 | Common::X64::CallFarFunction(*this, &Read); |
| 305 | Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0); | 305 | Common::X64::ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0); |
| 306 | mov(RESULT, Common::X64::ABI_RETURN.cvt32()); | 306 | mov(RESULT, Common::X64::ABI_RETURN.cvt32()); |
| 307 | Compile_ProcessResult(opcode.result_operation, opcode.dst); | 307 | Compile_ProcessResult(opcode.result_operation, opcode.dst); |
| 308 | } | 308 | } |
| 309 | 309 | ||
| 310 | void Tegra::MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) { | 310 | void Tegra::MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) { |
| 311 | Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0); | 311 | Common::X64::ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0); |
| 312 | mov(Common::X64::ABI_PARAM1, qword[STATE]); | 312 | mov(Common::X64::ABI_PARAM1, qword[STATE]); |
| 313 | mov(Common::X64::ABI_PARAM2, METHOD_ADDRESS); | 313 | mov(Common::X64::ABI_PARAM2, METHOD_ADDRESS); |
| 314 | mov(Common::X64::ABI_PARAM3, value); | 314 | mov(Common::X64::ABI_PARAM3, value); |
| 315 | Common::X64::CallFarFunction(*this, &Send); | 315 | Common::X64::CallFarFunction(*this, &Send); |
| 316 | Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0); | 316 | Common::X64::ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0); |
| 317 | 317 | ||
| 318 | Xbyak::Label dont_process{}; | 318 | Xbyak::Label dont_process{}; |
| 319 | // Get increment | 319 | // Get increment |
| @@ -417,7 +417,7 @@ void MacroJITx64Impl::Compile() { | |||
| 417 | bool keep_executing = true; | 417 | bool keep_executing = true; |
| 418 | labels.fill(Xbyak::Label()); | 418 | labels.fill(Xbyak::Label()); |
| 419 | 419 | ||
| 420 | Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8); | 420 | Common::X64::ABI_PushRegistersAndAdjustStack(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8); |
| 421 | // JIT state | 421 | // JIT state |
| 422 | mov(STATE, Common::X64::ABI_PARAM1); | 422 | mov(STATE, Common::X64::ABI_PARAM1); |
| 423 | mov(PARAMETERS, Common::X64::ABI_PARAM2); | 423 | mov(PARAMETERS, Common::X64::ABI_PARAM2); |
| @@ -455,7 +455,7 @@ void MacroJITx64Impl::Compile() { | |||
| 455 | 455 | ||
| 456 | L(end_of_code); | 456 | L(end_of_code); |
| 457 | 457 | ||
| 458 | Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8); | 458 | Common::X64::ABI_PopRegistersAndAdjustStack(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8); |
| 459 | ret(); | 459 | ret(); |
| 460 | ready(); | 460 | ready(); |
| 461 | program = getCode<ProgramType>(); | 461 | program = getCode<ProgramType>(); |
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index e245e27ec..b31d604e4 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
| @@ -123,16 +123,24 @@ std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> BuildBaseBindin | |||
| 123 | u32 num_images = GetInteger<u32>(GL_MAX_IMAGE_UNITS); | 123 | u32 num_images = GetInteger<u32>(GL_MAX_IMAGE_UNITS); |
| 124 | u32 base_images = 0; | 124 | u32 base_images = 0; |
| 125 | 125 | ||
| 126 | // Reserve more image bindings on fragment and vertex stages. | 126 | // GL_MAX_IMAGE_UNITS is guaranteed by the spec to have a minimum value of 8. |
| 127 | // Due to the limitation of GL_MAX_IMAGE_UNITS, reserve at least 4 image bindings on the | ||
| 128 | // fragment stage, and at least 1 for the rest of the stages. | ||
| 129 | // So far games are observed to use 1 image binding on vertex and 4 on fragment stages. | ||
| 130 | |||
| 131 | // Reserve at least 4 image bindings on the fragment stage. | ||
| 127 | bindings[4].image = | 132 | bindings[4].image = |
| 128 | Extract(base_images, num_images, num_images / NumStages + 2, LimitImages[4]); | 133 | Extract(base_images, num_images, std::max(4U, num_images / NumStages), LimitImages[4]); |
| 129 | bindings[0].image = | 134 | |
| 130 | Extract(base_images, num_images, num_images / NumStages + 1, LimitImages[0]); | 135 | // This is guaranteed to be at least 1. |
| 136 | const u32 total_extracted_images = num_images / (NumStages - 1); | ||
| 131 | 137 | ||
| 132 | // Reserve the other image bindings. | 138 | // Reserve the other image bindings. |
| 133 | const u32 total_extracted_images = num_images / (NumStages - 2); | 139 | for (std::size_t i = 0; i < NumStages; ++i) { |
| 134 | for (std::size_t i = 2; i < NumStages; ++i) { | ||
| 135 | const std::size_t stage = stage_swizzle[i]; | 140 | const std::size_t stage = stage_swizzle[i]; |
| 141 | if (stage == 4) { | ||
| 142 | continue; | ||
| 143 | } | ||
| 136 | bindings[stage].image = | 144 | bindings[stage].image = |
| 137 | Extract(base_images, num_images, total_extracted_images, LimitImages[stage]); | 145 | Extract(base_images, num_images, total_extracted_images, LimitImages[stage]); |
| 138 | } | 146 | } |
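To make the new reservation arithmetic concrete, a worked example at the spec-minimum limit mentioned in the comments above (this assumes Extract() consumes its reservation from num_images and that NumStages covers the five graphics stages, which is what the surrounding code implies):

```cpp
#include <algorithm>

constexpr unsigned NumStages = 5;    // vertex, tess control, tess eval, geometry, fragment
constexpr unsigned num_images = 8;   // GL_MAX_IMAGE_UNITS spec minimum
// Fragment stage reserves at least 4 bindings, leaving 4 units for the rest.
constexpr unsigned fragment_images = std::max(4U, num_images / NumStages);              // = 4
constexpr unsigned per_other_stage = (num_images - fragment_images) / (NumStages - 1);  // = 1
static_assert(fragment_images == 4 && per_other_stage == 1);
```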
diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h
index 994ae98eb..35e329240 100644
--- a/src/video_core/renderer_opengl/maxwell_to_gl.h
+++ b/src/video_core/renderer_opengl/maxwell_to_gl.h
| @@ -46,10 +46,8 @@ inline GLenum VertexType(Maxwell::VertexAttribute attrib) { | |||
| 46 | return GL_UNSIGNED_INT; | 46 | return GL_UNSIGNED_INT; |
| 47 | case Maxwell::VertexAttribute::Size::Size_10_10_10_2: | 47 | case Maxwell::VertexAttribute::Size::Size_10_10_10_2: |
| 48 | return GL_UNSIGNED_INT_2_10_10_10_REV; | 48 | return GL_UNSIGNED_INT_2_10_10_10_REV; |
| 49 | default: | ||
| 50 | LOG_ERROR(Render_OpenGL, "Unimplemented vertex size={}", attrib.SizeString()); | ||
| 51 | return {}; | ||
| 52 | } | 49 | } |
| 50 | break; | ||
| 53 | case Maxwell::VertexAttribute::Type::SignedInt: | 51 | case Maxwell::VertexAttribute::Type::SignedInt: |
| 54 | case Maxwell::VertexAttribute::Type::SignedNorm: | 52 | case Maxwell::VertexAttribute::Type::SignedNorm: |
| 55 | switch (attrib.size) { | 53 | switch (attrib.size) { |
| @@ -70,10 +68,8 @@ inline GLenum VertexType(Maxwell::VertexAttribute attrib) { | |||
| 70 | return GL_INT; | 68 | return GL_INT; |
| 71 | case Maxwell::VertexAttribute::Size::Size_10_10_10_2: | 69 | case Maxwell::VertexAttribute::Size::Size_10_10_10_2: |
| 72 | return GL_INT_2_10_10_10_REV; | 70 | return GL_INT_2_10_10_10_REV; |
| 73 | default: | ||
| 74 | LOG_ERROR(Render_OpenGL, "Unimplemented vertex size={}", attrib.SizeString()); | ||
| 75 | return {}; | ||
| 76 | } | 71 | } |
| 72 | break; | ||
| 77 | case Maxwell::VertexAttribute::Type::Float: | 73 | case Maxwell::VertexAttribute::Type::Float: |
| 78 | switch (attrib.size) { | 74 | switch (attrib.size) { |
| 79 | case Maxwell::VertexAttribute::Size::Size_16: | 75 | case Maxwell::VertexAttribute::Size::Size_16: |
| @@ -86,10 +82,8 @@ inline GLenum VertexType(Maxwell::VertexAttribute attrib) { | |||
| 86 | case Maxwell::VertexAttribute::Size::Size_32_32_32: | 82 | case Maxwell::VertexAttribute::Size::Size_32_32_32: |
| 87 | case Maxwell::VertexAttribute::Size::Size_32_32_32_32: | 83 | case Maxwell::VertexAttribute::Size::Size_32_32_32_32: |
| 88 | return GL_FLOAT; | 84 | return GL_FLOAT; |
| 89 | default: | ||
| 90 | LOG_ERROR(Render_OpenGL, "Unimplemented vertex size={}", attrib.SizeString()); | ||
| 91 | return {}; | ||
| 92 | } | 85 | } |
| 86 | break; | ||
| 93 | case Maxwell::VertexAttribute::Type::UnsignedScaled: | 87 | case Maxwell::VertexAttribute::Type::UnsignedScaled: |
| 94 | switch (attrib.size) { | 88 | switch (attrib.size) { |
| 95 | case Maxwell::VertexAttribute::Size::Size_8: | 89 | case Maxwell::VertexAttribute::Size::Size_8: |
| @@ -102,10 +96,8 @@ inline GLenum VertexType(Maxwell::VertexAttribute attrib) { | |||
| 102 | case Maxwell::VertexAttribute::Size::Size_16_16_16: | 96 | case Maxwell::VertexAttribute::Size::Size_16_16_16: |
| 103 | case Maxwell::VertexAttribute::Size::Size_16_16_16_16: | 97 | case Maxwell::VertexAttribute::Size::Size_16_16_16_16: |
| 104 | return GL_UNSIGNED_SHORT; | 98 | return GL_UNSIGNED_SHORT; |
| 105 | default: | ||
| 106 | LOG_ERROR(Render_OpenGL, "Unimplemented vertex size={}", attrib.SizeString()); | ||
| 107 | return {}; | ||
| 108 | } | 99 | } |
| 100 | break; | ||
| 109 | case Maxwell::VertexAttribute::Type::SignedScaled: | 101 | case Maxwell::VertexAttribute::Type::SignedScaled: |
| 110 | switch (attrib.size) { | 102 | switch (attrib.size) { |
| 111 | case Maxwell::VertexAttribute::Size::Size_8: | 103 | case Maxwell::VertexAttribute::Size::Size_8: |
| @@ -118,14 +110,12 @@ inline GLenum VertexType(Maxwell::VertexAttribute attrib) { | |||
| 118 | case Maxwell::VertexAttribute::Size::Size_16_16_16: | 110 | case Maxwell::VertexAttribute::Size::Size_16_16_16: |
| 119 | case Maxwell::VertexAttribute::Size::Size_16_16_16_16: | 111 | case Maxwell::VertexAttribute::Size::Size_16_16_16_16: |
| 120 | return GL_SHORT; | 112 | return GL_SHORT; |
| 121 | default: | ||
| 122 | LOG_ERROR(Render_OpenGL, "Unimplemented vertex size={}", attrib.SizeString()); | ||
| 123 | return {}; | ||
| 124 | } | 113 | } |
| 125 | default: | 114 | break; |
| 126 | LOG_ERROR(Render_OpenGL, "Unimplemented vertex type={}", attrib.TypeString()); | ||
| 127 | return {}; | ||
| 128 | } | 115 | } |
| 116 | UNIMPLEMENTED_MSG("Unimplemented vertex type={} and size={}", attrib.TypeString(), | ||
| 117 | attrib.SizeString()); | ||
| 118 | return {}; | ||
| 129 | } | 119 | } |
| 130 | 120 | ||
| 131 | inline GLenum IndexFormat(Maxwell::IndexFormat index_format) { | 121 | inline GLenum IndexFormat(Maxwell::IndexFormat index_format) { |
| @@ -137,8 +127,7 @@ inline GLenum IndexFormat(Maxwell::IndexFormat index_format) { | |||
| 137 | case Maxwell::IndexFormat::UnsignedInt: | 127 | case Maxwell::IndexFormat::UnsignedInt: |
| 138 | return GL_UNSIGNED_INT; | 128 | return GL_UNSIGNED_INT; |
| 139 | } | 129 | } |
| 140 | LOG_CRITICAL(Render_OpenGL, "Unimplemented index_format={}", static_cast<u32>(index_format)); | 130 | UNREACHABLE_MSG("Invalid index_format={}", static_cast<u32>(index_format)); |
| 141 | UNREACHABLE(); | ||
| 142 | return {}; | 131 | return {}; |
| 143 | } | 132 | } |
| 144 | 133 | ||
| @@ -180,33 +169,32 @@ inline GLenum PrimitiveTopology(Maxwell::PrimitiveTopology topology) { | |||
| 180 | } | 169 | } |
| 181 | 170 | ||
| 182 | inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode, | 171 | inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode, |
| 183 | Tegra::Texture::TextureMipmapFilter mip_filter_mode) { | 172 | Tegra::Texture::TextureMipmapFilter mipmap_filter_mode) { |
| 184 | switch (filter_mode) { | 173 | switch (filter_mode) { |
| 185 | case Tegra::Texture::TextureFilter::Linear: { | 174 | case Tegra::Texture::TextureFilter::Nearest: |
| 186 | switch (mip_filter_mode) { | 175 | switch (mipmap_filter_mode) { |
| 187 | case Tegra::Texture::TextureMipmapFilter::None: | 176 | case Tegra::Texture::TextureMipmapFilter::None: |
| 188 | return GL_LINEAR; | 177 | return GL_NEAREST; |
| 189 | case Tegra::Texture::TextureMipmapFilter::Nearest: | 178 | case Tegra::Texture::TextureMipmapFilter::Nearest: |
| 190 | return GL_LINEAR_MIPMAP_NEAREST; | 179 | return GL_NEAREST_MIPMAP_NEAREST; |
| 191 | case Tegra::Texture::TextureMipmapFilter::Linear: | 180 | case Tegra::Texture::TextureMipmapFilter::Linear: |
| 192 | return GL_LINEAR_MIPMAP_LINEAR; | 181 | return GL_NEAREST_MIPMAP_LINEAR; |
| 193 | } | 182 | } |
| 194 | break; | 183 | break; |
| 195 | } | 184 | case Tegra::Texture::TextureFilter::Linear: |
| 196 | case Tegra::Texture::TextureFilter::Nearest: { | 185 | switch (mipmap_filter_mode) { |
| 197 | switch (mip_filter_mode) { | ||
| 198 | case Tegra::Texture::TextureMipmapFilter::None: | 186 | case Tegra::Texture::TextureMipmapFilter::None: |
| 199 | return GL_NEAREST; | 187 | return GL_LINEAR; |
| 200 | case Tegra::Texture::TextureMipmapFilter::Nearest: | 188 | case Tegra::Texture::TextureMipmapFilter::Nearest: |
| 201 | return GL_NEAREST_MIPMAP_NEAREST; | 189 | return GL_LINEAR_MIPMAP_NEAREST; |
| 202 | case Tegra::Texture::TextureMipmapFilter::Linear: | 190 | case Tegra::Texture::TextureMipmapFilter::Linear: |
| 203 | return GL_NEAREST_MIPMAP_LINEAR; | 191 | return GL_LINEAR_MIPMAP_LINEAR; |
| 204 | } | 192 | } |
| 205 | break; | 193 | break; |
| 206 | } | 194 | } |
| 207 | } | 195 | UNREACHABLE_MSG("Invalid texture filter mode={} and mipmap filter mode={}", |
| 208 | LOG_ERROR(Render_OpenGL, "Unimplemented texture filter mode={}", static_cast<u32>(filter_mode)); | 196 | static_cast<u32>(filter_mode), static_cast<u32>(mipmap_filter_mode)); |
| 209 | return GL_LINEAR; | 197 | return GL_NEAREST; |
| 210 | } | 198 | } |
| 211 | 199 | ||
| 212 | inline GLenum WrapMode(Tegra::Texture::WrapMode wrap_mode) { | 200 | inline GLenum WrapMode(Tegra::Texture::WrapMode wrap_mode) { |
| @@ -229,10 +217,9 @@ inline GLenum WrapMode(Tegra::Texture::WrapMode wrap_mode) { | |||
| 229 | } else { | 217 | } else { |
| 230 | return GL_MIRROR_CLAMP_TO_EDGE; | 218 | return GL_MIRROR_CLAMP_TO_EDGE; |
| 231 | } | 219 | } |
| 232 | default: | ||
| 233 | LOG_ERROR(Render_OpenGL, "Unimplemented texture wrap mode={}", static_cast<u32>(wrap_mode)); | ||
| 234 | return GL_REPEAT; | ||
| 235 | } | 220 | } |
| 221 | UNIMPLEMENTED_MSG("Unimplemented texture wrap mode={}", static_cast<u32>(wrap_mode)); | ||
| 222 | return GL_REPEAT; | ||
| 236 | } | 223 | } |
| 237 | 224 | ||
| 238 | inline GLenum DepthCompareFunc(Tegra::Texture::DepthCompareFunc func) { | 225 | inline GLenum DepthCompareFunc(Tegra::Texture::DepthCompareFunc func) { |
| @@ -254,8 +241,7 @@ inline GLenum DepthCompareFunc(Tegra::Texture::DepthCompareFunc func) { | |||
| 254 | case Tegra::Texture::DepthCompareFunc::Always: | 241 | case Tegra::Texture::DepthCompareFunc::Always: |
| 255 | return GL_ALWAYS; | 242 | return GL_ALWAYS; |
| 256 | } | 243 | } |
| 257 | LOG_ERROR(Render_OpenGL, "Unimplemented texture depth compare function ={}", | 244 | UNIMPLEMENTED_MSG("Unimplemented texture depth compare function={}", static_cast<u32>(func)); |
| 258 | static_cast<u32>(func)); | ||
| 259 | return GL_GREATER; | 245 | return GL_GREATER; |
| 260 | } | 246 | } |
| 261 | 247 | ||
| @@ -277,7 +263,7 @@ inline GLenum BlendEquation(Maxwell::Blend::Equation equation) { | |||
| 277 | case Maxwell::Blend::Equation::MaxGL: | 263 | case Maxwell::Blend::Equation::MaxGL: |
| 278 | return GL_MAX; | 264 | return GL_MAX; |
| 279 | } | 265 | } |
| 280 | LOG_ERROR(Render_OpenGL, "Unimplemented blend equation={}", static_cast<u32>(equation)); | 266 | UNIMPLEMENTED_MSG("Unimplemented blend equation={}", static_cast<u32>(equation)); |
| 281 | return GL_FUNC_ADD; | 267 | return GL_FUNC_ADD; |
| 282 | } | 268 | } |
| 283 | 269 | ||
| @@ -341,7 +327,7 @@ inline GLenum BlendFunc(Maxwell::Blend::Factor factor) { | |||
| 341 | case Maxwell::Blend::Factor::OneMinusConstantAlphaGL: | 327 | case Maxwell::Blend::Factor::OneMinusConstantAlphaGL: |
| 342 | return GL_ONE_MINUS_CONSTANT_ALPHA; | 328 | return GL_ONE_MINUS_CONSTANT_ALPHA; |
| 343 | } | 329 | } |
| 344 | LOG_ERROR(Render_OpenGL, "Unimplemented blend factor={}", static_cast<u32>(factor)); | 330 | UNIMPLEMENTED_MSG("Unimplemented blend factor={}", static_cast<u32>(factor)); |
| 345 | return GL_ZERO; | 331 | return GL_ZERO; |
| 346 | } | 332 | } |
| 347 | 333 | ||
| @@ -361,7 +347,7 @@ inline GLenum SwizzleSource(Tegra::Texture::SwizzleSource source) { | |||
| 361 | case Tegra::Texture::SwizzleSource::OneFloat: | 347 | case Tegra::Texture::SwizzleSource::OneFloat: |
| 362 | return GL_ONE; | 348 | return GL_ONE; |
| 363 | } | 349 | } |
| 364 | LOG_ERROR(Render_OpenGL, "Unimplemented swizzle source={}", static_cast<u32>(source)); | 350 | UNIMPLEMENTED_MSG("Unimplemented swizzle source={}", static_cast<u32>(source)); |
| 365 | return GL_ZERO; | 351 | return GL_ZERO; |
| 366 | } | 352 | } |
| 367 | 353 | ||
| @@ -392,7 +378,7 @@ inline GLenum ComparisonOp(Maxwell::ComparisonOp comparison) { | |||
| 392 | case Maxwell::ComparisonOp::AlwaysOld: | 378 | case Maxwell::ComparisonOp::AlwaysOld: |
| 393 | return GL_ALWAYS; | 379 | return GL_ALWAYS; |
| 394 | } | 380 | } |
| 395 | LOG_ERROR(Render_OpenGL, "Unimplemented comparison op={}", static_cast<u32>(comparison)); | 381 | UNIMPLEMENTED_MSG("Unimplemented comparison op={}", static_cast<u32>(comparison)); |
| 396 | return GL_ALWAYS; | 382 | return GL_ALWAYS; |
| 397 | } | 383 | } |
| 398 | 384 | ||
| @@ -423,7 +409,7 @@ inline GLenum StencilOp(Maxwell::StencilOp stencil) { | |||
| 423 | case Maxwell::StencilOp::DecrWrapOGL: | 409 | case Maxwell::StencilOp::DecrWrapOGL: |
| 424 | return GL_DECR_WRAP; | 410 | return GL_DECR_WRAP; |
| 425 | } | 411 | } |
| 426 | LOG_ERROR(Render_OpenGL, "Unimplemented stencil op={}", static_cast<u32>(stencil)); | 412 | UNIMPLEMENTED_MSG("Unimplemented stencil op={}", static_cast<u32>(stencil)); |
| 427 | return GL_KEEP; | 413 | return GL_KEEP; |
| 428 | } | 414 | } |
| 429 | 415 | ||
| @@ -434,7 +420,7 @@ inline GLenum FrontFace(Maxwell::FrontFace front_face) { | |||
| 434 | case Maxwell::FrontFace::CounterClockWise: | 420 | case Maxwell::FrontFace::CounterClockWise: |
| 435 | return GL_CCW; | 421 | return GL_CCW; |
| 436 | } | 422 | } |
| 437 | LOG_ERROR(Render_OpenGL, "Unimplemented front face cull={}", static_cast<u32>(front_face)); | 423 | UNIMPLEMENTED_MSG("Unimplemented front face cull={}", static_cast<u32>(front_face)); |
| 438 | return GL_CCW; | 424 | return GL_CCW; |
| 439 | } | 425 | } |
| 440 | 426 | ||
| @@ -447,7 +433,7 @@ inline GLenum CullFace(Maxwell::CullFace cull_face) { | |||
| 447 | case Maxwell::CullFace::FrontAndBack: | 433 | case Maxwell::CullFace::FrontAndBack: |
| 448 | return GL_FRONT_AND_BACK; | 434 | return GL_FRONT_AND_BACK; |
| 449 | } | 435 | } |
| 450 | LOG_ERROR(Render_OpenGL, "Unimplemented cull face={}", static_cast<u32>(cull_face)); | 436 | UNIMPLEMENTED_MSG("Unimplemented cull face={}", static_cast<u32>(cull_face)); |
| 451 | return GL_BACK; | 437 | return GL_BACK; |
| 452 | } | 438 | } |
| 453 | 439 | ||
| @@ -486,7 +472,7 @@ inline GLenum LogicOp(Maxwell::LogicOperation operation) { | |||
| 486 | case Maxwell::LogicOperation::Set: | 472 | case Maxwell::LogicOperation::Set: |
| 487 | return GL_SET; | 473 | return GL_SET; |
| 488 | } | 474 | } |
| 489 | LOG_ERROR(Render_OpenGL, "Unimplemented logic operation={}", static_cast<u32>(operation)); | 475 | UNIMPLEMENTED_MSG("Unimplemented logic operation={}", static_cast<u32>(operation)); |
| 490 | return GL_COPY; | 476 | return GL_COPY; |
| 491 | } | 477 | } |
| 492 | 478 | ||
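A likely rationale for the pattern applied throughout this header (the commit message is not shown here): removing the default: case and moving the failure path after the switch keeps each switch exhaustive over its enum, so compilers can warn (-Wswitch) when a new enumerator is added later. In generic sketch form, with an illustrative enum rather than any real Maxwell type:

```cpp
#include "common/assert.h"  // UNIMPLEMENTED_MSG

enum class Mode { A, B };

int Convert(Mode mode) {
    switch (mode) {
    case Mode::A:
        return 1;
    case Mode::B:
        return 2;
    // no default: -> adding Mode::C later produces a -Wswitch warning at this switch
    }
    UNIMPLEMENTED_MSG("Unimplemented mode={}", static_cast<int>(mode));
    return 0;
}
```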
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 62e950d31..1f2b6734b 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
| @@ -21,29 +21,29 @@ namespace Sampler { | |||
| 21 | 21 | ||
| 22 | VkFilter Filter(Tegra::Texture::TextureFilter filter) { | 22 | VkFilter Filter(Tegra::Texture::TextureFilter filter) { |
| 23 | switch (filter) { | 23 | switch (filter) { |
| 24 | case Tegra::Texture::TextureFilter::Linear: | ||
| 25 | return VK_FILTER_LINEAR; | ||
| 26 | case Tegra::Texture::TextureFilter::Nearest: | 24 | case Tegra::Texture::TextureFilter::Nearest: |
| 27 | return VK_FILTER_NEAREST; | 25 | return VK_FILTER_NEAREST; |
| 26 | case Tegra::Texture::TextureFilter::Linear: | ||
| 27 | return VK_FILTER_LINEAR; | ||
| 28 | } | 28 | } |
| 29 | UNIMPLEMENTED_MSG("Unimplemented sampler filter={}", static_cast<u32>(filter)); | 29 | UNREACHABLE_MSG("Invalid sampler filter={}", static_cast<u32>(filter)); |
| 30 | return {}; | 30 | return {}; |
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | VkSamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter) { | 33 | VkSamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter) { |
| 34 | switch (mipmap_filter) { | 34 | switch (mipmap_filter) { |
| 35 | case Tegra::Texture::TextureMipmapFilter::None: | 35 | case Tegra::Texture::TextureMipmapFilter::None: |
| 36 | // TODO(Rodrigo): None seems to be mapped to OpenGL's mag and min filters without mipmapping | 36 | // There are no Vulkan filter modes that directly correspond to OpenGL minification filters |
| 37 | // (e.g. GL_NEAREST and GL_LINEAR). Vulkan doesn't have such a thing, find out if we have to | 37 | // of GL_LINEAR or GL_NEAREST, but they can be emulated using |
| 38 | // use an image view with a single mipmap level to emulate this. | 38 | // VK_SAMPLER_MIPMAP_MODE_NEAREST, minLod = 0, and maxLod = 0.25, and using minFilter = |
| 39 | return VK_SAMPLER_MIPMAP_MODE_LINEAR; | 39 | // VK_FILTER_LINEAR or minFilter = VK_FILTER_NEAREST, respectively. |
| 40 | ; | 40 | return VK_SAMPLER_MIPMAP_MODE_NEAREST; |
| 41 | case Tegra::Texture::TextureMipmapFilter::Linear: | ||
| 42 | return VK_SAMPLER_MIPMAP_MODE_LINEAR; | ||
| 43 | case Tegra::Texture::TextureMipmapFilter::Nearest: | 41 | case Tegra::Texture::TextureMipmapFilter::Nearest: |
| 44 | return VK_SAMPLER_MIPMAP_MODE_NEAREST; | 42 | return VK_SAMPLER_MIPMAP_MODE_NEAREST; |
| 43 | case Tegra::Texture::TextureMipmapFilter::Linear: | ||
| 44 | return VK_SAMPLER_MIPMAP_MODE_LINEAR; | ||
| 45 | } | 45 | } |
| 46 | UNIMPLEMENTED_MSG("Unimplemented sampler mipmap mode={}", static_cast<u32>(mipmap_filter)); | 46 | UNREACHABLE_MSG("Invalid sampler mipmap mode={}", static_cast<u32>(mipmap_filter)); |
| 47 | return {}; | 47 | return {}; |
| 48 | } | 48 | } |
| 49 | 49 | ||
| @@ -78,10 +78,9 @@ VkSamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode w | |||
| 78 | case Tegra::Texture::WrapMode::MirrorOnceBorder: | 78 | case Tegra::Texture::WrapMode::MirrorOnceBorder: |
| 79 | UNIMPLEMENTED(); | 79 | UNIMPLEMENTED(); |
| 80 | return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; | 80 | return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; |
| 81 | default: | ||
| 82 | UNIMPLEMENTED_MSG("Unimplemented wrap mode={}", static_cast<u32>(wrap_mode)); | ||
| 83 | return {}; | ||
| 84 | } | 81 | } |
| 82 | UNIMPLEMENTED_MSG("Unimplemented wrap mode={}", static_cast<u32>(wrap_mode)); | ||
| 83 | return {}; | ||
| 85 | } | 84 | } |
| 86 | 85 | ||
| 87 | VkCompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func) { | 86 | VkCompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func) { |
| @@ -288,10 +287,9 @@ VkPrimitiveTopology PrimitiveTopology([[maybe_unused]] const VKDevice& device, | |||
| 288 | return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; | 287 | return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; |
| 289 | case Maxwell::PrimitiveTopology::Patches: | 288 | case Maxwell::PrimitiveTopology::Patches: |
| 290 | return VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; | 289 | return VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; |
| 291 | default: | ||
| 292 | UNIMPLEMENTED_MSG("Unimplemented topology={}", static_cast<u32>(topology)); | ||
| 293 | return {}; | ||
| 294 | } | 290 | } |
| 291 | UNIMPLEMENTED_MSG("Unimplemented topology={}", static_cast<u32>(topology)); | ||
| 292 | return {}; | ||
| 295 | } | 293 | } |
| 296 | 294 | ||
| 297 | VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) { | 295 | VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) { |
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
index e6f2fa553..616eacc36 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
| @@ -9,6 +9,8 @@ | |||
| 9 | #include "video_core/renderer_vulkan/wrapper.h" | 9 | #include "video_core/renderer_vulkan/wrapper.h" |
| 10 | #include "video_core/textures/texture.h" | 10 | #include "video_core/textures/texture.h" |
| 11 | 11 | ||
| 12 | using Tegra::Texture::TextureMipmapFilter; | ||
| 13 | |||
| 12 | namespace Vulkan { | 14 | namespace Vulkan { |
| 13 | 15 | ||
| 14 | namespace { | 16 | namespace { |
| @@ -63,8 +65,8 @@ vk::Sampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) c | |||
| 63 | ci.maxAnisotropy = tsc.GetMaxAnisotropy(); | 65 | ci.maxAnisotropy = tsc.GetMaxAnisotropy(); |
| 64 | ci.compareEnable = tsc.depth_compare_enabled; | 66 | ci.compareEnable = tsc.depth_compare_enabled; |
| 65 | ci.compareOp = MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func); | 67 | ci.compareOp = MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func); |
| 66 | ci.minLod = tsc.GetMinLod(); | 68 | ci.minLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.0f : tsc.GetMinLod(); |
| 67 | ci.maxLod = tsc.GetMaxLod(); | 69 | ci.maxLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.25f : tsc.GetMaxLod(); |
| 68 | ci.borderColor = arbitrary_borders ? VK_BORDER_COLOR_INT_CUSTOM_EXT : ConvertBorderColor(color); | 70 | ci.borderColor = arbitrary_borders ? VK_BORDER_COLOR_INT_CUSTOM_EXT : ConvertBorderColor(color); |
| 69 | ci.unnormalizedCoordinates = VK_FALSE; | 71 | ci.unnormalizedCoordinates = VK_FALSE; |
| 70 | return device.GetLogical().CreateSampler(ci); | 72 | return device.GetLogical().CreateSampler(ci); |
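Putting the last two diffs together: when a TSC entry uses TextureMipmapFilter::None, the sampler now follows the recipe described in the MipmapMode() comment for emulating non-mipmapped GL filtering (NEAREST mipmap mode with minLod = 0 and maxLod = 0.25). A reduced sketch of the resulting sampler state (plain Vulkan, illustrative values only, not yuzu code):

```cpp
#include <vulkan/vulkan.h>

// Sampler state produced for mipmap_filter == None with a linear min/mag filter.
VkSamplerCreateInfo MakeNonMipmappedSamplerInfo() {
    VkSamplerCreateInfo ci{};
    ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
    ci.magFilter = VK_FILTER_LINEAR;                  // Sampler::Filter(Linear)
    ci.minFilter = VK_FILTER_LINEAR;
    ci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;   // MipmapMode(None) after this change
    ci.minLod = 0.0f;                                 // forced when mipmap_filter == None
    ci.maxLod = 0.25f;                                // clamps sampling to the base level
    return ci;
}
```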