Diffstat (limited to 'src/core/hw')
-rw-r--r--  src/core/hw/aes/arithmetic128.cpp |  47
-rw-r--r--  src/core/hw/aes/arithmetic128.h   |  17
-rw-r--r--  src/core/hw/aes/ccm.h             |  40
-rw-r--r--  src/core/hw/aes/key.cpp           | 173
-rw-r--r--  src/core/hw/aes/key.h             |  37
-rw-r--r--  src/core/hw/gpu.cpp               | 573
-rw-r--r--  src/core/hw/gpu.h                 | 334
7 files changed, 0 insertions, 1221 deletions
diff --git a/src/core/hw/aes/arithmetic128.cpp b/src/core/hw/aes/arithmetic128.cpp
deleted file mode 100644
index 55b954a52..000000000
--- a/src/core/hw/aes/arithmetic128.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <functional>
#include "core/hw/aes/arithmetic128.h"

namespace HW {
namespace AES {

AESKey Lrot128(const AESKey& in, u32 rot) {
    AESKey out;
    rot %= 128;
    const u32 byte_shift = rot / 8;
    const u32 bit_shift = rot % 8;

    for (u32 i = 0; i < 16; i++) {
        const u32 wrap_index_a = (i + byte_shift) % 16;
        const u32 wrap_index_b = (i + byte_shift + 1) % 16;
        out[i] = ((in[wrap_index_a] << bit_shift) | (in[wrap_index_b] >> (8 - bit_shift))) & 0xFF;
    }
    return out;
}

AESKey Add128(const AESKey& a, const AESKey& b) {
    AESKey out;
    u32 carry = 0;
    u32 sum = 0;

    for (int i = 15; i >= 0; i--) {
        sum = a[i] + b[i] + carry;
        carry = sum >> 8;
        out[i] = static_cast<u8>(sum & 0xff);
    }

    return out;
}

AESKey Xor128(const AESKey& a, const AESKey& b) {
    AESKey out;
    std::transform(a.cbegin(), a.cend(), b.cbegin(), out.begin(), std::bit_xor<>());
    return out;
}

} // namespace AES
} // namespace HW
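These helpers treat an AESKey as a single 128-bit big-endian integer: Add128 carries from index 15 toward index 0, and Lrot128 rotates bits toward the most significant byte. A minimal sanity-check sketch, assuming the deleted header above is still on the include path; the test values are illustrative only:

// Sanity-check sketch for the 128-bit helpers (hypothetical test, not part of the diff).
#include <cassert>
#include "core/hw/aes/arithmetic128.h"

int main() {
    using namespace HW::AES;
    AESKey k{};
    for (u8 i = 0; i < 16; ++i)
        k[i] = i; // 0x000102...0F, interpreted as a big-endian 128-bit value

    // Rotating left by a whole byte (8 bits) moves every byte one position
    // toward index 0 and wraps the first byte around to the end.
    const AESKey r = Lrot128(k, 8);
    for (u8 i = 0; i < 16; ++i)
        assert(r[i] == k[(i + 1) % 16]);

    // XOR with itself yields the all-zero key; adding zero is the identity.
    const AESKey zero = Xor128(k, k);
    assert(Add128(k, zero) == k);
    return 0;
}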
diff --git a/src/core/hw/aes/arithmetic128.h b/src/core/hw/aes/arithmetic128.h
deleted file mode 100644
index d670e2ce2..000000000
--- a/src/core/hw/aes/arithmetic128.h
+++ /dev/null
@@ -1,17 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"
#include "core/hw/aes/key.h"

namespace HW {
namespace AES {
AESKey Lrot128(const AESKey& in, u32 rot);
AESKey Add128(const AESKey& a, const AESKey& b);
AESKey Xor128(const AESKey& a, const AESKey& b);

} // namespace AES
} // namespace HW
diff --git a/src/core/hw/aes/ccm.h b/src/core/hw/aes/ccm.h
deleted file mode 100644
index bf4146e80..000000000
--- a/src/core/hw/aes/ccm.h
+++ /dev/null
@@ -1,40 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <cstddef>
#include <vector>
#include "common/common_types.h"

namespace HW {
namespace AES {

constexpr size_t CCM_NONCE_SIZE = 12;
constexpr size_t CCM_MAC_SIZE = 16;

using CCMNonce = std::array<u8, CCM_NONCE_SIZE>;

/**
 * Encrypts and adds a MAC to the given data using the AES-CCM algorithm.
 * @param pdata The plain text data to encrypt
 * @param nonce The nonce data to use for encryption
 * @param slot_id The slot ID of the key to use for encryption
 * @returns a vector of u8 containing the encrypted data with the MAC at the end
 */
std::vector<u8> EncryptSignCCM(const std::vector<u8>& pdata, const CCMNonce& nonce, size_t slot_id);

/**
 * Decrypts and verifies the MAC of the given data using the AES-CCM algorithm.
 * @param cipher The cipher text data to decrypt, with the MAC at the end to verify
 * @param nonce The nonce data to use for decryption
 * @param slot_id The slot ID of the key to use for decryption
 * @returns a vector of u8 containing the decrypted data; an empty vector if the verification fails
 */
std::vector<u8> DecryptVerifyCCM(const std::vector<u8>& cipher, const CCMNonce& nonce,
                                 size_t slot_id);

} // namespace AES
} // namespace HW
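A minimal usage sketch of the two entry points declared above. The function names come from the header; the slot choice, nonce, and payload below are purely illustrative, and the behavior when the slot holds no usable key is not specified here beyond the empty-vector failure case documented above:

// Hypothetical round-trip through the AES-CCM helpers declared above.
// Slot choice, nonce, and payload are illustrative only.
#include <vector>
#include "core/hw/aes/ccm.h"
#include "core/hw/aes/key.h"

std::vector<u8> CcmRoundTripSketch(const std::vector<u8>& plain) {
    using namespace HW::AES;
    InitKeys(); // load any preset keys from the user's sysdata directory

    const CCMNonce nonce = {}; // real callers derive a fresh nonce per message

    // Encrypt-then-MAC: the 16-byte MAC (CCM_MAC_SIZE) is appended to the ciphertext.
    const std::vector<u8> cipher = EncryptSignCCM(plain, nonce, KeySlotID::UDSDataKey);

    // Returns the plaintext on success, or an empty vector if MAC verification fails
    // (for example when slot 0x2D never received a usable normal key).
    return DecryptVerifyCCM(cipher, nonce, KeySlotID::UDSDataKey);
}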
diff --git a/src/core/hw/aes/key.cpp b/src/core/hw/aes/key.cpp
deleted file mode 100644
index 4e8a8a59a..000000000
--- a/src/core/hw/aes/key.cpp
+++ /dev/null
@@ -1,173 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <exception>
#include <sstream>
#include <boost/optional.hpp>
#include "common/common_paths.h"
#include "common/file_util.h"
#include "common/logging/log.h"
#include "common/string_util.h"
#include "core/hw/aes/arithmetic128.h"
#include "core/hw/aes/key.h"

namespace HW {
namespace AES {

namespace {

boost::optional<AESKey> generator_constant;

struct KeySlot {
    boost::optional<AESKey> x;
    boost::optional<AESKey> y;
    boost::optional<AESKey> normal;

    void SetKeyX(const AESKey& key) {
        x = key;
        if (y && generator_constant) {
            GenerateNormalKey();
        }
    }

    void SetKeyY(const AESKey& key) {
        y = key;
        if (x && generator_constant) {
            GenerateNormalKey();
        }
    }

    void SetNormalKey(const AESKey& key) {
        normal = key;
    }

    void GenerateNormalKey() {
        normal = Lrot128(Add128(Xor128(Lrot128(*x, 2), *y), *generator_constant), 87);
    }

    void Clear() {
        x.reset();
        y.reset();
        normal.reset();
    }
};

std::array<KeySlot, KeySlotID::MaxKeySlotID> key_slots;

void ClearAllKeys() {
    for (KeySlot& slot : key_slots) {
        slot.Clear();
    }
    generator_constant.reset();
}

AESKey HexToKey(const std::string& hex) {
    if (hex.size() < 32) {
        throw std::invalid_argument("hex string is too short");
    }

    AESKey key;
    for (size_t i = 0; i < key.size(); ++i) {
        key[i] = static_cast<u8>(std::stoi(hex.substr(i * 2, 2), 0, 16));
    }

    return key;
}

void LoadPresetKeys() {
    const std::string filepath = FileUtil::GetUserPath(D_SYSDATA_IDX) + AES_KEYS;
    FileUtil::CreateFullPath(filepath); // Create path if not already created
    std::ifstream file;
    OpenFStream(file, filepath, std::ios_base::in);
    if (!file) {
        return;
    }

    while (!file.eof()) {
        std::string line;
        std::getline(file, line);
        std::vector<std::string> parts;
        Common::SplitString(line, '=', parts);
        if (parts.size() != 2) {
            LOG_ERROR(HW_AES, "Failed to parse %s", line.c_str());
            continue;
        }

        const std::string& name = parts[0];
        AESKey key;
        try {
            key = HexToKey(parts[1]);
        } catch (const std::logic_error& e) {
            LOG_ERROR(HW_AES, "Invalid key %s: %s", parts[1].c_str(), e.what());
            continue;
        }

        if (name == "generator") {
            generator_constant = key;
            continue;
        }

        size_t slot_id;
        char key_type;
        if (std::sscanf(name.c_str(), "slot0x%zXKey%c", &slot_id, &key_type) != 2) {
            LOG_ERROR(HW_AES, "Invalid key name %s", name.c_str());
            continue;
        }

        if (slot_id >= MaxKeySlotID) {
            LOG_ERROR(HW_AES, "Out of range slot ID 0x%zX", slot_id);
            continue;
        }

        switch (key_type) {
        case 'X':
            key_slots.at(slot_id).SetKeyX(key);
            break;
        case 'Y':
            key_slots.at(slot_id).SetKeyY(key);
            break;
        case 'N':
            key_slots.at(slot_id).SetNormalKey(key);
            break;
        default:
            LOG_ERROR(HW_AES, "Invalid key type %c", key_type);
            break;
        }
    }
}

} // namespace

void InitKeys() {
    ClearAllKeys();
    LoadPresetKeys();
}

void SetGeneratorConstant(const AESKey& key) {
    generator_constant = key;
}

void SetKeyX(size_t slot_id, const AESKey& key) {
    key_slots.at(slot_id).SetKeyX(key);
}

void SetKeyY(size_t slot_id, const AESKey& key) {
    key_slots.at(slot_id).SetKeyY(key);
}

void SetNormalKey(size_t slot_id, const AESKey& key) {
    key_slots.at(slot_id).SetNormalKey(key);
}

bool IsNormalKeyAvailable(size_t slot_id) {
    return key_slots.at(slot_id).normal.is_initialized();
}

AESKey GetNormalKey(size_t slot_id) {
    return key_slots.at(slot_id).normal.value_or(AESKey{});
}

} // namespace AES
} // namespace HW
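As implemented above, a slot's normal key is derived from its X/Y halves with the keyscrambler N = Lrot128(Add128(Xor128(Lrot128(X, 2), Y), C), 87), where C is the generator constant, and the derivation runs as soon as X, Y, and C are all known. LoadPresetKeys reads lines of the form name=32-hex-digits from the AES_KEYS file under the user's sysdata directory, where name is either "generator" or "slot0x<hex>Key<T>" with T one of X, Y, N. A sketch of driving the same derivation through the public API; every key value below is a zero-filled placeholder, not real console key material:

// Hypothetical use of the key-slot API; placeholder key material only.
#include "core/hw/aes/key.h"

void KeyScramblerSketch() {
    using namespace HW::AES;

    const AESKey generator{}; // placeholder for the console's generator constant
    const AESKey key_x{};     // placeholder KeyX
    const AESKey key_y{};     // placeholder KeyY

    SetGeneratorConstant(generator);
    SetKeyX(KeySlotID::APTWrap, key_x);
    SetKeyY(KeySlotID::APTWrap, key_y); // the normal key is generated once X, Y and C are all set

    if (IsNormalKeyAvailable(KeySlotID::APTWrap)) {
        const AESKey normal = GetNormalKey(KeySlotID::APTWrap);
        (void)normal; // == Lrot128(Add128(Xor128(Lrot128(key_x, 2), key_y), generator), 87)
    }
}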
diff --git a/src/core/hw/aes/key.h b/src/core/hw/aes/key.h
deleted file mode 100644
index c9f1342f4..000000000
--- a/src/core/hw/aes/key.h
+++ /dev/null
@@ -1,37 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <cstddef>
#include "common/common_types.h"

namespace HW {
namespace AES {

enum KeySlotID : size_t {
    // AES Keyslot used to generate the UDS data frame CCMP key.
    UDSDataKey = 0x2D,
    APTWrap = 0x31,

    MaxKeySlotID = 0x40,
};

constexpr size_t AES_BLOCK_SIZE = 16;

using AESKey = std::array<u8, AES_BLOCK_SIZE>;

void InitKeys();

void SetGeneratorConstant(const AESKey& key);
void SetKeyX(size_t slot_id, const AESKey& key);
void SetKeyY(size_t slot_id, const AESKey& key);
void SetNormalKey(size_t slot_id, const AESKey& key);

bool IsNormalKeyAvailable(size_t slot_id);
AESKey GetNormalKey(size_t slot_id);

} // namespace AES
} // namespace HW
diff --git a/src/core/hw/gpu.cpp b/src/core/hw/gpu.cpp
deleted file mode 100644
index 47ab14ae9..000000000
--- a/src/core/hw/gpu.cpp
+++ /dev/null
@@ -1,573 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cstring>
#include <numeric>
#include <type_traits>
#include "common/alignment.h"
#include "common/color.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/vector_math.h"
#include "core/core_timing.h"
#include "core/hle/service/gsp_gpu.h"
#include "core/hw/gpu.h"
#include "core/hw/hw.h"
#include "core/memory.h"
#include "core/tracer/recorder.h"
#include "video_core/command_processor.h"
#include "video_core/debug_utils/debug_utils.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_base.h"
#include "video_core/utils.h"
#include "video_core/video_core.h"

namespace GPU {

Regs g_regs;

/// 268MHz CPU clocks / 60Hz frames per second
const u64 frame_ticks = static_cast<u64>(BASE_CLOCK_RATE / SCREEN_REFRESH_RATE);
/// Event id for CoreTiming
static CoreTiming::EventType* vblank_event;

template <typename T>
inline void Read(T& var, const u32 raw_addr) {
    u32 addr = raw_addr - HW::VADDR_GPU;
    u32 index = addr / 4;

    // Reads other than u32 are untested, so I'd rather have them abort than silently fail
    if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
        LOG_ERROR(HW_GPU, "unknown Read%lu @ 0x%08X", sizeof(var) * 8, addr);
        return;
    }

    var = g_regs[addr / 4];
}

static Math::Vec4<u8> DecodePixel(Regs::PixelFormat input_format, const u8* src_pixel) {
    switch (input_format) {
    case Regs::PixelFormat::RGBA8:
        return Color::DecodeRGBA8(src_pixel);

    case Regs::PixelFormat::RGB8:
        return Color::DecodeRGB8(src_pixel);

    case Regs::PixelFormat::RGB565:
        return Color::DecodeRGB565(src_pixel);

    case Regs::PixelFormat::RGB5A1:
        return Color::DecodeRGB5A1(src_pixel);

    case Regs::PixelFormat::RGBA4:
        return Color::DecodeRGBA4(src_pixel);

    default:
        LOG_ERROR(HW_GPU, "Unknown source framebuffer format %x", input_format);
        return {0, 0, 0, 0};
    }
}

MICROPROFILE_DEFINE(GPU_DisplayTransfer, "GPU", "DisplayTransfer", MP_RGB(100, 100, 255));
MICROPROFILE_DEFINE(GPU_CmdlistProcessing, "GPU", "Cmdlist Processing", MP_RGB(100, 255, 100));

static void MemoryFill(const Regs::MemoryFillConfig& config) {
    const PAddr start_addr = config.GetStartAddress();
    const PAddr end_addr = config.GetEndAddress();

    // TODO: do hwtest with these cases
    if (!Memory::IsValidPhysicalAddress(start_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid start address 0x%08X", start_addr);
        return;
    }

    if (!Memory::IsValidPhysicalAddress(end_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid end address 0x%08X", end_addr);
        return;
    }

    if (end_addr <= start_addr) {
        LOG_CRITICAL(HW_GPU, "invalid memory range from 0x%08X to 0x%08X", start_addr, end_addr);
        return;
    }

    u8* start = Memory::GetPhysicalPointer(start_addr);
    u8* end = Memory::GetPhysicalPointer(end_addr);

    // TODO: Consider always accelerating and returning vector of
    // regions that the accelerated fill did not cover to
    // reduce/eliminate the fill that the cpu has to do.
    // This would also mean that the flush below is not needed.
    // Fill should first flush all surfaces that touch but are
    // not completely within the fill range.
    // Then fill all completely covered surfaces, and return the
    // regions that were between surfaces or within the touching
    // ones for cpu to manually fill here.
    if (VideoCore::g_renderer->Rasterizer()->AccelerateFill(config))
        return;

    Memory::RasterizerFlushAndInvalidateRegion(config.GetStartAddress(),
                                               config.GetEndAddress() - config.GetStartAddress());

    if (config.fill_24bit) {
        // fill with 24-bit values
        for (u8* ptr = start; ptr < end; ptr += 3) {
            ptr[0] = config.value_24bit_r;
            ptr[1] = config.value_24bit_g;
            ptr[2] = config.value_24bit_b;
        }
    } else if (config.fill_32bit) {
        // fill with 32-bit values
        if (end > start) {
            u32 value = config.value_32bit;
            size_t len = (end - start) / sizeof(u32);
            for (size_t i = 0; i < len; ++i)
                memcpy(&start[i * sizeof(u32)], &value, sizeof(u32));
        }
    } else {
        // fill with 16-bit values
        u16 value_16bit = config.value_16bit.Value();
        for (u8* ptr = start; ptr < end; ptr += sizeof(u16))
            memcpy(ptr, &value_16bit, sizeof(u16));
    }
}

static void DisplayTransfer(const Regs::DisplayTransferConfig& config) {
    const PAddr src_addr = config.GetPhysicalInputAddress();
    const PAddr dst_addr = config.GetPhysicalOutputAddress();

    // TODO: do hwtest with these cases
    if (!Memory::IsValidPhysicalAddress(src_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid input address 0x%08X", src_addr);
        return;
    }

    if (!Memory::IsValidPhysicalAddress(dst_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid output address 0x%08X", dst_addr);
        return;
    }

    if (config.input_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero input width");
        return;
    }

    if (config.input_height == 0) {
        LOG_CRITICAL(HW_GPU, "zero input height");
        return;
    }

    if (config.output_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero output width");
        return;
    }

    if (config.output_height == 0) {
        LOG_CRITICAL(HW_GPU, "zero output height");
        return;
    }

    if (VideoCore::g_renderer->Rasterizer()->AccelerateDisplayTransfer(config))
        return;

    u8* src_pointer = Memory::GetPhysicalPointer(src_addr);
    u8* dst_pointer = Memory::GetPhysicalPointer(dst_addr);

    if (config.scaling > config.ScaleXY) {
        LOG_CRITICAL(HW_GPU, "Unimplemented display transfer scaling mode %u",
                     config.scaling.Value());
        UNIMPLEMENTED();
        return;
    }

    if (config.input_linear && config.scaling != config.NoScale) {
        LOG_CRITICAL(HW_GPU, "Scaling is only implemented on tiled input");
        UNIMPLEMENTED();
        return;
    }

    int horizontal_scale = config.scaling != config.NoScale ? 1 : 0;
    int vertical_scale = config.scaling == config.ScaleXY ? 1 : 0;

    u32 output_width = config.output_width >> horizontal_scale;
    u32 output_height = config.output_height >> vertical_scale;

    u32 input_size =
        config.input_width * config.input_height * GPU::Regs::BytesPerPixel(config.input_format);
    u32 output_size = output_width * output_height * GPU::Regs::BytesPerPixel(config.output_format);

    Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(), input_size);
    Memory::RasterizerFlushAndInvalidateRegion(config.GetPhysicalOutputAddress(), output_size);

    for (u32 y = 0; y < output_height; ++y) {
        for (u32 x = 0; x < output_width; ++x) {
            Math::Vec4<u8> src_color;

            // Calculate the [x,y] position of the input image
            // based on the current output position and the scale
            u32 input_x = x << horizontal_scale;
            u32 input_y = y << vertical_scale;

            u32 output_y;
            if (config.flip_vertically) {
                // Flip the y value of the output data,
                // we do this after calculating the [x,y] position of the input image
                // to account for the scaling options.
                output_y = output_height - y - 1;
            } else {
                output_y = y;
            }

            u32 dst_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.output_format);
            u32 src_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.input_format);
            u32 src_offset;
            u32 dst_offset;

            if (config.input_linear) {
                if (!config.dont_swizzle) {
                    // Interpret the input as linear and the output as tiled
                    u32 coarse_y = output_y & ~7;
                    u32 stride = output_width * dst_bytes_per_pixel;

                    src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                    dst_offset = VideoCore::GetMortonOffset(x, output_y, dst_bytes_per_pixel) +
                                 coarse_y * stride;
                } else {
                    // Both input and output are linear
                    src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                    dst_offset = (x + output_y * output_width) * dst_bytes_per_pixel;
                }
            } else {
                if (!config.dont_swizzle) {
                    // Interpret the input as tiled and the output as linear
                    u32 coarse_y = input_y & ~7;
                    u32 stride = config.input_width * src_bytes_per_pixel;

                    src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) +
                                 coarse_y * stride;
                    dst_offset = (x + output_y * output_width) * dst_bytes_per_pixel;
                } else {
                    // Both input and output are tiled
                    u32 out_coarse_y = output_y & ~7;
                    u32 out_stride = output_width * dst_bytes_per_pixel;

                    u32 in_coarse_y = input_y & ~7;
                    u32 in_stride = config.input_width * src_bytes_per_pixel;

                    src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) +
                                 in_coarse_y * in_stride;
                    dst_offset = VideoCore::GetMortonOffset(x, output_y, dst_bytes_per_pixel) +
                                 out_coarse_y * out_stride;
                }
            }

            const u8* src_pixel = src_pointer + src_offset;
            src_color = DecodePixel(config.input_format, src_pixel);
            if (config.scaling == config.ScaleX) {
                Math::Vec4<u8> pixel =
                    DecodePixel(config.input_format, src_pixel + src_bytes_per_pixel);
                src_color = ((src_color + pixel) / 2).Cast<u8>();
            } else if (config.scaling == config.ScaleXY) {
                Math::Vec4<u8> pixel1 =
                    DecodePixel(config.input_format, src_pixel + 1 * src_bytes_per_pixel);
                Math::Vec4<u8> pixel2 =
                    DecodePixel(config.input_format, src_pixel + 2 * src_bytes_per_pixel);
                Math::Vec4<u8> pixel3 =
                    DecodePixel(config.input_format, src_pixel + 3 * src_bytes_per_pixel);
                src_color = (((src_color + pixel1) + (pixel2 + pixel3)) / 4).Cast<u8>();
            }

            u8* dst_pixel = dst_pointer + dst_offset;
            switch (config.output_format) {
            case Regs::PixelFormat::RGBA8:
                Color::EncodeRGBA8(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGB8:
                Color::EncodeRGB8(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGB565:
                Color::EncodeRGB565(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGB5A1:
                Color::EncodeRGB5A1(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGBA4:
                Color::EncodeRGBA4(src_color, dst_pixel);
                break;

            default:
                LOG_ERROR(HW_GPU, "Unknown destination framebuffer format %x",
                          config.output_format.Value());
                break;
            }
        }
    }
}
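A note on the swizzled paths above: GetMortonOffset locates a pixel inside the 8x8-tiled layout the 3DS uses, while the coarse_y * stride terms skip the full 8-pixel-high tile rows above the target pixel. The sketch below only illustrates Z-order (Morton) indexing inside a single 8x8 tile under that assumption; it is not VideoCore's implementation.

// Illustrative sketch: Z-order (Morton) index of a pixel inside an 8x8 tile.
// Not VideoCore::GetMortonOffset; it only demonstrates the bit interleaving idea.
#include "common/common_types.h"

static u32 MortonWithin8x8(u32 x, u32 y) {
    x &= 7;
    y &= 7;
    u32 index = 0;
    for (u32 bit = 0; bit < 3; ++bit) {
        index |= ((x >> bit) & 1) << (2 * bit);     // x bits occupy even positions
        index |= ((y >> bit) & 1) << (2 * bit + 1); // y bits occupy odd positions
    }
    return index; // 0..63; multiply by bytes-per-pixel for a byte offset within the tile
}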

static void TextureCopy(const Regs::DisplayTransferConfig& config) {
    const PAddr src_addr = config.GetPhysicalInputAddress();
    const PAddr dst_addr = config.GetPhysicalOutputAddress();

    // TODO: do hwtest with invalid addresses
    if (!Memory::IsValidPhysicalAddress(src_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid input address 0x%08X", src_addr);
        return;
    }

    if (!Memory::IsValidPhysicalAddress(dst_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid output address 0x%08X", dst_addr);
        return;
    }

    if (VideoCore::g_renderer->Rasterizer()->AccelerateTextureCopy(config))
        return;

    u8* src_pointer = Memory::GetPhysicalPointer(src_addr);
    u8* dst_pointer = Memory::GetPhysicalPointer(dst_addr);

    u32 remaining_size = Common::AlignDown(config.texture_copy.size, 16);

    if (remaining_size == 0) {
        LOG_CRITICAL(HW_GPU, "zero size. Real hardware freezes on this.");
        return;
    }

    u32 input_gap = config.texture_copy.input_gap * 16;
    u32 output_gap = config.texture_copy.output_gap * 16;

    // Zero gap means contiguous input/output even if width = 0. To avoid an infinite loop below,
    // width is assigned the total size if gap = 0.
    u32 input_width = input_gap == 0 ? remaining_size : config.texture_copy.input_width * 16;
    u32 output_width = output_gap == 0 ? remaining_size : config.texture_copy.output_width * 16;

    if (input_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero input width. Real hardware freezes on this.");
        return;
    }

    if (output_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero output width. Real hardware freezes on this.");
        return;
    }

    size_t contiguous_input_size =
        config.texture_copy.size / input_width * (input_width + input_gap);
    Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(),
                                  static_cast<u32>(contiguous_input_size));

    size_t contiguous_output_size =
        config.texture_copy.size / output_width * (output_width + output_gap);
    Memory::RasterizerFlushAndInvalidateRegion(config.GetPhysicalOutputAddress(),
                                               static_cast<u32>(contiguous_output_size));

    u32 remaining_input = input_width;
    u32 remaining_output = output_width;
    while (remaining_size > 0) {
        u32 copy_size = std::min({remaining_input, remaining_output, remaining_size});

        std::memcpy(dst_pointer, src_pointer, copy_size);
        src_pointer += copy_size;
        dst_pointer += copy_size;

        remaining_input -= copy_size;
        remaining_output -= copy_size;
        remaining_size -= copy_size;

        if (remaining_input == 0) {
            remaining_input = input_width;
            src_pointer += input_gap;
        }
        if (remaining_output == 0) {
            remaining_output = output_width;
            dst_pointer += output_gap;
        }
    }
}
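The texture_copy fields describe both surfaces as runs of width bytes separated by gap bytes, all in 16-byte units, so a rectangular window of a linear surface maps directly onto them. A worked parameter example under illustrative sizes (RGBA8, 4 bytes per pixel):

// Worked example (illustrative sizes): copy a 64x32-pixel RGBA8 window out of a
// 256-pixel-wide linear surface with TextureCopy. Fields are in 16-byte units,
// except size, which is in bytes with the low 4 bits ignored.
#include "common/common_types.h"

constexpr u32 kBytesPerPixel = 4;  // RGBA8
constexpr u32 kSurfaceWidth = 256; // pixels per line in the source surface
constexpr u32 kWindowWidth = 64;   // pixels to copy per line
constexpr u32 kWindowHeight = 32;  // lines to copy

constexpr u32 kCopySize = kWindowWidth * kWindowHeight * kBytesPerPixel;        // texture_copy.size
constexpr u32 kInputWidth = kWindowWidth * kBytesPerPixel / 16;                 // texture_copy.input_width
constexpr u32 kInputGap = (kSurfaceWidth - kWindowWidth) * kBytesPerPixel / 16; // texture_copy.input_gap

static_assert(kCopySize == 8192 && kInputWidth == 16 && kInputGap == 48,
              "64x32 RGBA8 window: 8192 bytes copied, 16-unit lines, 48-unit gaps");

The output_width and output_gap fields would be chosen the same way for the destination surface.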

template <typename T>
inline void Write(u32 addr, const T data) {
    addr -= HW::VADDR_GPU;
    u32 index = addr / 4;

    // Writes other than u32 are untested, so I'd rather have them abort than silently fail
    if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
        LOG_ERROR(HW_GPU, "unknown Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, addr);
        return;
    }

    g_regs[index] = static_cast<u32>(data);

    switch (index) {

    // Memory fills are triggered once the fill value is written.
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[0].trigger, 0x00004 + 0x3):
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[1].trigger, 0x00008 + 0x3): {
        const bool is_second_filler = (index != GPU_REG_INDEX(memory_fill_config[0].trigger));
        auto& config = g_regs.memory_fill_config[is_second_filler];

        if (config.trigger) {
            MemoryFill(config);
            LOG_TRACE(HW_GPU, "MemoryFill from 0x%08x to 0x%08x", config.GetStartAddress(),
                      config.GetEndAddress());

            // It seems that it won't signal interrupt if "address_start" is zero.
            // TODO: hwtest this
            if (config.GetStartAddress() != 0) {
                if (!is_second_filler) {
                    //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PSC0);
                } else {
                    //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PSC1);
                }
            }

            // Reset "trigger" flag and set the "finish" flag
            // NOTE: This was confirmed to happen on hardware even if "address_start" is zero.
            config.trigger.Assign(0);
            config.finished.Assign(1);
        }
        break;
    }

    case GPU_REG_INDEX(display_transfer_config.trigger): {
        MICROPROFILE_SCOPE(GPU_DisplayTransfer);

        const auto& config = g_regs.display_transfer_config;
        if (config.trigger & 1) {

            if (Pica::g_debug_context)
                Pica::g_debug_context->OnEvent(Pica::DebugContext::Event::IncomingDisplayTransfer,
                                               nullptr);

            if (config.is_texture_copy) {
                TextureCopy(config);
                LOG_TRACE(HW_GPU, "TextureCopy: 0x%X bytes from 0x%08X(%u+%u)-> "
                                  "0x%08X(%u+%u), flags 0x%08X",
                          config.texture_copy.size, config.GetPhysicalInputAddress(),
                          config.texture_copy.input_width * 16, config.texture_copy.input_gap * 16,
                          config.GetPhysicalOutputAddress(), config.texture_copy.output_width * 16,
                          config.texture_copy.output_gap * 16, config.flags);
            } else {
                DisplayTransfer(config);
                LOG_TRACE(HW_GPU, "DisplayTransfer: 0x%08x(%ux%u)-> "
                                  "0x%08x(%ux%u), dst format %x, flags 0x%08X",
                          config.GetPhysicalInputAddress(), config.input_width.Value(),
                          config.input_height.Value(), config.GetPhysicalOutputAddress(),
                          config.output_width.Value(), config.output_height.Value(),
                          config.output_format.Value(), config.flags);
            }

            g_regs.display_transfer_config.trigger = 0;
            //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PPF);
        }
        break;
    }

    // Seems like writing to this register triggers processing
    case GPU_REG_INDEX(command_processor_config.trigger): {
        const auto& config = g_regs.command_processor_config;
        if (config.trigger & 1) {
            MICROPROFILE_SCOPE(GPU_CmdlistProcessing);

            u32* buffer = (u32*)Memory::GetPhysicalPointer(config.GetPhysicalAddress());

            if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
                Pica::g_debug_context->recorder->MemoryAccessed((u8*)buffer, config.size,
                                                                config.GetPhysicalAddress());
            }

            Pica::CommandProcessor::ProcessCommandList(buffer, config.size);

            g_regs.command_processor_config.trigger = 0;
        }
        break;
    }

    default:
        break;
    }

    // Notify tracer about the register write
    // This is happening *after* handling the write to make sure we properly catch all memory reads.
    if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
        // addr + GPU VBase - IO VBase + IO PBase
        Pica::g_debug_context->recorder->RegisterWritten<T>(
            addr + 0x1EF00000 - 0x1EC00000 + 0x10100000, data);
    }
}

// Explicitly instantiate template functions because we aren't defining this in the header:

template void Read<u64>(u64& var, const u32 addr);
template void Read<u32>(u32& var, const u32 addr);
template void Read<u16>(u16& var, const u32 addr);
template void Read<u8>(u8& var, const u32 addr);

template void Write<u64>(u32 addr, const u64 data);
template void Write<u32>(u32 addr, const u32 data);
template void Write<u16>(u32 addr, const u16 data);
template void Write<u8>(u32 addr, const u8 data);

/// Update hardware
static void VBlankCallback(u64 userdata, int cycles_late) {
    //VideoCore::g_renderer->SwapBuffers();

    //// Signal to GSP that GPU interrupt has occurred
    //// TODO(yuriks): hwtest to determine if PDC0 is for the Top screen and PDC1 for the Sub
    //// screen, or if both use the same interrupts and these two instead determine the
    //// beginning and end of the VBlank period. If needed, split the interrupt firing into
    //// two different intervals.
    //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PDC0);
    //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PDC1);

    // Reschedule recurrent event
    CoreTiming::ScheduleEvent(frame_ticks - cycles_late, vblank_event);
}

/// Initialize hardware
void Init() {
    memset(&g_regs, 0, sizeof(g_regs));

    auto& framebuffer_top = g_regs.framebuffer_config[0];
    auto& framebuffer_sub = g_regs.framebuffer_config[1];

    // Setup default framebuffer addresses (located in VRAM)
    // .. or at least these are the ones used by system applets.
    // There's probably a smarter way to come up with addresses
    // like this which does not require hardcoding.
    framebuffer_top.address_left1 = 0x181E6000;
    framebuffer_top.address_left2 = 0x1822C800;
    framebuffer_top.address_right1 = 0x18273000;
    framebuffer_top.address_right2 = 0x182B9800;
    framebuffer_sub.address_left1 = 0x1848F000;
    framebuffer_sub.address_left2 = 0x184C7800;

    framebuffer_top.width.Assign(240);
    framebuffer_top.height.Assign(400);
    framebuffer_top.stride = 3 * 240;
    framebuffer_top.color_format.Assign(Regs::PixelFormat::RGB8);
    framebuffer_top.active_fb = 0;

    framebuffer_sub.width.Assign(240);
    framebuffer_sub.height.Assign(320);
    framebuffer_sub.stride = 3 * 240;
    framebuffer_sub.color_format.Assign(Regs::PixelFormat::RGB8);
    framebuffer_sub.active_fb = 0;

    vblank_event = CoreTiming::RegisterEvent("GPU::VBlankCallback", VBlankCallback);
    CoreTiming::ScheduleEvent(frame_ticks, vblank_event);

    LOG_DEBUG(HW_GPU, "initialized OK");
}

/// Shutdown hardware
void Shutdown() {
    LOG_DEBUG(HW_GPU, "shutdown OK");
}

} // namespace
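All register traffic funnels through the templated Read/Write handlers above: the word index is (address - HW::VADDR_GPU) / 4, and writes to a handful of indices have side effects (memory fills, display transfers, command-list processing). A minimal sketch of kicking a display transfer through that path; the geometry and formats are illustrative, and real requests normally arrive via the GSP service rather than a direct call like this:

// Hypothetical driver for the Write<> handler above; illustrative values only.
#include "core/hw/gpu.h"
#include "core/hw/hw.h"

static void WriteGpuReg(u32 index, u32 value) {
    // Registers are u32 words starting at the GPU MMIO base.
    GPU::Write<u32>(HW::VADDR_GPU + index * 4, value);
}

static void KickDisplayTransferSketch(u32 src_phys, u32 dst_phys) {
    // Address registers store the physical address divided by 8 (see Regs::DecodeAddressRegister).
    WriteGpuReg(GPU_REG_INDEX(display_transfer_config.input_address), src_phys >> 3);
    WriteGpuReg(GPU_REG_INDEX(display_transfer_config.output_address), dst_phys >> 3);

    // 240x400 input and output, matching the default top-screen framebuffer dimensions.
    WriteGpuReg(GPU_REG_INDEX(display_transfer_config.input_size), (400u << 16) | 240u);
    WriteGpuReg(GPU_REG_INDEX(display_transfer_config.output_size), (400u << 16) | 240u);

    // All-zero flags: RGBA8 in and out, tiled input, no flip, no scaling.
    WriteGpuReg(GPU_REG_INDEX(display_transfer_config.flags), 0);

    // Writing the trigger register is what actually starts the transfer.
    WriteGpuReg(GPU_REG_INDEX(display_transfer_config.trigger), 1);
}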
diff --git a/src/core/hw/gpu.h b/src/core/hw/gpu.h
deleted file mode 100644
index e3d0a0e08..000000000
--- a/src/core/hw/gpu.h
+++ /dev/null
@@ -1,334 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <cstddef>
#include <type_traits>
#include "common/assert.h"
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"

namespace GPU {

constexpr float SCREEN_REFRESH_RATE = 60;

// Returns index corresponding to the Regs member labeled by field_name
// TODO: Due to Visual studio bug 209229, offsetof does not return constant expressions
// when used with array elements (e.g. GPU_REG_INDEX(memory_fill_config[0])).
// For details cf.
// https://connect.microsoft.com/VisualStudio/feedback/details/209229/offsetof-does-not-produce-a-constant-expression-for-array-members
// Hopefully, this will be fixed sometime in the future.
// For lack of better alternatives, we currently hardcode the offsets when constant
// expressions are needed via GPU_REG_INDEX_WORKAROUND (on sane compilers, static_asserts
// will then make sure the offsets indeed match the automatically calculated ones).
#define GPU_REG_INDEX(field_name) (offsetof(GPU::Regs, field_name) / sizeof(u32))
#if defined(_MSC_VER)
#define GPU_REG_INDEX_WORKAROUND(field_name, backup_workaround_index) (backup_workaround_index)
#else
// NOTE: Yeah, hacking in a static_assert here just to workaround the lacking MSVC compiler
// really is this annoying. This macro just forwards its first argument to GPU_REG_INDEX
// and then performs a (no-op) cast to size_t iff the second argument matches the expected
// field offset. Otherwise, the compiler will fail to compile this code.
#define GPU_REG_INDEX_WORKAROUND(field_name, backup_workaround_index) \
    ((typename std::enable_if<backup_workaround_index == GPU_REG_INDEX(field_name), size_t>::type) \
         GPU_REG_INDEX(field_name))
#endif

// MMIO region 0x1EFxxxxx
struct Regs {

// helper macro to make sure the defined structures are of the expected size.
#if defined(_MSC_VER)
// TODO: MSVC does not support using sizeof() on non-static data members even though this
// is technically allowed since C++11. This macro should be enabled once MSVC adds
// support for that.
#define ASSERT_MEMBER_SIZE(name, size_in_bytes)
#else
#define ASSERT_MEMBER_SIZE(name, size_in_bytes) \
    static_assert(sizeof(name) == size_in_bytes, \
                  "Structure size and register block length don't match")
#endif

    // Components are laid out in reverse byte order, most significant bits first.
    enum class PixelFormat : u32 {
        RGBA8 = 0,
        RGB8 = 1,
        RGB565 = 2,
        RGB5A1 = 3,
        RGBA4 = 4,
    };

    /**
     * Returns the number of bytes per pixel.
     */
    static int BytesPerPixel(PixelFormat format) {
        switch (format) {
        case PixelFormat::RGBA8:
            return 4;
        case PixelFormat::RGB8:
            return 3;
        case PixelFormat::RGB565:
        case PixelFormat::RGB5A1:
        case PixelFormat::RGBA4:
            return 2;
        }

        UNREACHABLE();
    }

    INSERT_PADDING_WORDS(0x4);

    struct MemoryFillConfig {
        u32 address_start;
        u32 address_end;

        union {
            u32 value_32bit;

            BitField<0, 16, u32> value_16bit;

            // TODO: Verify component order
            BitField<0, 8, u32> value_24bit_r;
            BitField<8, 8, u32> value_24bit_g;
            BitField<16, 8, u32> value_24bit_b;
        };

        union {
            u32 control;

            // Setting this field to 1 triggers the memory fill.
            // This field also acts as a status flag, and gets reset to 0 upon completion.
            BitField<0, 1, u32> trigger;

            // Set to 1 upon completion.
            BitField<1, 1, u32> finished;

            // If both of these bits are unset, then it will fill the memory with a 16 bit value
            // 1: fill with 24-bit wide values
            BitField<8, 1, u32> fill_24bit;
            // 1: fill with 32-bit wide values
            BitField<9, 1, u32> fill_32bit;
        };

        inline u32 GetStartAddress() const {
            return DecodeAddressRegister(address_start);
        }

        inline u32 GetEndAddress() const {
            return DecodeAddressRegister(address_end);
        }
    } memory_fill_config[2];
    ASSERT_MEMBER_SIZE(memory_fill_config[0], 0x10);

    INSERT_PADDING_WORDS(0x10b);

    struct FramebufferConfig {
        union {
            u32 size;

            BitField<0, 16, u32> width;
            BitField<16, 16, u32> height;
        };

        INSERT_PADDING_WORDS(0x2);

        u32 address_left1;
        u32 address_left2;

        union {
            u32 format;

            BitField<0, 3, PixelFormat> color_format;
        };

        INSERT_PADDING_WORDS(0x1);

        union {
            u32 active_fb;

            // 0: Use parameters ending with "1"
            // 1: Use parameters ending with "2"
            BitField<0, 1, u32> second_fb_active;
        };

        INSERT_PADDING_WORDS(0x5);

        // Distance between two pixel rows, in bytes
        u32 stride;

        u32 address_right1;
        u32 address_right2;

        INSERT_PADDING_WORDS(0x30);
    } framebuffer_config[2];
    ASSERT_MEMBER_SIZE(framebuffer_config[0], 0x100);

    INSERT_PADDING_WORDS(0x169);

    struct DisplayTransferConfig {
        u32 input_address;
        u32 output_address;

        inline u32 GetPhysicalInputAddress() const {
            return DecodeAddressRegister(input_address);
        }

        inline u32 GetPhysicalOutputAddress() const {
            return DecodeAddressRegister(output_address);
        }

        union {
            u32 output_size;

            BitField<0, 16, u32> output_width;
            BitField<16, 16, u32> output_height;
        };

        union {
            u32 input_size;

            BitField<0, 16, u32> input_width;
            BitField<16, 16, u32> input_height;
        };

        enum ScalingMode : u32 {
            NoScale = 0, // Doesn't scale the image
            ScaleX = 1,  // Downscales the image in half in the X axis and applies a box filter
            ScaleXY =
                2, // Downscales the image in half in both the X and Y axes and applies a box filter
        };

        union {
            u32 flags;

            BitField<0, 1, u32> flip_vertically; // flips input data vertically
            BitField<1, 1, u32> input_linear;    // Converts from linear to tiled format
            BitField<2, 1, u32> crop_input_lines;
            BitField<3, 1, u32> is_texture_copy; // Copies the data without performing any
                                                 // processing and respecting texture copy fields
            BitField<5, 1, u32> dont_swizzle;
            BitField<8, 3, PixelFormat> input_format;
            BitField<12, 3, PixelFormat> output_format;
            /// Uses some kind of 32x32 block swizzling mode, instead of the usual 8x8 one.
            BitField<16, 1, u32> block_32;        // TODO(yuriks): unimplemented
            BitField<24, 2, ScalingMode> scaling; // Determines the scaling mode of the transfer
        };

        INSERT_PADDING_WORDS(0x1);

        // it seems that writing to this field triggers the display transfer
        u32 trigger;

        INSERT_PADDING_WORDS(0x1);

        struct {
            u32 size; // The lower 4 bits are ignored

            union {
                u32 input_size;

                BitField<0, 16, u32> input_width;
                BitField<16, 16, u32> input_gap;
            };

            union {
                u32 output_size;

                BitField<0, 16, u32> output_width;
                BitField<16, 16, u32> output_gap;
            };
        } texture_copy;
    } display_transfer_config;
    ASSERT_MEMBER_SIZE(display_transfer_config, 0x2c);

    INSERT_PADDING_WORDS(0x32D);

    struct {
        // command list size (in bytes)
        u32 size;

        INSERT_PADDING_WORDS(0x1);

        // command list address
        u32 address;

        INSERT_PADDING_WORDS(0x1);

        // it seems that writing to this field triggers command list processing
        u32 trigger;

        inline u32 GetPhysicalAddress() const {
            return DecodeAddressRegister(address);
        }
    } command_processor_config;
    ASSERT_MEMBER_SIZE(command_processor_config, 0x14);

    INSERT_PADDING_WORDS(0x9c3);

    static constexpr size_t NumIds() {
        return sizeof(Regs) / sizeof(u32);
    }

    const u32& operator[](int index) const {
        const u32* content = reinterpret_cast<const u32*>(this);
        return content[index];
    }

    u32& operator[](int index) {
        u32* content = reinterpret_cast<u32*>(this);
        return content[index];
    }

#undef ASSERT_MEMBER_SIZE

private:
    /*
     * Most physical addresses which GPU registers refer to are 8-byte aligned.
     * This function should be used to get the address from a raw register value.
     */
    static inline u32 DecodeAddressRegister(u32 register_value) {
        return register_value * 8;
    }
};
static_assert(std::is_standard_layout<Regs>::value, "Structure does not use standard layout");

// TODO: MSVC does not support using offsetof() on non-static data members even though this
// is technically allowed since C++11. This macro should be enabled once MSVC adds
// support for that.
#ifndef _MSC_VER
#define ASSERT_REG_POSITION(field_name, position) \
    static_assert(offsetof(Regs, field_name) == position * 4, \
                  "Field " #field_name " has invalid position")

ASSERT_REG_POSITION(memory_fill_config[0], 0x00004);
ASSERT_REG_POSITION(memory_fill_config[1], 0x00008);
ASSERT_REG_POSITION(framebuffer_config[0], 0x00117);
ASSERT_REG_POSITION(framebuffer_config[1], 0x00157);
ASSERT_REG_POSITION(display_transfer_config, 0x00300);
ASSERT_REG_POSITION(command_processor_config, 0x00638);

#undef ASSERT_REG_POSITION
#endif // !defined(_MSC_VER)

// The total number of registers is chosen arbitrarily, but let's make sure it's not some odd value
// anyway.
static_assert(sizeof(Regs) == 0x1000 * sizeof(u32), "Invalid total size of register set");

extern Regs g_regs;

template <typename T>
void Read(T& var, const u32 addr);

template <typename T>
void Write(u32 addr, const T data);

/// Initialize hardware
void Init();

/// Shutdown hardware
void Shutdown();

} // namespace
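Putting the layout above together: a register's byte offset from the MMIO base is 4 times its word index, and the fill, transfer, and command-list address registers store physical addresses divided by 8, which DecodeAddressRegister multiplies back. A small compile-time sketch of that arithmetic; the helper names and the VRAM address are illustrative only:

// Compile-time sketch of the index and address arithmetic described above.
#include "common/common_types.h"

// A register's byte offset from the MMIO base is 4 * its word index.
constexpr u32 RegByteOffset(u32 word_index) {
    return word_index * static_cast<u32>(sizeof(u32));
}
// display_transfer_config sits at word 0x300 (see ASSERT_REG_POSITION above),
// i.e. 0xC00 bytes into the register block.
static_assert(RegByteOffset(0x300) == 0xC00, "unexpected display transfer offset");

// Address registers hold the physical address divided by 8; decoding multiplies back.
constexpr u32 EncodeAddressRegister(u32 physical) {
    return physical / 8;
}
static_assert(EncodeAddressRegister(0x18000000) * 8 == 0x18000000,
              "8-byte-aligned addresses survive the encode/decode round trip");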