Diffstat (limited to 'src/core/hw/gpu.cpp')
 -rw-r--r--   src/core/hw/gpu.cpp   573
 1 file changed, 0 insertions, 573 deletions

diff --git a/src/core/hw/gpu.cpp b/src/core/hw/gpu.cpp
deleted file mode 100644
index 47ab14ae9..000000000
--- a/src/core/hw/gpu.cpp
+++ /dev/null
@@ -1,573 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cstring>
#include <numeric>
#include <type_traits>
#include "common/alignment.h"
#include "common/color.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/vector_math.h"
#include "core/core_timing.h"
#include "core/hle/service/gsp_gpu.h"
#include "core/hw/gpu.h"
#include "core/hw/hw.h"
#include "core/memory.h"
#include "core/tracer/recorder.h"
#include "video_core/command_processor.h"
#include "video_core/debug_utils/debug_utils.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_base.h"
#include "video_core/utils.h"
#include "video_core/video_core.h"

namespace GPU {

Regs g_regs;

/// 268MHz CPU clocks / 60Hz frames per second
const u64 frame_ticks = static_cast<u64>(BASE_CLOCK_RATE / SCREEN_REFRESH_RATE);
/// Event id for CoreTiming
static CoreTiming::EventType* vblank_event;

template <typename T>
inline void Read(T& var, const u32 raw_addr) {
    u32 addr = raw_addr - HW::VADDR_GPU;
    u32 index = addr / 4;

    // Reads other than u32 are untested, so I'd rather have them abort than silently fail
    if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
        LOG_ERROR(HW_GPU, "unknown Read%lu @ 0x%08X", sizeof(var) * 8, addr);
        return;
    }

    var = g_regs[addr / 4];
}

static Math::Vec4<u8> DecodePixel(Regs::PixelFormat input_format, const u8* src_pixel) {
    switch (input_format) {
    case Regs::PixelFormat::RGBA8:
        return Color::DecodeRGBA8(src_pixel);

    case Regs::PixelFormat::RGB8:
        return Color::DecodeRGB8(src_pixel);

    case Regs::PixelFormat::RGB565:
        return Color::DecodeRGB565(src_pixel);

    case Regs::PixelFormat::RGB5A1:
        return Color::DecodeRGB5A1(src_pixel);

    case Regs::PixelFormat::RGBA4:
        return Color::DecodeRGBA4(src_pixel);

    default:
        LOG_ERROR(HW_GPU, "Unknown source framebuffer format %x", input_format);
        return {0, 0, 0, 0};
    }
}

MICROPROFILE_DEFINE(GPU_DisplayTransfer, "GPU", "DisplayTransfer", MP_RGB(100, 100, 255));
MICROPROFILE_DEFINE(GPU_CmdlistProcessing, "GPU", "Cmdlist Processing", MP_RGB(100, 255, 100));

static void MemoryFill(const Regs::MemoryFillConfig& config) {
    const PAddr start_addr = config.GetStartAddress();
    const PAddr end_addr = config.GetEndAddress();

    // TODO: do hwtest with these cases
    if (!Memory::IsValidPhysicalAddress(start_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid start address 0x%08X", start_addr);
        return;
    }

    if (!Memory::IsValidPhysicalAddress(end_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid end address 0x%08X", end_addr);
        return;
    }

    if (end_addr <= start_addr) {
        LOG_CRITICAL(HW_GPU, "invalid memory range from 0x%08X to 0x%08X", start_addr, end_addr);
        return;
    }

    u8* start = Memory::GetPhysicalPointer(start_addr);
    u8* end = Memory::GetPhysicalPointer(end_addr);

    // TODO: Consider always accelerating and returning vector of
    // regions that the accelerated fill did not cover to
    // reduce/eliminate the fill that the cpu has to do.
    // This would also mean that the flush below is not needed.
    // Fill should first flush all surfaces that touch but are
    // not completely within the fill range.
    // Then fill all completely covered surfaces, and return the
    // regions that were between surfaces or within the touching
    // ones for cpu to manually fill here.
    if (VideoCore::g_renderer->Rasterizer()->AccelerateFill(config))
        return;

    Memory::RasterizerFlushAndInvalidateRegion(config.GetStartAddress(),
                                               config.GetEndAddress() - config.GetStartAddress());

    if (config.fill_24bit) {
        // fill with 24-bit values
        for (u8* ptr = start; ptr < end; ptr += 3) {
            ptr[0] = config.value_24bit_r;
            ptr[1] = config.value_24bit_g;
            ptr[2] = config.value_24bit_b;
        }
    } else if (config.fill_32bit) {
        // fill with 32-bit values
        if (end > start) {
            u32 value = config.value_32bit;
            size_t len = (end - start) / sizeof(u32);
            for (size_t i = 0; i < len; ++i)
                memcpy(&start[i * sizeof(u32)], &value, sizeof(u32));
        }
    } else {
        // fill with 16-bit values
        u16 value_16bit = config.value_16bit.Value();
        for (u8* ptr = start; ptr < end; ptr += sizeof(u16))
            memcpy(ptr, &value_16bit, sizeof(u16));
    }
}

static void DisplayTransfer(const Regs::DisplayTransferConfig& config) {
    const PAddr src_addr = config.GetPhysicalInputAddress();
    const PAddr dst_addr = config.GetPhysicalOutputAddress();

    // TODO: do hwtest with these cases
    if (!Memory::IsValidPhysicalAddress(src_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid input address 0x%08X", src_addr);
        return;
    }

    if (!Memory::IsValidPhysicalAddress(dst_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid output address 0x%08X", dst_addr);
        return;
    }

    if (config.input_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero input width");
        return;
    }

    if (config.input_height == 0) {
        LOG_CRITICAL(HW_GPU, "zero input height");
        return;
    }

    if (config.output_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero output width");
        return;
    }

    if (config.output_height == 0) {
        LOG_CRITICAL(HW_GPU, "zero output height");
        return;
    }

    if (VideoCore::g_renderer->Rasterizer()->AccelerateDisplayTransfer(config))
        return;

    u8* src_pointer = Memory::GetPhysicalPointer(src_addr);
    u8* dst_pointer = Memory::GetPhysicalPointer(dst_addr);

    if (config.scaling > config.ScaleXY) {
        LOG_CRITICAL(HW_GPU, "Unimplemented display transfer scaling mode %u",
                     config.scaling.Value());
        UNIMPLEMENTED();
        return;
    }

    if (config.input_linear && config.scaling != config.NoScale) {
        LOG_CRITICAL(HW_GPU, "Scaling is only implemented on tiled input");
        UNIMPLEMENTED();
        return;
    }

    int horizontal_scale = config.scaling != config.NoScale ? 1 : 0;
    int vertical_scale = config.scaling == config.ScaleXY ? 1 : 0;

    u32 output_width = config.output_width >> horizontal_scale;
    u32 output_height = config.output_height >> vertical_scale;

    u32 input_size =
        config.input_width * config.input_height * GPU::Regs::BytesPerPixel(config.input_format);
    u32 output_size = output_width * output_height * GPU::Regs::BytesPerPixel(config.output_format);

    Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(), input_size);
    Memory::RasterizerFlushAndInvalidateRegion(config.GetPhysicalOutputAddress(), output_size);

    for (u32 y = 0; y < output_height; ++y) {
        for (u32 x = 0; x < output_width; ++x) {
            Math::Vec4<u8> src_color;

            // Calculate the [x,y] position of the input image
            // based on the current output position and the scale
            u32 input_x = x << horizontal_scale;
            u32 input_y = y << vertical_scale;

            u32 output_y;
            if (config.flip_vertically) {
                // Flip the y value of the output data,
                // we do this after calculating the [x,y] position of the input image
                // to account for the scaling options.
                output_y = output_height - y - 1;
            } else {
                output_y = y;
            }

            u32 dst_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.output_format);
            u32 src_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.input_format);
            u32 src_offset;
            u32 dst_offset;

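            // Note on the address math below: tiled 3DS surfaces are stored as rows of
            // 8x8-pixel blocks with a Morton (Z-order) layout inside each block.
            // GetMortonOffset resolves the position within the current row of 8-pixel-tall
            // blocks, and coarse_y (y & ~7) times the stride skips the block rows above it.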
            if (config.input_linear) {
                if (!config.dont_swizzle) {
                    // Interpret the input as linear and the output as tiled
                    u32 coarse_y = output_y & ~7;
                    u32 stride = output_width * dst_bytes_per_pixel;

                    src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                    dst_offset = VideoCore::GetMortonOffset(x, output_y, dst_bytes_per_pixel) +
                                 coarse_y * stride;
                } else {
                    // Both input and output are linear
                    src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                    dst_offset = (x + output_y * output_width) * dst_bytes_per_pixel;
                }
            } else {
                if (!config.dont_swizzle) {
                    // Interpret the input as tiled and the output as linear
                    u32 coarse_y = input_y & ~7;
                    u32 stride = config.input_width * src_bytes_per_pixel;

                    src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) +
                                 coarse_y * stride;
                    dst_offset = (x + output_y * output_width) * dst_bytes_per_pixel;
                } else {
                    // Both input and output are tiled
                    u32 out_coarse_y = output_y & ~7;
                    u32 out_stride = output_width * dst_bytes_per_pixel;

                    u32 in_coarse_y = input_y & ~7;
                    u32 in_stride = config.input_width * src_bytes_per_pixel;

                    src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) +
                                 in_coarse_y * in_stride;
                    dst_offset = VideoCore::GetMortonOffset(x, output_y, dst_bytes_per_pixel) +
                                 out_coarse_y * out_stride;
                }
            }

            const u8* src_pixel = src_pointer + src_offset;
            src_color = DecodePixel(config.input_format, src_pixel);
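            // When downscaling, average neighbouring input texels into the output pixel.
            // Scaling is only supported on tiled input (checked above), where input_x/input_y
            // are even, so the next texel in memory is the horizontal neighbour and, for
            // ScaleXY, the following three texels complete the 2x2 Morton block being averaged.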
            if (config.scaling == config.ScaleX) {
                Math::Vec4<u8> pixel =
                    DecodePixel(config.input_format, src_pixel + src_bytes_per_pixel);
                src_color = ((src_color + pixel) / 2).Cast<u8>();
            } else if (config.scaling == config.ScaleXY) {
                Math::Vec4<u8> pixel1 =
                    DecodePixel(config.input_format, src_pixel + 1 * src_bytes_per_pixel);
                Math::Vec4<u8> pixel2 =
                    DecodePixel(config.input_format, src_pixel + 2 * src_bytes_per_pixel);
                Math::Vec4<u8> pixel3 =
                    DecodePixel(config.input_format, src_pixel + 3 * src_bytes_per_pixel);
                src_color = (((src_color + pixel1) + (pixel2 + pixel3)) / 4).Cast<u8>();
            }

            u8* dst_pixel = dst_pointer + dst_offset;
            switch (config.output_format) {
            case Regs::PixelFormat::RGBA8:
                Color::EncodeRGBA8(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGB8:
                Color::EncodeRGB8(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGB565:
                Color::EncodeRGB565(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGB5A1:
                Color::EncodeRGB5A1(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGBA4:
                Color::EncodeRGBA4(src_color, dst_pixel);
                break;

            default:
                LOG_ERROR(HW_GPU, "Unknown destination framebuffer format %x",
                          config.output_format.Value());
                break;
            }
        }
    }
}

static void TextureCopy(const Regs::DisplayTransferConfig& config) {
    const PAddr src_addr = config.GetPhysicalInputAddress();
    const PAddr dst_addr = config.GetPhysicalOutputAddress();

    // TODO: do hwtest with invalid addresses
    if (!Memory::IsValidPhysicalAddress(src_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid input address 0x%08X", src_addr);
        return;
    }

    if (!Memory::IsValidPhysicalAddress(dst_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid output address 0x%08X", dst_addr);
        return;
    }

    if (VideoCore::g_renderer->Rasterizer()->AccelerateTextureCopy(config))
        return;

    u8* src_pointer = Memory::GetPhysicalPointer(src_addr);
    u8* dst_pointer = Memory::GetPhysicalPointer(dst_addr);

    u32 remaining_size = Common::AlignDown(config.texture_copy.size, 16);

    if (remaining_size == 0) {
        LOG_CRITICAL(HW_GPU, "zero size. Real hardware freezes on this.");
        return;
    }

    u32 input_gap = config.texture_copy.input_gap * 16;
    u32 output_gap = config.texture_copy.output_gap * 16;

    // A zero gap means contiguous input/output even if the width is 0. To avoid an infinite
    // loop below, the width is set to the total size when the gap is 0.
    u32 input_width = input_gap == 0 ? remaining_size : config.texture_copy.input_width * 16;
    u32 output_width = output_gap == 0 ? remaining_size : config.texture_copy.output_width * 16;
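    // Both the width and gap registers are in units of 16 bytes; e.g. input_width = 4 with
    // input_gap = 1 copies 64-byte lines separated by 16-byte gaps in the source.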

    if (input_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero input width. Real hardware freezes on this.");
        return;
    }

    if (output_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero output width. Real hardware freezes on this.");
        return;
    }

    size_t contiguous_input_size =
        config.texture_copy.size / input_width * (input_width + input_gap);
    Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(),
                                  static_cast<u32>(contiguous_input_size));

    size_t contiguous_output_size =
        config.texture_copy.size / output_width * (output_width + output_gap);
    Memory::RasterizerFlushAndInvalidateRegion(config.GetPhysicalOutputAddress(),
                                               static_cast<u32>(contiguous_output_size));

    u32 remaining_input = input_width;
    u32 remaining_output = output_width;
    while (remaining_size > 0) {
        u32 copy_size = std::min({remaining_input, remaining_output, remaining_size});

        std::memcpy(dst_pointer, src_pointer, copy_size);
        src_pointer += copy_size;
        dst_pointer += copy_size;

        remaining_input -= copy_size;
        remaining_output -= copy_size;
        remaining_size -= copy_size;

        if (remaining_input == 0) {
            remaining_input = input_width;
            src_pointer += input_gap;
        }
        if (remaining_output == 0) {
            remaining_output = output_width;
            dst_pointer += output_gap;
        }
    }
}

template <typename T>
inline void Write(u32 addr, const T data) {
    addr -= HW::VADDR_GPU;
    u32 index = addr / 4;

    // Writes other than u32 are untested, so I'd rather have them abort than silently fail
    if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
        LOG_ERROR(HW_GPU, "unknown Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, addr);
        return;
    }

    g_regs[index] = static_cast<u32>(data);

    switch (index) {

    // Memory fills are triggered once the fill value is written.
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[0].trigger, 0x00004 + 0x3):
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[1].trigger, 0x00008 + 0x3): {
        const bool is_second_filler = (index != GPU_REG_INDEX(memory_fill_config[0].trigger));
        auto& config = g_regs.memory_fill_config[is_second_filler];

        if (config.trigger) {
            MemoryFill(config);
            LOG_TRACE(HW_GPU, "MemoryFill from 0x%08x to 0x%08x", config.GetStartAddress(),
                      config.GetEndAddress());

            // It seems that it won't signal interrupt if "address_start" is zero.
            // TODO: hwtest this
            if (config.GetStartAddress() != 0) {
                if (!is_second_filler) {
                    //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PSC0);
                } else {
                    //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PSC1);
                }
            }

            // Reset "trigger" flag and set the "finish" flag
            // NOTE: This was confirmed to happen on hardware even if "address_start" is zero.
            config.trigger.Assign(0);
            config.finished.Assign(1);
        }
        break;
    }

    case GPU_REG_INDEX(display_transfer_config.trigger): {
        MICROPROFILE_SCOPE(GPU_DisplayTransfer);

        const auto& config = g_regs.display_transfer_config;
        if (config.trigger & 1) {

            if (Pica::g_debug_context)
                Pica::g_debug_context->OnEvent(Pica::DebugContext::Event::IncomingDisplayTransfer,
                                               nullptr);

            if (config.is_texture_copy) {
                TextureCopy(config);
                LOG_TRACE(HW_GPU, "TextureCopy: 0x%X bytes from 0x%08X(%u+%u)-> "
                                  "0x%08X(%u+%u), flags 0x%08X",
                          config.texture_copy.size, config.GetPhysicalInputAddress(),
                          config.texture_copy.input_width * 16, config.texture_copy.input_gap * 16,
                          config.GetPhysicalOutputAddress(), config.texture_copy.output_width * 16,
                          config.texture_copy.output_gap * 16, config.flags);
            } else {
                DisplayTransfer(config);
                LOG_TRACE(HW_GPU, "DisplayTransfer: 0x%08x(%ux%u)-> "
                                  "0x%08x(%ux%u), dst format %x, flags 0x%08X",
                          config.GetPhysicalInputAddress(), config.input_width.Value(),
                          config.input_height.Value(), config.GetPhysicalOutputAddress(),
                          config.output_width.Value(), config.output_height.Value(),
                          config.output_format.Value(), config.flags);
            }

            g_regs.display_transfer_config.trigger = 0;
            //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PPF);
        }
        break;
    }

    // Seems like writing to this register triggers processing
    case GPU_REG_INDEX(command_processor_config.trigger): {
        const auto& config = g_regs.command_processor_config;
        if (config.trigger & 1) {
            MICROPROFILE_SCOPE(GPU_CmdlistProcessing);

            u32* buffer = (u32*)Memory::GetPhysicalPointer(config.GetPhysicalAddress());

            if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
                Pica::g_debug_context->recorder->MemoryAccessed((u8*)buffer, config.size,
                                                                config.GetPhysicalAddress());
            }

            Pica::CommandProcessor::ProcessCommandList(buffer, config.size);

            g_regs.command_processor_config.trigger = 0;
        }
        break;
    }

    default:
        break;
    }

    // Notify tracer about the register write
    // This is happening *after* handling the write to make sure we properly catch all memory reads.
    if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
        // addr + GPU VBase - IO VBase + IO PBase
        Pica::g_debug_context->recorder->RegisterWritten<T>(
            addr + 0x1EF00000 - 0x1EC00000 + 0x10100000, data);
    }
}

// Explicitly instantiate template functions because we aren't defining this in the header:

template void Read<u64>(u64& var, const u32 addr);
template void Read<u32>(u32& var, const u32 addr);
template void Read<u16>(u16& var, const u32 addr);
template void Read<u8>(u8& var, const u32 addr);

template void Write<u64>(u32 addr, const u64 data);
template void Write<u32>(u32 addr, const u32 data);
template void Write<u16>(u32 addr, const u16 data);
template void Write<u8>(u32 addr, const u8 data);

/// Update hardware
static void VBlankCallback(u64 userdata, int cycles_late) {
    //VideoCore::g_renderer->SwapBuffers();

    //// Signal to GSP that GPU interrupt has occurred
    //// TODO(yuriks): hwtest to determine if PDC0 is for the Top screen and PDC1 for the Sub
    //// screen, or if both use the same interrupts and these two instead determine the
    //// beginning and end of the VBlank period. If needed, split the interrupt firing into
    //// two different intervals.
    //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PDC0);
    //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PDC1);

    // Reschedule recurrent event
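    // Subtracting cycles_late compensates for the callback firing late, so the vblank
    // interval averages out to frame_ticks (~60Hz) rather than drifting each frame.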
    CoreTiming::ScheduleEvent(frame_ticks - cycles_late, vblank_event);
}

/// Initialize hardware
void Init() {
    memset(&g_regs, 0, sizeof(g_regs));

    auto& framebuffer_top = g_regs.framebuffer_config[0];
    auto& framebuffer_sub = g_regs.framebuffer_config[1];

    // Setup default framebuffer addresses (located in VRAM)
    // .. or at least these are the ones used by system applets.
    // There's probably a smarter way to come up with addresses
    // like this which does not require hardcoding.
    framebuffer_top.address_left1 = 0x181E6000;
    framebuffer_top.address_left2 = 0x1822C800;
    framebuffer_top.address_right1 = 0x18273000;
    framebuffer_top.address_right2 = 0x182B9800;
    framebuffer_sub.address_left1 = 0x1848F000;
    framebuffer_sub.address_left2 = 0x184C7800;

    framebuffer_top.width.Assign(240);
    framebuffer_top.height.Assign(400);
    framebuffer_top.stride = 3 * 240;
    framebuffer_top.color_format.Assign(Regs::PixelFormat::RGB8);
    framebuffer_top.active_fb = 0;

    framebuffer_sub.width.Assign(240);
    framebuffer_sub.height.Assign(320);
    framebuffer_sub.stride = 3 * 240;
    framebuffer_sub.color_format.Assign(Regs::PixelFormat::RGB8);
    framebuffer_sub.active_fb = 0;

    vblank_event = CoreTiming::RegisterEvent("GPU::VBlankCallback", VBlankCallback);
    CoreTiming::ScheduleEvent(frame_ticks, vblank_event);

    LOG_DEBUG(HW_GPU, "initialized OK");
}

/// Shutdown hardware
void Shutdown() {
    LOG_DEBUG(HW_GPU, "shutdown OK");
}

} // namespace