Diffstat (limited to 'src')
-rw-r--r--   src/audio_core/codec.cpp                                    4
-rw-r--r--   src/audio_core/cubeb_sink.cpp                               8
-rw-r--r--   src/common/swap.h                                           6
-rw-r--r--   src/core/hle/service/nvflinger/nvflinger.cpp               76
-rw-r--r--   src/core/hle/service/nvflinger/nvflinger.h                 11
-rw-r--r--   src/core/hle/service/vi/display/vi_display.cpp             49
-rw-r--r--   src/core/hle/service/vi/display/vi_display.h               74
-rw-r--r--   src/core/hle/service/vi/layer/vi_layer.cpp                  3
-rw-r--r--   src/core/hle/service/vi/layer/vi_layer.h                   37
-rw-r--r--   src/core/hle/service/vi/vi.cpp                             27
-rw-r--r--   src/video_core/engines/shader_bytecode.h                    6
-rw-r--r--   src/video_core/engines/shader_header.h                     41
-rw-r--r--   src/video_core/renderer_opengl/gl_rasterizer_cache.cpp      6
-rw-r--r--   src/video_core/renderer_opengl/gl_shader_decompiler.cpp    44
-rw-r--r--   src/video_core/renderer_opengl/gl_shader_gen.cpp            4
-rw-r--r--   src/video_core/shader/decode/memory.cpp                     2
-rw-r--r--   src/video_core/shader/decode/other.cpp                     15
-rw-r--r--   src/video_core/shader/track.cpp                            10
18 files changed, 303 insertions, 120 deletions
diff --git a/src/audio_core/codec.cpp b/src/audio_core/codec.cpp
index 454de798b..c5a0d98ce 100644
--- a/src/audio_core/codec.cpp
+++ b/src/audio_core/codec.cpp
@@ -68,8 +68,8 @@ std::vector<s16> DecodeADPCM(const u8* const data, std::size_t size, const ADPCM
         }
     }
 
-    state.yn1 = yn1;
-    state.yn2 = yn2;
+    state.yn1 = static_cast<s16>(yn1);
+    state.yn2 = static_cast<s16>(yn2);
 
     return ret;
 }
diff --git a/src/audio_core/cubeb_sink.cpp b/src/audio_core/cubeb_sink.cpp
index 097328901..dc45dedd3 100644
--- a/src/audio_core/cubeb_sink.cpp
+++ b/src/audio_core/cubeb_sink.cpp
@@ -46,7 +46,7 @@ public:
         }
     }
 
-    ~CubebSinkStream() {
+    ~CubebSinkStream() override {
        if (!ctx) {
            return;
        }
@@ -75,11 +75,11 @@ public:
        queue.Push(samples);
    }
 
-    std::size_t SamplesInQueue(u32 num_channels) const override {
+    std::size_t SamplesInQueue(u32 channel_count) const override {
        if (!ctx)
            return 0;
 
-        return queue.Size() / num_channels;
+        return queue.Size() / channel_count;
    }
 
    void Flush() override {
@@ -98,7 +98,7 @@ private:
    u32 num_channels{};
 
    Common::RingBuffer<s16, 0x10000> queue;
-    std::array<s16, 2> last_frame;
+    std::array<s16, 2> last_frame{};
    std::atomic<bool> should_flush{};
    TimeStretcher time_stretch;
 
diff --git a/src/common/swap.h b/src/common/swap.h
index 32af0b6ac..0e219747f 100644
--- a/src/common/swap.h
+++ b/src/common/swap.h
@@ -28,8 +28,8 @@
 #include <cstring>
 #include "common/common_types.h"
 
-// GCC 4.6+
-#if __GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+// GCC
+#ifdef __GNUC__
 
 #if __BYTE_ORDER__ && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) && !defined(COMMON_LITTLE_ENDIAN)
 #define COMMON_LITTLE_ENDIAN 1
@@ -38,7 +38,7 @@
 #endif
 
 // LLVM/clang
-#elif __clang__
+#elif defined(__clang__)
 
 #if __LITTLE_ENDIAN__ && !defined(COMMON_LITTLE_ENDIAN)
 #define COMMON_LITTLE_ENDIAN 1
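The version gate on GCC is gone; detection now keys off __GNUC__ and __clang__ directly. A minimal standalone probe using the same builtin macros (the EXAMPLE_* names are invented for illustration and are not part of the commit):

// endian_probe.cpp -- illustrative only; mirrors the detection order used in common/swap.h.
#include <cstdio>

#if defined(__GNUC__) // GCC, plus compilers that define GCC-compatible macros (including clang)
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define EXAMPLE_LITTLE_ENDIAN 1
#endif
#elif defined(__clang__) // only reached by clang builds that hide __GNUC__
#if defined(__LITTLE_ENDIAN__)
#define EXAMPLE_LITTLE_ENDIAN 1
#endif
#endif

int main() {
#ifdef EXAMPLE_LITTLE_ENDIAN
    std::puts("little endian");
#else
    std::puts("big endian or unknown");
#endif
    return 0;
}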
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index b5d452db1..56f31e2ac 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -28,9 +28,13 @@ namespace Service::NVFlinger {
 constexpr std::size_t SCREEN_REFRESH_RATE = 60;
 constexpr u64 frame_ticks = static_cast<u64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE);
 
-NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing)
-    : displays{{0, "Default"}, {1, "External"}, {2, "Edid"}, {3, "Internal"}, {4, "Null"}},
-      core_timing{core_timing} {
+NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_timing} {
+    displays.emplace_back(0, "Default");
+    displays.emplace_back(1, "External");
+    displays.emplace_back(2, "Edid");
+    displays.emplace_back(3, "Internal");
+    displays.emplace_back(4, "Null");
+
     // Schedule the screen composition events
     composition_event =
         core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, int cycles_late) {
@@ -55,13 +59,14 @@ std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) {
     // TODO(Subv): Currently we only support the Default display.
     ASSERT(name == "Default");
 
-    const auto itr = std::find_if(displays.begin(), displays.end(),
-                                  [&](const VI::Display& display) { return display.name == name; });
+    const auto itr =
+        std::find_if(displays.begin(), displays.end(),
+                     [&](const VI::Display& display) { return display.GetName() == name; });
     if (itr == displays.end()) {
         return {};
     }
 
-    return itr->id;
+    return itr->GetID();
 }
 
 std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
@@ -71,13 +76,10 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
         return {};
     }
 
-    ASSERT_MSG(display->layers.empty(), "Only one layer is supported per display at the moment");
-
     const u64 layer_id = next_layer_id++;
     const u32 buffer_queue_id = next_buffer_queue_id++;
-    auto buffer_queue = std::make_shared<BufferQueue>(buffer_queue_id, layer_id);
-    display->layers.emplace_back(layer_id, buffer_queue);
-    buffer_queues.emplace_back(std::move(buffer_queue));
+    buffer_queues.emplace_back(buffer_queue_id, layer_id);
+    display->CreateLayer(layer_id, buffer_queues.back());
     return layer_id;
 }
 
@@ -88,7 +90,7 @@ std::optional<u32> NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) co
         return {};
     }
 
-    return layer->buffer_queue->GetId();
+    return layer->GetBufferQueue().GetId();
 }
 
 Kernel::SharedPtr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_id) const {
@@ -98,12 +100,20 @@ Kernel::SharedPtr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_i
         return nullptr;
     }
 
-    return display->vsync_event.readable;
+    return display->GetVSyncEvent();
 }
 
-std::shared_ptr<BufferQueue> NVFlinger::FindBufferQueue(u32 id) const {
+BufferQueue& NVFlinger::FindBufferQueue(u32 id) {
     const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(),
-                                  [&](const auto& queue) { return queue->GetId() == id; });
+                                  [id](const auto& queue) { return queue.GetId() == id; });
+
+    ASSERT(itr != buffer_queues.end());
+    return *itr;
+}
+
+const BufferQueue& NVFlinger::FindBufferQueue(u32 id) const {
+    const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(),
+                                  [id](const auto& queue) { return queue.GetId() == id; });
 
     ASSERT(itr != buffer_queues.end());
     return *itr;
@@ -112,7 +122,7 @@ std::shared_ptr<BufferQueue> NVFlinger::FindBufferQueue(u32 id) const {
 VI::Display* NVFlinger::FindDisplay(u64 display_id) {
     const auto itr =
         std::find_if(displays.begin(), displays.end(),
-                     [&](const VI::Display& display) { return display.id == display_id; });
+                     [&](const VI::Display& display) { return display.GetID() == display_id; });
 
     if (itr == displays.end()) {
         return nullptr;
@@ -124,7 +134,7 @@ VI::Display* NVFlinger::FindDisplay(u64 display_id) {
 const VI::Display* NVFlinger::FindDisplay(u64 display_id) const {
     const auto itr =
         std::find_if(displays.begin(), displays.end(),
-                     [&](const VI::Display& display) { return display.id == display_id; });
+                     [&](const VI::Display& display) { return display.GetID() == display_id; });
 
     if (itr == displays.end()) {
         return nullptr;
@@ -140,14 +150,7 @@ VI::Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) {
         return nullptr;
     }
 
-    const auto itr = std::find_if(display->layers.begin(), display->layers.end(),
-                                  [&](const VI::Layer& layer) { return layer.id == layer_id; });
-
-    if (itr == display->layers.end()) {
-        return nullptr;
-    }
-
-    return &*itr;
+    return display->FindLayer(layer_id);
 }
 
 const VI::Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) const {
@@ -157,33 +160,24 @@ const VI::Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) const {
         return nullptr;
     }
 
-    const auto itr = std::find_if(display->layers.begin(), display->layers.end(),
-                                  [&](const VI::Layer& layer) { return layer.id == layer_id; });
-
-    if (itr == display->layers.end()) {
-        return nullptr;
-    }
-
-    return &*itr;
+    return display->FindLayer(layer_id);
 }
 
 void NVFlinger::Compose() {
     for (auto& display : displays) {
         // Trigger vsync for this display at the end of drawing
-        SCOPE_EXIT({ display.vsync_event.writable->Signal(); });
+        SCOPE_EXIT({ display.SignalVSyncEvent(); });
 
         // Don't do anything for displays without layers.
-        if (display.layers.empty())
+        if (!display.HasLayers())
             continue;
 
         // TODO(Subv): Support more than 1 layer.
-        ASSERT_MSG(display.layers.size() == 1, "Max 1 layer per display is supported");
-
-        VI::Layer& layer = display.layers[0];
-        auto& buffer_queue = layer.buffer_queue;
+        VI::Layer& layer = display.GetLayer(0);
+        auto& buffer_queue = layer.GetBufferQueue();
 
         // Search for a queued buffer and acquire it
-        auto buffer = buffer_queue->AcquireBuffer();
+        auto buffer = buffer_queue.AcquireBuffer();
 
         MicroProfileFlip();
 
@@ -208,7 +202,7 @@ void NVFlinger::Compose() {
                      igbp_buffer.width, igbp_buffer.height, igbp_buffer.stride,
                      buffer->get().transform, buffer->get().crop_rect);
 
-        buffer_queue->ReleaseBuffer(buffer->get().slot);
+        buffer_queue.ReleaseBuffer(buffer->get().slot);
     }
 }
 
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index 2e000af91..c0a83fffb 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -28,8 +28,8 @@ class Module;
 } // namespace Service::Nvidia
 
 namespace Service::VI {
-struct Display;
-struct Layer;
+class Display;
+class Layer;
 } // namespace Service::VI
 
 namespace Service::NVFlinger {
@@ -65,7 +65,10 @@ public:
     Kernel::SharedPtr<Kernel::ReadableEvent> FindVsyncEvent(u64 display_id) const;
 
     /// Obtains a buffer queue identified by the ID.
-    std::shared_ptr<BufferQueue> FindBufferQueue(u32 id) const;
+    BufferQueue& FindBufferQueue(u32 id);
+
+    /// Obtains a buffer queue identified by the ID.
+    const BufferQueue& FindBufferQueue(u32 id) const;
 
     /// Performs a composition request to the emulated nvidia GPU and triggers the vsync events when
     /// finished.
@@ -87,7 +90,7 @@ private:
     std::shared_ptr<Nvidia::Module> nvdrv;
 
     std::vector<VI::Display> displays;
-    std::vector<std::shared_ptr<BufferQueue>> buffer_queues;
+    std::vector<BufferQueue> buffer_queues;
 
     /// Id to use for the next layer that is created, this counter is shared among all displays.
     u64 next_layer_id = 1;
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp
index a108e468f..01d80311b 100644
--- a/src/core/hle/service/vi/display/vi_display.cpp
+++ b/src/core/hle/service/vi/display/vi_display.cpp
@@ -2,8 +2,12 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <algorithm>
+#include <utility>
+
 #include <fmt/format.h>
 
+#include "common/assert.h"
 #include "core/core.h"
 #include "core/hle/kernel/readable_event.h"
 #include "core/hle/service/vi/display/vi_display.h"
@@ -19,4 +23,49 @@ Display::Display(u64 id, std::string name) : id{id}, name{std::move(name)} {
 
 Display::~Display() = default;
 
+Layer& Display::GetLayer(std::size_t index) {
+    return layers.at(index);
+}
+
+const Layer& Display::GetLayer(std::size_t index) const {
+    return layers.at(index);
+}
+
+Kernel::SharedPtr<Kernel::ReadableEvent> Display::GetVSyncEvent() const {
+    return vsync_event.readable;
+}
+
+void Display::SignalVSyncEvent() {
+    vsync_event.writable->Signal();
+}
+
+void Display::CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue) {
+    // TODO(Subv): Support more than 1 layer.
+    ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment");
+
+    layers.emplace_back(id, buffer_queue);
+}
+
+Layer* Display::FindLayer(u64 id) {
+    const auto itr = std::find_if(layers.begin(), layers.end(),
+                                  [id](const VI::Layer& layer) { return layer.GetID() == id; });
+
+    if (itr == layers.end()) {
+        return nullptr;
+    }
+
+    return &*itr;
+}
+
+const Layer* Display::FindLayer(u64 id) const {
+    const auto itr = std::find_if(layers.begin(), layers.end(),
+                                  [id](const VI::Layer& layer) { return layer.GetID() == id; });
+
+    if (itr == layers.end()) {
+        return nullptr;
+    }
+
+    return &*itr;
+}
+
 } // namespace Service::VI
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h
index df44db306..2acd46ff8 100644
--- a/src/core/hle/service/vi/display/vi_display.h
+++ b/src/core/hle/service/vi/display/vi_display.h
@@ -10,14 +10,84 @@
 #include "common/common_types.h"
 #include "core/hle/kernel/writable_event.h"
 
+namespace Service::NVFlinger {
+class BufferQueue;
+}
+
 namespace Service::VI {
 
-struct Layer;
+class Layer;
 
-struct Display {
+/// Represents a single display type
+class Display {
+public:
+    /// Constructs a display with a given unique ID and name.
+    ///
+    /// @param id The unique ID for this display.
+    /// @param name The name for this display.
+    ///
     Display(u64 id, std::string name);
     ~Display();
 
+    Display(const Display&) = delete;
+    Display& operator=(const Display&) = delete;
+
+    Display(Display&&) = default;
+    Display& operator=(Display&&) = default;
+
+    /// Gets the unique ID assigned to this display.
+    u64 GetID() const {
+        return id;
+    }
+
+    /// Gets the name of this display
+    const std::string& GetName() const {
+        return name;
+    }
+
+    /// Whether or not this display has any layers added to it.
+    bool HasLayers() const {
+        return !layers.empty();
+    }
+
+    /// Gets a layer for this display based off an index.
+    Layer& GetLayer(std::size_t index);
+
+    /// Gets a layer for this display based off an index.
+    const Layer& GetLayer(std::size_t index) const;
+
+    /// Gets the readable vsync event.
+    Kernel::SharedPtr<Kernel::ReadableEvent> GetVSyncEvent() const;
+
+    /// Signals the internal vsync event.
+    void SignalVSyncEvent();
+
+    /// Creates and adds a layer to this display with the given ID.
+    ///
+    /// @param id The ID to assign to the created layer.
+    /// @param buffer_queue The buffer queue for the layer instance to use.
+    ///
+    void CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue);
+
+    /// Attempts to find a layer with the given ID.
+    ///
+    /// @param id The layer ID.
+    ///
+    /// @returns If found, the Layer instance with the given ID.
+    ///          If not found, then nullptr is returned.
+    ///
+    Layer* FindLayer(u64 id);
+
+    /// Attempts to find a layer with the given ID.
+    ///
+    /// @param id The layer ID.
+    ///
+    /// @returns If found, the Layer instance with the given ID.
+    ///          If not found, then nullptr is returned.
+    ///
+    const Layer* FindLayer(u64 id) const;
+
+private:
     u64 id;
     std::string name;
 
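Taken together with the NVFlinger changes above, the new Display interface is used roughly as follows. This is an illustrative sketch only (the free function and its arguments are assumed, not part of the commit); it mirrors what NVFlinger::CreateLayer and NVFlinger::FindLayer now do:

#include "core/hle/service/nvflinger/buffer_queue.h"
#include "core/hle/service/vi/display/vi_display.h"
#include "core/hle/service/vi/layer/vi_layer.h"

// Hypothetical helper, for illustration only.
void ExampleAttachLayer(Service::VI::Display& display, Service::NVFlinger::BufferQueue& queue) {
    constexpr u64 layer_id = 1;

    // Only one layer per display is supported at the moment, so this asserts if called twice.
    display.CreateLayer(layer_id, queue);

    // Layers are looked up through the display instead of poking at its members.
    if (Service::VI::Layer* layer = display.FindLayer(layer_id)) {
        // The layer hands out its queue by reference now, not by shared_ptr.
        Service::NVFlinger::BufferQueue& bq = layer->GetBufferQueue();
        (void)bq;
    }
}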
diff --git a/src/core/hle/service/vi/layer/vi_layer.cpp b/src/core/hle/service/vi/layer/vi_layer.cpp
index 3a83e5b95..954225c26 100644
--- a/src/core/hle/service/vi/layer/vi_layer.cpp
+++ b/src/core/hle/service/vi/layer/vi_layer.cpp
@@ -6,8 +6,7 @@
 
 namespace Service::VI {
 
-Layer::Layer(u64 id, std::shared_ptr<NVFlinger::BufferQueue> queue)
-    : id{id}, buffer_queue{std::move(queue)} {}
+Layer::Layer(u64 id, NVFlinger::BufferQueue& queue) : id{id}, buffer_queue{queue} {}
 
 Layer::~Layer() = default;
 
diff --git a/src/core/hle/service/vi/layer/vi_layer.h b/src/core/hle/service/vi/layer/vi_layer.h
index df328e09f..c6bfd01f6 100644
--- a/src/core/hle/service/vi/layer/vi_layer.h
+++ b/src/core/hle/service/vi/layer/vi_layer.h
@@ -4,8 +4,6 @@
 
 #pragma once
 
-#include <memory>
-
 #include "common/common_types.h"
 
 namespace Service::NVFlinger {
@@ -14,12 +12,41 @@ class BufferQueue;
 
 namespace Service::VI {
 
-struct Layer {
-    Layer(u64 id, std::shared_ptr<NVFlinger::BufferQueue> queue);
+/// Represents a single display layer.
+class Layer {
+public:
+    /// Constructs a layer with a given ID and buffer queue.
+    ///
+    /// @param id The ID to assign to this layer.
+    /// @param queue The buffer queue for this layer to use.
+    ///
+    Layer(u64 id, NVFlinger::BufferQueue& queue);
     ~Layer();
 
+    Layer(const Layer&) = delete;
+    Layer& operator=(const Layer&) = delete;
+
+    Layer(Layer&&) = default;
+    Layer& operator=(Layer&&) = delete;
+
+    /// Gets the ID for this layer.
+    u64 GetID() const {
+        return id;
+    }
+
+    /// Gets a reference to the buffer queue this layer is using.
+    NVFlinger::BufferQueue& GetBufferQueue() {
+        return buffer_queue;
+    }
+
+    /// Gets a const reference to the buffer queue this layer is using.
+    const NVFlinger::BufferQueue& GetBufferQueue() const {
+        return buffer_queue;
+    }
+
+private:
     u64 id;
-    std::shared_ptr<NVFlinger::BufferQueue> buffer_queue;
+    NVFlinger::BufferQueue& buffer_queue;
 };
 
 } // namespace Service::VI
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index a317a2885..74384a24d 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -525,7 +525,7 @@ private:
         LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id,
                   static_cast<u32>(transaction), flags);
 
-        auto buffer_queue = nv_flinger->FindBufferQueue(id);
+        auto& buffer_queue = nv_flinger->FindBufferQueue(id);
 
         if (transaction == TransactionId::Connect) {
             IGBPConnectRequestParcel request{ctx.ReadBuffer()};
@@ -538,7 +538,7 @@ private:
         } else if (transaction == TransactionId::SetPreallocatedBuffer) {
             IGBPSetPreallocatedBufferRequestParcel request{ctx.ReadBuffer()};
 
-            buffer_queue->SetPreallocatedBuffer(request.data.slot, request.buffer);
+            buffer_queue.SetPreallocatedBuffer(request.data.slot, request.buffer);
 
             IGBPSetPreallocatedBufferResponseParcel response{};
             ctx.WriteBuffer(response.Serialize());
@@ -546,7 +546,7 @@ private:
             IGBPDequeueBufferRequestParcel request{ctx.ReadBuffer()};
             const u32 width{request.data.width};
             const u32 height{request.data.height};
-            std::optional<u32> slot = buffer_queue->DequeueBuffer(width, height);
+            std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height);
 
             if (slot) {
                 // Buffer is available
@@ -559,8 +559,8 @@ private:
                     [=](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
                         Kernel::ThreadWakeupReason reason) {
                         // Repeat TransactParcel DequeueBuffer when a buffer is available
-                        auto buffer_queue = nv_flinger->FindBufferQueue(id);
-                        std::optional<u32> slot = buffer_queue->DequeueBuffer(width, height);
+                        auto& buffer_queue = nv_flinger->FindBufferQueue(id);
+                        std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height);
                         ASSERT_MSG(slot != std::nullopt, "Could not dequeue buffer.");
 
                         IGBPDequeueBufferResponseParcel response{*slot};
@@ -568,28 +568,28 @@ private:
                         IPC::ResponseBuilder rb{ctx, 2};
                         rb.Push(RESULT_SUCCESS);
                     },
-                    buffer_queue->GetWritableBufferWaitEvent());
+                    buffer_queue.GetWritableBufferWaitEvent());
             }
         } else if (transaction == TransactionId::RequestBuffer) {
             IGBPRequestBufferRequestParcel request{ctx.ReadBuffer()};
 
-            auto& buffer = buffer_queue->RequestBuffer(request.slot);
+            auto& buffer = buffer_queue.RequestBuffer(request.slot);
 
             IGBPRequestBufferResponseParcel response{buffer};
             ctx.WriteBuffer(response.Serialize());
         } else if (transaction == TransactionId::QueueBuffer) {
             IGBPQueueBufferRequestParcel request{ctx.ReadBuffer()};
 
-            buffer_queue->QueueBuffer(request.data.slot, request.data.transform,
-                                      request.data.GetCropRect());
+            buffer_queue.QueueBuffer(request.data.slot, request.data.transform,
+                                     request.data.GetCropRect());
 
             IGBPQueueBufferResponseParcel response{1280, 720};
             ctx.WriteBuffer(response.Serialize());
         } else if (transaction == TransactionId::Query) {
             IGBPQueryRequestParcel request{ctx.ReadBuffer()};
 
-            u32 value =
-                buffer_queue->Query(static_cast<NVFlinger::BufferQueue::QueryType>(request.type));
+            const u32 value =
+                buffer_queue.Query(static_cast<NVFlinger::BufferQueue::QueryType>(request.type));
 
             IGBPQueryResponseParcel response{value};
             ctx.WriteBuffer(response.Serialize());
@@ -629,12 +629,12 @@ private:
 
         LOG_WARNING(Service_VI, "(STUBBED) called id={}, unknown={:08X}", id, unknown);
 
-        const auto buffer_queue = nv_flinger->FindBufferQueue(id);
+        const auto& buffer_queue = nv_flinger->FindBufferQueue(id);
 
         // TODO(Subv): Find out what this actually is.
         IPC::ResponseBuilder rb{ctx, 2, 1};
         rb.Push(RESULT_SUCCESS);
-        rb.PushCopyObjects(buffer_queue->GetBufferWaitEvent());
+        rb.PushCopyObjects(buffer_queue.GetBufferWaitEvent());
     }
 
     std::shared_ptr<NVFlinger::NVFlinger> nv_flinger;
@@ -752,6 +752,7 @@ public:
             {1102, nullptr, "GetDisplayResolution"},
             {2010, &IManagerDisplayService::CreateManagedLayer, "CreateManagedLayer"},
             {2011, nullptr, "DestroyManagedLayer"},
+            {2012, nullptr, "CreateStrayLayer"},
             {2050, nullptr, "CreateIndirectLayer"},
            {2051, nullptr, "DestroyIndirectLayer"},
            {2052, nullptr, "CreateIndirectProducerEndPoint"},
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index 1f425f90b..252592edd 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -376,9 +376,9 @@ enum class R2pMode : u64 {
 };
 
 enum class IpaInterpMode : u64 {
-    Linear = 0,
-    Perspective = 1,
-    Flat = 2,
+    Pass = 0,
+    Multiply = 1,
+    Constant = 2,
     Sc = 3,
 };
 
diff --git a/src/video_core/engines/shader_header.h b/src/video_core/engines/shader_header.h
index cf2b76ff6..e86a7f04a 100644
--- a/src/video_core/engines/shader_header.h
+++ b/src/video_core/engines/shader_header.h
@@ -16,6 +16,13 @@ enum class OutputTopology : u32 {
     TriangleStrip = 7,
 };
 
+enum class AttributeUse : u8 {
+    Unused = 0,
+    Constant = 1,
+    Perspective = 2,
+    ScreenLinear = 3,
+};
+
 // Documentation in:
 // http://download.nvidia.com/open-gpu-doc/Shader-Program-Header/1/Shader-Program-Header.html#ImapTexture
 struct Header {
@@ -84,9 +91,15 @@ struct Header {
        } vtg;
 
        struct {
            INSERT_PADDING_BYTES(3);  // ImapSystemValuesA
            INSERT_PADDING_BYTES(1);  // ImapSystemValuesB
-            INSERT_PADDING_BYTES(32); // ImapGenericVector[32]
+            union {
+                BitField<0, 2, AttributeUse> x;
+                BitField<2, 2, AttributeUse> y;
+                BitField<4, 2, AttributeUse> w;
+                BitField<6, 2, AttributeUse> z;
+                u8 raw;
+            } imap_generic_vector[32];
            INSERT_PADDING_BYTES(2);  // ImapColor
            INSERT_PADDING_BYTES(2);  // ImapSystemValuesC
            INSERT_PADDING_BYTES(10); // ImapFixedFncTexture[10]
@@ -103,6 +116,28 @@ struct Header {
                const u32 bit = render_target * 4 + component;
                return omap.target & (1 << bit);
            }
+            AttributeUse GetAttributeIndexUse(u32 attribute, u32 index) const {
+                return static_cast<AttributeUse>(
+                    (imap_generic_vector[attribute].raw >> (index * 2)) & 0x03);
+            }
+            AttributeUse GetAttributeUse(u32 attribute) const {
+                AttributeUse result = AttributeUse::Unused;
+                for (u32 i = 0; i < 4; i++) {
+                    const auto index = GetAttributeIndexUse(attribute, i);
+                    if (index == AttributeUse::Unused) {
+                        continue;
+                    }
+                    if (result == AttributeUse::Unused || result == index) {
+                        result = index;
+                        continue;
+                    }
+                    LOG_CRITICAL(HW_GPU, "Generic Attribute Conflict in Interpolation Mode");
+                    if (index == AttributeUse::Perspective) {
+                        result = index;
+                    }
+                }
+                return result;
+            }
        } ps;
    };
 
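For reference, each imap_generic_vector entry packs the interpolation qualifiers of one generic attribute's four components into a single byte, two bits per component, which is what GetAttributeIndexUse extracts. A self-contained sketch of the same decode, using standalone stand-ins rather than the engine header:

#include <cstdint>
#include <cstdio>

enum class AttributeUse : std::uint8_t { Unused = 0, Constant = 1, Perspective = 2, ScreenLinear = 3 };

int main() {
    // Bits 0-1 = x, 2-3 = y, 4-5 = w, 6-7 = z (same order as the BitField union above).
    const std::uint8_t raw = 0b10'01'00'10; // z=Perspective, w=Constant, y=Unused, x=Perspective

    for (unsigned index = 0; index < 4; ++index) {
        const auto use = static_cast<AttributeUse>((raw >> (index * 2)) & 0x03);
        std::printf("component %u -> use %u\n", index, static_cast<unsigned>(use));
    }
    return 0;
}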
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
index 74200914e..e6d47ce41 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
@@ -1257,7 +1257,11 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface,
     case SurfaceTarget::TextureCubemap:
     case SurfaceTarget::Texture2DArray:
     case SurfaceTarget::TextureCubeArray:
-        FastLayeredCopySurface(old_surface, new_surface);
+        if (old_params.pixel_format == new_params.pixel_format)
+            FastLayeredCopySurface(old_surface, new_surface);
+        else {
+            AccurateCopySurface(old_surface, new_surface);
+        }
         break;
     default:
         LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index db18f4dbe..72ff6ac6a 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -20,6 +20,7 @@
 namespace OpenGL::GLShader {
 
 using Tegra::Shader::Attribute;
+using Tegra::Shader::AttributeUse;
 using Tegra::Shader::Header;
 using Tegra::Shader::IpaInterpMode;
 using Tegra::Shader::IpaMode;
@@ -288,34 +289,22 @@ private:
         code.AddNewLine();
     }
 
-    std::string GetInputFlags(const IpaMode& input_mode) {
-        const IpaSampleMode sample_mode = input_mode.sampling_mode;
-        const IpaInterpMode interp_mode = input_mode.interpolation_mode;
+    std::string GetInputFlags(AttributeUse attribute) {
         std::string out;
 
-        switch (interp_mode) {
-        case IpaInterpMode::Flat:
+        switch (attribute) {
+        case AttributeUse::Constant:
             out += "flat ";
             break;
-        case IpaInterpMode::Linear:
+        case AttributeUse::ScreenLinear:
             out += "noperspective ";
             break;
-        case IpaInterpMode::Perspective:
+        case AttributeUse::Perspective:
             // Default, Smooth
             break;
         default:
-            UNIMPLEMENTED_MSG("Unhandled IPA interp mode: {}", static_cast<u32>(interp_mode));
-        }
-        switch (sample_mode) {
-        case IpaSampleMode::Centroid:
-            // It can be implemented with the "centroid " keyword in GLSL
-            UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode centroid");
-            break;
-        case IpaSampleMode::Default:
-            // Default, n/a
-            break;
-        default:
-            UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode: {}", static_cast<u32>(sample_mode));
+            LOG_CRITICAL(HW_GPU, "Unused attribute being fetched");
+            UNREACHABLE();
         }
         return out;
     }
@@ -324,16 +313,11 @@ private:
         const auto& attributes = ir.GetInputAttributes();
         for (const auto element : attributes) {
             const Attribute::Index index = element.first;
-            const IpaMode& input_mode = *element.second.begin();
             if (index < Attribute::Index::Attribute_0 || index > Attribute::Index::Attribute_31) {
                 // Skip when it's not a generic attribute
                 continue;
             }
 
-            ASSERT(element.second.size() > 0);
-            UNIMPLEMENTED_IF_MSG(element.second.size() > 1,
-                                 "Multiple input flag modes are not supported in GLSL");
-
             // TODO(bunnei): Use proper number of elements for these
             u32 idx = static_cast<u32>(index) - static_cast<u32>(Attribute::Index::Attribute_0);
             if (stage != ShaderStage::Vertex) {
@@ -345,8 +329,14 @@ private:
             if (stage == ShaderStage::Geometry) {
                 attr = "gs_" + attr + "[]";
             }
-            code.AddLine("layout (location = " + std::to_string(idx) + ") " +
-                         GetInputFlags(input_mode) + "in vec4 " + attr + ';');
+            std::string suffix;
+            if (stage == ShaderStage::Fragment) {
+                const auto input_mode =
+                    header.ps.GetAttributeUse(idx - GENERIC_VARYING_START_LOCATION);
+                suffix = GetInputFlags(input_mode);
+            }
+            code.AddLine("layout (location = " + std::to_string(idx) + ") " + suffix + "in vec4 " +
+                         attr + ';');
         }
         if (!attributes.empty())
             code.AddNewLine();
@@ -1584,4 +1574,4 @@ ProgramResult Decompile(const ShaderIR& ir, Maxwell::ShaderStage stage, const st
     return {decompiler.GetResult(), decompiler.GetShaderEntries()};
 }
 
-} // namespace OpenGL::GLShader
\ No newline at end of file
+} // namespace OpenGL::GLShader
diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp
index 04e1db911..7d96649af 100644
--- a/src/video_core/renderer_opengl/gl_shader_gen.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_gen.cpp
@@ -124,7 +124,7 @@ layout (location = 5) out vec4 FragColor5;
 layout (location = 6) out vec4 FragColor6;
 layout (location = 7) out vec4 FragColor7;
 
-layout (location = 0) in vec4 position;
+layout (location = 0) in noperspective vec4 position;
 
 layout (std140, binding = EMULATION_UBO_BINDING) uniform fs_config {
     vec4 viewport_flip;
@@ -172,4 +172,4 @@ void main() {
     return {out, program.second};
 }
 
-} // namespace OpenGL::GLShader
\ No newline at end of file
+} // namespace OpenGL::GLShader
diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp
index 55ec601ff..38f01ca50 100644
--- a/src/video_core/shader/decode/memory.cpp
+++ b/src/video_core/shader/decode/memory.cpp
@@ -48,7 +48,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
         UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0,
                              "Unaligned attribute loads are not supported");
 
-        Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Perspective,
+        Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Pass,
                                           Tegra::Shader::IpaSampleMode::Default};
 
         u64 next_element = instr.attribute.fmt20.element;
diff --git a/src/video_core/shader/decode/other.cpp b/src/video_core/shader/decode/other.cpp
index f9502e3d0..d750a2936 100644
--- a/src/video_core/shader/decode/other.cpp
+++ b/src/video_core/shader/decode/other.cpp
@@ -135,7 +135,18 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
                                            instr.ipa.sample_mode.Value()};
 
         const Node attr = GetInputAttribute(attribute.index, attribute.element, input_mode);
-        const Node value = GetSaturatedFloat(attr, instr.ipa.saturate);
+        Node value = attr;
+        const Tegra::Shader::Attribute::Index index = attribute.index.Value();
+        if (index >= Tegra::Shader::Attribute::Index::Attribute_0 &&
+            index <= Tegra::Shader::Attribute::Index::Attribute_31) {
+            // TODO(Blinkhawk): There are cases where a perspective attribute use PASS.
+            // In theory by setting them as perspective, OpenGL does the perspective correction.
+            // A way must figured to reverse the last step of it.
+            if (input_mode.interpolation_mode == Tegra::Shader::IpaInterpMode::Multiply) {
+                value = Operation(OperationCode::FMul, PRECISE, value, GetRegister(instr.gpr20));
+            }
+        }
+        value = GetSaturatedFloat(value, instr.ipa.saturate);
 
         SetRegister(bb, instr.gpr0, value);
         break;
@@ -175,4 +186,4 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
     return pc;
 }
 
-} // namespace VideoCommon::Shader
\ No newline at end of file
+} // namespace VideoCommon::Shader
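A rough scalar model of the new IPA handling, useful when reading the IR above: in Multiply mode the fetched attribute is scaled by the source register (the FMul node), and the result is then optionally saturated. Plain floats stand in for IR nodes here; this is illustrative only, not emulator code:

#include <algorithm>
#include <cstdio>

enum class IpaInterpMode { Pass, Multiply, Constant, Sc };

// Scalar stand-in for the generated IR: attribute fetch, optional FMul, optional saturate.
float EmulateIpa(IpaInterpMode mode, float attribute, float gpr20, bool saturate) {
    float value = attribute;
    if (mode == IpaInterpMode::Multiply) {
        value *= gpr20; // corresponds to OperationCode::FMul with GetRegister(instr.gpr20)
    }
    return saturate ? std::clamp(value, 0.0f, 1.0f) : value;
}

int main() {
    std::printf("%f\n", EmulateIpa(IpaInterpMode::Multiply, 0.5f, 2.5f, true)); // prints 1.000000
    return 0;
}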
diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp
index be4635342..33b071747 100644
--- a/src/video_core/shader/track.cpp
+++ b/src/video_core/shader/track.cpp
@@ -20,9 +20,9 @@ std::pair<Node, s64> FindOperation(const NodeBlock& code, s64 cursor,
                 return {node, cursor};
         }
         if (const auto conditional = std::get_if<ConditionalNode>(node)) {
-            const auto& code = conditional->GetCode();
-            const auto [found, internal_cursor] =
-                FindOperation(code, static_cast<s64>(code.size() - 1), operation_code);
+            const auto& conditional_code = conditional->GetCode();
+            const auto [found, internal_cursor] = FindOperation(
+                conditional_code, static_cast<s64>(conditional_code.size() - 1), operation_code);
             if (found)
                 return {found, cursor};
         }
@@ -58,8 +58,8 @@ Node ShaderIR::TrackCbuf(Node tracked, const NodeBlock& code, s64 cursor) {
         return nullptr;
     }
     if (const auto conditional = std::get_if<ConditionalNode>(tracked)) {
-        const auto& code = conditional->GetCode();
-        return TrackCbuf(tracked, code, static_cast<s64>(code.size()));
+        const auto& conditional_code = conditional->GetCode();
+        return TrackCbuf(tracked, conditional_code, static_cast<s64>(conditional_code.size()));
     }
     return nullptr;
 }