Diffstat (limited to 'src')
46 files changed, 1563 insertions, 499 deletions
diff --git a/src/core/file_sys/submission_package.cpp b/src/core/file_sys/submission_package.cpp
index 8b3b14e25..730221fd6 100644
--- a/src/core/file_sys/submission_package.cpp
+++ b/src/core/file_sys/submission_package.cpp
@@ -14,6 +14,7 @@
 #include "core/file_sys/content_archive.h"
 #include "core/file_sys/nca_metadata.h"
 #include "core/file_sys/partition_filesystem.h"
+#include "core/file_sys/program_metadata.h"
 #include "core/file_sys/submission_package.h"
 #include "core/loader/loader.h"
 
@@ -78,6 +79,10 @@ Loader::ResultStatus NSP::GetStatus() const {
 }
 
 Loader::ResultStatus NSP::GetProgramStatus(u64 title_id) const {
+    if (IsExtractedType() && GetExeFS() != nullptr && FileSys::IsDirectoryExeFS(GetExeFS())) {
+        return Loader::ResultStatus::Success;
+    }
+
     const auto iter = program_status.find(title_id);
     if (iter == program_status.end())
         return Loader::ResultStatus::ErrorNSPMissingProgramNCA;
@@ -85,12 +90,29 @@ Loader::ResultStatus NSP::GetProgramStatus(u64 title_id) const {
 }
 
 u64 NSP::GetFirstTitleID() const {
+    if (IsExtractedType()) {
+        return GetProgramTitleID();
+    }
+
     if (program_status.empty())
         return 0;
     return program_status.begin()->first;
 }
 
 u64 NSP::GetProgramTitleID() const {
+    if (IsExtractedType()) {
+        if (GetExeFS() == nullptr || !IsDirectoryExeFS(GetExeFS())) {
+            return 0;
+        }
+
+        ProgramMetadata meta;
+        if (meta.Load(GetExeFS()->GetFile("main.npdm")) == Loader::ResultStatus::Success) {
+            return meta.GetTitleID();
+        } else {
+            return 0;
+        }
+    }
+
     const auto out = GetFirstTitleID();
     if ((out & 0x800) == 0)
         return out;
@@ -102,6 +124,10 @@ u64 NSP::GetProgramTitleID() const {
 }
 
 std::vector<u64> NSP::GetTitleIDs() const {
+    if (IsExtractedType()) {
+        return {GetProgramTitleID()};
+    }
+
     std::vector<u64> out;
     out.reserve(ncas.size());
     for (const auto& kv : ncas)
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp
index c01ee3eda..a7c55e116 100644
--- a/src/core/hle/service/acc/acc.cpp
+++ b/src/core/hle/service/acc/acc.cpp
@@ -31,6 +31,9 @@
 
 namespace Service::Account {
 
+constexpr ResultCode ERR_INVALID_BUFFER_SIZE{ErrorModule::Account, 30};
+constexpr ResultCode ERR_FAILED_SAVE_DATA{ErrorModule::Account, 100};
+
 static std::string GetImagePath(Common::UUID uuid) {
     return FileUtil::GetUserPath(FileUtil::UserPath::NANDDir) +
            "/system/save/8000000000000010/su/avators/" + uuid.FormatSwitch() + ".jpg";
@@ -41,20 +44,31 @@ static constexpr u32 SanitizeJPEGSize(std::size_t size) {
     return static_cast<u32>(std::min(size, max_jpeg_image_size));
 }
 
-class IProfile final : public ServiceFramework<IProfile> {
+class IProfileCommon : public ServiceFramework<IProfileCommon> {
 public:
-    explicit IProfile(Common::UUID user_id, ProfileManager& profile_manager)
-        : ServiceFramework("IProfile"), profile_manager(profile_manager), user_id(user_id) {
+    explicit IProfileCommon(const char* name, bool editor_commands, Common::UUID user_id,
+                            ProfileManager& profile_manager)
+        : ServiceFramework(name), profile_manager(profile_manager), user_id(user_id) {
         static const FunctionInfo functions[] = {
-            {0, &IProfile::Get, "Get"},
-            {1, &IProfile::GetBase, "GetBase"},
-            {10, &IProfile::GetImageSize, "GetImageSize"},
-            {11, &IProfile::LoadImage, "LoadImage"},
+            {0, &IProfileCommon::Get, "Get"},
+            {1, &IProfileCommon::GetBase, "GetBase"},
+            {10, &IProfileCommon::GetImageSize, "GetImageSize"},
+            {11, &IProfileCommon::LoadImage, "LoadImage"},
         };
+
         RegisterHandlers(functions);
+
+        if (editor_commands) {
+            static const FunctionInfo editor_functions[] = {
+                {100, &IProfileCommon::Store, "Store"},
+                {101, &IProfileCommon::StoreWithImage, "StoreWithImage"},
+            };
+
+            RegisterHandlers(editor_functions);
+        }
     }
 
-private:
+protected:
     void Get(Kernel::HLERequestContext& ctx) {
         LOG_INFO(Service_ACC, "called user_id={}", user_id.Format());
         ProfileBase profile_base{};
@@ -127,10 +141,91 @@ private:
         }
     }
 
-    const ProfileManager& profile_manager;
+    void Store(Kernel::HLERequestContext& ctx) {
+        IPC::RequestParser rp{ctx};
+        const auto base = rp.PopRaw<ProfileBase>();
+
+        const auto user_data = ctx.ReadBuffer();
+
+        LOG_DEBUG(Service_ACC, "called, username='{}', timestamp={:016X}, uuid={}",
+                  Common::StringFromFixedZeroTerminatedBuffer(
+                      reinterpret_cast<const char*>(base.username.data()), base.username.size()),
+                  base.timestamp, base.user_uuid.Format());
+
+        if (user_data.size() < sizeof(ProfileData)) {
+            LOG_ERROR(Service_ACC, "ProfileData buffer too small!");
+            IPC::ResponseBuilder rb{ctx, 2};
+            rb.Push(ERR_INVALID_BUFFER_SIZE);
+            return;
+        }
+
+        ProfileData data;
+        std::memcpy(&data, user_data.data(), sizeof(ProfileData));
+
+        if (!profile_manager.SetProfileBaseAndData(user_id, base, data)) {
+            LOG_ERROR(Service_ACC, "Failed to update profile data and base!");
+            IPC::ResponseBuilder rb{ctx, 2};
+            rb.Push(ERR_FAILED_SAVE_DATA);
+            return;
+        }
+
+        IPC::ResponseBuilder rb{ctx, 2};
+        rb.Push(RESULT_SUCCESS);
+    }
+
+    void StoreWithImage(Kernel::HLERequestContext& ctx) {
+        IPC::RequestParser rp{ctx};
+        const auto base = rp.PopRaw<ProfileBase>();
+
+        const auto user_data = ctx.ReadBuffer();
+        const auto image_data = ctx.ReadBuffer(1);
+
+        LOG_DEBUG(Service_ACC, "called, username='{}', timestamp={:016X}, uuid={}",
+                  Common::StringFromFixedZeroTerminatedBuffer(
+                      reinterpret_cast<const char*>(base.username.data()), base.username.size()),
+                  base.timestamp, base.user_uuid.Format());
+
+        if (user_data.size() < sizeof(ProfileData)) {
+            LOG_ERROR(Service_ACC, "ProfileData buffer too small!");
+            IPC::ResponseBuilder rb{ctx, 2};
+            rb.Push(ERR_INVALID_BUFFER_SIZE);
+            return;
+        }
+
+        ProfileData data;
+        std::memcpy(&data, user_data.data(), sizeof(ProfileData));
+
+        FileUtil::IOFile image(GetImagePath(user_id), "wb");
+
+        if (!image.IsOpen() || !image.Resize(image_data.size()) ||
+            image.WriteBytes(image_data.data(), image_data.size()) != image_data.size() ||
+            !profile_manager.SetProfileBaseAndData(user_id, base, data)) {
+            LOG_ERROR(Service_ACC, "Failed to update profile data, base, and image!");
+            IPC::ResponseBuilder rb{ctx, 2};
+            rb.Push(ERR_FAILED_SAVE_DATA);
+            return;
+        }
+
+        IPC::ResponseBuilder rb{ctx, 2};
+        rb.Push(RESULT_SUCCESS);
+    }
+
+    ProfileManager& profile_manager;
     Common::UUID user_id; ///< The user id this profile refers to.
 };
 
+class IProfile final : public IProfileCommon {
+public:
+    IProfile(Common::UUID user_id, ProfileManager& profile_manager)
+        : IProfileCommon("IProfile", false, user_id, profile_manager) {}
+};
+
+class IProfileEditor final : public IProfileCommon {
+public:
+    IProfileEditor(Common::UUID user_id, ProfileManager& profile_manager)
+        : IProfileCommon("IProfileEditor", true, user_id, profile_manager) {}
+};
+
 class IManagerForApplication final : public ServiceFramework<IManagerForApplication> {
 public:
     IManagerForApplication() : ServiceFramework("IManagerForApplication") {
@@ -322,6 +417,17 @@ void Module::Interface::IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx
     rb.Push(is_locked);
 }
 
+void Module::Interface::GetProfileEditor(Kernel::HLERequestContext& ctx) {
+    IPC::RequestParser rp{ctx};
+    Common::UUID user_id = rp.PopRaw<Common::UUID>();
+
+    LOG_DEBUG(Service_ACC, "called, user_id={}", user_id.Format());
+
+    IPC::ResponseBuilder rb{ctx, 2, 0, 1};
+    rb.Push(RESULT_SUCCESS);
+    rb.PushIpcInterface<IProfileEditor>(user_id, *profile_manager);
+}
+
 void Module::Interface::TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx) {
     LOG_DEBUG(Service_ACC, "called");
     // A u8 is passed into this function which we can safely ignore. It's to determine if we have
diff --git a/src/core/hle/service/acc/acc.h b/src/core/hle/service/acc/acc.h
index f651773b7..7a7dc9ec6 100644
--- a/src/core/hle/service/acc/acc.h
+++ b/src/core/hle/service/acc/acc.h
@@ -32,6 +32,7 @@ public:
     void IsUserRegistrationRequestPermitted(Kernel::HLERequestContext& ctx);
     void TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx);
     void IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx);
+    void GetProfileEditor(Kernel::HLERequestContext& ctx);
 
 private:
     ResultCode InitializeApplicationInfoBase(u64 process_id);
diff --git a/src/core/hle/service/acc/acc_su.cpp b/src/core/hle/service/acc/acc_su.cpp
index 1b7ec3ed0..0d1663657 100644
--- a/src/core/hle/service/acc/acc_su.cpp
+++ b/src/core/hle/service/acc/acc_su.cpp
@@ -41,7 +41,7 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
         {202, nullptr, "CancelUserRegistration"},
         {203, nullptr, "DeleteUser"},
         {204, nullptr, "SetUserPosition"},
-        {205, nullptr, "GetProfileEditor"},
+        {205, &ACC_SU::GetProfileEditor, "GetProfileEditor"},
         {206, nullptr, "CompleteUserRegistrationForcibly"},
         {210, nullptr, "CreateFloatingRegistrationRequest"},
         {230, nullptr, "AuthenticateServiceAsync"},
diff --git a/src/core/hle/service/acc/profile_manager.cpp b/src/core/hle/service/acc/profile_manager.cpp
index 49aa5908b..8f9986326 100644
--- a/src/core/hle/service/acc/profile_manager.cpp
+++ b/src/core/hle/service/acc/profile_manager.cpp
@@ -305,6 +305,17 @@ bool ProfileManager::SetProfileBase(UUID uuid, const ProfileBase& profile_new) {
     return true;
 }
 
+bool ProfileManager::SetProfileBaseAndData(Common::UUID uuid, const ProfileBase& profile_new,
+                                           const ProfileData& data_new) {
+    const auto index = GetUserIndex(uuid);
+    if (index.has_value() && SetProfileBase(uuid, profile_new)) {
+        profiles[*index].data = data_new;
+        return true;
+    }
+
+    return false;
+}
+
 void ProfileManager::ParseUserSaveFile() {
     FileUtil::IOFile save(FileUtil::GetUserPath(FileUtil::UserPath::NANDDir) +
                               ACC_SAVE_AVATORS_BASE_PATH + "profiles.dat",
diff --git a/src/core/hle/service/acc/profile_manager.h b/src/core/hle/service/acc/profile_manager.h
index fd7abb541..5a6d28925 100644
--- a/src/core/hle/service/acc/profile_manager.h
+++ b/src/core/hle/service/acc/profile_manager.h
@@ -91,6 +91,8 @@ public:
 
     bool RemoveUser(Common::UUID uuid);
    bool SetProfileBase(Common::UUID uuid, const ProfileBase& profile_new);
+    bool SetProfileBaseAndData(Common::UUID uuid, const ProfileBase& profile_new,
+                               const ProfileData& data_new);
 
 private:
     void ParseUserSaveFile();
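Note (illustrative, not part of the patch): a minimal sketch of how the new ProfileManager::SetProfileBaseAndData entry point can be driven, mirroring the Store handler above. The wrapper function name is invented for the example, and the Service::Account namespace for ProfileBase/ProfileData is assumed from the headers touched here.

// Hypothetical helper, not present in yuzu; it only forwards to the new API.
bool UpdateProfile(Service::Account::ProfileManager& manager, Common::UUID uuid,
                   const Service::Account::ProfileBase& base,
                   const Service::Account::ProfileData& data) {
    // SetProfileBaseAndData fails when the uuid is unknown or the base cannot
    // be applied; on success both the base and the extra data are stored.
    return manager.SetProfileBaseAndData(uuid, base, data);
}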
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index 241dac881..b4ee2a255 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -146,8 +146,8 @@ u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& outp
     }
     IoctlSubmitGpfifo params{};
     std::memcpy(&params, input.data(), sizeof(IoctlSubmitGpfifo));
-    LOG_WARNING(Service_NVDRV, "(STUBBED) called, gpfifo={:X}, num_entries={:X}, flags={:X}",
-                params.address, params.num_entries, params.flags.raw);
+    LOG_TRACE(Service_NVDRV, "called, gpfifo={:X}, num_entries={:X}, flags={:X}", params.address,
+              params.num_entries, params.flags.raw);
 
     ASSERT_MSG(input.size() == sizeof(IoctlSubmitGpfifo) +
                                    params.num_entries * sizeof(Tegra::CommandListHeader),
@@ -179,8 +179,8 @@ u32 nvhost_gpu::KickoffPB(const std::vector<u8>& input, std::vector<u8>& output)
     }
     IoctlSubmitGpfifo params{};
     std::memcpy(&params, input.data(), sizeof(IoctlSubmitGpfifo));
-    LOG_WARNING(Service_NVDRV, "(STUBBED) called, gpfifo={:X}, num_entries={:X}, flags={:X}",
-                params.address, params.num_entries, params.flags.raw);
+    LOG_TRACE(Service_NVDRV, "called, gpfifo={:X}, num_entries={:X}, flags={:X}", params.address,
+              params.num_entries, params.flags.raw);
 
     Tegra::CommandList entries(params.num_entries);
     Memory::ReadBlock(params.address, entries.data(),
diff --git a/src/core/loader/nsp.cpp b/src/core/loader/nsp.cpp
index b1171ce65..35c82c99d 100644
--- a/src/core/loader/nsp.cpp
+++ b/src/core/loader/nsp.cpp
@@ -26,20 +26,18 @@ AppLoader_NSP::AppLoader_NSP(FileSys::VirtualFile file)
 
     if (nsp->GetStatus() != ResultStatus::Success)
         return;
-    if (nsp->IsExtractedType())
-        return;
-
-    const auto control_nca =
-        nsp->GetNCA(nsp->GetProgramTitleID(), FileSys::ContentRecordType::Control);
-    if (control_nca == nullptr || control_nca->GetStatus() != ResultStatus::Success)
-        return;
-
-    std::tie(nacp_file, icon_file) =
-        FileSys::PatchManager(nsp->GetProgramTitleID()).ParseControlNCA(*control_nca);
 
     if (nsp->IsExtractedType()) {
         secondary_loader = std::make_unique<AppLoader_DeconstructedRomDirectory>(nsp->GetExeFS());
     } else {
+        const auto control_nca =
+            nsp->GetNCA(nsp->GetProgramTitleID(), FileSys::ContentRecordType::Control);
+        if (control_nca == nullptr || control_nca->GetStatus() != ResultStatus::Success)
+            return;
+
+        std::tie(nacp_file, icon_file) =
+            FileSys::PatchManager(nsp->GetProgramTitleID()).ParseControlNCA(*control_nca);
+
         if (title_id == 0)
             return;
 
@@ -56,11 +54,11 @@ FileType AppLoader_NSP::IdentifyType(const FileSys::VirtualFile& file) {
     if (nsp.GetStatus() == ResultStatus::Success) {
         // Extracted Type case
         if (nsp.IsExtractedType() && nsp.GetExeFS() != nullptr &&
-            FileSys::IsDirectoryExeFS(nsp.GetExeFS()) && nsp.GetRomFS() != nullptr) {
+            FileSys::IsDirectoryExeFS(nsp.GetExeFS())) {
             return FileType::NSP;
         }
 
-        // Non-Ectracted Type case
+        // Non-Extracted Type case
         if (!nsp.IsExtractedType() &&
             nsp.GetNCA(nsp.GetFirstTitleID(), FileSys::ContentRecordType::Program) != nullptr &&
             AppLoader_NCA::IdentifyType(nsp.GetNCAFile(
@@ -77,7 +75,7 @@ AppLoader_NSP::LoadResult AppLoader_NSP::Load(Kernel::Process& process) {
         return {ResultStatus::ErrorAlreadyLoaded, {}};
     }
 
-    if (title_id == 0) {
+    if (!nsp->IsExtractedType() && title_id == 0) {
         return {ResultStatus::ErrorNSPMissingProgramNCA, {}};
     }
 
@@ -91,7 +89,8 @@ AppLoader_NSP::LoadResult AppLoader_NSP::Load(Kernel::Process& process) {
         return {nsp_program_status, {}};
     }
 
-    if (nsp->GetNCA(title_id, FileSys::ContentRecordType::Program) == nullptr) {
+    if (!nsp->IsExtractedType() &&
+        nsp->GetNCA(title_id, FileSys::ContentRecordType::Program) == nullptr) {
         if (!Core::Crypto::KeyManager::KeyFileExists(false)) {
             return {ResultStatus::ErrorMissingProductionKeyFile, {}};
         }
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 8555691c0..9e030789d 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -43,8 +43,13 @@ static void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* me
 
     // During boot, current_page_table might not be set yet, in which case we need not flush
     if (Core::System::GetInstance().IsPoweredOn()) {
-        Core::System::GetInstance().GPU().FlushAndInvalidateRegion(base << PAGE_BITS,
-                                                                   size * PAGE_SIZE);
+        auto& gpu = Core::System::GetInstance().GPU();
+        for (u64 i = 0; i < size; i++) {
+            const auto page = base + i;
+            if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) {
+                gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
+            }
+        }
     }
 
     VAddr end = base + size;
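Note (illustrative, not part of the patch): the replacement loop above flushes per page rather than once over the whole mapping, and only for pages already marked RasterizerCachedMemory. A standalone sketch of the page-index-to-address arithmetic it relies on, with a 4 KiB page size assumed for the example:

#include <cstdint>
#include <cstdio>

int main() {
    constexpr std::uint64_t PAGE_BITS = 12;               // assumed 4 KiB pages
    constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;
    const std::uint64_t base = 0x12345;                   // example page index
    const std::uint64_t size = 4;                         // number of pages mapped
    for (std::uint64_t i = 0; i < size; ++i) {
        const std::uint64_t page = base + i;
        // Each flush covers exactly one page: [page << PAGE_BITS, +PAGE_SIZE).
        std::printf("flush 0x%llx (+0x%llx)\n",
                    static_cast<unsigned long long>(page << PAGE_BITS),
                    static_cast<unsigned long long>(PAGE_SIZE));
    }
    return 0;
}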
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 08586d33c..63d449135 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -2,6 +2,7 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <bitset>
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
@@ -49,6 +50,33 @@ void KeplerCompute::CallMethod(const GPU::MethodCall& method_call) {
     }
 }
 
+Tegra::Texture::FullTextureInfo KeplerCompute::GetTexture(std::size_t offset) const {
+    const std::bitset<8> cbuf_mask = launch_description.const_buffer_enable_mask.Value();
+    ASSERT(cbuf_mask[regs.tex_cb_index]);
+
+    const auto& texinfo = launch_description.const_buffer_config[regs.tex_cb_index];
+    ASSERT(texinfo.Address() != 0);
+
+    const GPUVAddr address = texinfo.Address() + offset * sizeof(Texture::TextureHandle);
+    ASSERT(address < texinfo.Address() + texinfo.size);
+
+    const Texture::TextureHandle tex_handle{memory_manager.Read<u32>(address)};
+    return GetTextureInfo(tex_handle, offset);
+}
+
+Texture::FullTextureInfo KeplerCompute::GetTextureInfo(const Texture::TextureHandle tex_handle,
+                                                       std::size_t offset) const {
+    return Texture::FullTextureInfo{static_cast<u32>(offset), GetTICEntry(tex_handle.tic_id),
+                                    GetTSCEntry(tex_handle.tsc_id)};
+}
+
+u32 KeplerCompute::AccessConstBuffer32(u64 const_buffer, u64 offset) const {
+    const auto& buffer = launch_description.const_buffer_config[const_buffer];
+    u32 result;
+    std::memcpy(&result, memory_manager.GetPointer(buffer.Address() + offset), sizeof(u32));
+    return result;
+}
+
 void KeplerCompute::ProcessLaunch() {
     const GPUVAddr launch_desc_loc = regs.launch_desc_loc.Address();
     memory_manager.ReadBlockUnsafe(launch_desc_loc, &launch_description,
@@ -60,4 +88,29 @@ void KeplerCompute::ProcessLaunch() {
     rasterizer.DispatchCompute(code_addr);
 }
 
+Texture::TICEntry KeplerCompute::GetTICEntry(u32 tic_index) const {
+    const GPUVAddr tic_address_gpu{regs.tic.Address() + tic_index * sizeof(Texture::TICEntry)};
+
+    Texture::TICEntry tic_entry;
+    memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry));
+
+    const auto r_type{tic_entry.r_type.Value()};
+    const auto g_type{tic_entry.g_type.Value()};
+    const auto b_type{tic_entry.b_type.Value()};
+    const auto a_type{tic_entry.a_type.Value()};
+
+    // TODO(Subv): Different data types for separate components are not supported
+    DEBUG_ASSERT(r_type == g_type && r_type == b_type && r_type == a_type);
+
+    return tic_entry;
+}
+
+Texture::TSCEntry KeplerCompute::GetTSCEntry(u32 tsc_index) const {
+    const GPUVAddr tsc_address_gpu{regs.tsc.Address() + tsc_index * sizeof(Texture::TSCEntry)};
+
+    Texture::TSCEntry tsc_entry;
+    memory_manager.ReadBlockUnsafe(tsc_address_gpu, &tsc_entry, sizeof(Texture::TSCEntry));
+    return tsc_entry;
+}
+
 } // namespace Tegra::Engines
diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h
index 6a3309a2c..90cf650d2 100644
--- a/src/video_core/engines/kepler_compute.h
+++ b/src/video_core/engines/kepler_compute.h
@@ -12,6 +12,7 @@
 #include "common/common_types.h"
 #include "video_core/engines/engine_upload.h"
 #include "video_core/gpu.h"
+#include "video_core/textures/texture.h"
 
 namespace Core {
 class System;
@@ -111,7 +112,7 @@ public:
 
         INSERT_PADDING_WORDS(0x3FE);
 
-        u32 texture_const_buffer_index;
+        u32 tex_cb_index;
 
         INSERT_PADDING_WORDS(0x374);
     };
@@ -149,7 +150,7 @@ public:
         union {
            BitField<0, 8, u32> const_buffer_enable_mask;
            BitField<29, 2, u32> cache_layout;
-        } memory_config;
+        };
 
         INSERT_PADDING_WORDS(0x8);
 
@@ -194,6 +195,14 @@ public:
     /// Write the value to the register identified by method.
     void CallMethod(const GPU::MethodCall& method_call);
 
+    Tegra::Texture::FullTextureInfo GetTexture(std::size_t offset) const;
+
+    /// Given a Texture Handle, returns the TSC and TIC entries.
+    Texture::FullTextureInfo GetTextureInfo(const Texture::TextureHandle tex_handle,
+                                            std::size_t offset) const;
+
+    u32 AccessConstBuffer32(u64 const_buffer, u64 offset) const;
+
 private:
     Core::System& system;
     VideoCore::RasterizerInterface& rasterizer;
@@ -201,6 +210,12 @@ private:
     Upload::State upload_state;
 
     void ProcessLaunch();
+
+    /// Retrieves information about a specific TIC entry from the TIC buffer.
+    Texture::TICEntry GetTICEntry(u32 tic_index) const;
+
+    /// Retrieves information about a specific TSC entry from the TSC buffer.
+    Texture::TSCEntry GetTSCEntry(u32 tsc_index) const;
 };
 
 #define ASSERT_REG_POSITION(field_name, position) \
@@ -218,12 +233,12 @@ ASSERT_REG_POSITION(launch, 0xAF);
 ASSERT_REG_POSITION(tsc, 0x557);
 ASSERT_REG_POSITION(tic, 0x55D);
 ASSERT_REG_POSITION(code_loc, 0x582);
-ASSERT_REG_POSITION(texture_const_buffer_index, 0x982);
+ASSERT_REG_POSITION(tex_cb_index, 0x982);
 ASSERT_LAUNCH_PARAM_POSITION(program_start, 0x8);
 ASSERT_LAUNCH_PARAM_POSITION(grid_dim_x, 0xC);
 ASSERT_LAUNCH_PARAM_POSITION(shared_alloc, 0x11);
 ASSERT_LAUNCH_PARAM_POSITION(block_dim_x, 0x12);
-ASSERT_LAUNCH_PARAM_POSITION(memory_config, 0x14);
+ASSERT_LAUNCH_PARAM_POSITION(const_buffer_enable_mask, 0x14);
 ASSERT_LAUNCH_PARAM_POSITION(const_buffer_config, 0x1D);
 
 #undef ASSERT_REG_POSITION
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index f5158d219..fb3d1112c 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -89,6 +89,9 @@ void Maxwell3D::InitializeRegisterDefaults() {
 
     // Commercial games seem to assume this value is enabled and nouveau sets this value manually.
     regs.rt_separate_frag_data = 1;
+
+    // Some games (like Super Mario Odyssey) assume that SRGB is enabled.
+    regs.framebuffer_srgb = 1;
 }
 
 #define DIRTY_REGS_POS(field_name) (offsetof(Maxwell3D::DirtyRegs, field_name))
@@ -244,7 +247,7 @@ void Maxwell3D::InitDirtySettings() {
     dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_clamp)] = polygon_offset_dirty_reg;
 }
 
-void Maxwell3D::CallMacroMethod(u32 method, std::vector<u32> parameters) {
+void Maxwell3D::CallMacroMethod(u32 method, std::size_t num_parameters, const u32* parameters) {
     // Reset the current macro.
     executing_macro = 0;
 
@@ -252,7 +255,7 @@ void Maxwell3D::CallMacroMethod(u32 method, std::vector<u32> parameters) {
     const u32 entry = ((method - MacroRegistersStart) >> 1) % macro_positions.size();
 
     // Execute the current macro.
-    macro_interpreter.Execute(macro_positions[entry], std::move(parameters));
+    macro_interpreter.Execute(macro_positions[entry], num_parameters, parameters);
 }
 
 void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
@@ -289,7 +292,8 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
 
     // Call the macro when there are no more parameters in the command buffer
     if (method_call.IsLastCall()) {
-        CallMacroMethod(executing_macro, std::move(macro_params));
+        CallMacroMethod(executing_macro, macro_params.size(), macro_params.data());
+        macro_params.clear();
     }
     return;
 }
@@ -328,6 +332,10 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
         ProcessMacroBind(method_call.argument);
         break;
     }
+    case MAXWELL3D_REG_INDEX(firmware[4]): {
+        ProcessFirmwareCall4();
+        break;
+    }
     case MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]):
     case MAXWELL3D_REG_INDEX(const_buffer.cb_data[1]):
     case MAXWELL3D_REG_INDEX(const_buffer.cb_data[2]):
@@ -418,6 +426,14 @@ void Maxwell3D::ProcessMacroBind(u32 data) {
     macro_positions[regs.macros.entry++] = data;
 }
 
+void Maxwell3D::ProcessFirmwareCall4() {
+    LOG_WARNING(HW_GPU, "(STUBBED) called");
+
+    // Firmware call 4 is a blob that changes some registers depending on its parameters.
+    // These registers don't affect emulation and so are stubbed by setting 0xd00 to 1.
+    regs.reg_array[0xd00] = 1;
+}
+
 void Maxwell3D::ProcessQueryGet() {
     const GPUVAddr sequence_address{regs.query.QueryAddress()};
     // Since the sequence address is given as a GPU VAddr, we have to convert it to an application
@@ -525,7 +541,7 @@ void Maxwell3D::ProcessSyncPoint() {
 }
 
 void Maxwell3D::DrawArrays() {
-    LOG_DEBUG(HW_GPU, "called, topology={}, count={}", static_cast<u32>(regs.draw.topology.Value()),
+    LOG_TRACE(HW_GPU, "called, topology={}, count={}", static_cast<u32>(regs.draw.topology.Value()),
               regs.vertex_buffer.count);
     ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?");
 
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 0184342a0..e5ec90717 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -62,6 +62,7 @@ public:
     static constexpr std::size_t NumVertexAttributes = 32;
     static constexpr std::size_t NumVaryings = 31;
     static constexpr std::size_t NumTextureSamplers = 32;
+    static constexpr std::size_t NumImages = 8; // TODO(Rodrigo): Investigate this number
     static constexpr std::size_t NumClipDistances = 8;
     static constexpr std::size_t MaxShaderProgram = 6;
     static constexpr std::size_t MaxShaderStage = 5;
@@ -1088,7 +1089,9 @@ public:
             INSERT_PADDING_WORDS(14);
         } shader_config[MaxShaderProgram];
 
-        INSERT_PADDING_WORDS(0x80);
+        INSERT_PADDING_WORDS(0x60);
+
+        u32 firmware[0x20];
 
         struct {
             u32 cb_size;
@@ -1307,9 +1310,10 @@ private:
     /**
      * Call a macro on this engine.
      * @param method Method to call
+     * @param num_parameters Number of arguments
      * @param parameters Arguments to the method call
      */
-    void CallMacroMethod(u32 method, std::vector<u32> parameters);
+    void CallMacroMethod(u32 method, std::size_t num_parameters, const u32* parameters);
 
     /// Handles writes to the macro uploading register.
     void ProcessMacroUpload(u32 data);
@@ -1317,6 +1321,9 @@ private:
     /// Handles writes to the macro bind register.
     void ProcessMacroBind(u32 data);
 
+    /// Handles firmware blob 4
+    void ProcessFirmwareCall4();
+
     /// Handles a write to the CLEAR_BUFFERS register.
     void ProcessClearBuffers();
 
@@ -1429,6 +1436,7 @@ ASSERT_REG_POSITION(vertex_array[0], 0x700);
 ASSERT_REG_POSITION(independent_blend, 0x780);
 ASSERT_REG_POSITION(vertex_array_limit[0], 0x7C0);
 ASSERT_REG_POSITION(shader_config[0], 0x800);
+ASSERT_REG_POSITION(firmware, 0x8C0);
 ASSERT_REG_POSITION(const_buffer, 0x8E0);
 ASSERT_REG_POSITION(cb_bind[0], 0x904);
 ASSERT_REG_POSITION(tex_cb_index, 0x982);
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index c3678b9ea..a6110bd86 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -544,6 +544,35 @@ enum class VoteOperation : u64 {
     Eq = 2,  // allThreadsEqualNV
 };
 
+enum class ImageAtomicSize : u64 {
+    U32 = 0,
+    S32 = 1,
+    U64 = 2,
+    F32 = 3,
+    S64 = 5,
+    SD32 = 6,
+    SD64 = 7,
+};
+
+enum class ImageAtomicOperation : u64 {
+    Add = 0,
+    Min = 1,
+    Max = 2,
+    Inc = 3,
+    Dec = 4,
+    And = 5,
+    Or = 6,
+    Xor = 7,
+    Exch = 8,
+};
+
+enum class ShuffleOperation : u64 {
+    Idx = 0,  // shuffleNV
+    Up = 1,   // shuffleUpNV
+    Down = 2, // shuffleDownNV
+    Bfly = 3, // shuffleXorNV
+};
+
 union Instruction {
     Instruction& operator=(const Instruction& instr) {
         value = instr.value;
@@ -578,6 +607,15 @@ union Instruction {
     } vote;
 
     union {
+        BitField<30, 2, ShuffleOperation> operation;
+        BitField<48, 3, u64> pred48;
+        BitField<28, 1, u64> is_index_imm;
+        BitField<29, 1, u64> is_mask_imm;
+        BitField<20, 5, u64> index_imm;
+        BitField<34, 13, u64> mask_imm;
+    } shfl;
+
+    union {
         BitField<8, 8, Register> gpr;
         BitField<20, 24, s64> offset;
     } gmem;
@@ -675,6 +713,10 @@ union Instruction {
     } shift;
 
     union {
+        BitField<39, 1, u64> wrap;
+    } shr;
+
+    union {
         BitField<39, 5, u64> shift_amount;
         BitField<48, 1, u64> negate_b;
         BitField<49, 1, u64> negate_a;
@@ -1388,6 +1430,14 @@ union Instruction {
     } sust;
 
     union {
+        BitField<28, 1, u64> is_ba;
+        BitField<51, 3, ImageAtomicSize> size;
+        BitField<33, 3, ImageType> image_type;
+        BitField<29, 4, ImageAtomicOperation> operation;
+        BitField<49, 2, OutOfBoundsStore> out_of_bounds_store;
+    } suatom_d;
+
+    union {
         BitField<20, 24, u64> target;
         BitField<5, 1, u64> constant_buffer;
 
@@ -1508,6 +1558,7 @@ public:
         BRK,
        DEPBAR,
        VOTE,
+        SHFL,
        BFE_C,
        BFE_R,
        BFE_IMM,
@@ -1539,6 +1590,7 @@ public:
        TMML_B, // Texture Mip Map Level
        TMML,   // Texture Mip Map Level
        SUST,   // Surface Store
+        SUATOM, // Surface Atomic Operation
        EXIT,
        NOP,
        IPA,
@@ -1798,6 +1850,7 @@ private:
            INST("111000110000----", Id::EXIT, Type::Flow, "EXIT"),
            INST("1111000011110---", Id::DEPBAR, Type::Synch, "DEPBAR"),
            INST("0101000011011---", Id::VOTE, Type::Warp, "VOTE"),
+            INST("1110111100010---", Id::SHFL, Type::Warp, "SHFL"),
            INST("1110111111011---", Id::LD_A, Type::Memory, "LD_A"),
            INST("1110111101001---", Id::LD_S, Type::Memory, "LD_S"),
            INST("1110111101000---", Id::LD_L, Type::Memory, "LD_L"),
@@ -1822,6 +1875,7 @@ private:
            INST("110111110110----", Id::TMML_B, Type::Texture, "TMML_B"),
            INST("1101111101011---", Id::TMML, Type::Texture, "TMML"),
            INST("11101011001-----", Id::SUST, Type::Image, "SUST"),
+            INST("1110101000------", Id::SUATOM, Type::Image, "SUATOM_D"),
            INST("0101000010110---", Id::NOP, Type::Trivial, "NOP"),
            INST("11100000--------", Id::IPA, Type::Trivial, "IPA"),
            INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"),
diff --git a/src/video_core/macro_interpreter.cpp b/src/video_core/macro_interpreter.cpp
index 9f59a2dc1..62afc0d11 100644
--- a/src/video_core/macro_interpreter.cpp
+++ b/src/video_core/macro_interpreter.cpp
@@ -14,11 +14,18 @@ namespace Tegra {
 
 MacroInterpreter::MacroInterpreter(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
 
-void MacroInterpreter::Execute(u32 offset, std::vector<u32> parameters) {
+void MacroInterpreter::Execute(u32 offset, std::size_t num_parameters, const u32* parameters) {
     MICROPROFILE_SCOPE(MacroInterp);
     Reset();
+
     registers[1] = parameters[0];
-    this->parameters = std::move(parameters);
+
+    if (num_parameters > parameters_capacity) {
+        parameters_capacity = num_parameters;
+        this->parameters = std::make_unique<u32[]>(num_parameters);
+    }
+    std::memcpy(this->parameters.get(), parameters, num_parameters * sizeof(u32));
+    this->num_parameters = num_parameters;
 
     // Execute the code until we hit an exit condition.
     bool keep_executing = true;
@@ -27,7 +34,7 @@ void MacroInterpreter::Execute(u32 offset, std::vector<u32> parameters) {
     }
 
     // Assert the the macro used all the input parameters
-    ASSERT(next_parameter_index == this->parameters.size());
+    ASSERT(next_parameter_index == num_parameters);
 }
 
 void MacroInterpreter::Reset() {
@@ -35,7 +42,7 @@
     pc = 0;
     delayed_pc = {};
     method_address.raw = 0;
-    parameters.clear();
+    num_parameters = 0;
     // The next parameter index starts at 1, because $r1 already has the value of the first
     // parameter.
     next_parameter_index = 1;
@@ -124,9 +131,7 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
 
     // An instruction with the Exit flag will not actually
     // cause an exit if it's executed inside a delay slot.
-    // TODO(Blinkhawk): Reversed to always exit. The behavior explained above requires further
-    // testing on the MME code.
-    if (opcode.is_exit) {
+    if (opcode.is_exit && !is_delay_slot) {
         // Exit has a delay slot, execute the next instruction
         Step(offset, true);
         return false;
@@ -229,7 +234,8 @@ void MacroInterpreter::ProcessResult(ResultOperation operation, u32 reg, u32 res
 }
 
 u32 MacroInterpreter::FetchParameter() {
-    return parameters.at(next_parameter_index++);
+    ASSERT(next_parameter_index < num_parameters);
+    return parameters[next_parameter_index++];
 }
 
 u32 MacroInterpreter::GetRegister(u32 register_id) const {
diff --git a/src/video_core/macro_interpreter.h b/src/video_core/macro_interpreter.h
index cde360288..76b6a895b 100644
--- a/src/video_core/macro_interpreter.h
+++ b/src/video_core/macro_interpreter.h
@@ -25,7 +25,7 @@ public:
      * @param offset Offset to start execution at.
      * @param parameters The parameters of the macro.
      */
-    void Execute(u32 offset, std::vector<u32> parameters);
+    void Execute(u32 offset, std::size_t num_parameters, const u32* parameters);
 
 private:
     enum class Operation : u32 {
@@ -162,10 +162,12 @@ private:
     MethodAddress method_address = {};
 
     /// Input parameters of the current macro.
-    std::vector<u32> parameters;
+    std::unique_ptr<u32[]> parameters;
+    std::size_t num_parameters = 0;
+    std::size_t parameters_capacity = 0;
     /// Index of the next parameter that will be fetched by the 'parm' instruction.
     u32 next_parameter_index = 0;
 
-    bool carry_flag{};
+    bool carry_flag = false;
 };
 } // namespace Tegra
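Note (illustrative, not part of the patch): the header change above swaps the per-call std::vector for a grow-only buffer; parameters_capacity only increases, so repeated Execute calls stop allocating once the largest parameter count has been seen. A self-contained sketch of that pattern; the class name is invented for the example.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>

class ParamBuffer {
public:
    // Copy the caller's parameters, reallocating only when the previously
    // allocated capacity is too small (mirrors MacroInterpreter::Execute above).
    void Assign(const std::uint32_t* data, std::size_t count) {
        if (count == 0) {
            size = 0;
            return;
        }
        if (count > capacity) {
            capacity = count;
            storage = std::make_unique<std::uint32_t[]>(count);
        }
        std::memcpy(storage.get(), data, count * sizeof(std::uint32_t));
        size = count;
    }

    std::size_t Size() const {
        return size;
    }

    std::uint32_t Get(std::size_t index) const {
        // Callers are expected to stay in range, as the interpreter asserts.
        return index < size ? storage[index] : 0;
    }

private:
    std::unique_ptr<std::uint32_t[]> storage;
    std::size_t size = 0;
    std::size_t capacity = 0;
};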
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 01d89f47d..4dd08bccb 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -331,7 +331,7 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
         const auto stage_enum = static_cast<Maxwell::ShaderStage>(stage);
         SetupDrawConstBuffers(stage_enum, shader);
         SetupDrawGlobalMemory(stage_enum, shader);
-        const auto texture_buffer_usage{SetupTextures(stage_enum, shader, base_bindings)};
+        const auto texture_buffer_usage{SetupDrawTextures(stage_enum, shader, base_bindings)};
 
         const ProgramVariant variant{base_bindings, primitive_mode, texture_buffer_usage};
         const auto [program_handle, next_bindings] = shader->GetProgramHandle(variant);
@@ -489,9 +489,6 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers(
             // Assume that a surface will be written to if it is used as a framebuffer, even if
             // the shader doesn't actually write to it.
             texture_cache.MarkColorBufferInUse(*single_color_target);
-            // Workaround for and issue in nvidia drivers
-            // https://devtalk.nvidia.com/default/topic/776591/opengl/gl_framebuffer_srgb-functions-incorrectly/
-            state.framebuffer_srgb.enabled |= color_surface->GetSurfaceParams().srgb_conversion;
         }
 
         fbkey.is_single_buffer = true;
@@ -512,11 +509,6 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers(
                 // Assume that a surface will be written to if it is used as a framebuffer, even
                 // if the shader doesn't actually write to it.
                 texture_cache.MarkColorBufferInUse(index);
-                // Enable sRGB only for supported formats
-                // Workaround for and issue in nvidia drivers
-                // https://devtalk.nvidia.com/default/topic/776591/opengl/gl_framebuffer_srgb-functions-incorrectly/
-                state.framebuffer_srgb.enabled |=
-                    color_surface->GetSurfaceParams().srgb_conversion;
             }
 
             fbkey.color_attachments[index] =
@@ -801,7 +793,11 @@ void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
     }
 
     auto kernel = shader_cache.GetComputeKernel(code_addr);
-    const auto [program, next_bindings] = kernel->GetProgramHandle({});
+    ProgramVariant variant;
+    variant.texture_buffer_usage = SetupComputeTextures(kernel);
+    SetupComputeImages(kernel);
+
+    const auto [program, next_bindings] = kernel->GetProgramHandle(variant);
     state.draw.shader_program = program;
     state.draw.program_pipeline = 0;
 
@@ -816,13 +812,13 @@ void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
     SetupComputeConstBuffers(kernel);
     SetupComputeGlobalMemory(kernel);
 
-    // TODO(Rodrigo): Bind images and samplers
-
     buffer_cache.Unmap();
 
     bind_ubo_pushbuffer.Bind();
     bind_ssbo_pushbuffer.Bind();
 
+    state.ApplyTextures();
+    state.ApplyImages();
     state.ApplyShaderProgram();
     state.ApplyProgramPipeline();
 
@@ -902,6 +898,7 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
     }
 
     screen_info.display_texture = surface->GetTexture();
+    screen_info.display_srgb = surface->GetSurfaceParams().srgb_conversion;
 
     return true;
 }
@@ -922,7 +919,7 @@ void RasterizerOpenGL::SetupComputeConstBuffers(const Shader& kernel) {
     const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
     for (const auto& entry : kernel->GetShaderEntries().const_buffers) {
         const auto& config = launch_desc.const_buffer_config[entry.GetIndex()];
-        const std::bitset<8> mask = launch_desc.memory_config.const_buffer_enable_mask.Value();
+        const std::bitset<8> mask = launch_desc.const_buffer_enable_mask.Value();
         Tegra::Engines::ConstBufferInfo buffer;
         buffer.address = config.Address();
         buffer.size = config.size;
@@ -981,53 +978,125 @@ void RasterizerOpenGL::SetupGlobalMemory(const GLShader::GlobalMemoryEntry& entr
     bind_ssbo_pushbuffer.Push(ssbo, buffer_offset, static_cast<GLsizeiptr>(size));
 }
 
-TextureBufferUsage RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& shader,
-                                                   BaseBindings base_bindings) {
+TextureBufferUsage RasterizerOpenGL::SetupDrawTextures(Maxwell::ShaderStage stage,
+                                                       const Shader& shader,
+                                                       BaseBindings base_bindings) {
     MICROPROFILE_SCOPE(OpenGL_Texture);
     const auto& gpu = system.GPU();
     const auto& maxwell3d = gpu.Maxwell3D();
     const auto& entries = shader->GetShaderEntries().samplers;
 
-    ASSERT_MSG(base_bindings.sampler + entries.size() <= std::size(state.texture_units),
+    ASSERT_MSG(base_bindings.sampler + entries.size() <= std::size(state.textures),
                "Exceeded the number of active textures.");
 
     TextureBufferUsage texture_buffer_usage{0};
 
     for (u32 bindpoint = 0; bindpoint < entries.size(); ++bindpoint) {
         const auto& entry = entries[bindpoint];
-        Tegra::Texture::FullTextureInfo texture;
-        if (entry.IsBindless()) {
+        const auto texture = [&]() {
+            if (!entry.IsBindless()) {
+                return maxwell3d.GetStageTexture(stage, entry.GetOffset());
+            }
             const auto cbuf = entry.GetBindlessCBuf();
             Tegra::Texture::TextureHandle tex_handle;
             tex_handle.raw = maxwell3d.AccessConstBuffer32(stage, cbuf.first, cbuf.second);
| 1003 | texture = maxwell3d.GetTextureInfo(tex_handle, entry.GetOffset()); | 1003 | return maxwell3d.GetTextureInfo(tex_handle, entry.GetOffset()); |
| 1004 | } else { | 1004 | }(); |
| 1005 | texture = maxwell3d.GetStageTexture(stage, entry.GetOffset()); | 1005 | |
| 1006 | if (SetupTexture(base_bindings.sampler + bindpoint, texture, entry)) { | ||
| 1007 | texture_buffer_usage.set(bindpoint); | ||
| 1006 | } | 1008 | } |
| 1007 | const u32 current_bindpoint = base_bindings.sampler + bindpoint; | 1009 | } |
| 1008 | 1010 | ||
| 1009 | auto& unit{state.texture_units[current_bindpoint]}; | 1011 | return texture_buffer_usage; |
| 1010 | unit.sampler = sampler_cache.GetSampler(texture.tsc); | 1012 | } |
| 1011 | 1013 | ||
| 1012 | if (const auto view{texture_cache.GetTextureSurface(texture, entry)}; view) { | 1014 | TextureBufferUsage RasterizerOpenGL::SetupComputeTextures(const Shader& kernel) { |
| 1013 | if (view->GetSurfaceParams().IsBuffer()) { | 1015 | MICROPROFILE_SCOPE(OpenGL_Texture); |
| 1014 | // Record that this texture is a texture buffer. | 1016 | const auto& compute = system.GPU().KeplerCompute(); |
| 1015 | texture_buffer_usage.set(bindpoint); | 1017 | const auto& entries = kernel->GetShaderEntries().samplers; |
| 1016 | } else { | 1018 | |
| 1017 | // Apply swizzle to textures that are not buffers. | 1019 | ASSERT_MSG(entries.size() <= std::size(state.textures), |
| 1018 | view->ApplySwizzle(texture.tic.x_source, texture.tic.y_source, texture.tic.z_source, | 1020 | "Exceeded the number of active textures."); |
| 1019 | texture.tic.w_source); | 1021 | |
| 1022 | TextureBufferUsage texture_buffer_usage{0}; | ||
| 1023 | |||
| 1024 | for (u32 bindpoint = 0; bindpoint < entries.size(); ++bindpoint) { | ||
| 1025 | const auto& entry = entries[bindpoint]; | ||
| 1026 | const auto texture = [&]() { | ||
| 1027 | if (!entry.IsBindless()) { | ||
| 1028 | return compute.GetTexture(entry.GetOffset()); | ||
| 1020 | } | 1029 | } |
| 1021 | state.texture_units[current_bindpoint].texture = view->GetTexture(); | 1030 | const auto cbuf = entry.GetBindlessCBuf(); |
| 1022 | } else { | 1031 | Tegra::Texture::TextureHandle tex_handle; |
| 1023 | // Can occur when texture addr is null or its memory is unmapped/invalid | 1032 | tex_handle.raw = compute.AccessConstBuffer32(cbuf.first, cbuf.second); |
| 1024 | unit.texture = 0; | 1033 | return compute.GetTextureInfo(tex_handle, entry.GetOffset()); |
| 1034 | }(); | ||
| 1035 | |||
| 1036 | if (SetupTexture(bindpoint, texture, entry)) { | ||
| 1037 | texture_buffer_usage.set(bindpoint); | ||
| 1025 | } | 1038 | } |
| 1026 | } | 1039 | } |
| 1027 | 1040 | ||
| 1028 | return texture_buffer_usage; | 1041 | return texture_buffer_usage; |
| 1029 | } | 1042 | } |
| 1030 | 1043 | ||
| 1044 | bool RasterizerOpenGL::SetupTexture(u32 binding, const Tegra::Texture::FullTextureInfo& texture, | ||
| 1045 | const GLShader::SamplerEntry& entry) { | ||
| 1046 | state.samplers[binding] = sampler_cache.GetSampler(texture.tsc); | ||
| 1047 | |||
| 1048 | const auto view = texture_cache.GetTextureSurface(texture.tic, entry); | ||
| 1049 | if (!view) { | ||
| 1050 | // Can occur when texture addr is null or its memory is unmapped/invalid | ||
| 1051 | state.textures[binding] = 0; | ||
| 1052 | return false; | ||
| 1053 | } | ||
| 1054 | state.textures[binding] = view->GetTexture(); | ||
| 1055 | |||
| 1056 | if (view->GetSurfaceParams().IsBuffer()) { | ||
| 1057 | return true; | ||
| 1058 | } | ||
| 1059 | |||
| 1060 | // Apply swizzle to textures that are not buffers. | ||
| 1061 | view->ApplySwizzle(texture.tic.x_source, texture.tic.y_source, texture.tic.z_source, | ||
| 1062 | texture.tic.w_source); | ||
| 1063 | return false; | ||
| 1064 | } | ||
| 1065 | |||
| 1066 | void RasterizerOpenGL::SetupComputeImages(const Shader& shader) { | ||
| 1067 | const auto& compute = system.GPU().KeplerCompute(); | ||
| 1068 | const auto& entries = shader->GetShaderEntries().images; | ||
| 1069 | for (u32 bindpoint = 0; bindpoint < entries.size(); ++bindpoint) { | ||
| 1070 | const auto& entry = entries[bindpoint]; | ||
| 1071 | const auto tic = [&]() { | ||
| 1072 | if (!entry.IsBindless()) { | ||
| 1073 | return compute.GetTexture(entry.GetOffset()).tic; | ||
| 1074 | } | ||
| 1075 | const auto cbuf = entry.GetBindlessCBuf(); | ||
| 1076 | Tegra::Texture::TextureHandle tex_handle; | ||
| 1077 | tex_handle.raw = compute.AccessConstBuffer32(cbuf.first, cbuf.second); | ||
| 1078 | return compute.GetTextureInfo(tex_handle, entry.GetOffset()).tic; | ||
| 1079 | }(); | ||
| 1080 | SetupImage(bindpoint, tic, entry); | ||
| 1081 | } | ||
| 1082 | } | ||
| 1083 | |||
| 1084 | void RasterizerOpenGL::SetupImage(u32 binding, const Tegra::Texture::TICEntry& tic, | ||
| 1085 | const GLShader::ImageEntry& entry) { | ||
| 1086 | const auto view = texture_cache.GetImageSurface(tic, entry); | ||
| 1087 | if (!view) { | ||
| 1088 | state.images[binding] = 0; | ||
| 1089 | return; | ||
| 1090 | } | ||
| 1091 | if (!tic.IsBuffer()) { | ||
| 1092 | view->ApplySwizzle(tic.x_source, tic.y_source, tic.z_source, tic.w_source); | ||
| 1093 | } | ||
| 1094 | if (entry.IsWritten()) { | ||
| 1095 | view->MarkAsModified(texture_cache.Tick()); | ||
| 1096 | } | ||
| 1097 | state.images[binding] = view->GetTexture(); | ||
| 1098 | } | ||
| 1099 | |||
| 1031 | void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) { | 1100 | void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) { |
| 1032 | const auto& regs = system.GPU().Maxwell3D().regs; | 1101 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1033 | const bool geometry_shaders_enabled = | 1102 | const bool geometry_shaders_enabled = |
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 9d20a4fbf..eada752e0 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include "video_core/renderer_opengl/gl_state.h" | 32 | #include "video_core/renderer_opengl/gl_state.h" |
| 33 | #include "video_core/renderer_opengl/gl_texture_cache.h" | 33 | #include "video_core/renderer_opengl/gl_texture_cache.h" |
| 34 | #include "video_core/renderer_opengl/utils.h" | 34 | #include "video_core/renderer_opengl/utils.h" |
| 35 | #include "video_core/textures/texture.h" | ||
| 35 | 36 | ||
| 36 | namespace Core { | 37 | namespace Core { |
| 37 | class System; | 38 | class System; |
| @@ -137,8 +138,22 @@ private: | |||
| 137 | 138 | ||
| 138 | /// Configures the current textures to use for the draw command. Returns shaders texture buffer | 139 | /// Configures the current textures to use for the draw command. Returns shaders texture buffer |
| 139 | /// usage. | 140 | /// usage. |
| 140 | TextureBufferUsage SetupTextures(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, | 141 | TextureBufferUsage SetupDrawTextures(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, |
| 141 | const Shader& shader, BaseBindings base_bindings); | 142 | const Shader& shader, BaseBindings base_bindings); |
| 143 | |||
| 144 | /// Configures the textures used in a compute shader. Returns texture buffer usage. | ||
| 145 | TextureBufferUsage SetupComputeTextures(const Shader& kernel); | ||
| 146 | |||
| 147 | /// Configures a texture. Returns true when the texture is a texture buffer. | ||
| 148 | bool SetupTexture(u32 binding, const Tegra::Texture::FullTextureInfo& texture, | ||
| 149 | const GLShader::SamplerEntry& entry); | ||
| 150 | |||
| 151 | /// Configures images in a compute shader. | ||
| 152 | void SetupComputeImages(const Shader& shader); | ||
| 153 | |||
| 154 | /// Configures an image. | ||
| 155 | void SetupImage(u32 binding, const Tegra::Texture::TICEntry& tic, | ||
| 156 | const GLShader::ImageEntry& entry); | ||
| 142 | 157 | ||
| 143 | /// Syncs the viewport and depth range to match the guest state | 158 | /// Syncs the viewport and depth range to match the guest state |
| 144 | void SyncViewport(OpenGLState& current_state); | 159 | void SyncViewport(OpenGLState& current_state); |
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index 909ccb82c..0dbc4c02f 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp | |||
| @@ -214,7 +214,8 @@ CachedProgram SpecializeShader(const std::string& code, const GLShader::ShaderEn | |||
| 214 | std::string source = "#version 430 core\n" | 214 | std::string source = "#version 430 core\n" |
| 215 | "#extension GL_ARB_separate_shader_objects : enable\n" | 215 | "#extension GL_ARB_separate_shader_objects : enable\n" |
| 216 | "#extension GL_NV_gpu_shader5 : enable\n" | 216 | "#extension GL_NV_gpu_shader5 : enable\n" |
| 217 | "#extension GL_NV_shader_thread_group : enable\n"; | 217 | "#extension GL_NV_shader_thread_group : enable\n" |
| 218 | "#extension GL_NV_shader_thread_shuffle : enable\n"; | ||
| 218 | if (entries.shader_viewport_layer_array) { | 219 | if (entries.shader_viewport_layer_array) { |
| 219 | source += "#extension GL_ARB_shader_viewport_layer_array : enable\n"; | 220 | source += "#extension GL_ARB_shader_viewport_layer_array : enable\n"; |
| 220 | } | 221 | } |
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index a5cc1a86f..76439e7ab 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp | |||
| @@ -325,6 +325,7 @@ public: | |||
| 325 | DeclareRegisters(); | 325 | DeclareRegisters(); |
| 326 | DeclarePredicates(); | 326 | DeclarePredicates(); |
| 327 | DeclareLocalMemory(); | 327 | DeclareLocalMemory(); |
| 328 | DeclareSharedMemory(); | ||
| 328 | DeclareInternalFlags(); | 329 | DeclareInternalFlags(); |
| 329 | DeclareInputAttributes(); | 330 | DeclareInputAttributes(); |
| 330 | DeclareOutputAttributes(); | 331 | DeclareOutputAttributes(); |
| @@ -389,11 +390,10 @@ public: | |||
| 389 | for (const auto& sampler : ir.GetSamplers()) { | 390 | for (const auto& sampler : ir.GetSamplers()) { |
| 390 | entries.samplers.emplace_back(sampler); | 391 | entries.samplers.emplace_back(sampler); |
| 391 | } | 392 | } |
| 392 | for (const auto& image : ir.GetImages()) { | 393 | for (const auto& [offset, image] : ir.GetImages()) { |
| 393 | entries.images.emplace_back(image); | 394 | entries.images.emplace_back(image); |
| 394 | } | 395 | } |
| 395 | for (const auto& gmem_pair : ir.GetGlobalMemory()) { | 396 | for (const auto& [base, usage] : ir.GetGlobalMemory()) { |
| 396 | const auto& [base, usage] = gmem_pair; | ||
| 397 | entries.global_memory_entries.emplace_back(base.cbuf_index, base.cbuf_offset, | 397 | entries.global_memory_entries.emplace_back(base.cbuf_index, base.cbuf_offset, |
| 398 | usage.is_read, usage.is_written); | 398 | usage.is_read, usage.is_written); |
| 399 | } | 399 | } |
| @@ -500,6 +500,13 @@ private: | |||
| 500 | code.AddNewLine(); | 500 | code.AddNewLine(); |
| 501 | } | 501 | } |
| 502 | 502 | ||
| 503 | void DeclareSharedMemory() { | ||
| 504 | if (stage != ProgramType::Compute) { | ||
| 505 | return; | ||
| 506 | } | ||
| 507 | code.AddLine("shared uint {}[];", GetSharedMemory()); | ||
| 508 | } | ||
| 509 | |||
| 503 | void DeclareInternalFlags() { | 510 | void DeclareInternalFlags() { |
| 504 | for (u32 flag = 0; flag < static_cast<u32>(InternalFlag::Amount); flag++) { | 511 | for (u32 flag = 0; flag < static_cast<u32>(InternalFlag::Amount); flag++) { |
| 505 | const auto flag_code = static_cast<InternalFlag>(flag); | 512 | const auto flag_code = static_cast<InternalFlag>(flag); |
| @@ -706,8 +713,8 @@ private: | |||
| 706 | 713 | ||
| 707 | void DeclareImages() { | 714 | void DeclareImages() { |
| 708 | const auto& images{ir.GetImages()}; | 715 | const auto& images{ir.GetImages()}; |
| 709 | for (const auto& image : images) { | 716 | for (const auto& [offset, image] : images) { |
| 710 | const std::string image_type = [&]() { | 717 | const char* image_type = [&] { |
| 711 | switch (image.GetType()) { | 718 | switch (image.GetType()) { |
| 712 | case Tegra::Shader::ImageType::Texture1D: | 719 | case Tegra::Shader::ImageType::Texture1D: |
| 713 | return "image1D"; | 720 | return "image1D"; |
| @@ -726,9 +733,33 @@ private: | |||
| 726 | return "image1D"; | 733 | return "image1D"; |
| 727 | } | 734 | } |
| 728 | }(); | 735 | }(); |
| 729 | code.AddLine("layout (binding = IMAGE_BINDING_{}) coherent volatile writeonly uniform " | 736 | |
| 737 | const auto [type_prefix, format] = [&]() -> std::pair<const char*, const char*> { | ||
| 738 | if (!image.IsSizeKnown()) { | ||
| 739 | return {"", ""}; | ||
| 740 | } | ||
| 741 | switch (image.GetSize()) { | ||
| 742 | case Tegra::Shader::ImageAtomicSize::U32: | ||
| 743 | return {"u", "r32ui, "}; | ||
| 744 | case Tegra::Shader::ImageAtomicSize::S32: | ||
| 745 | return {"i", "r32i, "}; | ||
| 746 | default: | ||
| 747 | UNIMPLEMENTED_MSG("Unimplemented atomic size={}", | ||
| 748 | static_cast<u32>(image.GetSize())); | ||
| 749 | return {"", ""}; | ||
| 750 | } | ||
| 751 | }(); | ||
| 752 | |||
| 753 | std::string qualifier = "coherent volatile"; | ||
| 754 | if (image.IsRead() && !image.IsWritten()) { | ||
| 755 | qualifier += " readonly"; | ||
| 756 | } else if (image.IsWritten() && !image.IsRead()) { | ||
| 757 | qualifier += " writeonly"; | ||
| 758 | } | ||
| 759 | |||
| 760 | code.AddLine("layout (binding = IMAGE_BINDING_{}) {} uniform " | ||
| 730 | "{} {};", | 761 | "{} {};", |
| 731 | image.GetIndex(), image_type, GetImage(image)); | 762 | image.GetIndex(), qualifier, image_type, GetImage(image)); |
| 732 | } | 763 | } |
| 733 | if (!images.empty()) { | 764 | if (!images.empty()) { |
| 734 | code.AddNewLine(); | 765 | code.AddNewLine(); |
| @@ -858,6 +889,12 @@ private: | |||
| 858 | Type::Uint}; | 889 | Type::Uint}; |
| 859 | } | 890 | } |
| 860 | 891 | ||
| 892 | if (const auto smem = std::get_if<SmemNode>(&*node)) { | ||
| 893 | return { | ||
| 894 | fmt::format("{}[{} >> 2]", GetSharedMemory(), Visit(smem->GetAddress()).AsUint()), | ||
| 895 | Type::Uint}; | ||
| 896 | } | ||
| 897 | |||
| 861 | if (const auto internal_flag = std::get_if<InternalFlagNode>(&*node)) { | 898 | if (const auto internal_flag = std::get_if<InternalFlagNode>(&*node)) { |
| 862 | return {GetInternalFlag(internal_flag->GetFlag()), Type::Bool}; | 899 | return {GetInternalFlag(internal_flag->GetFlag()), Type::Bool}; |
| 863 | } | 900 | } |
| @@ -984,10 +1021,10 @@ private: | |||
| 984 | return {std::move(temporary), value.GetType()}; | 1021 | return {std::move(temporary), value.GetType()}; |
| 985 | } | 1022 | } |
| 986 | 1023 | ||
| 987 | Expression GetOutputAttribute(const AbufNode* abuf) { | 1024 | std::optional<Expression> GetOutputAttribute(const AbufNode* abuf) { |
| 988 | switch (const auto attribute = abuf->GetIndex()) { | 1025 | switch (const auto attribute = abuf->GetIndex()) { |
| 989 | case Attribute::Index::Position: | 1026 | case Attribute::Index::Position: |
| 990 | return {"gl_Position"s + GetSwizzle(abuf->GetElement()), Type::Float}; | 1027 | return {{"gl_Position"s + GetSwizzle(abuf->GetElement()), Type::Float}}; |
| 991 | case Attribute::Index::LayerViewportPointSize: | 1028 | case Attribute::Index::LayerViewportPointSize: |
| 992 | switch (abuf->GetElement()) { | 1029 | switch (abuf->GetElement()) { |
| 993 | case 0: | 1030 | case 0: |
| @@ -997,25 +1034,25 @@ private: | |||
| 997 | if (IsVertexShader(stage) && !device.HasVertexViewportLayer()) { | 1034 | if (IsVertexShader(stage) && !device.HasVertexViewportLayer()) { |
| 998 | return {}; | 1035 | return {}; |
| 999 | } | 1036 | } |
| 1000 | return {"gl_Layer", Type::Int}; | 1037 | return {{"gl_Layer", Type::Int}}; |
| 1001 | case 2: | 1038 | case 2: |
| 1002 | if (IsVertexShader(stage) && !device.HasVertexViewportLayer()) { | 1039 | if (IsVertexShader(stage) && !device.HasVertexViewportLayer()) { |
| 1003 | return {}; | 1040 | return {}; |
| 1004 | } | 1041 | } |
| 1005 | return {"gl_ViewportIndex", Type::Int}; | 1042 | return {{"gl_ViewportIndex", Type::Int}}; |
| 1006 | case 3: | 1043 | case 3: |
| 1007 | UNIMPLEMENTED_MSG("Requires some state changes for gl_PointSize to work in shader"); | 1044 | UNIMPLEMENTED_MSG("Requires some state changes for gl_PointSize to work in shader"); |
| 1008 | return {"gl_PointSize", Type::Float}; | 1045 | return {{"gl_PointSize", Type::Float}}; |
| 1009 | } | 1046 | } |
| 1010 | return {}; | 1047 | return {}; |
| 1011 | case Attribute::Index::ClipDistances0123: | 1048 | case Attribute::Index::ClipDistances0123: |
| 1012 | return {fmt::format("gl_ClipDistance[{}]", abuf->GetElement()), Type::Float}; | 1049 | return {{fmt::format("gl_ClipDistance[{}]", abuf->GetElement()), Type::Float}}; |
| 1013 | case Attribute::Index::ClipDistances4567: | 1050 | case Attribute::Index::ClipDistances4567: |
| 1014 | return {fmt::format("gl_ClipDistance[{}]", abuf->GetElement() + 4), Type::Float}; | 1051 | return {{fmt::format("gl_ClipDistance[{}]", abuf->GetElement() + 4), Type::Float}}; |
| 1015 | default: | 1052 | default: |
| 1016 | if (IsGenericAttribute(attribute)) { | 1053 | if (IsGenericAttribute(attribute)) { |
| 1017 | return {GetOutputAttribute(attribute) + GetSwizzle(abuf->GetElement()), | 1054 | return { |
| 1018 | Type::Float}; | 1055 | {GetOutputAttribute(attribute) + GetSwizzle(abuf->GetElement()), Type::Float}}; |
| 1019 | } | 1056 | } |
| 1020 | UNIMPLEMENTED_MSG("Unhandled output attribute: {}", static_cast<u32>(attribute)); | 1057 | UNIMPLEMENTED_MSG("Unhandled output attribute: {}", static_cast<u32>(attribute)); |
| 1021 | return {}; | 1058 | return {}; |
| @@ -1174,6 +1211,74 @@ private: | |||
| 1174 | return expr; | 1211 | return expr; |
| 1175 | } | 1212 | } |
| 1176 | 1213 | ||
| 1214 | std::string BuildIntegerCoordinates(Operation operation) { | ||
| 1215 | constexpr std::array constructors{"int(", "ivec2(", "ivec3(", "ivec4("}; | ||
| 1216 | const std::size_t coords_count{operation.GetOperandsCount()}; | ||
| 1217 | std::string expr = constructors.at(coords_count - 1); | ||
| 1218 | for (std::size_t i = 0; i < coords_count; ++i) { | ||
| 1219 | expr += VisitOperand(operation, i).AsInt(); | ||
| 1220 | if (i + 1 < coords_count) { | ||
| 1221 | expr += ", "; | ||
| 1222 | } | ||
| 1223 | } | ||
| 1224 | expr += ')'; | ||
| 1225 | return expr; | ||
| 1226 | } | ||
| 1227 | |||
| 1228 | std::string BuildImageValues(Operation operation) { | ||
| 1229 | const auto meta{std::get<MetaImage>(operation.GetMeta())}; | ||
| 1230 | const auto [constructors, type] = [&]() -> std::pair<std::array<const char*, 4>, Type> { | ||
| 1231 | constexpr std::array float_constructors{"float", "vec2", "vec3", "vec4"}; | ||
| 1232 | if (!meta.image.IsSizeKnown()) { | ||
| 1233 | return {float_constructors, Type::Float}; | ||
| 1234 | } | ||
| 1235 | switch (meta.image.GetSize()) { | ||
| 1236 | case Tegra::Shader::ImageAtomicSize::U32: | ||
| 1237 | return {{"uint", "uvec2", "uvec3", "uvec4"}, Type::Uint}; | ||
| 1238 | case Tegra::Shader::ImageAtomicSize::S32: | ||
| 1239 | return {{"int", "ivec2", "ivec3", "ivec4"}, Type::Uint}; | ||
| 1240 | default: | ||
| 1241 | UNIMPLEMENTED_MSG("Unimplemented image size={}", | ||
| 1242 | static_cast<u32>(meta.image.GetSize())); | ||
| 1243 | return {float_constructors, Type::Float}; | ||
| 1244 | } | ||
| 1245 | }(); | ||
| 1246 | |||
| 1247 | const std::size_t values_count{meta.values.size()}; | ||
| 1248 | std::string expr = fmt::format("{}(", constructors.at(values_count - 1)); | ||
| 1249 | for (std::size_t i = 0; i < values_count; ++i) { | ||
| 1250 | expr += Visit(meta.values.at(i)).As(type); | ||
| 1251 | if (i + 1 < values_count) { | ||
| 1252 | expr += ", "; | ||
| 1253 | } | ||
| 1254 | } | ||
| 1255 | expr += ')'; | ||
| 1256 | return expr; | ||
| 1257 | } | ||
| 1258 | |||
| 1259 | Expression AtomicImage(Operation operation, const char* opname) { | ||
| 1260 | constexpr std::array constructors{"int(", "ivec2(", "ivec3(", "ivec4("}; | ||
| 1261 | const auto meta{std::get<MetaImage>(operation.GetMeta())}; | ||
| 1262 | ASSERT(meta.values.size() == 1); | ||
| 1263 | ASSERT(meta.image.IsSizeKnown()); | ||
| 1264 | |||
| 1265 | const auto type = [&]() { | ||
| 1266 | switch (const auto size = meta.image.GetSize()) { | ||
| 1267 | case Tegra::Shader::ImageAtomicSize::U32: | ||
| 1268 | return Type::Uint; | ||
| 1269 | case Tegra::Shader::ImageAtomicSize::S32: | ||
| 1270 | return Type::Int; | ||
| 1271 | default: | ||
| 1272 | UNIMPLEMENTED_MSG("Unimplemented image size={}", static_cast<u32>(size)); | ||
| 1273 | return Type::Uint; | ||
| 1274 | } | ||
| 1275 | }(); | ||
| 1276 | |||
| 1277 | return {fmt::format("{}({}, {}, {})", opname, GetImage(meta.image), | ||
| 1278 | BuildIntegerCoordinates(operation), Visit(meta.values[0]).As(type)), | ||
| 1279 | type}; | ||
| 1280 | } | ||
| 1281 | |||
| 1177 | Expression Assign(Operation operation) { | 1282 | Expression Assign(Operation operation) { |
| 1178 | const Node& dest = operation[0]; | 1283 | const Node& dest = operation[0]; |
| 1179 | const Node& src = operation[1]; | 1284 | const Node& src = operation[1]; |
| @@ -1187,7 +1292,11 @@ private: | |||
| 1187 | target = {GetRegister(gpr->GetIndex()), Type::Float}; | 1292 | target = {GetRegister(gpr->GetIndex()), Type::Float}; |
| 1188 | } else if (const auto abuf = std::get_if<AbufNode>(&*dest)) { | 1293 | } else if (const auto abuf = std::get_if<AbufNode>(&*dest)) { |
| 1189 | UNIMPLEMENTED_IF(abuf->IsPhysicalBuffer()); | 1294 | UNIMPLEMENTED_IF(abuf->IsPhysicalBuffer()); |
| 1190 | target = GetOutputAttribute(abuf); | 1295 | auto output = GetOutputAttribute(abuf); |
| 1296 | if (!output) { | ||
| 1297 | return {}; | ||
| 1298 | } | ||
| 1299 | target = std::move(*output); | ||
| 1191 | } else if (const auto lmem = std::get_if<LmemNode>(&*dest)) { | 1300 | } else if (const auto lmem = std::get_if<LmemNode>(&*dest)) { |
| 1192 | if (stage == ProgramType::Compute) { | 1301 | if (stage == ProgramType::Compute) { |
| 1193 | LOG_WARNING(Render_OpenGL, "Local memory is stubbed on compute shaders"); | 1302 | LOG_WARNING(Render_OpenGL, "Local memory is stubbed on compute shaders"); |
| @@ -1195,6 +1304,11 @@ private: | |||
| 1195 | target = { | 1304 | target = { |
| 1196 | fmt::format("{}[{} >> 2]", GetLocalMemory(), Visit(lmem->GetAddress()).AsUint()), | 1305 | fmt::format("{}[{} >> 2]", GetLocalMemory(), Visit(lmem->GetAddress()).AsUint()), |
| 1197 | Type::Uint}; | 1306 | Type::Uint}; |
| 1307 | } else if (const auto smem = std::get_if<SmemNode>(&*dest)) { | ||
| 1308 | ASSERT(stage == ProgramType::Compute); | ||
| 1309 | target = { | ||
| 1310 | fmt::format("{}[{} >> 2]", GetSharedMemory(), Visit(smem->GetAddress()).AsUint()), | ||
| 1311 | Type::Uint}; | ||
| 1198 | } else if (const auto gmem = std::get_if<GmemNode>(&*dest)) { | 1312 | } else if (const auto gmem = std::get_if<GmemNode>(&*dest)) { |
| 1199 | const std::string real = Visit(gmem->GetRealAddress()).AsUint(); | 1313 | const std::string real = Visit(gmem->GetRealAddress()).AsUint(); |
| 1200 | const std::string base = Visit(gmem->GetBaseAddress()).AsUint(); | 1314 | const std::string base = Visit(gmem->GetBaseAddress()).AsUint(); |
| @@ -1688,36 +1802,37 @@ private: | |||
| 1688 | } | 1802 | } |
| 1689 | 1803 | ||
| 1690 | Expression ImageStore(Operation operation) { | 1804 | Expression ImageStore(Operation operation) { |
| 1691 | constexpr std::array constructors{"int(", "ivec2(", "ivec3(", "ivec4("}; | ||
| 1692 | const auto meta{std::get<MetaImage>(operation.GetMeta())}; | 1805 | const auto meta{std::get<MetaImage>(operation.GetMeta())}; |
| 1806 | code.AddLine("imageStore({}, {}, {});", GetImage(meta.image), | ||
| 1807 | BuildIntegerCoordinates(operation), BuildImageValues(operation)); | ||
| 1808 | return {}; | ||
| 1809 | } | ||
| 1693 | 1810 | ||
| 1694 | std::string expr = "imageStore("; | 1811 | Expression AtomicImageAdd(Operation operation) { |
| 1695 | expr += GetImage(meta.image); | 1812 | return AtomicImage(operation, "imageAtomicAdd"); |
| 1696 | expr += ", "; | 1813 | } |
| 1697 | 1814 | ||
| 1698 | const std::size_t coords_count{operation.GetOperandsCount()}; | 1815 | Expression AtomicImageMin(Operation operation) { |
| 1699 | expr += constructors.at(coords_count - 1); | 1816 | return AtomicImage(operation, "imageAtomicMin"); |
| 1700 | for (std::size_t i = 0; i < coords_count; ++i) { | 1817 | } |
| 1701 | expr += VisitOperand(operation, i).AsInt(); | ||
| 1702 | if (i + 1 < coords_count) { | ||
| 1703 | expr += ", "; | ||
| 1704 | } | ||
| 1705 | } | ||
| 1706 | expr += "), "; | ||
| 1707 | 1818 | ||
| 1708 | const std::size_t values_count{meta.values.size()}; | 1819 | Expression AtomicImageMax(Operation operation) { |
| 1709 | UNIMPLEMENTED_IF(values_count != 4); | 1820 | return AtomicImage(operation, "imageAtomicMax"); |
| 1710 | expr += "vec4("; | 1821 | } |
| 1711 | for (std::size_t i = 0; i < values_count; ++i) { | 1822 | Expression AtomicImageAnd(Operation operation) { |
| 1712 | expr += Visit(meta.values.at(i)).AsFloat(); | 1823 | return AtomicImage(operation, "imageAtomicAnd"); |
| 1713 | if (i + 1 < values_count) { | 1824 | } |
| 1714 | expr += ", "; | ||
| 1715 | } | ||
| 1716 | } | ||
| 1717 | expr += "));"; | ||
| 1718 | 1825 | ||
| 1719 | code.AddLine(expr); | 1826 | Expression AtomicImageOr(Operation operation) { |
| 1720 | return {}; | 1827 | return AtomicImage(operation, "imageAtomicOr"); |
| 1828 | } | ||
| 1829 | |||
| 1830 | Expression AtomicImageXor(Operation operation) { | ||
| 1831 | return AtomicImage(operation, "imageAtomicXor"); | ||
| 1832 | } | ||
| 1833 | |||
| 1834 | Expression AtomicImageExchange(Operation operation) { | ||
| 1835 | return AtomicImage(operation, "imageAtomicExchange"); | ||
| 1721 | } | 1836 | } |
| 1722 | 1837 | ||
| 1723 | Expression Branch(Operation operation) { | 1838 | Expression Branch(Operation operation) { |
| @@ -1842,8 +1957,7 @@ private: | |||
| 1842 | Expression BallotThread(Operation operation) { | 1957 | Expression BallotThread(Operation operation) { |
| 1843 | const std::string value = VisitOperand(operation, 0).AsBool(); | 1958 | const std::string value = VisitOperand(operation, 0).AsBool(); |
| 1844 | if (!device.HasWarpIntrinsics()) { | 1959 | if (!device.HasWarpIntrinsics()) { |
| 1845 | LOG_ERROR(Render_OpenGL, | 1960 | LOG_ERROR(Render_OpenGL, "Nvidia vote intrinsics are required by this shader"); |
| 1846 | "Nvidia warp intrinsics are not available and its required by a shader"); | ||
| 1847 | // Stub on non-Nvidia devices by simulating all threads voting the same as the active | 1961 | // Stub on non-Nvidia devices by simulating all threads voting the same as the active |
| 1848 | // one. | 1962 | // one. |
| 1849 | return {fmt::format("({} ? 0xFFFFFFFFU : 0U)", value), Type::Uint}; | 1963 | return {fmt::format("({} ? 0xFFFFFFFFU : 0U)", value), Type::Uint}; |
| @@ -1854,8 +1968,7 @@ private: | |||
| 1854 | Expression Vote(Operation operation, const char* func) { | 1968 | Expression Vote(Operation operation, const char* func) { |
| 1855 | const std::string value = VisitOperand(operation, 0).AsBool(); | 1969 | const std::string value = VisitOperand(operation, 0).AsBool(); |
| 1856 | if (!device.HasWarpIntrinsics()) { | 1970 | if (!device.HasWarpIntrinsics()) { |
| 1857 | LOG_ERROR(Render_OpenGL, | 1971 | LOG_ERROR(Render_OpenGL, "Nvidia vote intrinsics are required by this shader"); |
| 1858 | "Nvidia vote intrinsics are not available and its required by a shader"); | ||
| 1859 | // Stub with a warp size of one. | 1972 | // Stub with a warp size of one. |
| 1860 | return {value, Type::Bool}; | 1973 | return {value, Type::Bool}; |
| 1861 | } | 1974 | } |
| @@ -1872,15 +1985,54 @@ private: | |||
| 1872 | 1985 | ||
| 1873 | Expression VoteEqual(Operation operation) { | 1986 | Expression VoteEqual(Operation operation) { |
| 1874 | if (!device.HasWarpIntrinsics()) { | 1987 | if (!device.HasWarpIntrinsics()) { |
| 1875 | LOG_ERROR(Render_OpenGL, | 1988 | LOG_ERROR(Render_OpenGL, "Nvidia vote intrinsics are required by this shader"); |
| 1876 | "Nvidia vote intrinsics are not available and its required by a shader"); | 1989 | // We must return true here since a stub for a theoretical warp size of 1. |
| 1877 | // We must return true here since a stub for a theoretical warp size of 1 will always | 1990 | // This will always return an equal result across all votes. |
| 1878 | // return an equal result for all its votes. | ||
| 1879 | return {"true", Type::Bool}; | 1991 | return {"true", Type::Bool}; |
| 1880 | } | 1992 | } |
| 1881 | return Vote(operation, "allThreadsEqualNV"); | 1993 | return Vote(operation, "allThreadsEqualNV"); |
| 1882 | } | 1994 | } |
| 1883 | 1995 | ||
| 1996 | template <const std::string_view& func> | ||
| 1997 | Expression Shuffle(Operation operation) { | ||
| 1998 | const std::string value = VisitOperand(operation, 0).AsFloat(); | ||
| 1999 | if (!device.HasWarpIntrinsics()) { | ||
| 2000 | LOG_ERROR(Render_OpenGL, "Nvidia shuffle intrinsics are required by this shader"); | ||
| 2001 | // On a "single-thread" device we are either on the same thread or out of bounds. Both | ||
| 2002 | // cases return the passed value. | ||
| 2003 | return {value, Type::Float}; | ||
| 2004 | } | ||
| 2005 | |||
| 2006 | const std::string index = VisitOperand(operation, 1).AsUint(); | ||
| 2007 | const std::string width = VisitOperand(operation, 2).AsUint(); | ||
| 2008 | return {fmt::format("{}({}, {}, {})", func, value, index, width), Type::Float}; | ||
| 2009 | } | ||
| 2010 | |||
| 2011 | template <const std::string_view& func> | ||
| 2012 | Expression InRangeShuffle(Operation operation) { | ||
| 2013 | const std::string index = VisitOperand(operation, 0).AsUint(); | ||
| 2014 | const std::string width = VisitOperand(operation, 1).AsUint(); | ||
| 2015 | if (!device.HasWarpIntrinsics()) { | ||
| 2016 | // On a "single-thread" device we are only in bounds when the requested index is 0. | ||
| 2017 | return {fmt::format("({} == 0U)", index), Type::Bool}; | ||
| 2018 | } | ||
| 2019 | |||
| 2020 | const std::string in_range = code.GenerateTemporary(); | ||
| 2021 | code.AddLine("bool {};", in_range); | ||
| 2022 | code.AddLine("{}(0U, {}, {}, {});", func, index, width, in_range); | ||
| 2023 | return {in_range, Type::Bool}; | ||
| 2024 | } | ||
| 2025 | |||
| 2026 | struct Func final { | ||
| 2027 | Func() = delete; | ||
| 2028 | ~Func() = delete; | ||
| 2029 | |||
| 2030 | static constexpr std::string_view ShuffleIndexed = "shuffleNV"; | ||
| 2031 | static constexpr std::string_view ShuffleUp = "shuffleUpNV"; | ||
| 2032 | static constexpr std::string_view ShuffleDown = "shuffleDownNV"; | ||
| 2033 | static constexpr std::string_view ShuffleButterfly = "shuffleXorNV"; | ||
| 2034 | }; | ||
| 2035 | |||
| 1884 | static constexpr std::array operation_decompilers = { | 2036 | static constexpr std::array operation_decompilers = { |
| 1885 | &GLSLDecompiler::Assign, | 2037 | &GLSLDecompiler::Assign, |
| 1886 | 2038 | ||
| @@ -2013,6 +2165,13 @@ private: | |||
| 2013 | &GLSLDecompiler::TexelFetch, | 2165 | &GLSLDecompiler::TexelFetch, |
| 2014 | 2166 | ||
| 2015 | &GLSLDecompiler::ImageStore, | 2167 | &GLSLDecompiler::ImageStore, |
| 2168 | &GLSLDecompiler::AtomicImageAdd, | ||
| 2169 | &GLSLDecompiler::AtomicImageMin, | ||
| 2170 | &GLSLDecompiler::AtomicImageMax, | ||
| 2171 | &GLSLDecompiler::AtomicImageAnd, | ||
| 2172 | &GLSLDecompiler::AtomicImageOr, | ||
| 2173 | &GLSLDecompiler::AtomicImageXor, | ||
| 2174 | &GLSLDecompiler::AtomicImageExchange, | ||
| 2016 | 2175 | ||
| 2017 | &GLSLDecompiler::Branch, | 2176 | &GLSLDecompiler::Branch, |
| 2018 | &GLSLDecompiler::BranchIndirect, | 2177 | &GLSLDecompiler::BranchIndirect, |
| @@ -2036,6 +2195,16 @@ private: | |||
| 2036 | &GLSLDecompiler::VoteAll, | 2195 | &GLSLDecompiler::VoteAll, |
| 2037 | &GLSLDecompiler::VoteAny, | 2196 | &GLSLDecompiler::VoteAny, |
| 2038 | &GLSLDecompiler::VoteEqual, | 2197 | &GLSLDecompiler::VoteEqual, |
| 2198 | |||
| 2199 | &GLSLDecompiler::Shuffle<Func::ShuffleIndexed>, | ||
| 2200 | &GLSLDecompiler::Shuffle<Func::ShuffleUp>, | ||
| 2201 | &GLSLDecompiler::Shuffle<Func::ShuffleDown>, | ||
| 2202 | &GLSLDecompiler::Shuffle<Func::ShuffleButterfly>, | ||
| 2203 | |||
| 2204 | &GLSLDecompiler::InRangeShuffle<Func::ShuffleIndexed>, | ||
| 2205 | &GLSLDecompiler::InRangeShuffle<Func::ShuffleUp>, | ||
| 2206 | &GLSLDecompiler::InRangeShuffle<Func::ShuffleDown>, | ||
| 2207 | &GLSLDecompiler::InRangeShuffle<Func::ShuffleButterfly>, | ||
| 2039 | }; | 2208 | }; |
| 2040 | static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount)); | 2209 | static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount)); |
| 2041 | 2210 | ||
| @@ -2076,6 +2245,10 @@ private: | |||
| 2076 | return "lmem_" + suffix; | 2245 | return "lmem_" + suffix; |
| 2077 | } | 2246 | } |
| 2078 | 2247 | ||
| 2248 | std::string GetSharedMemory() const { | ||
| 2249 | return fmt::format("smem_{}", suffix); | ||
| 2250 | } | ||
| 2251 | |||
| 2079 | std::string GetInternalFlag(InternalFlag flag) const { | 2252 | std::string GetInternalFlag(InternalFlag flag) const { |
| 2080 | constexpr std::array InternalFlagNames = {"zero_flag", "sign_flag", "carry_flag", | 2253 | constexpr std::array InternalFlagNames = {"zero_flag", "sign_flag", "carry_flag", |
| 2081 | "overflow_flag"}; | 2254 | "overflow_flag"}; |
diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp index 969fe9ced..f141c4e3b 100644 --- a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp | |||
| @@ -341,13 +341,22 @@ std::optional<ShaderDiskCacheDecompiled> ShaderDiskCacheOpenGL::LoadDecompiledEn | |||
| 341 | u64 index{}; | 341 | u64 index{}; |
| 342 | u32 type{}; | 342 | u32 type{}; |
| 343 | u8 is_bindless{}; | 343 | u8 is_bindless{}; |
| 344 | u8 is_written{}; | ||
| 345 | u8 is_read{}; | ||
| 346 | u8 is_size_known{}; | ||
| 347 | u32 size{}; | ||
| 344 | if (!LoadObjectFromPrecompiled(offset) || !LoadObjectFromPrecompiled(index) || | 348 | if (!LoadObjectFromPrecompiled(offset) || !LoadObjectFromPrecompiled(index) || |
| 345 | !LoadObjectFromPrecompiled(type) || !LoadObjectFromPrecompiled(is_bindless)) { | 349 | !LoadObjectFromPrecompiled(type) || !LoadObjectFromPrecompiled(is_bindless) || |
| 350 | !LoadObjectFromPrecompiled(is_written) || !LoadObjectFromPrecompiled(is_read) || | ||
| 351 | !LoadObjectFromPrecompiled(is_size_known) || !LoadObjectFromPrecompiled(size)) { | ||
| 346 | return {}; | 352 | return {}; |
| 347 | } | 353 | } |
| 348 | entry.entries.images.emplace_back( | 354 | entry.entries.images.emplace_back( |
| 349 | static_cast<std::size_t>(offset), static_cast<std::size_t>(index), | 355 | static_cast<std::size_t>(offset), static_cast<std::size_t>(index), |
| 350 | static_cast<Tegra::Shader::ImageType>(type), is_bindless != 0); | 356 | static_cast<Tegra::Shader::ImageType>(type), is_bindless != 0, is_written != 0, |
| 357 | is_read != 0, | ||
| 358 | is_size_known ? std::make_optional(static_cast<Tegra::Shader::ImageAtomicSize>(size)) | ||
| 359 | : std::nullopt); | ||
| 351 | } | 360 | } |
| 352 | 361 | ||
| 353 | u32 global_memory_count{}; | 362 | u32 global_memory_count{}; |
| @@ -426,10 +435,14 @@ bool ShaderDiskCacheOpenGL::SaveDecompiledFile(u64 unique_identifier, const std: | |||
| 426 | return false; | 435 | return false; |
| 427 | } | 436 | } |
| 428 | for (const auto& image : entries.images) { | 437 | for (const auto& image : entries.images) { |
| 438 | const u32 size = image.IsSizeKnown() ? static_cast<u32>(image.GetSize()) : 0U; | ||
| 429 | if (!SaveObjectToPrecompiled(static_cast<u64>(image.GetOffset())) || | 439 | if (!SaveObjectToPrecompiled(static_cast<u64>(image.GetOffset())) || |
| 430 | !SaveObjectToPrecompiled(static_cast<u64>(image.GetIndex())) || | 440 | !SaveObjectToPrecompiled(static_cast<u64>(image.GetIndex())) || |
| 431 | !SaveObjectToPrecompiled(static_cast<u32>(image.GetType())) || | 441 | !SaveObjectToPrecompiled(static_cast<u32>(image.GetType())) || |
| 432 | !SaveObjectToPrecompiled(static_cast<u8>(image.IsBindless() ? 1 : 0))) { | 442 | !SaveObjectToPrecompiled(static_cast<u8>(image.IsBindless() ? 1 : 0)) || |
| 443 | !SaveObjectToPrecompiled(static_cast<u8>(image.IsWritten() ? 1 : 0)) || | ||
| 444 | !SaveObjectToPrecompiled(static_cast<u8>(image.IsRead() ? 1 : 0)) || | ||
| 445 | !SaveObjectToPrecompiled(image.IsSizeKnown()) || !SaveObjectToPrecompiled(size)) { | ||
| 433 | return false; | 446 | return false; |
| 434 | } | 447 | } |
| 435 | } | 448 | } |
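The precompiled-cache change above stores the optional image atomic size as a u8 "known" flag followed by a u32 payload. A standalone sketch of that flag-plus-value round trip; the stream-based Save/Load helpers are illustrative, while the real cache goes through SaveObjectToPrecompiled/LoadObjectFromPrecompiled.

#include <cstdint>
#include <iostream>
#include <optional>
#include <sstream>

// Write a "known" flag, then the payload (written but ignored when unknown).
void Save(std::ostream& out, std::optional<std::uint32_t> size) {
    const std::uint8_t known = size.has_value() ? 1 : 0;
    const std::uint32_t payload = size.value_or(0);
    out.write(reinterpret_cast<const char*>(&known), sizeof(known));
    out.write(reinterpret_cast<const char*>(&payload), sizeof(payload));
}

// Read both fields back and reconstruct the optional from the flag.
std::optional<std::uint32_t> Load(std::istream& in) {
    std::uint8_t known{};
    std::uint32_t payload{};
    in.read(reinterpret_cast<char*>(&known), sizeof(known));
    in.read(reinterpret_cast<char*>(&payload), sizeof(payload));
    return known ? std::make_optional(payload) : std::nullopt;
}

int main() {
    std::stringstream buffer;
    Save(buffer, std::make_optional<std::uint32_t>(7));
    Save(buffer, std::nullopt);
    std::cout << Load(buffer).value_or(0) << ' ' << Load(buffer).has_value() << '\n'; // 7 0
}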
diff --git a/src/video_core/renderer_opengl/gl_state.cpp b/src/video_core/renderer_opengl/gl_state.cpp index f4777d0b0..bf86b5a0b 100644 --- a/src/video_core/renderer_opengl/gl_state.cpp +++ b/src/video_core/renderer_opengl/gl_state.cpp | |||
| @@ -16,7 +16,6 @@ namespace OpenGL { | |||
| 16 | using Maxwell = Tegra::Engines::Maxwell3D::Regs; | 16 | using Maxwell = Tegra::Engines::Maxwell3D::Regs; |
| 17 | 17 | ||
| 18 | OpenGLState OpenGLState::cur_state; | 18 | OpenGLState OpenGLState::cur_state; |
| 19 | bool OpenGLState::s_rgb_used; | ||
| 20 | 19 | ||
| 21 | namespace { | 20 | namespace { |
| 22 | 21 | ||
| @@ -34,6 +33,25 @@ bool UpdateTie(T1 current_value, const T2 new_value) { | |||
| 34 | return changed; | 33 | return changed; |
| 35 | } | 34 | } |
| 36 | 35 | ||
| 36 | template <typename T> | ||
| 37 | std::optional<std::pair<GLuint, GLsizei>> UpdateArray(T& current_values, const T& new_values) { | ||
| 38 | std::optional<std::size_t> first; | ||
| 39 | std::size_t last; | ||
| 40 | for (std::size_t i = 0; i < std::size(current_values); ++i) { | ||
| 41 | if (!UpdateValue(current_values[i], new_values[i])) { | ||
| 42 | continue; | ||
| 43 | } | ||
| 44 | if (!first) { | ||
| 45 | first = i; | ||
| 46 | } | ||
| 47 | last = i; | ||
| 48 | } | ||
| 49 | if (!first) { | ||
| 50 | return std::nullopt; | ||
| 51 | } | ||
| 52 | return std::make_pair(static_cast<GLuint>(*first), static_cast<GLsizei>(last - *first + 1)); | ||
| 53 | } | ||
| 54 | |||
| 37 | void Enable(GLenum cap, bool enable) { | 55 | void Enable(GLenum cap, bool enable) { |
| 38 | if (enable) { | 56 | if (enable) { |
| 39 | glEnable(cap); | 57 | glEnable(cap); |
| @@ -134,10 +152,6 @@ OpenGLState::OpenGLState() { | |||
| 134 | logic_op.enabled = false; | 152 | logic_op.enabled = false; |
| 135 | logic_op.operation = GL_COPY; | 153 | logic_op.operation = GL_COPY; |
| 136 | 154 | ||
| 137 | for (auto& texture_unit : texture_units) { | ||
| 138 | texture_unit.Reset(); | ||
| 139 | } | ||
| 140 | |||
| 141 | draw.read_framebuffer = 0; | 155 | draw.read_framebuffer = 0; |
| 142 | draw.draw_framebuffer = 0; | 156 | draw.draw_framebuffer = 0; |
| 143 | draw.vertex_array = 0; | 157 | draw.vertex_array = 0; |
| @@ -267,8 +281,6 @@ void OpenGLState::ApplySRgb() const { | |||
| 267 | return; | 281 | return; |
| 268 | cur_state.framebuffer_srgb.enabled = framebuffer_srgb.enabled; | 282 | cur_state.framebuffer_srgb.enabled = framebuffer_srgb.enabled; |
| 269 | if (framebuffer_srgb.enabled) { | 283 | if (framebuffer_srgb.enabled) { |
| 270 | // Track if sRGB is used | ||
| 271 | s_rgb_used = true; | ||
| 272 | glEnable(GL_FRAMEBUFFER_SRGB); | 284 | glEnable(GL_FRAMEBUFFER_SRGB); |
| 273 | } else { | 285 | } else { |
| 274 | glDisable(GL_FRAMEBUFFER_SRGB); | 286 | glDisable(GL_FRAMEBUFFER_SRGB); |
| @@ -496,52 +508,20 @@ void OpenGLState::ApplyAlphaTest() const { | |||
| 496 | } | 508 | } |
| 497 | 509 | ||
| 498 | void OpenGLState::ApplyTextures() const { | 510 | void OpenGLState::ApplyTextures() const { |
| 499 | bool has_delta{}; | 511 | if (const auto update = UpdateArray(cur_state.textures, textures)) { |
| 500 | std::size_t first{}; | 512 | glBindTextures(update->first, update->second, textures.data() + update->first); |
| 501 | std::size_t last{}; | ||
| 502 | std::array<GLuint, Maxwell::NumTextureSamplers> textures; | ||
| 503 | |||
| 504 | for (std::size_t i = 0; i < std::size(texture_units); ++i) { | ||
| 505 | const auto& texture_unit = texture_units[i]; | ||
| 506 | auto& cur_state_texture_unit = cur_state.texture_units[i]; | ||
| 507 | textures[i] = texture_unit.texture; | ||
| 508 | if (cur_state_texture_unit.texture == textures[i]) { | ||
| 509 | continue; | ||
| 510 | } | ||
| 511 | cur_state_texture_unit.texture = textures[i]; | ||
| 512 | if (!has_delta) { | ||
| 513 | first = i; | ||
| 514 | has_delta = true; | ||
| 515 | } | ||
| 516 | last = i; | ||
| 517 | } | ||
| 518 | if (has_delta) { | ||
| 519 | glBindTextures(static_cast<GLuint>(first), static_cast<GLsizei>(last - first + 1), | ||
| 520 | textures.data() + first); | ||
| 521 | } | 513 | } |
| 522 | } | 514 | } |
| 523 | 515 | ||
| 524 | void OpenGLState::ApplySamplers() const { | 516 | void OpenGLState::ApplySamplers() const { |
| 525 | bool has_delta{}; | 517 | if (const auto update = UpdateArray(cur_state.samplers, samplers)) { |
| 526 | std::size_t first{}; | 518 | glBindSamplers(update->first, update->second, samplers.data() + update->first); |
| 527 | std::size_t last{}; | ||
| 528 | std::array<GLuint, Maxwell::NumTextureSamplers> samplers; | ||
| 529 | |||
| 530 | for (std::size_t i = 0; i < std::size(samplers); ++i) { | ||
| 531 | samplers[i] = texture_units[i].sampler; | ||
| 532 | if (cur_state.texture_units[i].sampler == texture_units[i].sampler) { | ||
| 533 | continue; | ||
| 534 | } | ||
| 535 | cur_state.texture_units[i].sampler = texture_units[i].sampler; | ||
| 536 | if (!has_delta) { | ||
| 537 | first = i; | ||
| 538 | has_delta = true; | ||
| 539 | } | ||
| 540 | last = i; | ||
| 541 | } | 519 | } |
| 542 | if (has_delta) { | 520 | } |
| 543 | glBindSamplers(static_cast<GLuint>(first), static_cast<GLsizei>(last - first + 1), | 521 | |
| 544 | samplers.data() + first); | 522 | void OpenGLState::ApplyImages() const { |
| 523 | if (const auto update = UpdateArray(cur_state.images, images)) { | ||
| 524 | glBindImageTextures(update->first, update->second, images.data() + update->first); | ||
| 545 | } | 525 | } |
| 546 | } | 526 | } |
| 547 | 527 | ||
| @@ -576,6 +556,7 @@ void OpenGLState::Apply() { | |||
| 576 | ApplyLogicOp(); | 556 | ApplyLogicOp(); |
| 577 | ApplyTextures(); | 557 | ApplyTextures(); |
| 578 | ApplySamplers(); | 558 | ApplySamplers(); |
| 559 | ApplyImages(); | ||
| 579 | if (dirty.polygon_offset) { | 560 | if (dirty.polygon_offset) { |
| 580 | ApplyPolygonOffset(); | 561 | ApplyPolygonOffset(); |
| 581 | dirty.polygon_offset = false; | 562 | dirty.polygon_offset = false; |
| @@ -606,18 +587,18 @@ void OpenGLState::EmulateViewportWithScissor() { | |||
| 606 | } | 587 | } |
| 607 | 588 | ||
| 608 | OpenGLState& OpenGLState::UnbindTexture(GLuint handle) { | 589 | OpenGLState& OpenGLState::UnbindTexture(GLuint handle) { |
| 609 | for (auto& unit : texture_units) { | 590 | for (auto& texture : textures) { |
| 610 | if (unit.texture == handle) { | 591 | if (texture == handle) { |
| 611 | unit.Unbind(); | 592 | texture = 0; |
| 612 | } | 593 | } |
| 613 | } | 594 | } |
| 614 | return *this; | 595 | return *this; |
| 615 | } | 596 | } |
| 616 | 597 | ||
| 617 | OpenGLState& OpenGLState::ResetSampler(GLuint handle) { | 598 | OpenGLState& OpenGLState::ResetSampler(GLuint handle) { |
| 618 | for (auto& unit : texture_units) { | 599 | for (auto& sampler : samplers) { |
| 619 | if (unit.sampler == handle) { | 600 | if (sampler == handle) { |
| 620 | unit.sampler = 0; | 601 | sampler = 0; |
| 621 | } | 602 | } |
| 622 | } | 603 | } |
| 623 | return *this; | 604 | return *this; |
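The UpdateArray helper above folds per-slot binding changes into one contiguous range so a single glBindTextures, glBindSamplers or glBindImageTextures call covers every unit that actually changed. A standalone sketch of the range computation with no GL calls; the array size and handle values are arbitrary.

#include <array>
#include <cstddef>
#include <iostream>
#include <optional>
#include <utility>

// Copies the new values in and returns the first changed index plus the count of
// the smallest contiguous range covering every element that differed.
template <typename T>
std::optional<std::pair<std::size_t, std::size_t>> UpdateArray(T& current, const T& next) {
    std::optional<std::size_t> first;
    std::size_t last = 0;
    for (std::size_t i = 0; i < std::size(current); ++i) {
        if (current[i] == next[i]) {
            continue;
        }
        current[i] = next[i];
        if (!first) {
            first = i;
        }
        last = i;
    }
    if (!first) {
        return std::nullopt;
    }
    return std::make_pair(*first, last - *first + 1);
}

int main() {
    std::array<unsigned, 8> bound{};                        // currently bound handles
    std::array<unsigned, 8> wanted{0, 5, 0, 7, 0, 0, 0, 0}; // requested handles
    if (const auto range = UpdateArray(bound, wanted)) {
        // A real backend would now issue glBindTextures(range->first, range->second, ...).
        std::cout << "bind " << range->second << " units starting at " << range->first << '\n';
    }
}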
diff --git a/src/video_core/renderer_opengl/gl_state.h b/src/video_core/renderer_opengl/gl_state.h index fdf9a8a12..c358d3b38 100644 --- a/src/video_core/renderer_opengl/gl_state.h +++ b/src/video_core/renderer_opengl/gl_state.h | |||
| @@ -118,21 +118,9 @@ public: | |||
| 118 | GLenum operation; | 118 | GLenum operation; |
| 119 | } logic_op; | 119 | } logic_op; |
| 120 | 120 | ||
| 121 | // 3 texture units - one for each that is used in PICA fragment shader emulation | 121 | std::array<GLuint, Tegra::Engines::Maxwell3D::Regs::NumTextureSamplers> textures{}; |
| 122 | struct TextureUnit { | 122 | std::array<GLuint, Tegra::Engines::Maxwell3D::Regs::NumTextureSamplers> samplers{}; |
| 123 | GLuint texture; // GL_TEXTURE_BINDING_2D | 123 | std::array<GLuint, Tegra::Engines::Maxwell3D::Regs::NumImages> images{}; |
| 124 | GLuint sampler; // GL_SAMPLER_BINDING | ||
| 125 | |||
| 126 | void Unbind() { | ||
| 127 | texture = 0; | ||
| 128 | } | ||
| 129 | |||
| 130 | void Reset() { | ||
| 131 | Unbind(); | ||
| 132 | sampler = 0; | ||
| 133 | } | ||
| 134 | }; | ||
| 135 | std::array<TextureUnit, Tegra::Engines::Maxwell3D::Regs::NumTextureSamplers> texture_units; | ||
| 136 | 124 | ||
| 137 | struct { | 125 | struct { |
| 138 | GLuint read_framebuffer; // GL_READ_FRAMEBUFFER_BINDING | 126 | GLuint read_framebuffer; // GL_READ_FRAMEBUFFER_BINDING |
| @@ -187,14 +175,6 @@ public: | |||
| 187 | return cur_state; | 175 | return cur_state; |
| 188 | } | 176 | } |
| 189 | 177 | ||
| 190 | static bool GetsRGBUsed() { | ||
| 191 | return s_rgb_used; | ||
| 192 | } | ||
| 193 | |||
| 194 | static void ClearsRGBUsed() { | ||
| 195 | s_rgb_used = false; | ||
| 196 | } | ||
| 197 | |||
| 198 | void SetDefaultViewports(); | 178 | void SetDefaultViewports(); |
| 199 | /// Apply this state as the current OpenGL state | 179 | /// Apply this state as the current OpenGL state |
| 200 | void Apply(); | 180 | void Apply(); |
| @@ -220,6 +200,7 @@ public: | |||
| 220 | void ApplyLogicOp() const; | 200 | void ApplyLogicOp() const; |
| 221 | void ApplyTextures() const; | 201 | void ApplyTextures() const; |
| 222 | void ApplySamplers() const; | 202 | void ApplySamplers() const; |
| 203 | void ApplyImages() const; | ||
| 223 | void ApplyDepthClamp() const; | 204 | void ApplyDepthClamp() const; |
| 224 | void ApplyPolygonOffset() const; | 205 | void ApplyPolygonOffset() const; |
| 225 | void ApplyAlphaTest() const; | 206 | void ApplyAlphaTest() const; |
| @@ -264,8 +245,6 @@ public: | |||
| 264 | private: | 245 | private: |
| 265 | static OpenGLState cur_state; | 246 | static OpenGLState cur_state; |
| 266 | 247 | ||
| 267 | // Workaround for sRGB problems caused by QT not supporting srgb output | ||
| 268 | static bool s_rgb_used; | ||
| 269 | struct { | 248 | struct { |
| 270 | bool blend_state; | 249 | bool blend_state; |
| 271 | bool stencil_state; | 250 | bool stencil_state; |
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h index 21324488a..8e13ab38b 100644 --- a/src/video_core/renderer_opengl/gl_texture_cache.h +++ b/src/video_core/renderer_opengl/gl_texture_cache.h | |||
| @@ -78,6 +78,17 @@ public: | |||
| 78 | /// Attaches this texture view to the current bound GL_DRAW_FRAMEBUFFER | 78 | /// Attaches this texture view to the current bound GL_DRAW_FRAMEBUFFER |
| 79 | void Attach(GLenum attachment, GLenum target) const; | 79 | void Attach(GLenum attachment, GLenum target) const; |
| 80 | 80 | ||
| 81 | void ApplySwizzle(Tegra::Texture::SwizzleSource x_source, | ||
| 82 | Tegra::Texture::SwizzleSource y_source, | ||
| 83 | Tegra::Texture::SwizzleSource z_source, | ||
| 84 | Tegra::Texture::SwizzleSource w_source); | ||
| 85 | |||
| 86 | void DecorateViewName(GPUVAddr gpu_addr, std::string prefix); | ||
| 87 | |||
| 88 | void MarkAsModified(u64 tick) { | ||
| 89 | surface.MarkAsModified(true, tick); | ||
| 90 | } | ||
| 91 | |||
| 81 | GLuint GetTexture() const { | 92 | GLuint GetTexture() const { |
| 82 | if (is_proxy) { | 93 | if (is_proxy) { |
| 83 | return surface.GetTexture(); | 94 | return surface.GetTexture(); |
| @@ -89,13 +100,6 @@ public: | |||
| 89 | return surface.GetSurfaceParams(); | 100 | return surface.GetSurfaceParams(); |
| 90 | } | 101 | } |
| 91 | 102 | ||
| 92 | void ApplySwizzle(Tegra::Texture::SwizzleSource x_source, | ||
| 93 | Tegra::Texture::SwizzleSource y_source, | ||
| 94 | Tegra::Texture::SwizzleSource z_source, | ||
| 95 | Tegra::Texture::SwizzleSource w_source); | ||
| 96 | |||
| 97 | void DecorateViewName(GPUVAddr gpu_addr, std::string prefix); | ||
| 98 | |||
| 99 | private: | 103 | private: |
| 100 | u32 EncodeSwizzle(Tegra::Texture::SwizzleSource x_source, | 104 | u32 EncodeSwizzle(Tegra::Texture::SwizzleSource x_source, |
| 101 | Tegra::Texture::SwizzleSource y_source, | 105 | Tegra::Texture::SwizzleSource y_source, |
| @@ -111,8 +115,8 @@ private: | |||
| 111 | GLenum target{}; | 115 | GLenum target{}; |
| 112 | 116 | ||
| 113 | OGLTextureView texture_view; | 117 | OGLTextureView texture_view; |
| 114 | u32 swizzle; | 118 | u32 swizzle{}; |
| 115 | bool is_proxy; | 119 | bool is_proxy{}; |
| 116 | }; | 120 | }; |
| 117 | 121 | ||
| 118 | class TextureCacheOpenGL final : public TextureCacheBase { | 122 | class TextureCacheOpenGL final : public TextureCacheBase { |
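SetupImage stamps written views with texture_cache.Tick(), which suggests tick-based modification tracking: each write records a monotonically increasing counter so the cache can later decide what to flush and in what order. A purely hypothetical sketch of that pattern; none of the names below come from the codebase.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Hypothetical tick-stamped dirty tracking: every modification records the
// current tick so entries can later be written back relative to write order.
class DirtyTracker {
public:
    std::uint64_t Tick() { return ++counter_; }

    void MarkAsModified(const std::string& name, std::uint64_t tick) {
        modified_[name] = tick;
    }

    void FlushAll() {
        // This toy version just prints; a cache would write the data back and
        // could sort by tick to preserve modification order.
        for (const auto& [name, tick] : modified_) {
            std::cout << "flush " << name << " (tick " << tick << ")\n";
        }
        modified_.clear();
    }

private:
    std::uint64_t counter_ = 0;
    std::map<std::string, std::uint64_t> modified_;
};

int main() {
    DirtyTracker cache;
    cache.MarkAsModified("image0", cache.Tick());
    cache.MarkAsModified("image1", cache.Tick());
    cache.FlushAll();
}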
diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h index ea77dd211..9ed738171 100644 --- a/src/video_core/renderer_opengl/maxwell_to_gl.h +++ b/src/video_core/renderer_opengl/maxwell_to_gl.h | |||
| @@ -145,7 +145,7 @@ inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode, | |||
| 145 | case Tegra::Texture::TextureMipmapFilter::None: | 145 | case Tegra::Texture::TextureMipmapFilter::None: |
| 146 | return GL_LINEAR; | 146 | return GL_LINEAR; |
| 147 | case Tegra::Texture::TextureMipmapFilter::Nearest: | 147 | case Tegra::Texture::TextureMipmapFilter::Nearest: |
| 148 | return GL_NEAREST_MIPMAP_LINEAR; | 148 | return GL_LINEAR_MIPMAP_NEAREST; |
| 149 | case Tegra::Texture::TextureMipmapFilter::Linear: | 149 | case Tegra::Texture::TextureMipmapFilter::Linear: |
| 150 | return GL_LINEAR_MIPMAP_LINEAR; | 150 | return GL_LINEAR_MIPMAP_LINEAR; |
| 151 | } | 151 | } |
| @@ -157,7 +157,7 @@ inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode, | |||
| 157 | case Tegra::Texture::TextureMipmapFilter::Nearest: | 157 | case Tegra::Texture::TextureMipmapFilter::Nearest: |
| 158 | return GL_NEAREST_MIPMAP_NEAREST; | 158 | return GL_NEAREST_MIPMAP_NEAREST; |
| 159 | case Tegra::Texture::TextureMipmapFilter::Linear: | 159 | case Tegra::Texture::TextureMipmapFilter::Linear: |
| 160 | return GL_LINEAR_MIPMAP_NEAREST; | 160 | return GL_NEAREST_MIPMAP_LINEAR; |
| 161 | } | 161 | } |
| 162 | } | 162 | } |
| 163 | } | 163 | } |
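The filter fix above relies on the OpenGL naming scheme GL_&lt;texel filter&gt;_MIPMAP_&lt;mip filter&gt;: the first word filters within a mip level, the second selects between levels. A hedged sketch of the full mapping the corrected function implies; the enums are illustrative stand-ins for the Tegra types, and the Nearest/None case (not shown in this hunk) is assumed to map to GL_NEAREST.

#include <iostream>

// Illustrative stand-ins for Tegra::Texture::TextureFilter / TextureMipmapFilter.
enum class TexelFilter { Nearest, Linear };
enum class MipFilter { None, Nearest, Linear };

// The texel filter picks the first word, the mipmap filter the word after MIPMAP.
const char* ToGL(TexelFilter texel, MipFilter mip) {
    if (texel == TexelFilter::Linear) {
        switch (mip) {
        case MipFilter::None:    return "GL_LINEAR";
        case MipFilter::Nearest: return "GL_LINEAR_MIPMAP_NEAREST";
        case MipFilter::Linear:  return "GL_LINEAR_MIPMAP_LINEAR";
        }
    } else {
        switch (mip) {
        case MipFilter::None:    return "GL_NEAREST"; // assumed; outside this hunk
        case MipFilter::Nearest: return "GL_NEAREST_MIPMAP_NEAREST";
        case MipFilter::Linear:  return "GL_NEAREST_MIPMAP_LINEAR";
        }
    }
    return "GL_LINEAR";
}

int main() {
    std::cout << ToGL(TexelFilter::Linear, MipFilter::Nearest) << '\n';  // GL_LINEAR_MIPMAP_NEAREST
    std::cout << ToGL(TexelFilter::Nearest, MipFilter::Linear) << '\n';  // GL_NEAREST_MIPMAP_LINEAR
}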
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index af9684839..1e6ef66ab 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp | |||
| @@ -264,7 +264,6 @@ void RendererOpenGL::CreateRasterizer() { | |||
| 264 | if (rasterizer) { | 264 | if (rasterizer) { |
| 265 | return; | 265 | return; |
| 266 | } | 266 | } |
| 267 | OpenGLState::ClearsRGBUsed(); | ||
| 268 | rasterizer = std::make_unique<RasterizerOpenGL>(system, emu_window, screen_info); | 267 | rasterizer = std::make_unique<RasterizerOpenGL>(system, emu_window, screen_info); |
| 269 | } | 268 | } |
| 270 | 269 | ||
| @@ -342,21 +341,17 @@ void RendererOpenGL::DrawScreenTriangles(const ScreenInfo& screen_info, float x, | |||
| 342 | ScreenRectVertex(x + w, y + h, texcoords.bottom * scale_u, right * scale_v), | 341 | ScreenRectVertex(x + w, y + h, texcoords.bottom * scale_u, right * scale_v), |
| 343 | }}; | 342 | }}; |
| 344 | 343 | ||
| 345 | state.texture_units[0].texture = screen_info.display_texture; | 344 | state.textures[0] = screen_info.display_texture; |
| 346 | // Workaround brigthness problems in SMO by enabling sRGB in the final output | 345 | state.framebuffer_srgb.enabled = screen_info.display_srgb; |
| 347 | // if it has been used in the frame. Needed because of this bug in QT: QTBUG-50987 | ||
| 348 | state.framebuffer_srgb.enabled = OpenGLState::GetsRGBUsed(); | ||
| 349 | state.AllDirty(); | 346 | state.AllDirty(); |
| 350 | state.Apply(); | 347 | state.Apply(); |
| 351 | glNamedBufferSubData(vertex_buffer.handle, 0, sizeof(vertices), vertices.data()); | 348 | glNamedBufferSubData(vertex_buffer.handle, 0, sizeof(vertices), vertices.data()); |
| 352 | glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); | 349 | glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); |
| 353 | // Restore default state | 350 | // Restore default state |
| 354 | state.framebuffer_srgb.enabled = false; | 351 | state.framebuffer_srgb.enabled = false; |
| 355 | state.texture_units[0].texture = 0; | 352 | state.textures[0] = 0; |
| 356 | state.AllDirty(); | 353 | state.AllDirty(); |
| 357 | state.Apply(); | 354 | state.Apply(); |
| 358 | // Clear sRGB state for the next frame | ||
| 359 | OpenGLState::ClearsRGBUsed(); | ||
| 360 | } | 355 | } |
| 361 | 356 | ||
| 362 | /** | 357 | /** |
| @@ -406,8 +401,8 @@ void RendererOpenGL::CaptureScreenshot() { | |||
| 406 | GLuint renderbuffer; | 401 | GLuint renderbuffer; |
| 407 | glGenRenderbuffers(1, &renderbuffer); | 402 | glGenRenderbuffers(1, &renderbuffer); |
| 408 | glBindRenderbuffer(GL_RENDERBUFFER, renderbuffer); | 403 | glBindRenderbuffer(GL_RENDERBUFFER, renderbuffer); |
| 409 | glRenderbufferStorage(GL_RENDERBUFFER, state.GetsRGBUsed() ? GL_SRGB8 : GL_RGB8, layout.width, | 404 | glRenderbufferStorage(GL_RENDERBUFFER, screen_info.display_srgb ? GL_SRGB8 : GL_RGB8, |
| 410 | layout.height); | 405 | layout.width, layout.height); |
| 411 | glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderbuffer); | 406 | glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderbuffer); |
| 412 | 407 | ||
| 413 | DrawScreen(layout); | 408 | DrawScreen(layout); |
diff --git a/src/video_core/renderer_opengl/renderer_opengl.h b/src/video_core/renderer_opengl/renderer_opengl.h index 9bd086368..cf26628ca 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.h +++ b/src/video_core/renderer_opengl/renderer_opengl.h | |||
| @@ -38,7 +38,8 @@ struct TextureInfo { | |||
| 38 | 38 | ||
| 39 | /// Structure used for storing information about the display target for the Switch screen | 39 | /// Structure used for storing information about the display target for the Switch screen |
| 40 | struct ScreenInfo { | 40 | struct ScreenInfo { |
| 41 | GLuint display_texture; | 41 | GLuint display_texture{}; |
| 42 | bool display_srgb{}; | ||
| 42 | const Common::Rectangle<float> display_texcoords{0.0f, 0.0f, 1.0f, 1.0f}; | 43 | const Common::Rectangle<float> display_texcoords{0.0f, 0.0f, 1.0f, 1.0f}; |
| 43 | TextureInfo texture; | 44 | TextureInfo texture; |
| 44 | }; | 45 | }; |
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp index 3b966ddc3..897cbb4e8 100644 --- a/src/video_core/renderer_vulkan/vk_device.cpp +++ b/src/video_core/renderer_vulkan/vk_device.cpp | |||
| @@ -2,9 +2,10 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <map> | 5 | #include <bitset> |
| 6 | #include <optional> | 6 | #include <optional> |
| 7 | #include <set> | 7 | #include <set> |
| 8 | #include <string_view> | ||
| 8 | #include <vector> | 9 | #include <vector> |
| 9 | #include "common/assert.h" | 10 | #include "common/assert.h" |
| 10 | #include "video_core/renderer_vulkan/declarations.h" | 11 | #include "video_core/renderer_vulkan/declarations.h" |
| @@ -12,13 +13,32 @@ | |||
| 12 | 13 | ||
| 13 | namespace Vulkan { | 14 | namespace Vulkan { |
| 14 | 15 | ||
| 16 | namespace { | ||
| 17 | |||
| 18 | template <typename T> | ||
| 19 | void SetNext(void**& next, T& data) { | ||
| 20 | *next = &data; | ||
| 21 | next = &data.pNext; | ||
| 22 | } | ||
| 23 | |||
| 24 | template <typename T> | ||
| 25 | T GetFeatures(vk::PhysicalDevice physical, vk::DispatchLoaderDynamic dldi) { | ||
| 26 | vk::PhysicalDeviceFeatures2 features; | ||
| 27 | T extension_features; | ||
| 28 | features.pNext = &extension_features; | ||
| 29 | physical.getFeatures2(&features, dldi); | ||
| 30 | return extension_features; | ||
| 31 | } | ||
| 32 | |||
| 33 | } // Anonymous namespace | ||
| 34 | |||
| 15 | namespace Alternatives { | 35 | namespace Alternatives { |
| 16 | 36 | ||
| 17 | constexpr std::array<vk::Format, 3> Depth24UnormS8Uint = { | 37 | constexpr std::array Depth24UnormS8Uint = {vk::Format::eD32SfloatS8Uint, |
| 18 | vk::Format::eD32SfloatS8Uint, vk::Format::eD16UnormS8Uint, {}}; | 38 | vk::Format::eD16UnormS8Uint, vk::Format{}}; |
| 19 | constexpr std::array<vk::Format, 3> Depth16UnormS8Uint = { | 39 | constexpr std::array Depth16UnormS8Uint = {vk::Format::eD24UnormS8Uint, |
| 20 | vk::Format::eD24UnormS8Uint, vk::Format::eD32SfloatS8Uint, {}}; | 40 | vk::Format::eD32SfloatS8Uint, vk::Format{}}; |
| 21 | constexpr std::array<vk::Format, 2> Astc = {vk::Format::eA8B8G8R8UnormPack32, {}}; | 41 | constexpr std::array Astc = {vk::Format::eA8B8G8R8UnormPack32, vk::Format{}}; |
| 22 | 42 | ||
| 23 | } // namespace Alternatives | 43 | } // namespace Alternatives |
| 24 | 44 | ||
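The SetNext helper above is what builds the pNext chain that Create() later attaches to vk::PhysicalDeviceFeatures2, so extension-feature structs can be appended conditionally. A standalone sketch of the chaining with placeholder structs instead of real Vulkan types (FeatureA/FeatureB are invented for the example):

#include <cassert>

struct Features2 { void* pNext = nullptr; };
struct FeatureA { void* pNext = nullptr; bool enabled = false; };
struct FeatureB { void* pNext = nullptr; bool enabled = false; };

// Same shape as the helper above: write the struct's address into the current
// tail pointer, then make that struct's own pNext the new tail.
template <typename T>
void SetNext(void**& next, T& data) {
    *next = &data;
    next = &data.pNext;
}

int main() {
    Features2 features2;
    void** next = &features2.pNext;

    FeatureA a;
    FeatureB b;
    SetNext(next, a); // features2.pNext -> a
    SetNext(next, b); // a.pNext -> b

    assert(features2.pNext == &a);
    assert(a.pNext == &b);
    assert(b.pNext == nullptr);
}

Because the tail pointer is carried by reference, skipping a SetNext call (for example when a feature is unsupported) simply leaves the chain one link shorter, which is how Create() handles the optional extensions.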
| @@ -58,16 +78,53 @@ VKDevice::VKDevice(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice phy | |||
| 58 | VKDevice::~VKDevice() = default; | 78 | VKDevice::~VKDevice() = default; |
| 59 | 79 | ||
| 60 | bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instance) { | 80 | bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instance) { |
| 61 | vk::PhysicalDeviceFeatures device_features; | ||
| 62 | device_features.vertexPipelineStoresAndAtomics = true; | ||
| 63 | device_features.independentBlend = true; | ||
| 64 | device_features.textureCompressionASTC_LDR = is_optimal_astc_supported; | ||
| 65 | |||
| 66 | const auto queue_cis = GetDeviceQueueCreateInfos(); | 81 | const auto queue_cis = GetDeviceQueueCreateInfos(); |
| 67 | const std::vector<const char*> extensions = LoadExtensions(dldi); | 82 | const std::vector extensions = LoadExtensions(dldi); |
| 68 | const vk::DeviceCreateInfo device_ci({}, static_cast<u32>(queue_cis.size()), queue_cis.data(), | 83 | |
| 69 | 0, nullptr, static_cast<u32>(extensions.size()), | 84 | vk::PhysicalDeviceFeatures2 features2; |
| 70 | extensions.data(), &device_features); | 85 | void** next = &features2.pNext; |
| 86 | auto& features = features2.features; | ||
| 87 | features.vertexPipelineStoresAndAtomics = true; | ||
| 88 | features.independentBlend = true; | ||
| 89 | features.depthClamp = true; | ||
| 90 | features.samplerAnisotropy = true; | ||
| 91 | features.largePoints = true; | ||
| 92 | features.textureCompressionASTC_LDR = is_optimal_astc_supported; | ||
| 93 | |||
| 94 | vk::PhysicalDeviceVertexAttributeDivisorFeaturesEXT vertex_divisor; | ||
| 95 | vertex_divisor.vertexAttributeInstanceRateDivisor = true; | ||
| 96 | vertex_divisor.vertexAttributeInstanceRateZeroDivisor = true; | ||
| 97 | SetNext(next, vertex_divisor); | ||
| 98 | |||
| 99 | vk::PhysicalDeviceFloat16Int8FeaturesKHR float16_int8; | ||
| 100 | if (is_float16_supported) { | ||
| 101 | float16_int8.shaderFloat16 = true; | ||
| 102 | SetNext(next, float16_int8); | ||
| 103 | } else { | ||
| 104 | LOG_INFO(Render_Vulkan, "Device doesn't support float16 natively"); | ||
| 105 | } | ||
| 106 | |||
| 107 | vk::PhysicalDeviceUniformBufferStandardLayoutFeaturesKHR std430_layout; | ||
| 108 | if (khr_uniform_buffer_standard_layout) { | ||
| 109 | std430_layout.uniformBufferStandardLayout = true; | ||
| 110 | SetNext(next, std430_layout); | ||
| 111 | } else { | ||
| 112 | LOG_INFO(Render_Vulkan, "Device doesn't support packed UBOs"); | ||
| 113 | } | ||
| 114 | |||
| 115 | vk::PhysicalDeviceIndexTypeUint8FeaturesEXT index_type_uint8; | ||
| 116 | if (ext_index_type_uint8) { | ||
| 117 | index_type_uint8.indexTypeUint8 = true; | ||
| 118 | SetNext(next, index_type_uint8); | ||
| 119 | } else { | ||
| 120 | LOG_INFO(Render_Vulkan, "Device doesn't support uint8 indexes"); | ||
| 121 | } | ||
| 122 | |||
| 123 | vk::DeviceCreateInfo device_ci({}, static_cast<u32>(queue_cis.size()), queue_cis.data(), 0, | ||
| 124 | nullptr, static_cast<u32>(extensions.size()), extensions.data(), | ||
| 125 | nullptr); | ||
| 126 | device_ci.pNext = &features2; | ||
| 127 | |||
| 71 | vk::Device dummy_logical; | 128 | vk::Device dummy_logical; |
| 72 | if (physical.createDevice(&device_ci, nullptr, &dummy_logical, dldi) != vk::Result::eSuccess) { | 129 | if (physical.createDevice(&device_ci, nullptr, &dummy_logical, dldi) != vk::Result::eSuccess) { |
| 73 | LOG_CRITICAL(Render_Vulkan, "Logical device failed to be created!"); | 130 | LOG_CRITICAL(Render_Vulkan, "Logical device failed to be created!"); |
| @@ -78,6 +135,17 @@ bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instan | |||
| 78 | logical = UniqueDevice( | 135 | logical = UniqueDevice( |
| 79 | dummy_logical, vk::ObjectDestroy<vk::NoParent, vk::DispatchLoaderDynamic>(nullptr, dld)); | 136 | dummy_logical, vk::ObjectDestroy<vk::NoParent, vk::DispatchLoaderDynamic>(nullptr, dld)); |
| 80 | 137 | ||
| 138 | if (khr_driver_properties) { | ||
| 139 | vk::PhysicalDeviceDriverPropertiesKHR driver; | ||
| 140 | vk::PhysicalDeviceProperties2 properties; | ||
| 141 | properties.pNext = &driver; | ||
| 142 | physical.getProperties2(&properties, dld); | ||
| 143 | driver_id = driver.driverID; | ||
| 144 | LOG_INFO(Render_Vulkan, "Driver: {} {}", driver.driverName, driver.driverInfo); | ||
| 145 | } else { | ||
| 146 | LOG_INFO(Render_Vulkan, "Driver: Unknown"); | ||
| 147 | } | ||
| 148 | |||
| 81 | graphics_queue = logical->getQueue(graphics_family, 0, dld); | 149 | graphics_queue = logical->getQueue(graphics_family, 0, dld); |
| 82 | present_queue = logical->getQueue(present_family, 0, dld); | 150 | present_queue = logical->getQueue(present_family, 0, dld); |
| 83 | return true; | 151 | return true; |
| @@ -92,20 +160,19 @@ vk::Format VKDevice::GetSupportedFormat(vk::Format wanted_format, | |||
| 92 | // The wanted format is not supported by hardware, search for alternatives | 160 | // The wanted format is not supported by hardware, search for alternatives |
| 93 | const vk::Format* alternatives = GetFormatAlternatives(wanted_format); | 161 | const vk::Format* alternatives = GetFormatAlternatives(wanted_format); |
| 94 | if (alternatives == nullptr) { | 162 | if (alternatives == nullptr) { |
| 95 | LOG_CRITICAL(Render_Vulkan, | 163 | UNREACHABLE_MSG("Format={} with usage={} and type={} has no defined alternatives and host " |
| 96 | "Format={} with usage={} and type={} has no defined alternatives and host " | 164 | "hardware does not support it", |
| 97 | "hardware does not support it", | 165 | vk::to_string(wanted_format), vk::to_string(wanted_usage), |
| 98 | vk::to_string(wanted_format), vk::to_string(wanted_usage), | 166 | static_cast<u32>(format_type)); |
| 99 | static_cast<u32>(format_type)); | ||
| 100 | UNREACHABLE(); | ||
| 101 | return wanted_format; | 167 | return wanted_format; |
| 102 | } | 168 | } |
| 103 | 169 | ||
| 104 | std::size_t i = 0; | 170 | std::size_t i = 0; |
| 105 | for (vk::Format alternative = alternatives[0]; alternative != vk::Format{}; | 171 | for (vk::Format alternative = alternatives[0]; alternative != vk::Format{}; |
| 106 | alternative = alternatives[++i]) { | 172 | alternative = alternatives[++i]) { |
| 107 | if (!IsFormatSupported(alternative, wanted_usage, format_type)) | 173 | if (!IsFormatSupported(alternative, wanted_usage, format_type)) { |
| 108 | continue; | 174 | continue; |
| 175 | } | ||
| 109 | LOG_WARNING(Render_Vulkan, | 176 | LOG_WARNING(Render_Vulkan, |
| 110 | "Emulating format={} with alternative format={} with usage={} and type={}", | 177 | "Emulating format={} with alternative format={} with usage={} and type={}", |
| 111 | static_cast<u32>(wanted_format), static_cast<u32>(alternative), | 178 | static_cast<u32>(wanted_format), static_cast<u32>(alternative), |
| @@ -114,12 +181,10 @@ vk::Format VKDevice::GetSupportedFormat(vk::Format wanted_format, | |||
| 114 | } | 181 | } |
| 115 | 182 | ||
| 116 | // No alternatives found, panic | 183 | // No alternatives found, panic |
| 117 | LOG_CRITICAL(Render_Vulkan, | 184 | UNREACHABLE_MSG("Format={} with usage={} and type={} is not supported by the host hardware and " |
| 118 | "Format={} with usage={} and type={} is not supported by the host hardware and " | 185 | "doesn't support any of the alternatives", |
| 119 | "doesn't support any of the alternatives", | 186 | static_cast<u32>(wanted_format), static_cast<u32>(wanted_usage), |
| 120 | static_cast<u32>(wanted_format), static_cast<u32>(wanted_usage), | 187 | static_cast<u32>(format_type)); |
| 121 | static_cast<u32>(format_type)); | ||
| 122 | UNREACHABLE(); | ||
| 123 | return wanted_format; | 188 | return wanted_format; |
| 124 | } | 189 | } |
| 125 | 190 | ||
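GetSupportedFormat walks one of the Alternatives arrays above, which end with a default-constructed vk::Format acting as a sentinel. A small sketch of that sentinel-terminated lookup, with plain ints standing in for vk::Format (0 plays the role of vk::Format{}):

#include <cassert>
#include <cstddef>

constexpr int kSentinel = 0;                        // mirrors vk::Format{}
constexpr int alternatives[] = {40, 50, kSentinel}; // two made-up fallback formats

// Returns the first alternative accepted by the predicate, or -1 if none is.
template <typename Pred>
int PickAlternative(const int* alts, Pred supported) {
    for (std::size_t i = 0; alts[i] != kSentinel; ++i) {
        if (supported(alts[i])) {
            return alts[i];
        }
    }
    return -1;
}

int main() {
    assert(PickAlternative(alternatives, [](int format) { return format == 50; }) == 50);
    assert(PickAlternative(alternatives, [](int) { return false; }) == -1);
}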
| @@ -132,7 +197,7 @@ bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features | |||
| 132 | vk::FormatFeatureFlagBits::eSampledImage | vk::FormatFeatureFlagBits::eBlitSrc | | 197 | vk::FormatFeatureFlagBits::eSampledImage | vk::FormatFeatureFlagBits::eBlitSrc | |
| 133 | vk::FormatFeatureFlagBits::eBlitDst | vk::FormatFeatureFlagBits::eTransferSrc | | 198 | vk::FormatFeatureFlagBits::eBlitDst | vk::FormatFeatureFlagBits::eTransferSrc | |
| 134 | vk::FormatFeatureFlagBits::eTransferDst}; | 199 | vk::FormatFeatureFlagBits::eTransferDst}; |
| 135 | constexpr std::array<vk::Format, 9> astc_formats = { | 200 | constexpr std::array astc_formats = { |
| 136 | vk::Format::eAstc4x4UnormBlock, vk::Format::eAstc4x4SrgbBlock, | 201 | vk::Format::eAstc4x4UnormBlock, vk::Format::eAstc4x4SrgbBlock, |
| 137 | vk::Format::eAstc8x8SrgbBlock, vk::Format::eAstc8x6SrgbBlock, | 202 | vk::Format::eAstc8x8SrgbBlock, vk::Format::eAstc8x6SrgbBlock, |
| 138 | vk::Format::eAstc5x4SrgbBlock, vk::Format::eAstc5x5UnormBlock, | 203 | vk::Format::eAstc5x4SrgbBlock, vk::Format::eAstc5x5UnormBlock, |
| @@ -151,76 +216,120 @@ bool VKDevice::IsFormatSupported(vk::Format wanted_format, vk::FormatFeatureFlag | |||
| 151 | FormatType format_type) const { | 216 | FormatType format_type) const { |
| 152 | const auto it = format_properties.find(wanted_format); | 217 | const auto it = format_properties.find(wanted_format); |
| 153 | if (it == format_properties.end()) { | 218 | if (it == format_properties.end()) { |
| 154 | LOG_CRITICAL(Render_Vulkan, "Unimplemented format query={}", vk::to_string(wanted_format)); | 219 | UNIMPLEMENTED_MSG("Unimplemented format query={}", vk::to_string(wanted_format)); |
| 155 | UNREACHABLE(); | ||
| 156 | return true; | 220 | return true; |
| 157 | } | 221 | } |
| 158 | const vk::FormatFeatureFlags supported_usage = GetFormatFeatures(it->second, format_type); | 222 | const auto supported_usage = GetFormatFeatures(it->second, format_type); |
| 159 | return (supported_usage & wanted_usage) == wanted_usage; | 223 | return (supported_usage & wanted_usage) == wanted_usage; |
| 160 | } | 224 | } |
| 161 | 225 | ||
| 162 | bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical, | 226 | bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical, |
| 163 | vk::SurfaceKHR surface) { | 227 | vk::SurfaceKHR surface) { |
| 164 | bool has_swapchain{}; | 228 | LOG_INFO(Render_Vulkan, "{}", physical.getProperties(dldi).deviceName); |
| 229 | bool is_suitable = true; | ||
| 230 | |||
| 231 | constexpr std::array required_extensions = {VK_KHR_SWAPCHAIN_EXTENSION_NAME, | ||
| 232 | VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME}; | ||
| 233 | std::bitset<required_extensions.size()> available_extensions{}; | ||
| 234 | |||
| 165 | for (const auto& prop : physical.enumerateDeviceExtensionProperties(nullptr, dldi)) { | 235 | for (const auto& prop : physical.enumerateDeviceExtensionProperties(nullptr, dldi)) { |
| 166 | has_swapchain |= prop.extensionName == std::string(VK_KHR_SWAPCHAIN_EXTENSION_NAME); | 236 | for (std::size_t i = 0; i < required_extensions.size(); ++i) { |
| 237 | if (available_extensions[i]) { | ||
| 238 | continue; | ||
| 239 | } | ||
| 240 | available_extensions[i] = | ||
| 241 | required_extensions[i] == std::string_view{prop.extensionName}; | ||
| 242 | } | ||
| 167 | } | 243 | } |
| 168 | if (!has_swapchain) { | 244 | if (!available_extensions.all()) { |
| 169 | // The device doesn't support creating swapchains. | 245 | for (std::size_t i = 0; i < required_extensions.size(); ++i) { |
| 170 | return false; | 246 | if (available_extensions[i]) { |
| 247 | continue; | ||
| 248 | } | ||
| 249 | LOG_INFO(Render_Vulkan, "Missing required extension: {}", required_extensions[i]); | ||
| 250 | is_suitable = false; | ||
| 251 | } | ||
| 171 | } | 252 | } |
| 172 | 253 | ||
| 173 | bool has_graphics{}, has_present{}; | 254 | bool has_graphics{}, has_present{}; |
| 174 | const auto queue_family_properties = physical.getQueueFamilyProperties(dldi); | 255 | const auto queue_family_properties = physical.getQueueFamilyProperties(dldi); |
| 175 | for (u32 i = 0; i < static_cast<u32>(queue_family_properties.size()); ++i) { | 256 | for (u32 i = 0; i < static_cast<u32>(queue_family_properties.size()); ++i) { |
| 176 | const auto& family = queue_family_properties[i]; | 257 | const auto& family = queue_family_properties[i]; |
| 177 | if (family.queueCount == 0) | 258 | if (family.queueCount == 0) { |
| 178 | continue; | 259 | continue; |
| 179 | 260 | } | |
| 180 | has_graphics |= | 261 | has_graphics |= |
| 181 | (family.queueFlags & vk::QueueFlagBits::eGraphics) != static_cast<vk::QueueFlagBits>(0); | 262 | (family.queueFlags & vk::QueueFlagBits::eGraphics) != static_cast<vk::QueueFlagBits>(0); |
| 182 | has_present |= physical.getSurfaceSupportKHR(i, surface, dldi) != 0; | 263 | has_present |= physical.getSurfaceSupportKHR(i, surface, dldi) != 0; |
| 183 | } | 264 | } |
| 184 | if (!has_graphics || !has_present) { | 265 | if (!has_graphics || !has_present) { |
| 185 | // The device doesn't have a graphics and present queue. | 266 | LOG_INFO(Render_Vulkan, "Device lacks a graphics and present queue"); |
| 186 | return false; | 267 | is_suitable = false; |
| 187 | } | 268 | } |
| 188 | 269 | ||
| 189 | // TODO(Rodrigo): Check if the device matches all requirements. | 270 | // TODO(Rodrigo): Check if the device matches all requirements. |
| 190 | const auto properties{physical.getProperties(dldi)}; | 271 | const auto properties{physical.getProperties(dldi)}; |
| 191 | const auto limits{properties.limits}; | 272 | const auto& limits{properties.limits}; |
| 192 | if (limits.maxUniformBufferRange < 65536) { | 273 | |
| 193 | return false; | 274 | constexpr u32 required_ubo_size = 65536; |
| 275 | if (limits.maxUniformBufferRange < required_ubo_size) { | ||
| 276 | LOG_INFO(Render_Vulkan, "Device UBO size {} is too small, {} is required", | ||
| 277 | limits.maxUniformBufferRange, required_ubo_size); | ||
| 278 | is_suitable = false; | ||
| 194 | } | 279 | } |
| 195 | 280 | ||
| 196 | const vk::PhysicalDeviceFeatures features{physical.getFeatures(dldi)}; | 281 | const auto features{physical.getFeatures(dldi)}; |
| 197 | if (!features.vertexPipelineStoresAndAtomics || !features.independentBlend) { | 282 | const std::array feature_report = { |
| 198 | return false; | 283 | std::make_pair(features.vertexPipelineStoresAndAtomics, "vertexPipelineStoresAndAtomics"), |
| 284 | std::make_pair(features.independentBlend, "independentBlend"), | ||
| 285 | std::make_pair(features.depthClamp, "depthClamp"), | ||
| 286 | std::make_pair(features.samplerAnisotropy, "samplerAnisotropy"), | ||
| 287 | std::make_pair(features.largePoints, "largePoints"), | ||
| 288 | }; | ||
| 289 | for (const auto& [supported, name] : feature_report) { | ||
| 290 | if (supported) { | ||
| 291 | continue; | ||
| 292 | } | ||
| 293 | LOG_INFO(Render_Vulkan, "Missing required feature: {}", name); | ||
| 294 | is_suitable = false; | ||
| 199 | } | 295 | } |
| 200 | 296 | ||
| 201 | // Device is suitable. | 297 | return is_suitable; |
| 202 | return true; | ||
| 203 | } | 298 | } |
| 204 | 299 | ||
| 205 | std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynamic& dldi) { | 300 | std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynamic& dldi) { |
| 206 | std::vector<const char*> extensions; | 301 | std::vector<const char*> extensions; |
| 207 | extensions.reserve(2); | 302 | extensions.reserve(7); |
| 208 | extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); | 303 | extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); |
| 304 | extensions.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); | ||
| 209 | 305 | ||
| 210 | const auto Test = [&](const vk::ExtensionProperties& extension, | 306 | const auto Test = [&](const vk::ExtensionProperties& extension, |
| 211 | std::optional<std::reference_wrapper<bool>> status, const char* name, | 307 | std::optional<std::reference_wrapper<bool>> status, const char* name, |
| 212 | u32 revision) { | 308 | bool push) { |
| 213 | if (extension.extensionName != std::string(name)) { | 309 | if (extension.extensionName != std::string_view(name)) { |
| 214 | return; | 310 | return; |
| 215 | } | 311 | } |
| 216 | extensions.push_back(name); | 312 | if (push) { |
| 313 | extensions.push_back(name); | ||
| 314 | } | ||
| 217 | if (status) { | 315 | if (status) { |
| 218 | status->get() = true; | 316 | status->get() = true; |
| 219 | } | 317 | } |
| 220 | }; | 318 | }; |
| 221 | 319 | ||
| 320 | bool khr_shader_float16_int8{}; | ||
| 222 | for (const auto& extension : physical.enumerateDeviceExtensionProperties(nullptr, dldi)) { | 321 | for (const auto& extension : physical.enumerateDeviceExtensionProperties(nullptr, dldi)) { |
| 223 | Test(extension, ext_scalar_block_layout, VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME, 1); | 322 | Test(extension, khr_uniform_buffer_standard_layout, |
| 323 | VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME, true); | ||
| 324 | Test(extension, ext_index_type_uint8, VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME, true); | ||
| 325 | Test(extension, khr_driver_properties, VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME, true); | ||
| 326 | Test(extension, khr_shader_float16_int8, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME, false); | ||
| 327 | } | ||
| 328 | |||
| 329 | if (khr_shader_float16_int8) { | ||
| 330 | is_float16_supported = | ||
| 331 | GetFeatures<vk::PhysicalDeviceFloat16Int8FeaturesKHR>(physical, dldi).shaderFloat16; | ||
| 332 | extensions.push_back(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME); | ||
| 224 | } | 333 | } |
| 225 | 334 | ||
| 226 | return extensions; | 335 | return extensions; |
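IsSuitable now reports every unmet requirement instead of returning on the first failure: each advertised extension is matched against the required list into a std::bitset, and any bit still clear afterwards is logged as missing. A standalone sketch of that check (the string literals stand in for the VK_*_EXTENSION_NAME macros used above, and the advertised list is invented for the example):

#include <array>
#include <bitset>
#include <cstddef>
#include <iostream>
#include <string_view>
#include <vector>

int main() {
    constexpr std::array<std::string_view, 2> required = {"VK_KHR_swapchain",
                                                          "VK_EXT_vertex_attribute_divisor"};
    const std::vector<std::string_view> advertised = {"VK_KHR_swapchain", "VK_KHR_maintenance1"};

    std::bitset<required.size()> available;
    for (const std::string_view name : advertised) {
        for (std::size_t i = 0; i < required.size(); ++i) {
            if (!available[i] && required[i] == name) {
                available[i] = true;
            }
        }
    }

    bool is_suitable = true;
    for (std::size_t i = 0; i < required.size(); ++i) {
        if (!available[i]) {
            std::cout << "Missing required extension: " << required[i] << '\n';
            is_suitable = false;
        }
    }
    std::cout << (is_suitable ? "suitable" : "not suitable") << '\n';
}

Collecting all failures before answering is what makes the new log output useful for bug reports: a single run names every missing extension and feature rather than only the first one encountered.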
| @@ -250,9 +359,10 @@ void VKDevice::SetupFamilies(const vk::DispatchLoaderDynamic& dldi, vk::SurfaceK | |||
| 250 | } | 359 | } |
| 251 | 360 | ||
| 252 | void VKDevice::SetupProperties(const vk::DispatchLoaderDynamic& dldi) { | 361 | void VKDevice::SetupProperties(const vk::DispatchLoaderDynamic& dldi) { |
| 253 | const vk::PhysicalDeviceProperties props = physical.getProperties(dldi); | 362 | const auto props = physical.getProperties(dldi); |
| 254 | device_type = props.deviceType; | 363 | device_type = props.deviceType; |
| 255 | uniform_buffer_alignment = static_cast<u64>(props.limits.minUniformBufferOffsetAlignment); | 364 | uniform_buffer_alignment = static_cast<u64>(props.limits.minUniformBufferOffsetAlignment); |
| 365 | storage_buffer_alignment = static_cast<u64>(props.limits.minStorageBufferOffsetAlignment); | ||
| 256 | max_storage_buffer_range = static_cast<u64>(props.limits.maxStorageBufferRange); | 366 | max_storage_buffer_range = static_cast<u64>(props.limits.maxStorageBufferRange); |
| 257 | } | 367 | } |
| 258 | 368 | ||
| @@ -273,42 +383,53 @@ std::vector<vk::DeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() con | |||
| 273 | return queue_cis; | 383 | return queue_cis; |
| 274 | } | 384 | } |
| 275 | 385 | ||
| 276 | std::map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperties( | 386 | std::unordered_map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperties( |
| 277 | const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical) { | 387 | const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical) { |
| 278 | static constexpr std::array formats{vk::Format::eA8B8G8R8UnormPack32, | 388 | constexpr std::array formats{vk::Format::eA8B8G8R8UnormPack32, |
| 279 | vk::Format::eB5G6R5UnormPack16, | 389 | vk::Format::eA8B8G8R8SnormPack32, |
| 280 | vk::Format::eA2B10G10R10UnormPack32, | 390 | vk::Format::eA8B8G8R8SrgbPack32, |
| 281 | vk::Format::eR32G32B32A32Sfloat, | 391 | vk::Format::eB5G6R5UnormPack16, |
| 282 | vk::Format::eR16G16Unorm, | 392 | vk::Format::eA2B10G10R10UnormPack32, |
| 283 | vk::Format::eR16G16Snorm, | 393 | vk::Format::eR32G32B32A32Sfloat, |
| 284 | vk::Format::eR8G8B8A8Srgb, | 394 | vk::Format::eR16G16B16A16Uint, |
| 285 | vk::Format::eR8Unorm, | 395 | vk::Format::eR16G16Unorm, |
| 286 | vk::Format::eB10G11R11UfloatPack32, | 396 | vk::Format::eR16G16Snorm, |
| 287 | vk::Format::eR32Sfloat, | 397 | vk::Format::eR16G16Sfloat, |
| 288 | vk::Format::eR16Sfloat, | 398 | vk::Format::eR16Unorm, |
| 289 | vk::Format::eR16G16B16A16Sfloat, | 399 | vk::Format::eR8G8B8A8Srgb, |
| 290 | vk::Format::eD32Sfloat, | 400 | vk::Format::eR8G8Unorm, |
| 291 | vk::Format::eD16Unorm, | 401 | vk::Format::eR8G8Snorm, |
| 292 | vk::Format::eD16UnormS8Uint, | 402 | vk::Format::eR8Unorm, |
| 293 | vk::Format::eD24UnormS8Uint, | 403 | vk::Format::eB10G11R11UfloatPack32, |
| 294 | vk::Format::eD32SfloatS8Uint, | 404 | vk::Format::eR32Sfloat, |
| 295 | vk::Format::eBc1RgbaUnormBlock, | 405 | vk::Format::eR16Sfloat, |
| 296 | vk::Format::eBc2UnormBlock, | 406 | vk::Format::eR16G16B16A16Sfloat, |
| 297 | vk::Format::eBc3UnormBlock, | 407 | vk::Format::eB8G8R8A8Unorm, |
| 298 | vk::Format::eBc4UnormBlock, | 408 | vk::Format::eD32Sfloat, |
| 299 | vk::Format::eBc5UnormBlock, | 409 | vk::Format::eD16Unorm, |
| 300 | vk::Format::eBc5SnormBlock, | 410 | vk::Format::eD16UnormS8Uint, |
| 301 | vk::Format::eBc7UnormBlock, | 411 | vk::Format::eD24UnormS8Uint, |
| 302 | vk::Format::eAstc4x4UnormBlock, | 412 | vk::Format::eD32SfloatS8Uint, |
| 303 | vk::Format::eAstc4x4SrgbBlock, | 413 | vk::Format::eBc1RgbaUnormBlock, |
| 304 | vk::Format::eAstc8x8SrgbBlock, | 414 | vk::Format::eBc2UnormBlock, |
| 305 | vk::Format::eAstc8x6SrgbBlock, | 415 | vk::Format::eBc3UnormBlock, |
| 306 | vk::Format::eAstc5x4SrgbBlock, | 416 | vk::Format::eBc4UnormBlock, |
| 307 | vk::Format::eAstc5x5UnormBlock, | 417 | vk::Format::eBc5UnormBlock, |
| 308 | vk::Format::eAstc5x5SrgbBlock, | 418 | vk::Format::eBc5SnormBlock, |
| 309 | vk::Format::eAstc10x8UnormBlock, | 419 | vk::Format::eBc7UnormBlock, |
| 310 | vk::Format::eAstc10x8SrgbBlock}; | 420 | vk::Format::eBc1RgbaSrgbBlock, |
| 311 | std::map<vk::Format, vk::FormatProperties> format_properties; | 421 | vk::Format::eBc3SrgbBlock, |
| 422 | vk::Format::eBc7SrgbBlock, | ||
| 423 | vk::Format::eAstc4x4UnormBlock, | ||
| 424 | vk::Format::eAstc4x4SrgbBlock, | ||
| 425 | vk::Format::eAstc8x8SrgbBlock, | ||
| 426 | vk::Format::eAstc8x6SrgbBlock, | ||
| 427 | vk::Format::eAstc5x4SrgbBlock, | ||
| 428 | vk::Format::eAstc5x5UnormBlock, | ||
| 429 | vk::Format::eAstc5x5SrgbBlock, | ||
| 430 | vk::Format::eAstc10x8UnormBlock, | ||
| 431 | vk::Format::eAstc10x8SrgbBlock}; | ||
| 432 | std::unordered_map<vk::Format, vk::FormatProperties> format_properties; | ||
| 312 | for (const auto format : formats) { | 433 | for (const auto format : formats) { |
| 313 | format_properties.emplace(format, physical.getFormatProperties(format, dldi)); | 434 | format_properties.emplace(format, physical.getFormatProperties(format, dldi)); |
| 314 | } | 435 | } |
diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h index 537825d8b..010d4c3d6 100644 --- a/src/video_core/renderer_vulkan/vk_device.h +++ b/src/video_core/renderer_vulkan/vk_device.h | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <map> | 7 | #include <unordered_map> |
| 8 | #include <vector> | 8 | #include <vector> |
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "video_core/renderer_vulkan/declarations.h" | 10 | #include "video_core/renderer_vulkan/declarations.h" |
| @@ -69,16 +69,26 @@ public: | |||
| 69 | return present_family; | 69 | return present_family; |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | /// Returns if the device is integrated with the host CPU. | 72 | /// Returns true if the device is integrated with the host CPU. |
| 73 | bool IsIntegrated() const { | 73 | bool IsIntegrated() const { |
| 74 | return device_type == vk::PhysicalDeviceType::eIntegratedGpu; | 74 | return device_type == vk::PhysicalDeviceType::eIntegratedGpu; |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | /// Returns the driver ID. | ||
| 78 | vk::DriverIdKHR GetDriverID() const { | ||
| 79 | return driver_id; | ||
| 80 | } | ||
| 81 | |||
| 77 | /// Returns uniform buffer alignment requirement. | 82 | /// Returns uniform buffer alignment requirement. |
| 78 | u64 GetUniformBufferAlignment() const { | 83 | u64 GetUniformBufferAlignment() const { |
| 79 | return uniform_buffer_alignment; | 84 | return uniform_buffer_alignment; |
| 80 | } | 85 | } |
| 81 | 86 | ||
| 87 | /// Returns storage buffer alignment requirement. | ||
| 88 | u64 GetStorageBufferAlignment() const { | ||
| 89 | return storage_buffer_alignment; | ||
| 90 | } | ||
| 91 | |||
| 82 | /// Returns the maximum range for storage buffers. | 92 | /// Returns the maximum range for storage buffers. |
| 83 | u64 GetMaxStorageBufferRange() const { | 93 | u64 GetMaxStorageBufferRange() const { |
| 84 | return max_storage_buffer_range; | 94 | return max_storage_buffer_range; |
| @@ -89,9 +99,19 @@ public: | |||
| 89 | return is_optimal_astc_supported; | 99 | return is_optimal_astc_supported; |
| 90 | } | 100 | } |
| 91 | 101 | ||
| 102 | /// Returns true if the device supports float16 natively | ||
| 103 | bool IsFloat16Supported() const { | ||
| 104 | return is_float16_supported; | ||
| 105 | } | ||
| 106 | |||
| 92 | /// Returns true if the device supports VK_EXT_scalar_block_layout. | 107 | /// Returns true if the device supports VK_KHR_uniform_buffer_standard_layout. |
| 93 | bool IsExtScalarBlockLayoutSupported() const { | 108 | bool IsKhrUniformBufferStandardLayoutSupported() const { |
| 94 | return ext_scalar_block_layout; | 109 | return khr_uniform_buffer_standard_layout; |
| 110 | } | ||
| 111 | |||
| 112 | /// Returns true if the device supports VK_EXT_index_type_uint8. | ||
| 113 | bool IsExtIndexTypeUint8Supported() const { | ||
| 114 | return ext_index_type_uint8; | ||
| 95 | } | 115 | } |
| 96 | 116 | ||
| 97 | /// Checks if the physical device is suitable. | 117 | /// Checks if the physical device is suitable. |
| @@ -123,22 +143,28 @@ private: | |||
| 123 | FormatType format_type) const; | 143 | FormatType format_type) const; |
| 124 | 144 | ||
| 125 | /// Returns the device properties for Vulkan formats. | 145 | /// Returns the device properties for Vulkan formats. |
| 126 | static std::map<vk::Format, vk::FormatProperties> GetFormatProperties( | 146 | static std::unordered_map<vk::Format, vk::FormatProperties> GetFormatProperties( |
| 127 | const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical); | 147 | const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical); |
| 128 | 148 | ||
| 129 | const vk::PhysicalDevice physical; ///< Physical device. | 149 | const vk::PhysicalDevice physical; ///< Physical device. |
| 130 | vk::DispatchLoaderDynamic dld; ///< Device function pointers. | 150 | vk::DispatchLoaderDynamic dld; ///< Device function pointers. |
| 131 | UniqueDevice logical; ///< Logical device. | 151 | UniqueDevice logical; ///< Logical device. |
| 132 | vk::Queue graphics_queue; ///< Main graphics queue. | 152 | vk::Queue graphics_queue; ///< Main graphics queue. |
| 133 | vk::Queue present_queue; ///< Main present queue. | 153 | vk::Queue present_queue; ///< Main present queue. |
| 134 | u32 graphics_family{}; ///< Main graphics queue family index. | 154 | u32 graphics_family{}; ///< Main graphics queue family index. |
| 135 | u32 present_family{}; ///< Main present queue family index. | 155 | u32 present_family{}; ///< Main present queue family index. |
| 136 | vk::PhysicalDeviceType device_type; ///< Physical device type. | 156 | vk::PhysicalDeviceType device_type; ///< Physical device type. |
| 137 | u64 uniform_buffer_alignment{}; ///< Uniform buffer alignment requirement. | 157 | vk::DriverIdKHR driver_id{}; ///< Driver ID. |
| 138 | u64 max_storage_buffer_range{}; ///< Max storage buffer size. | 158 | u64 uniform_buffer_alignment{}; ///< Uniform buffer alignment requirement. |
| 139 | bool is_optimal_astc_supported{}; ///< Support for native ASTC. | 159 | u64 storage_buffer_alignment{}; ///< Storage buffer alignment requirement. |
| 140 | bool ext_scalar_block_layout{}; ///< Support for VK_EXT_scalar_block_layout. | 160 | u64 max_storage_buffer_range{}; ///< Max storage buffer size. |
| 141 | std::map<vk::Format, vk::FormatProperties> format_properties; ///< Format properties dictionary. | 161 | bool is_optimal_astc_supported{}; ///< Support for native ASTC. |
| 162 | bool is_float16_supported{}; ///< Support for float16 arithmetics. | ||
| 163 | bool khr_uniform_buffer_standard_layout{}; ///< Support for std430 on UBOs. | ||
| 164 | bool ext_index_type_uint8{}; ///< Support for VK_EXT_index_type_uint8. | ||
| 165 | bool khr_driver_properties{}; ///< Support for VK_KHR_driver_properties. | ||
| 166 | std::unordered_map<vk::Format, vk::FormatProperties> | ||
| 167 | format_properties; ///< Format properties dictionary. | ||
| 142 | }; | 168 | }; |
| 143 | 169 | ||
| 144 | } // namespace Vulkan | 170 | } // namespace Vulkan |
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index a35b45c9c..f7fbbb6e4 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp | |||
| @@ -370,8 +370,8 @@ private: | |||
| 370 | u32 binding = const_buffers_base_binding; | 370 | u32 binding = const_buffers_base_binding; |
| 371 | for (const auto& entry : ir.GetConstantBuffers()) { | 371 | for (const auto& entry : ir.GetConstantBuffers()) { |
| 372 | const auto [index, size] = entry; | 372 | const auto [index, size] = entry; |
| 373 | const Id type = | 373 | const Id type = device.IsKhrUniformBufferStandardLayoutSupported() ? t_cbuf_scalar_ubo |
| 374 | device.IsExtScalarBlockLayoutSupported() ? t_cbuf_scalar_ubo : t_cbuf_std140_ubo; | 374 | : t_cbuf_std140_ubo; |
| 375 | const Id id = OpVariable(type, spv::StorageClass::Uniform); | 375 | const Id id = OpVariable(type, spv::StorageClass::Uniform); |
| 376 | AddGlobalVariable(Name(id, fmt::format("cbuf_{}", index))); | 376 | AddGlobalVariable(Name(id, fmt::format("cbuf_{}", index))); |
| 377 | 377 | ||
| @@ -565,7 +565,7 @@ private: | |||
| 565 | const Id buffer_id = constant_buffers.at(cbuf->GetIndex()); | 565 | const Id buffer_id = constant_buffers.at(cbuf->GetIndex()); |
| 566 | 566 | ||
| 567 | Id pointer{}; | 567 | Id pointer{}; |
| 568 | if (device.IsExtScalarBlockLayoutSupported()) { | 568 | if (device.IsKhrUniformBufferStandardLayoutSupported()) { |
| 569 | const Id buffer_offset = Emit(OpShiftRightLogical( | 569 | const Id buffer_offset = Emit(OpShiftRightLogical( |
| 570 | t_uint, BitcastTo<Type::Uint>(Visit(offset)), Constant(t_uint, 2u))); | 570 | t_uint, BitcastTo<Type::Uint>(Visit(offset)), Constant(t_uint, 2u))); |
| 571 | pointer = Emit( | 571 | pointer = Emit( |
| @@ -944,6 +944,41 @@ private: | |||
| 944 | return {}; | 944 | return {}; |
| 945 | } | 945 | } |
| 946 | 946 | ||
| 947 | Id AtomicImageAdd(Operation operation) { | ||
| 948 | UNIMPLEMENTED(); | ||
| 949 | return {}; | ||
| 950 | } | ||
| 951 | |||
| 952 | Id AtomicImageMin(Operation operation) { | ||
| 953 | UNIMPLEMENTED(); | ||
| 954 | return {}; | ||
| 955 | } | ||
| 956 | |||
| 957 | Id AtomicImageMax(Operation operation) { | ||
| 958 | UNIMPLEMENTED(); | ||
| 959 | return {}; | ||
| 960 | } | ||
| 961 | |||
| 962 | Id AtomicImageAnd(Operation operation) { | ||
| 963 | UNIMPLEMENTED(); | ||
| 964 | return {}; | ||
| 965 | } | ||
| 966 | |||
| 967 | Id AtomicImageOr(Operation operation) { | ||
| 968 | UNIMPLEMENTED(); | ||
| 969 | return {}; | ||
| 970 | } | ||
| 971 | |||
| 972 | Id AtomicImageXor(Operation operation) { | ||
| 973 | UNIMPLEMENTED(); | ||
| 974 | return {}; | ||
| 975 | } | ||
| 976 | |||
| 977 | Id AtomicImageExchange(Operation operation) { | ||
| 978 | UNIMPLEMENTED(); | ||
| 979 | return {}; | ||
| 980 | } | ||
| 981 | |||
| 947 | Id Branch(Operation operation) { | 982 | Id Branch(Operation operation) { |
| 948 | const auto target = std::get_if<ImmediateNode>(&*operation[0]); | 983 | const auto target = std::get_if<ImmediateNode>(&*operation[0]); |
| 949 | UNIMPLEMENTED_IF(!target); | 984 | UNIMPLEMENTED_IF(!target); |
| @@ -1092,6 +1127,46 @@ private: | |||
| 1092 | return {}; | 1127 | return {}; |
| 1093 | } | 1128 | } |
| 1094 | 1129 | ||
| 1130 | Id ShuffleIndexed(Operation) { | ||
| 1131 | UNIMPLEMENTED(); | ||
| 1132 | return {}; | ||
| 1133 | } | ||
| 1134 | |||
| 1135 | Id ShuffleUp(Operation) { | ||
| 1136 | UNIMPLEMENTED(); | ||
| 1137 | return {}; | ||
| 1138 | } | ||
| 1139 | |||
| 1140 | Id ShuffleDown(Operation) { | ||
| 1141 | UNIMPLEMENTED(); | ||
| 1142 | return {}; | ||
| 1143 | } | ||
| 1144 | |||
| 1145 | Id ShuffleButterfly(Operation) { | ||
| 1146 | UNIMPLEMENTED(); | ||
| 1147 | return {}; | ||
| 1148 | } | ||
| 1149 | |||
| 1150 | Id InRangeShuffleIndexed(Operation) { | ||
| 1151 | UNIMPLEMENTED(); | ||
| 1152 | return {}; | ||
| 1153 | } | ||
| 1154 | |||
| 1155 | Id InRangeShuffleUp(Operation) { | ||
| 1156 | UNIMPLEMENTED(); | ||
| 1157 | return {}; | ||
| 1158 | } | ||
| 1159 | |||
| 1160 | Id InRangeShuffleDown(Operation) { | ||
| 1161 | UNIMPLEMENTED(); | ||
| 1162 | return {}; | ||
| 1163 | } | ||
| 1164 | |||
| 1165 | Id InRangeShuffleButterfly(Operation) { | ||
| 1166 | UNIMPLEMENTED(); | ||
| 1167 | return {}; | ||
| 1168 | } | ||
| 1169 | |||
| 1095 | Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type, | 1170 | Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type, |
| 1096 | const std::string& name) { | 1171 | const std::string& name) { |
| 1097 | const Id id = OpVariable(type, storage); | 1172 | const Id id = OpVariable(type, storage); |
| @@ -1366,6 +1441,13 @@ private: | |||
| 1366 | &SPIRVDecompiler::TexelFetch, | 1441 | &SPIRVDecompiler::TexelFetch, |
| 1367 | 1442 | ||
| 1368 | &SPIRVDecompiler::ImageStore, | 1443 | &SPIRVDecompiler::ImageStore, |
| 1444 | &SPIRVDecompiler::AtomicImageAdd, | ||
| 1445 | &SPIRVDecompiler::AtomicImageMin, | ||
| 1446 | &SPIRVDecompiler::AtomicImageMax, | ||
| 1447 | &SPIRVDecompiler::AtomicImageAnd, | ||
| 1448 | &SPIRVDecompiler::AtomicImageOr, | ||
| 1449 | &SPIRVDecompiler::AtomicImageXor, | ||
| 1450 | &SPIRVDecompiler::AtomicImageExchange, | ||
| 1369 | 1451 | ||
| 1370 | &SPIRVDecompiler::Branch, | 1452 | &SPIRVDecompiler::Branch, |
| 1371 | &SPIRVDecompiler::BranchIndirect, | 1453 | &SPIRVDecompiler::BranchIndirect, |
| @@ -1389,6 +1471,16 @@ private: | |||
| 1389 | &SPIRVDecompiler::VoteAll, | 1471 | &SPIRVDecompiler::VoteAll, |
| 1390 | &SPIRVDecompiler::VoteAny, | 1472 | &SPIRVDecompiler::VoteAny, |
| 1391 | &SPIRVDecompiler::VoteEqual, | 1473 | &SPIRVDecompiler::VoteEqual, |
| 1474 | |||
| 1475 | &SPIRVDecompiler::ShuffleIndexed, | ||
| 1476 | &SPIRVDecompiler::ShuffleUp, | ||
| 1477 | &SPIRVDecompiler::ShuffleDown, | ||
| 1478 | &SPIRVDecompiler::ShuffleButterfly, | ||
| 1479 | |||
| 1480 | &SPIRVDecompiler::InRangeShuffleIndexed, | ||
| 1481 | &SPIRVDecompiler::InRangeShuffleUp, | ||
| 1482 | &SPIRVDecompiler::InRangeShuffleDown, | ||
| 1483 | &SPIRVDecompiler::InRangeShuffleButterfly, | ||
| 1392 | }; | 1484 | }; |
| 1393 | static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount)); | 1485 | static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount)); |
| 1394 | 1486 | ||
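The new AtomicImage* and Shuffle* stubs only take effect because they are also registered in operation_decompilers in the same order as their OperationCode entries; the static_assert against OperationCode::Amount keeps the table and the enum the same size. A reduced sketch of that member-function dispatch table (a toy three-entry enum, not the real decompiler):

#include <array>
#include <cstddef>

enum class OperationCode { Add, ImageStore, ShuffleIndexed, Amount };

class Decompiler {
public:
    int Decompile(OperationCode code) {
        const auto decompiler = operation_decompilers[static_cast<std::size_t>(code)];
        return (this->*decompiler)();
    }

private:
    int Add() { return 1; }
    int ImageStore() { return 2; }
    int ShuffleIndexed() { return 3; }

    using Func = int (Decompiler::*)();
    static constexpr std::array<Func, 3> operation_decompilers = {
        &Decompiler::Add,
        &Decompiler::ImageStore,
        &Decompiler::ShuffleIndexed,
    };
    static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount));
};

int main() {
    Decompiler decompiler;
    return decompiler.Decompile(OperationCode::ShuffleIndexed) == 3 ? 0 : 1;
}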
diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp index 77151a24b..d54fb88c9 100644 --- a/src/video_core/shader/decode/image.cpp +++ b/src/video_core/shader/decode/image.cpp | |||
| @@ -44,7 +44,6 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) { | |||
| 44 | switch (opcode->get().GetId()) { | 44 | switch (opcode->get().GetId()) { |
| 45 | case OpCode::Id::SUST: { | 45 | case OpCode::Id::SUST: { |
| 46 | UNIMPLEMENTED_IF(instr.sust.mode != Tegra::Shader::SurfaceDataMode::P); | 46 | UNIMPLEMENTED_IF(instr.sust.mode != Tegra::Shader::SurfaceDataMode::P); |
| 47 | UNIMPLEMENTED_IF(instr.sust.image_type == Tegra::Shader::ImageType::TextureBuffer); | ||
| 48 | UNIMPLEMENTED_IF(instr.sust.out_of_bounds_store != Tegra::Shader::OutOfBoundsStore::Ignore); | 47 | UNIMPLEMENTED_IF(instr.sust.out_of_bounds_store != Tegra::Shader::OutOfBoundsStore::Ignore); |
| 49 | UNIMPLEMENTED_IF(instr.sust.component_mask_selector != 0xf); // Ensure we have an RGBA store | 48 | UNIMPLEMENTED_IF(instr.sust.component_mask_selector != 0xf); // Ensure we have an RGBA store |
| 50 | 49 | ||
| @@ -61,56 +60,105 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) { | |||
| 61 | } | 60 | } |
| 62 | 61 | ||
| 63 | const auto type{instr.sust.image_type}; | 62 | const auto type{instr.sust.image_type}; |
| 64 | const auto& image{instr.sust.is_immediate ? GetImage(instr.image, type) | 63 | auto& image{instr.sust.is_immediate ? GetImage(instr.image, type) |
| 65 | : GetBindlessImage(instr.gpr39, type)}; | 64 | : GetBindlessImage(instr.gpr39, type)}; |
| 65 | image.MarkWrite(); | ||
| 66 | |||
| 66 | MetaImage meta{image, values}; | 67 | MetaImage meta{image, values}; |
| 67 | const Node store{Operation(OperationCode::ImageStore, meta, std::move(coords))}; | 68 | bb.push_back(Operation(OperationCode::ImageStore, meta, std::move(coords))); |
| 68 | bb.push_back(store); | 69 | break; |
| 70 | } | ||
| 71 | case OpCode::Id::SUATOM: { | ||
| 72 | UNIMPLEMENTED_IF(instr.suatom_d.is_ba != 0); | ||
| 73 | |||
| 74 | Node value = GetRegister(instr.gpr0); | ||
| 75 | |||
| 76 | std::vector<Node> coords; | ||
| 77 | const std::size_t num_coords{GetImageTypeNumCoordinates(instr.sust.image_type)}; | ||
| 78 | for (std::size_t i = 0; i < num_coords; ++i) { | ||
| 79 | coords.push_back(GetRegister(instr.gpr8.Value() + i)); | ||
| 80 | } | ||
| 81 | |||
| 82 | const OperationCode operation_code = [instr] { | ||
| 83 | switch (instr.suatom_d.operation) { | ||
| 84 | case Tegra::Shader::ImageAtomicOperation::Add: | ||
| 85 | return OperationCode::AtomicImageAdd; | ||
| 86 | case Tegra::Shader::ImageAtomicOperation::Min: | ||
| 87 | return OperationCode::AtomicImageMin; | ||
| 88 | case Tegra::Shader::ImageAtomicOperation::Max: | ||
| 89 | return OperationCode::AtomicImageMax; | ||
| 90 | case Tegra::Shader::ImageAtomicOperation::And: | ||
| 91 | return OperationCode::AtomicImageAnd; | ||
| 92 | case Tegra::Shader::ImageAtomicOperation::Or: | ||
| 93 | return OperationCode::AtomicImageOr; | ||
| 94 | case Tegra::Shader::ImageAtomicOperation::Xor: | ||
| 95 | return OperationCode::AtomicImageXor; | ||
| 96 | case Tegra::Shader::ImageAtomicOperation::Exch: | ||
| 97 | return OperationCode::AtomicImageExchange; | ||
| 98 | default: | ||
| 99 | UNIMPLEMENTED_MSG("Unimplemented operation={}", | ||
| 100 | static_cast<u32>(instr.suatom_d.operation.Value())); | ||
| 101 | return OperationCode::AtomicImageAdd; | ||
| 102 | } | ||
| 103 | }(); | ||
| 104 | |||
| 105 | const auto& image{GetImage(instr.image, instr.suatom_d.image_type, instr.suatom_d.size)}; | ||
| 106 | MetaImage meta{image, {std::move(value)}}; | ||
| 107 | SetRegister(bb, instr.gpr0, Operation(operation_code, meta, std::move(coords))); | ||
| 69 | break; | 108 | break; |
| 70 | } | 109 | } |
| 71 | default: | 110 | default: |
| 72 | UNIMPLEMENTED_MSG("Unhandled conversion instruction: {}", opcode->get().GetName()); | 111 | UNIMPLEMENTED_MSG("Unhandled image instruction: {}", opcode->get().GetName()); |
| 73 | } | 112 | } |
| 74 | 113 | ||
| 75 | return pc; | 114 | return pc; |
| 76 | } | 115 | } |
| 77 | 116 | ||
| 78 | const Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) { | 117 | Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type, |
| 118 | std::optional<Tegra::Shader::ImageAtomicSize> size) { | ||
| 79 | const auto offset{static_cast<std::size_t>(image.index.Value())}; | 119 | const auto offset{static_cast<std::size_t>(image.index.Value())}; |
| 80 | 120 | if (const auto image = TryUseExistingImage(offset, type, size)) { | |
| 81 | // If this image has already been used, return the existing mapping. | 121 | return *image; |
| 82 | const auto itr{std::find_if(used_images.begin(), used_images.end(), | ||
| 83 | [=](const Image& entry) { return entry.GetOffset() == offset; })}; | ||
| 84 | if (itr != used_images.end()) { | ||
| 85 | ASSERT(itr->GetType() == type); | ||
| 86 | return *itr; | ||
| 87 | } | 122 | } |
| 88 | 123 | ||
| 89 | // Otherwise create a new mapping for this image. | ||
| 90 | const std::size_t next_index{used_images.size()}; | 124 | const std::size_t next_index{used_images.size()}; |
| 91 | const Image entry{offset, next_index, type}; | 125 | return used_images.emplace(offset, Image{offset, next_index, type, size}).first->second; |
| 92 | return *used_images.emplace(entry).first; | ||
| 93 | } | 126 | } |
| 94 | 127 | ||
| 95 | const Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, | 128 | Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type, |
| 96 | Tegra::Shader::ImageType type) { | 129 | std::optional<Tegra::Shader::ImageAtomicSize> size) { |
| 97 | const Node image_register{GetRegister(reg)}; | 130 | const Node image_register{GetRegister(reg)}; |
| 98 | const auto [base_image, cbuf_index, cbuf_offset]{ | 131 | const auto [base_image, cbuf_index, cbuf_offset]{ |
| 99 | TrackCbuf(image_register, global_code, static_cast<s64>(global_code.size()))}; | 132 | TrackCbuf(image_register, global_code, static_cast<s64>(global_code.size()))}; |
| 100 | const auto cbuf_key{(static_cast<u64>(cbuf_index) << 32) | static_cast<u64>(cbuf_offset)}; | 133 | const auto cbuf_key{(static_cast<u64>(cbuf_index) << 32) | static_cast<u64>(cbuf_offset)}; |
| 101 | 134 | ||
| 102 | // If this image has already been used, return the existing mapping. | 135 | if (const auto image = TryUseExistingImage(cbuf_key, type, size)) { |
| 103 | const auto itr{std::find_if(used_images.begin(), used_images.end(), | 136 | return *image; |
| 104 | [=](const Image& entry) { return entry.GetOffset() == cbuf_key; })}; | ||
| 105 | if (itr != used_images.end()) { | ||
| 106 | ASSERT(itr->GetType() == type); | ||
| 107 | return *itr; | ||
| 108 | } | 137 | } |
| 109 | 138 | ||
| 110 | // Otherwise create a new mapping for this image. | ||
| 111 | const std::size_t next_index{used_images.size()}; | 139 | const std::size_t next_index{used_images.size()}; |
| 112 | const Image entry{cbuf_index, cbuf_offset, next_index, type}; | 140 | return used_images.emplace(cbuf_key, Image{cbuf_index, cbuf_offset, next_index, type, size}) |
| 113 | return *used_images.emplace(entry).first; | 141 | .first->second; |
| 142 | } | ||
| 143 | |||
| 144 | Image* ShaderIR::TryUseExistingImage(u64 offset, Tegra::Shader::ImageType type, | ||
| 145 | std::optional<Tegra::Shader::ImageAtomicSize> size) { | ||
| 146 | auto it = used_images.find(offset); | ||
| 147 | if (it == used_images.end()) { | ||
| 148 | return nullptr; | ||
| 149 | } | ||
| 150 | auto& image = it->second; | ||
| 151 | ASSERT(image.GetType() == type); | ||
| 152 | |||
| 153 | if (size) { | ||
| 154 | // We know the size, if it's known it has to be the same as before, otherwise we can set it. | ||
| 155 | if (image.IsSizeKnown()) { | ||
| 156 | ASSERT(image.GetSize() == size); | ||
| 157 | } else { | ||
| 158 | image.SetSize(*size); | ||
| 159 | } | ||
| 160 | } | ||
| 161 | return &image; | ||
| 114 | } | 162 | } |
| 115 | 163 | ||
| 116 | } // namespace VideoCommon::Shader | 164 | } // namespace VideoCommon::Shader |
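GetImage and GetBindlessImage now share TryUseExistingImage: entries are looked up by offset (or by the packed cbuf key), a hit re-validates the image type, and the optional atomic size is recorded the first time an instruction supplies it and asserted to stay consistent afterwards. A standalone sketch of that flow, with Image reduced to the fields the example needs and the container and enum choices being illustrative only:

#include <cassert>
#include <cstdint>
#include <optional>
#include <unordered_map>

enum class ImageType { Texture2D, Buffer };
enum class AtomicSize { U32, U64 };

struct Image {
    ImageType type;
    std::optional<AtomicSize> size;
};

class ImageTracker {
public:
    Image& GetImage(std::uint64_t offset, ImageType type, std::optional<AtomicSize> size = {}) {
        if (Image* const image = TryUseExistingImage(offset, type, size)) {
            return *image;
        }
        return used_images.emplace(offset, Image{type, size}).first->second;
    }

private:
    Image* TryUseExistingImage(std::uint64_t offset, ImageType type,
                               std::optional<AtomicSize> size) {
        const auto it = used_images.find(offset);
        if (it == used_images.end()) {
            return nullptr;
        }
        Image& image = it->second;
        assert(image.type == type);
        if (size) {
            if (image.size) {
                assert(*image.size == *size); // a size that is already known must not change
            } else {
                image.size = size; // the first user that knows the size records it
            }
        }
        return &image;
    }

    std::unordered_map<std::uint64_t, Image> used_images;
};

int main() {
    ImageTracker tracker;
    Image& first = tracker.GetImage(8, ImageType::Texture2D);
    Image& again = tracker.GetImage(8, ImageType::Texture2D, AtomicSize::U32);
    assert(&first == &again);              // deduplicated by offset
    assert(again.size == AtomicSize::U32); // size learned on the second use
}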
diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp index ed108bea8..7923d4d69 100644 --- a/src/video_core/shader/decode/memory.cpp +++ b/src/video_core/shader/decode/memory.cpp | |||
| @@ -35,7 +35,7 @@ u32 GetUniformTypeElementsCount(Tegra::Shader::UniformType uniform_type) { | |||
| 35 | return 1; | 35 | return 1; |
| 36 | } | 36 | } |
| 37 | } | 37 | } |
| 38 | } // namespace | 38 | } // Anonymous namespace |
| 39 | 39 | ||
| 40 | u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { | 40 | u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { |
| 41 | const Instruction instr = {program_code[pc]}; | 41 | const Instruction instr = {program_code[pc]}; |
| @@ -106,16 +106,17 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { | |||
| 106 | } | 106 | } |
| 107 | break; | 107 | break; |
| 108 | } | 108 | } |
| 109 | case OpCode::Id::LD_L: { | 109 | case OpCode::Id::LD_L: |
| 110 | LOG_DEBUG(HW_GPU, "LD_L cache management mode: {}", | 110 | LOG_DEBUG(HW_GPU, "LD_L cache management mode: {}", static_cast<u64>(instr.ld_l.unknown)); |
| 111 | static_cast<u64>(instr.ld_l.unknown.Value())); | 111 | [[fallthrough]]; |
| 112 | 112 | case OpCode::Id::LD_S: { | |
| 113 | const auto GetLmem = [&](s32 offset) { | 113 | const auto GetMemory = [&](s32 offset) { |
| 114 | ASSERT(offset % 4 == 0); | 114 | ASSERT(offset % 4 == 0); |
| 115 | const Node immediate_offset = Immediate(static_cast<s32>(instr.smem_imm) + offset); | 115 | const Node immediate_offset = Immediate(static_cast<s32>(instr.smem_imm) + offset); |
| 116 | const Node address = Operation(OperationCode::IAdd, NO_PRECISE, GetRegister(instr.gpr8), | 116 | const Node address = Operation(OperationCode::IAdd, NO_PRECISE, GetRegister(instr.gpr8), |
| 117 | immediate_offset); | 117 | immediate_offset); |
| 118 | return GetLocalMemory(address); | 118 | return opcode->get().GetId() == OpCode::Id::LD_S ? GetSharedMemory(address) |
| 119 | : GetLocalMemory(address); | ||
| 119 | }; | 120 | }; |
| 120 | 121 | ||
| 121 | switch (instr.ldst_sl.type.Value()) { | 122 | switch (instr.ldst_sl.type.Value()) { |
| @@ -135,14 +136,16 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { | |||
| 135 | return 0; | 136 | return 0; |
| 136 | } | 137 | } |
| 137 | }(); | 138 | }(); |
| 138 | for (u32 i = 0; i < count; ++i) | 139 | for (u32 i = 0; i < count; ++i) { |
| 139 | SetTemporary(bb, i, GetLmem(i * 4)); | 140 | SetTemporary(bb, i, GetMemory(i * 4)); |
| 140 | for (u32 i = 0; i < count; ++i) | 141 | } |
| 142 | for (u32 i = 0; i < count; ++i) { | ||
| 141 | SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i)); | 143 | SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i)); |
| 144 | } | ||
| 142 | break; | 145 | break; |
| 143 | } | 146 | } |
| 144 | default: | 147 | default: |
| 145 | UNIMPLEMENTED_MSG("LD_L Unhandled type: {}", | 148 | UNIMPLEMENTED_MSG("{} Unhandled type: {}", opcode->get().GetName(), |
| 146 | static_cast<u32>(instr.ldst_sl.type.Value())); | 149 | static_cast<u32>(instr.ldst_sl.type.Value())); |
| 147 | } | 150 | } |
| 148 | break; | 151 | break; |
| @@ -209,27 +212,34 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { | |||
| 209 | 212 | ||
| 210 | break; | 213 | break; |
| 211 | } | 214 | } |
| 212 | case OpCode::Id::ST_L: { | 215 | case OpCode::Id::ST_L: |
| 213 | LOG_DEBUG(HW_GPU, "ST_L cache management mode: {}", | 216 | LOG_DEBUG(HW_GPU, "ST_L cache management mode: {}", |
| 214 | static_cast<u64>(instr.st_l.cache_management.Value())); | 217 | static_cast<u64>(instr.st_l.cache_management.Value())); |
| 215 | 218 | [[fallthrough]]; | |
| 216 | const auto GetLmemAddr = [&](s32 offset) { | 219 | case OpCode::Id::ST_S: { |
| 220 | const auto GetAddress = [&](s32 offset) { | ||
| 217 | ASSERT(offset % 4 == 0); | 221 | ASSERT(offset % 4 == 0); |
| 218 | const Node immediate = Immediate(static_cast<s32>(instr.smem_imm) + offset); | 222 | const Node immediate = Immediate(static_cast<s32>(instr.smem_imm) + offset); |
| 219 | return Operation(OperationCode::IAdd, NO_PRECISE, GetRegister(instr.gpr8), immediate); | 223 | return Operation(OperationCode::IAdd, NO_PRECISE, GetRegister(instr.gpr8), immediate); |
| 220 | }; | 224 | }; |
| 221 | 225 | ||
| 226 | const auto set_memory = opcode->get().GetId() == OpCode::Id::ST_L | ||
| 227 | ? &ShaderIR::SetLocalMemory | ||
| 228 | : &ShaderIR::SetSharedMemory; | ||
| 229 | |||
| 222 | switch (instr.ldst_sl.type.Value()) { | 230 | switch (instr.ldst_sl.type.Value()) { |
| 223 | case Tegra::Shader::StoreType::Bits128: | 231 | case Tegra::Shader::StoreType::Bits128: |
| 224 | SetLocalMemory(bb, GetLmemAddr(12), GetRegister(instr.gpr0.Value() + 3)); | 232 | (this->*set_memory)(bb, GetAddress(12), GetRegister(instr.gpr0.Value() + 3)); |
| 225 | SetLocalMemory(bb, GetLmemAddr(8), GetRegister(instr.gpr0.Value() + 2)); | 233 | (this->*set_memory)(bb, GetAddress(8), GetRegister(instr.gpr0.Value() + 2)); |
| 234 | [[fallthrough]]; | ||
| 226 | case Tegra::Shader::StoreType::Bits64: | 235 | case Tegra::Shader::StoreType::Bits64: |
| 227 | SetLocalMemory(bb, GetLmemAddr(4), GetRegister(instr.gpr0.Value() + 1)); | 236 | (this->*set_memory)(bb, GetAddress(4), GetRegister(instr.gpr0.Value() + 1)); |
| 237 | [[fallthrough]]; | ||
| 228 | case Tegra::Shader::StoreType::Bits32: | 238 | case Tegra::Shader::StoreType::Bits32: |
| 229 | SetLocalMemory(bb, GetLmemAddr(0), GetRegister(instr.gpr0)); | 239 | (this->*set_memory)(bb, GetAddress(0), GetRegister(instr.gpr0)); |
| 230 | break; | 240 | break; |
| 231 | default: | 241 | default: |
| 232 | UNIMPLEMENTED_MSG("ST_L Unhandled type: {}", | 242 | UNIMPLEMENTED_MSG("{} unhandled type: {}", opcode->get().GetName(), |
| 233 | static_cast<u32>(instr.ldst_sl.type.Value())); | 243 | static_cast<u32>(instr.ldst_sl.type.Value())); |
| 234 | } | 244 | } |
| 235 | break; | 245 | break; |
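The ST_L/ST_S merge above selects the destination once through a pointer-to-member (SetLocalMemory vs. SetSharedMemory) and then lets the store-width switch fall through, so a 128-bit store also emits the 64-bit and 32-bit parts at descending offsets. A reduced sketch of both patterns, collecting offsets into vectors instead of emitting IR:

#include <cassert>
#include <vector>

class Emitter {
public:
    void Store(bool to_shared, int width_bits) {
        // Pick the destination once, the way ST_L/ST_S pick SetLocalMemory/SetSharedMemory.
        const auto set_memory = to_shared ? &Emitter::SetSharedMemory : &Emitter::SetLocalMemory;
        switch (width_bits) {
        case 128:
            (this->*set_memory)(12);
            (this->*set_memory)(8);
            [[fallthrough]];
        case 64:
            (this->*set_memory)(4);
            [[fallthrough]];
        case 32:
            (this->*set_memory)(0);
            break;
        }
    }

    std::vector<int> local;
    std::vector<int> shared;

private:
    void SetLocalMemory(int offset) { local.push_back(offset); }
    void SetSharedMemory(int offset) { shared.push_back(offset); }
};

int main() {
    Emitter emitter;
    emitter.Store(true, 64); // a 64-bit shared store touches offsets 4 and 0
    assert((emitter.shared == std::vector<int>{4, 0}));
    assert(emitter.local.empty());
}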
diff --git a/src/video_core/shader/decode/shift.cpp b/src/video_core/shader/decode/shift.cpp index 2ac16eeb0..f6ee68a54 100644 --- a/src/video_core/shader/decode/shift.cpp +++ b/src/video_core/shader/decode/shift.cpp | |||
| @@ -17,8 +17,8 @@ u32 ShaderIR::DecodeShift(NodeBlock& bb, u32 pc) { | |||
| 17 | const Instruction instr = {program_code[pc]}; | 17 | const Instruction instr = {program_code[pc]}; |
| 18 | const auto opcode = OpCode::Decode(instr); | 18 | const auto opcode = OpCode::Decode(instr); |
| 19 | 19 | ||
| 20 | const Node op_a = GetRegister(instr.gpr8); | 20 | Node op_a = GetRegister(instr.gpr8); |
| 21 | const Node op_b = [&]() { | 21 | Node op_b = [&]() { |
| 22 | if (instr.is_b_imm) { | 22 | if (instr.is_b_imm) { |
| 23 | return Immediate(instr.alu.GetSignedImm20_20()); | 23 | return Immediate(instr.alu.GetSignedImm20_20()); |
| 24 | } else if (instr.is_b_gpr) { | 24 | } else if (instr.is_b_gpr) { |
| @@ -32,16 +32,23 @@ u32 ShaderIR::DecodeShift(NodeBlock& bb, u32 pc) { | |||
| 32 | case OpCode::Id::SHR_C: | 32 | case OpCode::Id::SHR_C: |
| 33 | case OpCode::Id::SHR_R: | 33 | case OpCode::Id::SHR_R: |
| 34 | case OpCode::Id::SHR_IMM: { | 34 | case OpCode::Id::SHR_IMM: { |
| 35 | const Node value = SignedOperation(OperationCode::IArithmeticShiftRight, | 35 | if (instr.shr.wrap) { |
| 36 | instr.shift.is_signed, PRECISE, op_a, op_b); | 36 | op_b = Operation(OperationCode::UBitwiseAnd, std::move(op_b), Immediate(0x1f)); |
| 37 | } else { | ||
| 38 | op_b = Operation(OperationCode::IMax, std::move(op_b), Immediate(0)); | ||
| 39 | op_b = Operation(OperationCode::IMin, std::move(op_b), Immediate(31)); | ||
| 40 | } | ||
| 41 | |||
| 42 | Node value = SignedOperation(OperationCode::IArithmeticShiftRight, instr.shift.is_signed, | ||
| 43 | std::move(op_a), std::move(op_b)); | ||
| 37 | SetInternalFlagsFromInteger(bb, value, instr.generates_cc); | 44 | SetInternalFlagsFromInteger(bb, value, instr.generates_cc); |
| 38 | SetRegister(bb, instr.gpr0, value); | 45 | SetRegister(bb, instr.gpr0, std::move(value)); |
| 39 | break; | 46 | break; |
| 40 | } | 47 | } |
| 41 | case OpCode::Id::SHL_C: | 48 | case OpCode::Id::SHL_C: |
| 42 | case OpCode::Id::SHL_R: | 49 | case OpCode::Id::SHL_R: |
| 43 | case OpCode::Id::SHL_IMM: { | 50 | case OpCode::Id::SHL_IMM: { |
| 44 | const Node value = Operation(OperationCode::ILogicalShiftLeft, PRECISE, op_a, op_b); | 51 | const Node value = Operation(OperationCode::ILogicalShiftLeft, op_a, op_b); |
| 45 | SetInternalFlagsFromInteger(bb, value, instr.generates_cc); | 52 | SetInternalFlagsFromInteger(bb, value, instr.generates_cc); |
| 46 | SetRegister(bb, instr.gpr0, value); | 53 | SetRegister(bb, instr.gpr0, value); |
| 47 | break; | 54 | break; |
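The SHR change above bounds the shift amount before emitting the arithmetic shift: with the wrap flag it is masked to its low five bits, otherwise it is clamped to [0, 31]. A small sketch of that behaviour on plain integers (a positive value is used so signed-shift details stay out of the way):

#include <algorithm>
#include <cassert>

int ShiftRight(int value, int amount, bool wrap) {
    // Wrap keeps only the low five bits; clamp pins the amount into [0, 31].
    const int shift = wrap ? (amount & 0x1f) : std::clamp(amount, 0, 31);
    return value >> shift;
}

int main() {
    assert(ShiftRight(256, 33, true) == 128);  // wrap: 33 & 0x1f == 1
    assert(ShiftRight(256, 33, false) == 0);   // clamp: shift by 31
    assert(ShiftRight(256, -5, false) == 256); // clamp: negative amount becomes 0
}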
diff --git a/src/video_core/shader/decode/warp.cpp b/src/video_core/shader/decode/warp.cpp index 04ca74f46..a8e481b3c 100644 --- a/src/video_core/shader/decode/warp.cpp +++ b/src/video_core/shader/decode/warp.cpp | |||
| @@ -13,6 +13,7 @@ namespace VideoCommon::Shader { | |||
| 13 | using Tegra::Shader::Instruction; | 13 | using Tegra::Shader::Instruction; |
| 14 | using Tegra::Shader::OpCode; | 14 | using Tegra::Shader::OpCode; |
| 15 | using Tegra::Shader::Pred; | 15 | using Tegra::Shader::Pred; |
| 16 | using Tegra::Shader::ShuffleOperation; | ||
| 16 | using Tegra::Shader::VoteOperation; | 17 | using Tegra::Shader::VoteOperation; |
| 17 | 18 | ||
| 18 | namespace { | 19 | namespace { |
| @@ -44,6 +45,52 @@ u32 ShaderIR::DecodeWarp(NodeBlock& bb, u32 pc) { | |||
| 44 | SetPredicate(bb, instr.vote.dest_pred, vote); | 45 | SetPredicate(bb, instr.vote.dest_pred, vote); |
| 45 | break; | 46 | break; |
| 46 | } | 47 | } |
| 48 | case OpCode::Id::SHFL: { | ||
| 49 | Node mask = instr.shfl.is_mask_imm ? Immediate(static_cast<u32>(instr.shfl.mask_imm)) | ||
| 50 | : GetRegister(instr.gpr39); | ||
| 51 | Node width = [&] { | ||
| 52 | // Convert the obscure SHFL mask back into GL_NV_shader_thread_shuffle's width. This was | ||
| 53 | // derived by reversing Nvidia's math. It won't work in all cases because SHFL takes | ||
| 54 | // parameters that don't map cleanly onto GLSL's interface, but it should cover the cases | ||
| 55 | // emitted by Nvidia's compiler. | ||
| 56 | if (instr.shfl.operation == ShuffleOperation::Up) { | ||
| 57 | return Operation( | ||
| 58 | OperationCode::ILogicalShiftRight, | ||
| 59 | Operation(OperationCode::IAdd, std::move(mask), Immediate(-0x2000)), | ||
| 60 | Immediate(8)); | ||
| 61 | } else { | ||
| 62 | return Operation(OperationCode::ILogicalShiftRight, | ||
| 63 | Operation(OperationCode::IAdd, Immediate(0x201F), | ||
| 64 | Operation(OperationCode::INegate, std::move(mask))), | ||
| 65 | Immediate(8)); | ||
| 66 | } | ||
| 67 | }(); | ||
| 68 | |||
| 69 | const auto [operation, in_range] = [instr]() -> std::pair<OperationCode, OperationCode> { | ||
| 70 | switch (instr.shfl.operation) { | ||
| 71 | case ShuffleOperation::Idx: | ||
| 72 | return {OperationCode::ShuffleIndexed, OperationCode::InRangeShuffleIndexed}; | ||
| 73 | case ShuffleOperation::Up: | ||
| 74 | return {OperationCode::ShuffleUp, OperationCode::InRangeShuffleUp}; | ||
| 75 | case ShuffleOperation::Down: | ||
| 76 | return {OperationCode::ShuffleDown, OperationCode::InRangeShuffleDown}; | ||
| 77 | case ShuffleOperation::Bfly: | ||
| 78 | return {OperationCode::ShuffleButterfly, OperationCode::InRangeShuffleButterfly}; | ||
| 79 | } | ||
| 80 | UNREACHABLE_MSG("Invalid SHFL operation: {}", | ||
| 81 | static_cast<u64>(instr.shfl.operation.Value())); | ||
| 82 | return {}; | ||
| 83 | }(); | ||
| 84 | |||
| 85 | // Setting the predicate before the register is intentional to avoid overwriting. | ||
| 86 | Node index = instr.shfl.is_index_imm ? Immediate(static_cast<u32>(instr.shfl.index_imm)) | ||
| 87 | : GetRegister(instr.gpr20); | ||
| 88 | SetPredicate(bb, instr.shfl.pred48, Operation(in_range, index, width)); | ||
| 89 | SetRegister( | ||
| 90 | bb, instr.gpr0, | ||
| 91 | Operation(operation, GetRegister(instr.gpr8), std::move(index), std::move(width))); | ||
| 92 | break; | ||
| 93 | } | ||
| 47 | default: | 94 | default: |
| 48 | UNIMPLEMENTED_MSG("Unhandled warp instruction: {}", opcode->get().GetName()); | 95 | UNIMPLEMENTED_MSG("Unhandled warp instruction: {}", opcode->get().GetName()); |
| 49 | break; | 96 | break; |
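The width recovery in DecodeWarp is easier to read outside the IR. The sketch below mirrors the decoder's two formulas on plain integers; the sample mask value is an assumption based on the comment about Nvidia's compiler, not a value taken from the emulator:

    #include <cstdint>
    #include <cstdio>

    // Mirrors the width computation above: SHFL's mask operand is mapped back to a
    // GL_NV_shader_thread_shuffle-style width. Only masks produced by Nvidia's
    // compiler are expected to round-trip correctly.
    std::uint32_t WidthFromMask(std::uint32_t mask, bool is_up) {
        if (is_up) {
            return (mask + static_cast<std::uint32_t>(-0x2000)) >> 8;
        }
        return (0x201F - mask) >> 8;
    }

    int main() {
        // Assumed example: a full-warp butterfly/down/indexed shuffle encoding mask 0x1F
        // recovers a width of 32 lanes.
        std::printf("%u\n", WidthFromMask(0x1F, false)); // prints "32"
    }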
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h index 5db9313c4..abf2cb1ab 100644 --- a/src/video_core/shader/node.h +++ b/src/video_core/shader/node.h | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <array> | 7 | #include <array> |
| 8 | #include <cstddef> | 8 | #include <cstddef> |
| 9 | #include <memory> | 9 | #include <memory> |
| 10 | #include <optional> | ||
| 10 | #include <string> | 11 | #include <string> |
| 11 | #include <tuple> | 12 | #include <tuple> |
| 12 | #include <utility> | 13 | #include <utility> |
| @@ -148,7 +149,14 @@ enum class OperationCode { | |||
| 148 | TextureQueryLod, /// (MetaTexture, float[N] coords) -> float4 | 149 | TextureQueryLod, /// (MetaTexture, float[N] coords) -> float4 |
| 149 | TexelFetch, /// (MetaTexture, int[N], int) -> float4 | 150 | TexelFetch, /// (MetaTexture, int[N], int) -> float4 |
| 150 | 151 | ||
| 151 | ImageStore, /// (MetaImage, float[N] coords) -> void | 152 | ImageStore, /// (MetaImage, int[N] values) -> void |
| 153 | AtomicImageAdd, /// (MetaImage, int[N] coords) -> void | ||
| 154 | AtomicImageMin, /// (MetaImage, int[N] coords) -> void | ||
| 155 | AtomicImageMax, /// (MetaImage, int[N] coords) -> void | ||
| 156 | AtomicImageAnd, /// (MetaImage, int[N] coords) -> void | ||
| 157 | AtomicImageOr, /// (MetaImage, int[N] coords) -> void | ||
| 158 | AtomicImageXor, /// (MetaImage, int[N] coords) -> void | ||
| 159 | AtomicImageExchange, /// (MetaImage, int[N] coords) -> void | ||
| 152 | 160 | ||
| 153 | Branch, /// (uint branch_target) -> void | 161 | Branch, /// (uint branch_target) -> void |
| 154 | BranchIndirect, /// (uint branch_target) -> void | 162 | BranchIndirect, /// (uint branch_target) -> void |
| @@ -173,6 +181,16 @@ enum class OperationCode { | |||
| 173 | VoteAny, /// (bool) -> bool | 181 | VoteAny, /// (bool) -> bool |
| 174 | VoteEqual, /// (bool) -> bool | 182 | VoteEqual, /// (bool) -> bool |
| 175 | 183 | ||
| 184 | ShuffleIndexed, /// (uint value, uint index, uint width) -> uint | ||
| 185 | ShuffleUp, /// (uint value, uint index, uint width) -> uint | ||
| 186 | ShuffleDown, /// (uint value, uint index, uint width) -> uint | ||
| 187 | ShuffleButterfly, /// (uint value, uint index, uint width) -> uint | ||
| 188 | |||
| 189 | InRangeShuffleIndexed, /// (uint index, uint width) -> bool | ||
| 190 | InRangeShuffleUp, /// (uint index, uint width) -> bool | ||
| 191 | InRangeShuffleDown, /// (uint index, uint width) -> bool | ||
| 192 | InRangeShuffleButterfly, /// (uint index, uint width) -> bool | ||
| 193 | |||
| 176 | Amount, | 194 | Amount, |
| 177 | }; | 195 | }; |
| 178 | 196 | ||
| @@ -198,12 +216,13 @@ class PredicateNode; | |||
| 198 | class AbufNode; | 216 | class AbufNode; |
| 199 | class CbufNode; | 217 | class CbufNode; |
| 200 | class LmemNode; | 218 | class LmemNode; |
| 219 | class SmemNode; | ||
| 201 | class GmemNode; | 220 | class GmemNode; |
| 202 | class CommentNode; | 221 | class CommentNode; |
| 203 | 222 | ||
| 204 | using NodeData = | 223 | using NodeData = |
| 205 | std::variant<OperationNode, ConditionalNode, GprNode, ImmediateNode, InternalFlagNode, | 224 | std::variant<OperationNode, ConditionalNode, GprNode, ImmediateNode, InternalFlagNode, |
| 206 | PredicateNode, AbufNode, CbufNode, LmemNode, GmemNode, CommentNode>; | 225 | PredicateNode, AbufNode, CbufNode, LmemNode, SmemNode, GmemNode, CommentNode>; |
| 207 | using Node = std::shared_ptr<NodeData>; | 226 | using Node = std::shared_ptr<NodeData>; |
| 208 | using Node4 = std::array<Node, 4>; | 227 | using Node4 = std::array<Node, 4>; |
| 209 | using NodeBlock = std::vector<Node>; | 228 | using NodeBlock = std::vector<Node>; |
| @@ -273,46 +292,85 @@ private: | |||
| 273 | bool is_bindless{}; ///< Whether this sampler belongs to a bindless texture or not. | 292 | bool is_bindless{}; ///< Whether this sampler belongs to a bindless texture or not. |
| 274 | }; | 293 | }; |
| 275 | 294 | ||
| 276 | class Image { | 295 | class Image final { |
| 277 | public: | 296 | public: |
| 278 | explicit Image(std::size_t offset, std::size_t index, Tegra::Shader::ImageType type) | 297 | constexpr explicit Image(std::size_t offset, std::size_t index, Tegra::Shader::ImageType type, |
| 279 | : offset{offset}, index{index}, type{type}, is_bindless{false} {} | 298 | std::optional<Tegra::Shader::ImageAtomicSize> size) |
| 299 | : offset{offset}, index{index}, type{type}, is_bindless{false}, size{size} {} | ||
| 280 | 300 | ||
| 281 | explicit Image(u32 cbuf_index, u32 cbuf_offset, std::size_t index, | 301 | constexpr explicit Image(u32 cbuf_index, u32 cbuf_offset, std::size_t index, |
| 282 | Tegra::Shader::ImageType type) | 302 | Tegra::Shader::ImageType type, |
| 303 | std::optional<Tegra::Shader::ImageAtomicSize> size) | ||
| 283 | : offset{(static_cast<u64>(cbuf_index) << 32) | cbuf_offset}, index{index}, type{type}, | 304 | : offset{(static_cast<u64>(cbuf_index) << 32) | cbuf_offset}, index{index}, type{type}, |
| 284 | is_bindless{true} {} | 305 | is_bindless{true}, size{size} {} |
| 285 | 306 | ||
| 286 | explicit Image(std::size_t offset, std::size_t index, Tegra::Shader::ImageType type, | 307 | constexpr explicit Image(std::size_t offset, std::size_t index, Tegra::Shader::ImageType type, |
| 287 | bool is_bindless) | 308 | bool is_bindless, bool is_written, bool is_read, |
| 288 | : offset{offset}, index{index}, type{type}, is_bindless{is_bindless} {} | 309 | std::optional<Tegra::Shader::ImageAtomicSize> size) |
| 310 | : offset{offset}, index{index}, type{type}, is_bindless{is_bindless}, | ||
| 311 | is_written{is_written}, is_read{is_read}, size{size} {} | ||
| 289 | 312 | ||
| 290 | std::size_t GetOffset() const { | 313 | void MarkWrite() { |
| 314 | is_written = true; | ||
| 315 | } | ||
| 316 | |||
| 317 | void MarkRead() { | ||
| 318 | is_read = true; | ||
| 319 | } | ||
| 320 | |||
| 321 | void SetSize(Tegra::Shader::ImageAtomicSize size_) { | ||
| 322 | size = size_; | ||
| 323 | } | ||
| 324 | |||
| 325 | constexpr std::size_t GetOffset() const { | ||
| 291 | return offset; | 326 | return offset; |
| 292 | } | 327 | } |
| 293 | 328 | ||
| 294 | std::size_t GetIndex() const { | 329 | constexpr std::size_t GetIndex() const { |
| 295 | return index; | 330 | return index; |
| 296 | } | 331 | } |
| 297 | 332 | ||
| 298 | Tegra::Shader::ImageType GetType() const { | 333 | constexpr Tegra::Shader::ImageType GetType() const { |
| 299 | return type; | 334 | return type; |
| 300 | } | 335 | } |
| 301 | 336 | ||
| 302 | bool IsBindless() const { | 337 | constexpr bool IsBindless() const { |
| 303 | return is_bindless; | 338 | return is_bindless; |
| 304 | } | 339 | } |
| 305 | 340 | ||
| 306 | bool operator<(const Image& rhs) const { | 341 | constexpr bool IsWritten() const { |
| 307 | return std::tie(offset, index, type, is_bindless) < | 342 | return is_written; |
| 308 | std::tie(rhs.offset, rhs.index, rhs.type, rhs.is_bindless); | 343 | } |
| 344 | |||
| 345 | constexpr bool IsRead() const { | ||
| 346 | return is_read; | ||
| 347 | } | ||
| 348 | |||
| 349 | constexpr std::pair<u32, u32> GetBindlessCBuf() const { | ||
| 350 | return {static_cast<u32>(offset >> 32), static_cast<u32>(offset)}; | ||
| 351 | } | ||
| 352 | |||
| 353 | constexpr bool IsSizeKnown() const { | ||
| 354 | return size.has_value(); | ||
| 355 | } | ||
| 356 | |||
| 357 | constexpr Tegra::Shader::ImageAtomicSize GetSize() const { | ||
| 358 | return size.value(); | ||
| 359 | } | ||
| 360 | |||
| 361 | constexpr bool operator<(const Image& rhs) const { | ||
| 362 | return std::tie(offset, index, type, size, is_bindless) < | ||
| 363 | std::tie(rhs.offset, rhs.index, rhs.type, rhs.size, rhs.is_bindless); | ||
| 309 | } | 364 | } |
| 310 | 365 | ||
| 311 | private: | 366 | private: |
| 312 | std::size_t offset{}; | 367 | u64 offset{}; |
| 313 | std::size_t index{}; | 368 | std::size_t index{}; |
| 314 | Tegra::Shader::ImageType type{}; | 369 | Tegra::Shader::ImageType type{}; |
| 315 | bool is_bindless{}; | 370 | bool is_bindless{}; |
| 371 | bool is_written{}; | ||
| 372 | bool is_read{}; | ||
| 373 | std::optional<Tegra::Shader::ImageAtomicSize> size{}; | ||
| 316 | }; | 374 | }; |
| 317 | 375 | ||
| 318 | struct GlobalMemoryBase { | 376 | struct GlobalMemoryBase { |
| @@ -536,6 +594,19 @@ private: | |||
| 536 | Node address; | 594 | Node address; |
| 537 | }; | 595 | }; |
| 538 | 596 | ||
| 597 | /// Shared memory node | ||
| 598 | class SmemNode final { | ||
| 599 | public: | ||
| 600 | explicit SmemNode(Node address) : address{std::move(address)} {} | ||
| 601 | |||
| 602 | const Node& GetAddress() const { | ||
| 603 | return address; | ||
| 604 | } | ||
| 605 | |||
| 606 | private: | ||
| 607 | Node address; | ||
| 608 | }; | ||
| 609 | |||
| 539 | /// Global memory node | 610 | /// Global memory node |
| 540 | class GmemNode final { | 611 | class GmemNode final { |
| 541 | public: | 612 | public: |
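The reworked Image entry records whether an image is written, read, and which atomic size it was used with, so a backend can later pick qualifiers or formats per image. A simplified stand-in for that bookkeeping, with the Tegra enum replaced by a local placeholder; the qualifier mapping is illustrative, not the decompiler's actual logic:

    #include <cstdio>
    #include <optional>

    // Placeholder for Tegra::Shader::ImageAtomicSize.
    enum class AtomicSize { U32, S32 };

    struct ImageUsage {
        bool is_written = false;
        bool is_read = false;
        std::optional<AtomicSize> size;
    };

    // Example policy a backend could derive from the tracked flags.
    const char* QualifierFor(const ImageUsage& image) {
        if (image.is_written && image.is_read) {
            return ""; // read-write access: no restrictive qualifier
        }
        return image.is_written ? "writeonly" : "readonly";
    }

    int main() {
        ImageUsage image;
        image.is_written = true;      // what MarkWrite() records
        image.size = AtomicSize::U32; // what SetSize() records
        std::printf("qualifier: %s\n", QualifierFor(image));
    }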
diff --git a/src/video_core/shader/shader_ir.cpp b/src/video_core/shader/shader_ir.cpp index 1e5c7f660..bbbab0bca 100644 --- a/src/video_core/shader/shader_ir.cpp +++ b/src/video_core/shader/shader_ir.cpp | |||
| @@ -137,6 +137,10 @@ Node ShaderIR::GetLocalMemory(Node address) { | |||
| 137 | return MakeNode<LmemNode>(std::move(address)); | 137 | return MakeNode<LmemNode>(std::move(address)); |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | Node ShaderIR::GetSharedMemory(Node address) { | ||
| 141 | return MakeNode<SmemNode>(std::move(address)); | ||
| 142 | } | ||
| 143 | |||
| 140 | Node ShaderIR::GetTemporary(u32 id) { | 144 | Node ShaderIR::GetTemporary(u32 id) { |
| 141 | return GetRegister(Register::ZeroIndex + 1 + id); | 145 | return GetRegister(Register::ZeroIndex + 1 + id); |
| 142 | } | 146 | } |
| @@ -378,6 +382,11 @@ void ShaderIR::SetLocalMemory(NodeBlock& bb, Node address, Node value) { | |||
| 378 | Operation(OperationCode::Assign, GetLocalMemory(std::move(address)), std::move(value))); | 382 | Operation(OperationCode::Assign, GetLocalMemory(std::move(address)), std::move(value))); |
| 379 | } | 383 | } |
| 380 | 384 | ||
| 385 | void ShaderIR::SetSharedMemory(NodeBlock& bb, Node address, Node value) { | ||
| 386 | bb.push_back( | ||
| 387 | Operation(OperationCode::Assign, GetSharedMemory(std::move(address)), std::move(value))); | ||
| 388 | } | ||
| 389 | |||
| 381 | void ShaderIR::SetTemporary(NodeBlock& bb, u32 id, Node value) { | 390 | void ShaderIR::SetTemporary(NodeBlock& bb, u32 id, Node value) { |
| 382 | SetRegister(bb, Register::ZeroIndex + 1 + id, std::move(value)); | 391 | SetRegister(bb, Register::ZeroIndex + 1 + id, std::move(value)); |
| 383 | } | 392 | } |
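GetSharedMemory and SetSharedMemory mirror the existing local-memory helpers: a store is emitted as an Assign operation whose destination is the memory node built from the address. A self-contained toy version of that node-building pattern, using heavily simplified stand-ins for Node and NodeBlock:

    #include <cstdio>
    #include <memory>
    #include <string>
    #include <variant>
    #include <vector>

    struct NodeData;
    using Node = std::shared_ptr<NodeData>;

    struct Smem { std::string address; }; // stand-in for SmemNode
    struct Imm { int value; };            // stand-in for an immediate operand
    struct Assign { Node dest, src; };    // stand-in for Operation(OperationCode::Assign, ...)

    struct NodeData {
        std::variant<Smem, Imm, Assign> data;
    };

    Node MakeSmem(std::string address) {
        return std::make_shared<NodeData>(NodeData{Smem{std::move(address)}});
    }

    Node MakeImm(int value) {
        return std::make_shared<NodeData>(NodeData{Imm{value}});
    }

    // Same shape as SetSharedMemory: push Assign(Smem(address), value) into the block.
    void SetSharedMemory(std::vector<Node>& bb, std::string address, Node value) {
        bb.push_back(std::make_shared<NodeData>(
            NodeData{Assign{MakeSmem(std::move(address)), std::move(value)}}));
    }

    int main() {
        std::vector<Node> bb;
        SetSharedMemory(bb, "gpr8 + offset", MakeImm(42));
        std::printf("nodes in block: %zu\n", bb.size()); // prints "nodes in block: 1"
    }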
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h index bcc9b79b6..6aed9bb84 100644 --- a/src/video_core/shader/shader_ir.h +++ b/src/video_core/shader/shader_ir.h | |||
| @@ -95,7 +95,7 @@ public: | |||
| 95 | return used_samplers; | 95 | return used_samplers; |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | const std::set<Image>& GetImages() const { | 98 | const std::map<u64, Image>& GetImages() const { |
| 99 | return used_images; | 99 | return used_images; |
| 100 | } | 100 | } |
| 101 | 101 | ||
| @@ -208,6 +208,8 @@ private: | |||
| 208 | Node GetInternalFlag(InternalFlag flag, bool negated = false); | 208 | Node GetInternalFlag(InternalFlag flag, bool negated = false); |
| 209 | /// Generates a node representing a local memory address | 209 | /// Generates a node representing a local memory address |
| 210 | Node GetLocalMemory(Node address); | 210 | Node GetLocalMemory(Node address); |
| 211 | /// Generates a node representing a shared memory address | ||
| 212 | Node GetSharedMemory(Node address); | ||
| 211 | /// Generates a temporary, internally it uses a post-RZ register | 213 | /// Generates a temporary, internally it uses a post-RZ register |
| 212 | Node GetTemporary(u32 id); | 214 | Node GetTemporary(u32 id); |
| 213 | 215 | ||
| @@ -217,8 +219,10 @@ private: | |||
| 217 | void SetPredicate(NodeBlock& bb, u64 dest, Node src); | 219 | void SetPredicate(NodeBlock& bb, u64 dest, Node src); |
| 218 | /// Sets an internal flag. src value must be a bool-evaluated node | 220 | /// Sets an internal flag. src value must be a bool-evaluated node |
| 219 | void SetInternalFlag(NodeBlock& bb, InternalFlag flag, Node value); | 221 | void SetInternalFlag(NodeBlock& bb, InternalFlag flag, Node value); |
| 220 | /// Sets a local memory address. address and value must be a number-evaluated node | 222 | /// Stores a value at a local memory address. |
| 221 | void SetLocalMemory(NodeBlock& bb, Node address, Node value); | 223 | void SetLocalMemory(NodeBlock& bb, Node address, Node value); |
| 224 | /// Stores a value at a shared memory address. | ||
| 225 | void SetSharedMemory(NodeBlock& bb, Node address, Node value); | ||
| 222 | /// Sets a temporary. Internally it uses a post-RZ register | 226 | /// Sets a temporary. Internally it uses a post-RZ register |
| 223 | void SetTemporary(NodeBlock& bb, u32 id, Node value); | 227 | void SetTemporary(NodeBlock& bb, u32 id, Node value); |
| 224 | 228 | ||
| @@ -272,10 +276,16 @@ private: | |||
| 272 | bool is_shadow); | 276 | bool is_shadow); |
| 273 | 277 | ||
| 274 | /// Accesses an image. | 278 | /// Accesses an image. |
| 275 | const Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type); | 279 | Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type, |
| 280 | std::optional<Tegra::Shader::ImageAtomicSize> size = {}); | ||
| 276 | 281 | ||
| 277 | /// Access a bindless image sampler. | 282 | /// Access a bindless image sampler. |
| 278 | const Image& GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type); | 283 | Image& GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type, |
| 284 | std::optional<Tegra::Shader::ImageAtomicSize> size = {}); | ||
| 285 | |||
| 286 | /// Tries to access an existing image, updating its state as needed | ||
| 287 | Image* TryUseExistingImage(u64 offset, Tegra::Shader::ImageType type, | ||
| 288 | std::optional<Tegra::Shader::ImageAtomicSize> size); | ||
| 279 | 289 | ||
| 280 | /// Extracts a sequence of bits from a node | 290 | /// Extracts a sequence of bits from a node |
| 281 | Node BitfieldExtract(Node value, u32 offset, u32 bits); | 291 | Node BitfieldExtract(Node value, u32 offset, u32 bits); |
| @@ -356,7 +366,7 @@ private: | |||
| 356 | std::set<Tegra::Shader::Attribute::Index> used_output_attributes; | 366 | std::set<Tegra::Shader::Attribute::Index> used_output_attributes; |
| 357 | std::map<u32, ConstBuffer> used_cbufs; | 367 | std::map<u32, ConstBuffer> used_cbufs; |
| 358 | std::set<Sampler> used_samplers; | 368 | std::set<Sampler> used_samplers; |
| 359 | std::set<Image> used_images; | 369 | std::map<u64, Image> used_images; |
| 360 | std::array<bool, Tegra::Engines::Maxwell3D::Regs::NumClipDistances> used_clip_distances{}; | 370 | std::array<bool, Tegra::Engines::Maxwell3D::Regs::NumClipDistances> used_clip_distances{}; |
| 361 | std::map<GlobalMemoryBase, GlobalMemoryUsage> used_global_memory; | 371 | std::map<GlobalMemoryBase, GlobalMemoryUsage> used_global_memory; |
| 362 | bool uses_layer{}; | 372 | bool uses_layer{}; |
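Turning used_images from std::set<Image> into std::map<u64, Image> keyed by offset lets a later decode find an existing entry and mutate its flags in place, which a set of const elements cannot do. A minimal sketch of that lookup-or-insert pattern, with ImageInfo standing in for the real Image class:

    #include <cstdint>
    #include <cstdio>
    #include <map>

    struct ImageInfo {
        bool is_written = false;
        bool is_read = false;
    };

    std::map<std::uint64_t, ImageInfo> used_images;

    // Roughly what TryUseExistingImage enables: reuse the entry for this offset if
    // present, otherwise create it, then update its usage flags.
    ImageInfo& UseImage(std::uint64_t offset, bool writes) {
        ImageInfo& image = used_images[offset]; // inserts a default entry if missing
        if (writes) {
            image.is_written = true;
        } else {
            image.is_read = true;
        }
        return image;
    }

    int main() {
        UseImage(0x10, true);  // e.g. an image store
        UseImage(0x10, false); // e.g. a later load on the same binding
        const ImageInfo& image = used_images.at(0x10);
        std::printf("written=%d read=%d\n", image.is_written, image.is_read); // "written=1 read=1"
    }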
diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp index 4ceb219be..53d0142cb 100644 --- a/src/video_core/surface.cpp +++ b/src/video_core/surface.cpp | |||
| @@ -513,6 +513,26 @@ bool IsPixelFormatASTC(PixelFormat format) { | |||
| 513 | } | 513 | } |
| 514 | } | 514 | } |
| 515 | 515 | ||
| 516 | bool IsPixelFormatSRGB(PixelFormat format) { | ||
| 517 | switch (format) { | ||
| 518 | case PixelFormat::RGBA8_SRGB: | ||
| 519 | case PixelFormat::BGRA8_SRGB: | ||
| 520 | case PixelFormat::DXT1_SRGB: | ||
| 521 | case PixelFormat::DXT23_SRGB: | ||
| 522 | case PixelFormat::DXT45_SRGB: | ||
| 523 | case PixelFormat::BC7U_SRGB: | ||
| 524 | case PixelFormat::ASTC_2D_4X4_SRGB: | ||
| 525 | case PixelFormat::ASTC_2D_8X8_SRGB: | ||
| 526 | case PixelFormat::ASTC_2D_8X5_SRGB: | ||
| 527 | case PixelFormat::ASTC_2D_5X4_SRGB: | ||
| 528 | case PixelFormat::ASTC_2D_5X5_SRGB: | ||
| 529 | case PixelFormat::ASTC_2D_10X8_SRGB: | ||
| 530 | return true; | ||
| 531 | default: | ||
| 532 | return false; | ||
| 533 | } | ||
| 534 | } | ||
| 535 | |||
| 516 | std::pair<u32, u32> GetASTCBlockSize(PixelFormat format) { | 536 | std::pair<u32, u32> GetASTCBlockSize(PixelFormat format) { |
| 517 | return {GetDefaultBlockWidth(format), GetDefaultBlockHeight(format)}; | 537 | return {GetDefaultBlockWidth(format), GetDefaultBlockHeight(format)}; |
| 518 | } | 538 | } |
diff --git a/src/video_core/surface.h b/src/video_core/surface.h index 83f31c12c..19268b7cd 100644 --- a/src/video_core/surface.h +++ b/src/video_core/surface.h | |||
| @@ -547,6 +547,8 @@ SurfaceType GetFormatType(PixelFormat pixel_format); | |||
| 547 | 547 | ||
| 548 | bool IsPixelFormatASTC(PixelFormat format); | 548 | bool IsPixelFormatASTC(PixelFormat format); |
| 549 | 549 | ||
| 550 | bool IsPixelFormatSRGB(PixelFormat format); | ||
| 551 | |||
| 550 | std::pair<u32, u32> GetASTCBlockSize(PixelFormat format); | 552 | std::pair<u32, u32> GetASTCBlockSize(PixelFormat format); |
| 551 | 553 | ||
| 552 | /// Returns true if the specified PixelFormat is a BCn format, e.g. DXT or DXN | 554 | /// Returns true if the specified PixelFormat is a BCn format, e.g. DXT or DXN |
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h index bcce8d863..5e497e49f 100644 --- a/src/video_core/texture_cache/surface_base.h +++ b/src/video_core/texture_cache/surface_base.h | |||
| @@ -195,18 +195,18 @@ public: | |||
| 195 | 195 | ||
| 196 | virtual void DownloadTexture(std::vector<u8>& staging_buffer) = 0; | 196 | virtual void DownloadTexture(std::vector<u8>& staging_buffer) = 0; |
| 197 | 197 | ||
| 198 | void MarkAsModified(const bool is_modified_, const u64 tick) { | 198 | void MarkAsModified(bool is_modified_, u64 tick) { |
| 199 | is_modified = is_modified_ || is_target; | 199 | is_modified = is_modified_ || is_target; |
| 200 | modification_tick = tick; | 200 | modification_tick = tick; |
| 201 | } | 201 | } |
| 202 | 202 | ||
| 203 | void MarkAsRenderTarget(const bool is_target, const u32 index) { | 203 | void MarkAsRenderTarget(bool is_target_, u32 index_) { |
| 204 | this->is_target = is_target; | 204 | is_target = is_target_; |
| 205 | this->index = index; | 205 | index = index_; |
| 206 | } | 206 | } |
| 207 | 207 | ||
| 208 | void MarkAsPicked(const bool is_picked) { | 208 | void MarkAsPicked(bool is_picked_) { |
| 209 | this->is_picked = is_picked; | 209 | is_picked = is_picked_; |
| 210 | } | 210 | } |
| 211 | 211 | ||
| 212 | bool IsModified() const { | 212 | bool IsModified() const { |
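The surface_base.h change renames the setter parameters with a trailing underscore so they no longer shadow the members of the same name, removing the need for explicit this-> qualification. A tiny illustration of the pitfall being avoided, using a hypothetical struct rather than the cache's real type:

    #include <cstdio>

    struct Surface {
        bool is_target = false;

        // Shadowing: the parameter hides the member, so this-> is mandatory here;
        // dropping it would assign the parameter to itself.
        void MarkShadowed(bool is_target) {
            this->is_target = is_target;
        }

        // Renamed parameter: no ambiguity, no this-> needed.
        void MarkRenamed(bool is_target_) {
            is_target = is_target_;
        }
    };

    int main() {
        Surface surface;
        surface.MarkRenamed(true);
        std::printf("%d\n", surface.is_target); // prints "1"
    }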
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp index fd5472451..1e4d3fb79 100644 --- a/src/video_core/texture_cache/surface_params.cpp +++ b/src/video_core/texture_cache/surface_params.cpp | |||
| @@ -24,55 +24,62 @@ using VideoCore::Surface::SurfaceTarget; | |||
| 24 | using VideoCore::Surface::SurfaceTargetFromTextureType; | 24 | using VideoCore::Surface::SurfaceTargetFromTextureType; |
| 25 | using VideoCore::Surface::SurfaceType; | 25 | using VideoCore::Surface::SurfaceType; |
| 26 | 26 | ||
| 27 | SurfaceTarget TextureType2SurfaceTarget(Tegra::Shader::TextureType type, bool is_array) { | 27 | namespace { |
| 28 | |||
| 29 | SurfaceTarget TextureTypeToSurfaceTarget(Tegra::Shader::TextureType type, bool is_array) { | ||
| 28 | switch (type) { | 30 | switch (type) { |
| 29 | case Tegra::Shader::TextureType::Texture1D: { | 31 | case Tegra::Shader::TextureType::Texture1D: |
| 30 | if (is_array) | 32 | return is_array ? SurfaceTarget::Texture1DArray : SurfaceTarget::Texture1D; |
| 31 | return SurfaceTarget::Texture1DArray; | 33 | case Tegra::Shader::TextureType::Texture2D: |
| 32 | else | 34 | return is_array ? SurfaceTarget::Texture2DArray : SurfaceTarget::Texture2D; |
| 33 | return SurfaceTarget::Texture1D; | 35 | case Tegra::Shader::TextureType::Texture3D: |
| 34 | } | ||
| 35 | case Tegra::Shader::TextureType::Texture2D: { | ||
| 36 | if (is_array) | ||
| 37 | return SurfaceTarget::Texture2DArray; | ||
| 38 | else | ||
| 39 | return SurfaceTarget::Texture2D; | ||
| 40 | } | ||
| 41 | case Tegra::Shader::TextureType::Texture3D: { | ||
| 42 | ASSERT(!is_array); | 36 | ASSERT(!is_array); |
| 43 | return SurfaceTarget::Texture3D; | 37 | return SurfaceTarget::Texture3D; |
| 44 | } | 38 | case Tegra::Shader::TextureType::TextureCube: |
| 45 | case Tegra::Shader::TextureType::TextureCube: { | 39 | return is_array ? SurfaceTarget::TextureCubeArray : SurfaceTarget::TextureCubemap; |
| 46 | if (is_array) | 40 | default: |
| 47 | return SurfaceTarget::TextureCubeArray; | ||
| 48 | else | ||
| 49 | return SurfaceTarget::TextureCubemap; | ||
| 50 | } | ||
| 51 | default: { | ||
| 52 | UNREACHABLE(); | 41 | UNREACHABLE(); |
| 53 | return SurfaceTarget::Texture2D; | 42 | return SurfaceTarget::Texture2D; |
| 54 | } | 43 | } |
| 44 | } | ||
| 45 | |||
| 46 | SurfaceTarget ImageTypeToSurfaceTarget(Tegra::Shader::ImageType type) { | ||
| 47 | switch (type) { | ||
| 48 | case Tegra::Shader::ImageType::Texture1D: | ||
| 49 | return SurfaceTarget::Texture1D; | ||
| 50 | case Tegra::Shader::ImageType::TextureBuffer: | ||
| 51 | return SurfaceTarget::TextureBuffer; | ||
| 52 | case Tegra::Shader::ImageType::Texture1DArray: | ||
| 53 | return SurfaceTarget::Texture1DArray; | ||
| 54 | case Tegra::Shader::ImageType::Texture2D: | ||
| 55 | return SurfaceTarget::Texture2D; | ||
| 56 | case Tegra::Shader::ImageType::Texture2DArray: | ||
| 57 | return SurfaceTarget::Texture2DArray; | ||
| 58 | case Tegra::Shader::ImageType::Texture3D: | ||
| 59 | return SurfaceTarget::Texture3D; | ||
| 60 | default: | ||
| 61 | UNREACHABLE(); | ||
| 62 | return SurfaceTarget::Texture2D; | ||
| 55 | } | 63 | } |
| 56 | } | 64 | } |
| 57 | 65 | ||
| 58 | namespace { | ||
| 59 | constexpr u32 GetMipmapSize(bool uncompressed, u32 mip_size, u32 tile) { | 66 | constexpr u32 GetMipmapSize(bool uncompressed, u32 mip_size, u32 tile) { |
| 60 | return uncompressed ? mip_size : std::max(1U, (mip_size + tile - 1) / tile); | 67 | return uncompressed ? mip_size : std::max(1U, (mip_size + tile - 1) / tile); |
| 61 | } | 68 | } |
| 69 | |||
| 62 | } // Anonymous namespace | 70 | } // Anonymous namespace |
| 63 | 71 | ||
| 64 | SurfaceParams SurfaceParams::CreateForTexture(Core::System& system, | 72 | SurfaceParams SurfaceParams::CreateForTexture(const Tegra::Texture::TICEntry& tic, |
| 65 | const Tegra::Texture::FullTextureInfo& config, | ||
| 66 | const VideoCommon::Shader::Sampler& entry) { | 73 | const VideoCommon::Shader::Sampler& entry) { |
| 67 | SurfaceParams params; | 74 | SurfaceParams params; |
| 68 | params.is_tiled = config.tic.IsTiled(); | 75 | params.is_tiled = tic.IsTiled(); |
| 69 | params.srgb_conversion = config.tic.IsSrgbConversionEnabled(); | 76 | params.srgb_conversion = tic.IsSrgbConversionEnabled(); |
| 70 | params.block_width = params.is_tiled ? config.tic.BlockWidth() : 0, | 77 | params.block_width = params.is_tiled ? tic.BlockWidth() : 0, |
| 71 | params.block_height = params.is_tiled ? config.tic.BlockHeight() : 0, | 78 | params.block_height = params.is_tiled ? tic.BlockHeight() : 0, |
| 72 | params.block_depth = params.is_tiled ? config.tic.BlockDepth() : 0, | 79 | params.block_depth = params.is_tiled ? tic.BlockDepth() : 0, |
| 73 | params.tile_width_spacing = params.is_tiled ? (1 << config.tic.tile_width_spacing.Value()) : 1; | 80 | params.tile_width_spacing = params.is_tiled ? (1 << tic.tile_width_spacing.Value()) : 1; |
| 74 | params.pixel_format = PixelFormatFromTextureFormat(config.tic.format, config.tic.r_type.Value(), | 81 | params.pixel_format = |
| 75 | params.srgb_conversion); | 82 | PixelFormatFromTextureFormat(tic.format, tic.r_type.Value(), params.srgb_conversion); |
| 76 | params.type = GetFormatType(params.pixel_format); | 83 | params.type = GetFormatType(params.pixel_format); |
| 77 | if (entry.IsShadow() && params.type == SurfaceType::ColorTexture) { | 84 | if (entry.IsShadow() && params.type == SurfaceType::ColorTexture) { |
| 78 | switch (params.pixel_format) { | 85 | switch (params.pixel_format) { |
| @@ -92,31 +99,72 @@ SurfaceParams SurfaceParams::CreateForTexture(Core::System& system, | |||
| 92 | } | 99 | } |
| 93 | params.type = GetFormatType(params.pixel_format); | 100 | params.type = GetFormatType(params.pixel_format); |
| 94 | } | 101 | } |
| 95 | params.component_type = ComponentTypeFromTexture(config.tic.r_type.Value()); | 102 | params.component_type = ComponentTypeFromTexture(tic.r_type.Value()); |
| 96 | params.type = GetFormatType(params.pixel_format); | 103 | params.type = GetFormatType(params.pixel_format); |
| 97 | // TODO: on 1DBuffer we should use the tic info. | 104 | // TODO: on 1DBuffer we should use the tic info. |
| 98 | if (!config.tic.IsBuffer()) { | 105 | if (tic.IsBuffer()) { |
| 99 | params.target = TextureType2SurfaceTarget(entry.GetType(), entry.IsArray()); | 106 | params.target = SurfaceTarget::TextureBuffer; |
| 100 | params.width = config.tic.Width(); | 107 | params.width = tic.Width(); |
| 101 | params.height = config.tic.Height(); | 108 | params.pitch = params.width * params.GetBytesPerPixel(); |
| 102 | params.depth = config.tic.Depth(); | 109 | params.height = 1; |
| 103 | params.pitch = params.is_tiled ? 0 : config.tic.Pitch(); | 110 | params.depth = 1; |
| 111 | params.num_levels = 1; | ||
| 112 | params.emulated_levels = 1; | ||
| 113 | params.is_layered = false; | ||
| 114 | } else { | ||
| 115 | params.target = TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray()); | ||
| 116 | params.width = tic.Width(); | ||
| 117 | params.height = tic.Height(); | ||
| 118 | params.depth = tic.Depth(); | ||
| 119 | params.pitch = params.is_tiled ? 0 : tic.Pitch(); | ||
| 104 | if (params.target == SurfaceTarget::TextureCubemap || | 120 | if (params.target == SurfaceTarget::TextureCubemap || |
| 105 | params.target == SurfaceTarget::TextureCubeArray) { | 121 | params.target == SurfaceTarget::TextureCubeArray) { |
| 106 | params.depth *= 6; | 122 | params.depth *= 6; |
| 107 | } | 123 | } |
| 108 | params.num_levels = config.tic.max_mip_level + 1; | 124 | params.num_levels = tic.max_mip_level + 1; |
| 109 | params.emulated_levels = std::min(params.num_levels, params.MaxPossibleMipmap()); | 125 | params.emulated_levels = std::min(params.num_levels, params.MaxPossibleMipmap()); |
| 110 | params.is_layered = params.IsLayered(); | 126 | params.is_layered = params.IsLayered(); |
| 111 | } else { | 127 | } |
| 128 | return params; | ||
| 129 | } | ||
| 130 | |||
| 131 | SurfaceParams SurfaceParams::CreateForImage(const Tegra::Texture::TICEntry& tic, | ||
| 132 | const VideoCommon::Shader::Image& entry) { | ||
| 133 | SurfaceParams params; | ||
| 134 | params.is_tiled = tic.IsTiled(); | ||
| 135 | params.srgb_conversion = tic.IsSrgbConversionEnabled(); | ||
| 136 | params.block_width = params.is_tiled ? tic.BlockWidth() : 0, | ||
| 137 | params.block_height = params.is_tiled ? tic.BlockHeight() : 0, | ||
| 138 | params.block_depth = params.is_tiled ? tic.BlockDepth() : 0, | ||
| 139 | params.tile_width_spacing = params.is_tiled ? (1 << tic.tile_width_spacing.Value()) : 1; | ||
| 140 | params.pixel_format = | ||
| 141 | PixelFormatFromTextureFormat(tic.format, tic.r_type.Value(), params.srgb_conversion); | ||
| 142 | params.type = GetFormatType(params.pixel_format); | ||
| 143 | params.component_type = ComponentTypeFromTexture(tic.r_type.Value()); | ||
| 144 | params.type = GetFormatType(params.pixel_format); | ||
| 145 | params.target = ImageTypeToSurfaceTarget(entry.GetType()); | ||
| 146 | // TODO: on 1DBuffer we should use the tic info. | ||
| 147 | if (tic.IsBuffer()) { | ||
| 112 | params.target = SurfaceTarget::TextureBuffer; | 148 | params.target = SurfaceTarget::TextureBuffer; |
| 113 | params.width = config.tic.Width(); | 149 | params.width = tic.Width(); |
| 114 | params.pitch = params.width * params.GetBytesPerPixel(); | 150 | params.pitch = params.width * params.GetBytesPerPixel(); |
| 115 | params.height = 1; | 151 | params.height = 1; |
| 116 | params.depth = 1; | 152 | params.depth = 1; |
| 117 | params.num_levels = 1; | 153 | params.num_levels = 1; |
| 118 | params.emulated_levels = 1; | 154 | params.emulated_levels = 1; |
| 119 | params.is_layered = false; | 155 | params.is_layered = false; |
| 156 | } else { | ||
| 157 | params.width = tic.Width(); | ||
| 158 | params.height = tic.Height(); | ||
| 159 | params.depth = tic.Depth(); | ||
| 160 | params.pitch = params.is_tiled ? 0 : tic.Pitch(); | ||
| 161 | if (params.target == SurfaceTarget::TextureCubemap || | ||
| 162 | params.target == SurfaceTarget::TextureCubeArray) { | ||
| 163 | params.depth *= 6; | ||
| 164 | } | ||
| 165 | params.num_levels = tic.max_mip_level + 1; | ||
| 166 | params.emulated_levels = std::min(params.num_levels, params.MaxPossibleMipmap()); | ||
| 167 | params.is_layered = params.IsLayered(); | ||
| 120 | } | 168 | } |
| 121 | return params; | 169 | return params; |
| 122 | } | 170 | } |
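CreateForTexture and CreateForImage fill the same block-linear and format fields from the TICEntry before diverging on the surface target. If that duplication grows, the common prefix could move into one helper; the sketch below uses heavily simplified stand-in types, not the emulator's real interfaces:

    #include <cstdint>

    // Stand-ins for the TICEntry fields both factory functions read.
    struct TicStandIn {
        bool is_tiled;
        bool srgb;
        std::uint32_t block_width, block_height, block_depth;
        std::uint32_t tile_width_spacing_log2;
    };

    struct ParamsStandIn {
        bool is_tiled{};
        bool srgb_conversion{};
        std::uint32_t block_width{}, block_height{}, block_depth{};
        std::uint32_t tile_width_spacing{};
    };

    // Hypothetical shared prefix of CreateForTexture/CreateForImage.
    ParamsStandIn FillBlockSettings(const TicStandIn& tic) {
        ParamsStandIn params;
        params.is_tiled = tic.is_tiled;
        params.srgb_conversion = tic.srgb;
        params.block_width = params.is_tiled ? tic.block_width : 0;
        params.block_height = params.is_tiled ? tic.block_height : 0;
        params.block_depth = params.is_tiled ? tic.block_depth : 0;
        params.tile_width_spacing = params.is_tiled ? (1u << tic.tile_width_spacing_log2) : 1;
        return params;
    }

    int main() {
        const TicStandIn tic{true, false, 1, 2, 1, 0};
        const ParamsStandIn params = FillBlockSettings(tic);
        return params.tile_width_spacing == 1 ? 0 : 1;
    }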
diff --git a/src/video_core/texture_cache/surface_params.h b/src/video_core/texture_cache/surface_params.h index e7ef66ee2..c58e7f8a4 100644 --- a/src/video_core/texture_cache/surface_params.h +++ b/src/video_core/texture_cache/surface_params.h | |||
| @@ -4,8 +4,6 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <map> | ||
| 8 | |||
| 9 | #include "common/alignment.h" | 7 | #include "common/alignment.h" |
| 10 | #include "common/bit_util.h" | 8 | #include "common/bit_util.h" |
| 11 | #include "common/cityhash.h" | 9 | #include "common/cityhash.h" |
| @@ -23,10 +21,13 @@ using VideoCore::Surface::SurfaceCompression; | |||
| 23 | class SurfaceParams { | 21 | class SurfaceParams { |
| 24 | public: | 22 | public: |
| 25 | /// Creates SurfaceCachedParams from a texture configuration. | 23 | /// Creates SurfaceCachedParams from a texture configuration. |
| 26 | static SurfaceParams CreateForTexture(Core::System& system, | 24 | static SurfaceParams CreateForTexture(const Tegra::Texture::TICEntry& tic, |
| 27 | const Tegra::Texture::FullTextureInfo& config, | ||
| 28 | const VideoCommon::Shader::Sampler& entry); | 25 | const VideoCommon::Shader::Sampler& entry); |
| 29 | 26 | ||
| 27 | /// Creates SurfaceCachedParams from an image configuration. | ||
| 28 | static SurfaceParams CreateForImage(const Tegra::Texture::TICEntry& tic, | ||
| 29 | const VideoCommon::Shader::Image& entry); | ||
| 30 | |||
| 30 | /// Creates SurfaceCachedParams for a depth buffer configuration. | 31 | /// Creates SurfaceCachedParams for a depth buffer configuration. |
| 31 | static SurfaceParams CreateForDepthBuffer( | 32 | static SurfaceParams CreateForDepthBuffer( |
| 32 | Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format, | 33 | Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format, |
diff --git a/src/video_core/texture_cache/surface_view.cpp b/src/video_core/texture_cache/surface_view.cpp index 467696a4c..57a1f5803 100644 --- a/src/video_core/texture_cache/surface_view.cpp +++ b/src/video_core/texture_cache/surface_view.cpp | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | namespace VideoCommon { | 10 | namespace VideoCommon { |
| 11 | 11 | ||
| 12 | std::size_t ViewParams::Hash() const { | 12 | std::size_t ViewParams::Hash() const { |
| 13 | return static_cast<std::size_t>(base_layer) ^ static_cast<std::size_t>(num_layers << 16) ^ | 13 | return static_cast<std::size_t>(base_layer) ^ (static_cast<std::size_t>(num_layers) << 16) ^ |
| 14 | (static_cast<std::size_t>(base_level) << 24) ^ | 14 | (static_cast<std::size_t>(base_level) << 24) ^ |
| 15 | (static_cast<std::size_t>(num_levels) << 32) ^ (static_cast<std::size_t>(target) << 36); | 15 | (static_cast<std::size_t>(num_levels) << 32) ^ (static_cast<std::size_t>(target) << 36); |
| 16 | } | 16 | } |
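The ViewParams::Hash fix matters because num_layers is a u32: the old expression shifted it in 32-bit arithmetic and only widened the result afterwards, so high bits could be discarded before the cast. The demonstration below exaggerates the value to make the difference visible; realistic layer counts fit comfortably in 16 bits, where both forms agree (assuming a 64-bit std::size_t):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const std::uint32_t num_layers = 0x10000; // exaggerated for illustration

        // Old form: the shift happens in 32 bits and wraps before the cast.
        const std::size_t before = static_cast<std::size_t>(num_layers << 16);
        // New form: widen to std::size_t first, then shift in 64 bits.
        const std::size_t after = static_cast<std::size_t>(num_layers) << 16;

        std::printf("%zx %zx\n", before, after); // prints "0 100000000"
    }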
diff --git a/src/video_core/texture_cache/surface_view.h b/src/video_core/texture_cache/surface_view.h index 04ca5639b..b17fd11a9 100644 --- a/src/video_core/texture_cache/surface_view.h +++ b/src/video_core/texture_cache/surface_view.h | |||
| @@ -13,8 +13,8 @@ | |||
| 13 | namespace VideoCommon { | 13 | namespace VideoCommon { |
| 14 | 14 | ||
| 15 | struct ViewParams { | 15 | struct ViewParams { |
| 16 | ViewParams(VideoCore::Surface::SurfaceTarget target, u32 base_layer, u32 num_layers, | 16 | constexpr explicit ViewParams(VideoCore::Surface::SurfaceTarget target, u32 base_layer, |
| 17 | u32 base_level, u32 num_levels) | 17 | u32 num_layers, u32 base_level, u32 num_levels) |
| 18 | : target{target}, base_layer{base_layer}, num_layers{num_layers}, base_level{base_level}, | 18 | : target{target}, base_layer{base_layer}, num_layers{num_layers}, base_level{base_level}, |
| 19 | num_levels{num_levels} {} | 19 | num_levels{num_levels} {} |
| 20 | 20 | ||
| @@ -22,12 +22,6 @@ struct ViewParams { | |||
| 22 | 22 | ||
| 23 | bool operator==(const ViewParams& rhs) const; | 23 | bool operator==(const ViewParams& rhs) const; |
| 24 | 24 | ||
| 25 | VideoCore::Surface::SurfaceTarget target{}; | ||
| 26 | u32 base_layer{}; | ||
| 27 | u32 num_layers{}; | ||
| 28 | u32 base_level{}; | ||
| 29 | u32 num_levels{}; | ||
| 30 | |||
| 31 | bool IsLayered() const { | 25 | bool IsLayered() const { |
| 32 | switch (target) { | 26 | switch (target) { |
| 33 | case VideoCore::Surface::SurfaceTarget::Texture1DArray: | 27 | case VideoCore::Surface::SurfaceTarget::Texture1DArray: |
| @@ -39,13 +33,19 @@ struct ViewParams { | |||
| 39 | return false; | 33 | return false; |
| 40 | } | 34 | } |
| 41 | } | 35 | } |
| 36 | |||
| 37 | VideoCore::Surface::SurfaceTarget target{}; | ||
| 38 | u32 base_layer{}; | ||
| 39 | u32 num_layers{}; | ||
| 40 | u32 base_level{}; | ||
| 41 | u32 num_levels{}; | ||
| 42 | }; | 42 | }; |
| 43 | 43 | ||
| 44 | class ViewBase { | 44 | class ViewBase { |
| 45 | public: | 45 | public: |
| 46 | ViewBase(const ViewParams& params) : params{params} {} | 46 | constexpr explicit ViewBase(const ViewParams& params) : params{params} {} |
| 47 | 47 | ||
| 48 | const ViewParams& GetViewParams() const { | 48 | constexpr const ViewParams& GetViewParams() const { |
| 49 | return params; | 49 | return params; |
| 50 | } | 50 | } |
| 51 | 51 | ||
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 2ec0203d1..877c6635d 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h | |||
| @@ -89,14 +89,29 @@ public: | |||
| 89 | } | 89 | } |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | TView GetTextureSurface(const Tegra::Texture::FullTextureInfo& config, | 92 | TView GetTextureSurface(const Tegra::Texture::TICEntry& tic, |
| 93 | const VideoCommon::Shader::Sampler& entry) { | 93 | const VideoCommon::Shader::Sampler& entry) { |
| 94 | std::lock_guard lock{mutex}; | 94 | std::lock_guard lock{mutex}; |
| 95 | const auto gpu_addr{config.tic.Address()}; | 95 | const auto gpu_addr{tic.Address()}; |
| 96 | if (!gpu_addr) { | 96 | if (!gpu_addr) { |
| 97 | return {}; | 97 | return {}; |
| 98 | } | 98 | } |
| 99 | const auto params{SurfaceParams::CreateForTexture(system, config, entry)}; | 99 | const auto params{SurfaceParams::CreateForTexture(tic, entry)}; |
| 100 | const auto [surface, view] = GetSurface(gpu_addr, params, true, false); | ||
| 101 | if (guard_samplers) { | ||
| 102 | sampled_textures.push_back(surface); | ||
| 103 | } | ||
| 104 | return view; | ||
| 105 | } | ||
| 106 | |||
| 107 | TView GetImageSurface(const Tegra::Texture::TICEntry& tic, | ||
| 108 | const VideoCommon::Shader::Image& entry) { | ||
| 109 | std::lock_guard lock{mutex}; | ||
| 110 | const auto gpu_addr{tic.Address()}; | ||
| 111 | if (!gpu_addr) { | ||
| 112 | return {}; | ||
| 113 | } | ||
| 114 | const auto params{SurfaceParams::CreateForImage(tic, entry)}; | ||
| 100 | const auto [surface, view] = GetSurface(gpu_addr, params, true, false); | 115 | const auto [surface, view] = GetSurface(gpu_addr, params, true, false); |
| 101 | if (guard_samplers) { | 116 | if (guard_samplers) { |
| 102 | sampled_textures.push_back(surface); | 117 | sampled_textures.push_back(surface); |
diff --git a/src/yuzu/configuration/configure_input.cpp b/src/yuzu/configuration/configure_input.cpp index 7613197f2..f2977719c 100644 --- a/src/yuzu/configuration/configure_input.cpp +++ b/src/yuzu/configuration/configure_input.cpp | |||
| @@ -182,6 +182,8 @@ void ConfigureInput::UpdateUIEnabled() { | |||
| 182 | players_configure[i]->setEnabled(players_controller[i]->currentIndex() != 0); | 182 | players_configure[i]->setEnabled(players_controller[i]->currentIndex() != 0); |
| 183 | } | 183 | } |
| 184 | 184 | ||
| 185 | ui->handheld_connected->setChecked(ui->handheld_connected->isChecked() && | ||
| 186 | !ui->use_docked_mode->isChecked()); | ||
| 185 | ui->handheld_connected->setEnabled(!ui->use_docked_mode->isChecked()); | 187 | ui->handheld_connected->setEnabled(!ui->use_docked_mode->isChecked()); |
| 186 | ui->handheld_configure->setEnabled(ui->handheld_connected->isChecked() && | 188 | ui->handheld_configure->setEnabled(ui->handheld_connected->isChecked() && |
| 187 | !ui->use_docked_mode->isChecked()); | 189 | !ui->use_docked_mode->isChecked()); |