Diffstat (limited to 'src')
-rw-r--r--  src/video_core/engines/kepler_compute.cpp                 |  8
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp          |  9
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.cpp   | 19
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.cpp          |  2
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp      |  1
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp          |  2
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp |  2
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp       |  6
8 files changed, 16 insertions, 33 deletions
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 1ecd65925..368c75a66 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -119,14 +119,6 @@ Texture::TICEntry KeplerCompute::GetTICEntry(u32 tic_index) const {
     Texture::TICEntry tic_entry;
     memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry));
 
-    const auto r_type{tic_entry.r_type.Value()};
-    const auto g_type{tic_entry.g_type.Value()};
-    const auto b_type{tic_entry.b_type.Value()};
-    const auto a_type{tic_entry.a_type.Value()};
-
-    // TODO(Subv): Different data types for separate components are not supported
-    DEBUG_ASSERT(r_type == g_type && r_type == b_type && r_type == a_type);
-
     return tic_entry;
 }
 
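For context, the deleted block only sanity-checked that the four component type fields of the TIC descriptor agreed. A minimal, standalone sketch of that kind of debug-only check, using a hypothetical plain-struct descriptor instead of yuzu's packed BitField layout:

#include <cassert>
#include <cstdint>

// Hypothetical, simplified descriptor; the real Texture::TICEntry packs these
// values into bit fields read straight out of emulated GPU memory.
struct TICEntry {
    std::uint8_t r_type;
    std::uint8_t g_type;
    std::uint8_t b_type;
    std::uint8_t a_type;
};

// Debug-only invariant: the per-component data types are expected to match,
// since mixed component types are not supported by the texture cache.
void ValidateComponentTypes(const TICEntry& tic) {
    assert(tic.r_type == tic.g_type && tic.r_type == tic.b_type && tic.r_type == tic.a_type);
}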
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 1af4268a4..063f41327 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -93,10 +93,6 @@ void oglEnable(GLenum cap, bool state) {
     (state ? glEnable : glDisable)(cap);
 }
 
-void oglEnablei(GLenum cap, bool state, GLuint index) {
-    (state ? glEnablei : glDisablei)(cap, index);
-}
-
 } // Anonymous namespace
 
 RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWindow& emu_window,
@@ -478,7 +474,6 @@ void RasterizerOpenGL::Clear() {
 void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
     MICROPROFILE_SCOPE(OpenGL_Drawing);
     auto& gpu = system.GPU().Maxwell3D();
-    const auto& regs = gpu.regs;
 
     query_cache.UpdateCounters();
 
@@ -529,7 +524,7 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
     // Upload vertex and index data.
     SetupVertexBuffer();
     SetupVertexInstances();
-    GLintptr index_buffer_offset;
+    GLintptr index_buffer_offset = 0;
     if (is_indexed) {
         index_buffer_offset = SetupIndexBuffer();
     }
@@ -555,7 +550,7 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
     ConfigureFramebuffers();
 
     // Signal the buffer cache that we are not going to upload more things.
-    const bool invalidate = buffer_cache.Unmap();
+    buffer_cache.Unmap();
 
     // Now that we are no longer uploading data, we can safely bind the buffers to OpenGL.
     vertex_array_pushbuffer.Bind();
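The index_buffer_offset change follows the usual pattern of giving a conditionally assigned variable a defined default, so later reads are never of an indeterminate value and the compiler's maybe-uninitialized warning goes away. A minimal sketch of the same idea, with a hypothetical stand-in for SetupIndexBuffer():

#include <cstdint>
#include <iostream>

// Hypothetical stand-in for the real SetupIndexBuffer(), which uploads index
// data and returns its offset inside the buffer cache.
std::int64_t SetupIndexBuffer() {
    return 256;
}

void Draw(bool is_indexed) {
    // Default to 0 so the variable has a defined value even when no index
    // buffer is set up for this draw.
    std::int64_t index_buffer_offset = 0;
    if (is_indexed) {
        index_buffer_offset = SetupIndexBuffer();
    }
    std::cout << "index buffer offset: " << index_buffer_offset << '\n';
}

int main() {
    Draw(false);
    Draw(true);
}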
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 849839fe3..2c38f57fd 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -393,10 +393,6 @@ std::string FlowStackTopName(MetaStackClass stack) {
     return fmt::format("{}_flow_stack_top", GetFlowStackPrefix(stack));
 }
 
-[[deprecated]] constexpr bool IsVertexShader(ShaderType stage) {
-    return stage == ShaderType::Vertex;
-}
-
 struct GenericVaryingDescription {
     std::string name;
     u8 first_element = 0;
@@ -529,8 +525,9 @@ private:
     }
 
     void DeclareVertex() {
-        if (!IsVertexShader(stage))
+        if (stage != ShaderType::Vertex) {
             return;
+        }
 
         DeclareVertexRedeclarations();
     }
@@ -602,14 +599,14 @@ private:
                 break;
             }
         }
-        if (!IsVertexShader(stage) || device.HasVertexViewportLayer()) {
+        if (stage != ShaderType::Vertex || device.HasVertexViewportLayer()) {
             if (ir.UsesLayer()) {
                 code.AddLine("int gl_Layer;");
             }
             if (ir.UsesViewportIndex()) {
                 code.AddLine("int gl_ViewportIndex;");
             }
-        } else if ((ir.UsesLayer() || ir.UsesViewportIndex()) && IsVertexShader(stage) &&
+        } else if ((ir.UsesLayer() || ir.UsesViewportIndex()) && stage == ShaderType::Vertex &&
                    !device.HasVertexViewportLayer()) {
             LOG_ERROR(
                 Render_OpenGL,
@@ -1147,7 +1144,7 @@ private:
             // TODO(Subv): Find out what the values are for the first two elements when inside a
             // vertex shader, and what's the value of the fourth element when inside a Tess Eval
             // shader.
-            ASSERT(IsVertexShader(stage));
+            ASSERT(stage == ShaderType::Vertex);
             switch (element) {
             case 2:
                 // Config pack's first value is instance_id.
@@ -1218,12 +1215,12 @@ private:
                 UNIMPLEMENTED();
                 return {};
             case 1:
-                if (IsVertexShader(stage) && !device.HasVertexViewportLayer()) {
+                if (stage == ShaderType::Vertex && !device.HasVertexViewportLayer()) {
                     return {};
                 }
                 return {{"gl_Layer", Type::Int}};
             case 2:
-                if (IsVertexShader(stage) && !device.HasVertexViewportLayer()) {
+                if (stage == ShaderType::Vertex && !device.HasVertexViewportLayer()) {
                     return {};
                 }
                 return {{"gl_ViewportIndex", Type::Int}};
@@ -2532,7 +2529,7 @@ private:
     }
 
     u32 GetNumPhysicalInputAttributes() const {
-        return IsVertexShader(stage) ? GetNumPhysicalAttributes() : GetNumPhysicalVaryings();
+        return stage == ShaderType::Vertex ? GetNumPhysicalAttributes() : GetNumPhysicalVaryings();
     }
 
     u32 GetNumPhysicalAttributes() const {
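The hunks in this file replace every call to the removed helper with the comparison it wrapped. A small sketch of that before/after shape, using a hypothetical ShaderType enum; marking the old wrapper [[deprecated]] is what made lingering call sites warn at build time until they were inlined away:

// Hypothetical stand-in for Tegra::Engines::ShaderType.
enum class ShaderType { Vertex, TesselationControl, TesselationEval, Geometry, Fragment, Compute };

// Before: a one-line wrapper, flagged so every remaining caller emits a warning.
[[deprecated]] constexpr bool IsVertexShader(ShaderType stage) {
    return stage == ShaderType::Vertex;
}

// After: call sites spell the comparison out directly, e.g. when deciding whether
// gl_Layer / gl_ViewportIndex may be redeclared for this stage (hypothetical signature).
constexpr bool CanRedeclareViewportLayer(ShaderType stage, bool has_vertex_viewport_layer) {
    return stage != ShaderType::Vertex || has_vertex_viewport_layer;
}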
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 0e2e5e6c7..f93447610 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -257,6 +257,8 @@ vk::ShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) {
         return vk::ShaderStageFlagBits::eGeometry;
     case Tegra::Engines::ShaderType::Fragment:
         return vk::ShaderStageFlagBits::eFragment;
+    case Tegra::Engines::ShaderType::Compute:
+        return vk::ShaderStageFlagBits::eCompute;
     }
     UNIMPLEMENTED_MSG("Unimplemented shader stage={}", static_cast<u32>(stage));
     return {};
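The added case lets compute shaders map to a concrete stage bit instead of dropping through to the UNIMPLEMENTED_MSG fallback. A self-contained sketch of the same switch shape, with plain stand-in enums rather than the Vulkan-Hpp types:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for Tegra::Engines::ShaderType and vk::ShaderStageFlagBits.
enum class ShaderType { Vertex, Geometry, Fragment, Compute };
enum class StageBit : std::uint32_t { Vertex = 0x01, Geometry = 0x08, Fragment = 0x10, Compute = 0x20 };

StageBit ToStageBit(ShaderType stage) {
    switch (stage) {
    case ShaderType::Vertex:
        return StageBit::Vertex;
    case ShaderType::Geometry:
        return StageBit::Geometry;
    case ShaderType::Fragment:
        return StageBit::Fragment;
    case ShaderType::Compute: // the newly covered stage
        return StageBit::Compute;
    }
    // Only reachable for values outside the enum, mirroring the UNIMPLEMENTED_MSG path above.
    std::fprintf(stderr, "Unimplemented shader stage=%u\n", static_cast<unsigned>(stage));
    return {};
}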
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 91e7b7791..557b9d662 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -192,7 +192,6 @@ std::array<Shader, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
 
     std::array<Shader, Maxwell::MaxShaderProgram> shaders;
     for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
-        const auto& shader_config = gpu.regs.shader_config[index];
         const auto program{static_cast<Maxwell::ShaderProgram>(index)};
 
         // Skip stages that are not enabled
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 755aad643..58c69b786 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -548,8 +548,6 @@ bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config,
 
     // Verify that the cached surface is the same size and format as the requested framebuffer
     const auto& params{surface->GetSurfaceParams()};
-    const auto& pixel_format{
-        VideoCore::Surface::PixelFormatFromGPUPixelFormat(config.pixel_format)};
     ASSERT_MSG(params.width == config.width, "Framebuffer width is different");
     ASSERT_MSG(params.height == config.height, "Framebuffer height is different");
 
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index d9ea3cc21..374959f82 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -100,7 +100,6 @@ void VKStagingBufferPool::ReleaseCache(bool host_visible) {
 }
 
 u64 VKStagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, std::size_t log2) {
-    static constexpr u64 epochs_to_destroy = 180;
     static constexpr std::size_t deletions_per_tick = 16;
 
     auto& staging = cache[log2];
@@ -108,6 +107,7 @@ u64 VKStagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, std::size_t lo
     const std::size_t old_size = entries.size();
 
     const auto is_deleteable = [this](const auto& entry) {
+        static constexpr u64 epochs_to_destroy = 180;
         return entry.last_epoch + epochs_to_destroy < epoch && !entry.watch.IsUsed();
     };
     const std::size_t begin_offset = staging.delete_index;
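The epoch threshold is only consulted inside the deletion predicate, so the patch narrows the constant's scope to the lambda that uses it. A rough sketch of the same scoping pattern, with hypothetical entry fields in place of the real staging-buffer bookkeeping:

#include <algorithm>
#include <cstdint>
#include <vector>

struct Entry {
    std::uint64_t last_epoch = 0;
    bool in_use = false;
};

std::size_t ReleaseOldEntries(std::vector<Entry>& entries, std::uint64_t current_epoch) {
    // The threshold lives inside the only code that needs it.
    const auto is_deleteable = [current_epoch](const Entry& entry) {
        static constexpr std::uint64_t epochs_to_destroy = 180;
        return entry.last_epoch + epochs_to_destroy < current_epoch && !entry.in_use;
    };
    const std::size_t before = entries.size();
    entries.erase(std::remove_if(entries.begin(), entries.end(), is_deleteable), entries.end());
    return before - entries.size();
}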
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 73d92a5ae..26175921b 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -52,6 +52,9 @@ vk::ImageType SurfaceTargetToImage(SurfaceTarget target) {
         return vk::ImageType::e2D;
     case SurfaceTarget::Texture3D:
         return vk::ImageType::e3D;
+    case SurfaceTarget::TextureBuffer:
+        UNREACHABLE();
+        return {};
     }
     UNREACHABLE_MSG("Unknown texture target={}", static_cast<u32>(target));
     return {};
@@ -273,7 +276,6 @@ void CachedSurface::UploadImage(const std::vector<u8>& staging_buffer) {
 
     for (u32 level = 0; level < params.num_levels; ++level) {
         vk::BufferImageCopy copy = GetBufferImageCopy(level);
-        const auto& dld = device.GetDispatchLoader();
         if (image->GetAspectMask() ==
             (vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil)) {
             vk::BufferImageCopy depth = copy;
@@ -422,7 +424,6 @@ void VKTextureCache::ImageCopy(Surface& src_surface, Surface& dst_surface,
         dst_base_layer, num_layers, copy_params.dest_level, 1, vk::PipelineStageFlagBits::eTransfer,
         vk::AccessFlagBits::eTransferWrite, vk::ImageLayout::eTransferDstOptimal);
 
-    const auto& dld{device.GetDispatchLoader()};
     const vk::ImageSubresourceLayers src_subresource(
         src_surface->GetAspectMask(), copy_params.source_level, copy_params.source_z, num_layers);
     const vk::ImageSubresourceLayers dst_subresource(
@@ -458,7 +459,6 @@ void VKTextureCache::ImageBlit(View& src_view, View& dst_view,
         dst_view->GetImageSubresourceLayers(), {dst_top_left, dst_bot_right});
     const bool is_linear = copy_config.filter == Tegra::Engines::Fermi2D::Filter::Linear;
 
-    const auto& dld{device.GetDispatchLoader()};
     scheduler.Record([src_image = src_view->GetImage(), dst_image = dst_view->GetImage(), blit,
                       is_linear](auto cmdbuf, auto& dld) {
         cmdbuf.blitImage(src_image, vk::ImageLayout::eTransferSrcOptimal, dst_image,
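The removed dld locals were dead code: the scheduler already hands a dispatch loader to each recorded command as the lambda's dld parameter. A rough sketch of that record-then-execute shape, with hypothetical Scheduler and Dispatch types standing in for yuzu's VKScheduler and the Vulkan dispatch loader:

#include <functional>
#include <iostream>
#include <utility>
#include <vector>

struct Dispatch {}; // hypothetical stand-in for the Vulkan dispatch loader

class Scheduler {
public:
    // Recorded commands receive the dispatch loader when they run, so callers
    // do not need to fetch or capture their own copy.
    template <typename F>
    void Record(F&& func) {
        commands_.emplace_back(std::forward<F>(func));
    }

    void Flush(Dispatch& dld) {
        for (auto& command : commands_) {
            command(dld);
        }
        commands_.clear();
    }

private:
    std::vector<std::function<void(Dispatch&)>> commands_;
};

int main() {
    Scheduler scheduler;
    scheduler.Record([](Dispatch& dld) {
        (void)dld; // the loader arrives as a parameter; no outer local required
        std::cout << "recorded blit command executed\n";
    });
    Dispatch dld;
    scheduler.Flush(dld);
}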