-rw-r--r--  src/core/hle/service/vi/vi.cpp | 77
1 file changed, 43 insertions(+), 34 deletions(-)

diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 45cfffe06..93a113b93 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -536,9 +536,6 @@ private:
         LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id,
                   transaction, flags);
 
-        const auto guard = nv_flinger.Lock();
-        auto& buffer_queue = nv_flinger.FindBufferQueue(id);
-
         switch (transaction) {
         case TransactionId::Connect: {
             IGBPConnectRequestParcel request{ctx.ReadBuffer()};
@@ -553,7 +550,11 @@ private:
         case TransactionId::SetPreallocatedBuffer: {
             IGBPSetPreallocatedBufferRequestParcel request{ctx.ReadBuffer()};
 
-            buffer_queue.SetPreallocatedBuffer(request.data.slot, request.buffer_container.buffer);
+            {
+                const auto guard = nv_flinger.Lock();
+                auto& buffer_queue = nv_flinger.FindBufferQueue(id);
+                buffer_queue.SetPreallocatedBuffer(request.data.slot, request.buffer_container.buffer);
+            }
 
             IGBPSetPreallocatedBufferResponseParcel response{};
             ctx.WriteBuffer(response.Serialize());
@@ -563,48 +564,46 @@ private:
             IGBPDequeueBufferRequestParcel request{ctx.ReadBuffer()};
             const u32 width{request.data.width};
             const u32 height{request.data.height};
-            auto result = buffer_queue.DequeueBuffer(width, height);
 
-            if (result) {
-                // Buffer is available
-                IGBPDequeueBufferResponseParcel response{result->first, *result->second};
-                ctx.WriteBuffer(response.Serialize());
-            } else {
-                // Wait the current thread until a buffer becomes available
-                ctx.SleepClientThread(
-                    "IHOSBinderDriver::DequeueBuffer", UINT64_MAX,
-                    [=, this](std::shared_ptr<Kernel::Thread> thread,
-                              Kernel::HLERequestContext& ctx, Kernel::ThreadWakeupReason reason) {
-                        // Repeat TransactParcel DequeueBuffer when a buffer is available
-                        const auto guard = nv_flinger.Lock();
-                        auto& buffer_queue = nv_flinger.FindBufferQueue(id);
-                        auto result = buffer_queue.DequeueBuffer(width, height);
-                        ASSERT_MSG(result != std::nullopt, "Could not dequeue buffer.");
-
-                        IGBPDequeueBufferResponseParcel response{result->first, *result->second};
-                        ctx.WriteBuffer(response.Serialize());
-                        IPC::ResponseBuilder rb{ctx, 2};
-                        rb.Push(RESULT_SUCCESS);
-                    },
-                    buffer_queue.GetWritableBufferWaitEvent());
-            }
+            std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> result;
+
+            while (!result) {
+                {
+                    const auto guard = nv_flinger.Lock();
+                    auto& buffer_queue = nv_flinger.FindBufferQueue(id);
+                    result = buffer_queue.DequeueBuffer(width, height);
+                }
+
+                if (result) {
+                    // Buffer is available
+                    IGBPDequeueBufferResponseParcel response{result->first, *result->second};
+                    ctx.WriteBuffer(response.Serialize());
+                }
+            }
+
             break;
         }
         case TransactionId::RequestBuffer: {
             IGBPRequestBufferRequestParcel request{ctx.ReadBuffer()};
 
+            const auto guard = nv_flinger.Lock();
+            auto& buffer_queue = nv_flinger.FindBufferQueue(id);
             auto& buffer = buffer_queue.RequestBuffer(request.slot);
-
             IGBPRequestBufferResponseParcel response{buffer};
             ctx.WriteBuffer(response.Serialize());
+
             break;
         }
         case TransactionId::QueueBuffer: {
             IGBPQueueBufferRequestParcel request{ctx.ReadBuffer()};
 
-            buffer_queue.QueueBuffer(request.data.slot, request.data.transform,
-                                     request.data.GetCropRect(), request.data.swap_interval,
-                                     request.data.multi_fence);
+            {
+                const auto guard = nv_flinger.Lock();
+                auto& buffer_queue = nv_flinger.FindBufferQueue(id);
+                buffer_queue.QueueBuffer(request.data.slot, request.data.transform,
+                                         request.data.GetCropRect(), request.data.swap_interval,
+                                         request.data.multi_fence);
+            }
 
             IGBPQueueBufferResponseParcel response{1280, 720};
             ctx.WriteBuffer(response.Serialize());
@@ -613,6 +612,8 @@ private:
         case TransactionId::Query: {
             IGBPQueryRequestParcel request{ctx.ReadBuffer()};
 
+            const auto guard = nv_flinger.Lock();
+            auto& buffer_queue = nv_flinger.FindBufferQueue(id);
             const u32 value =
                 buffer_queue.Query(static_cast<NVFlinger::BufferQueue::QueryType>(request.type));
 
@@ -623,7 +624,11 @@ private:
         case TransactionId::CancelBuffer: {
             IGBPCancelBufferRequestParcel request{ctx.ReadBuffer()};
 
-            buffer_queue.CancelBuffer(request.data.slot, request.data.multi_fence);
+            {
+                const auto guard = nv_flinger.Lock();
+                auto& buffer_queue = nv_flinger.FindBufferQueue(id);
+                buffer_queue.CancelBuffer(request.data.slot, request.data.multi_fence);
+            }
 
             IGBPCancelBufferResponseParcel response{};
             ctx.WriteBuffer(response.Serialize());
@@ -633,7 +638,11 @@ private:
             LOG_WARNING(Service_VI, "(STUBBED) called, transaction=Disconnect");
             const auto buffer = ctx.ReadBuffer();
 
-            buffer_queue.Disconnect();
+            {
+                const auto guard = nv_flinger.Lock();
+                auto& buffer_queue = nv_flinger.FindBufferQueue(id);
+                buffer_queue.Disconnect();
+            }
 
             IGBPEmptyResponseParcel response{};
             ctx.WriteBuffer(response.Serialize());
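
For reference, the shape this patch settles on: each transaction acquires the NVFlinger lock in its own narrow brace scope around the buffer-queue access, and DequeueBuffer retries under that short-lived lock in a loop instead of parking the client thread while the lock is held. Below is a minimal standalone sketch of that pattern, using made-up stand-ins (SimpleFlinger, SimpleBufferQueue) rather than yuzu's actual NVFlinger/BufferQueue API.

// Illustrative stand-ins only; not yuzu's real classes.
#include <mutex>
#include <optional>
#include <queue>

struct SimpleBufferQueue {
    std::queue<int> free_slots; // slot indices ready to be handed out

    // Returns a slot if one is free, otherwise std::nullopt (like DequeueBuffer).
    std::optional<int> DequeueBuffer() {
        if (free_slots.empty()) {
            return std::nullopt;
        }
        const int slot = free_slots.front();
        free_slots.pop();
        return slot;
    }
};

struct SimpleFlinger {
    std::mutex mutex;
    SimpleBufferQueue queue;

    // Analogous to nv_flinger.Lock(): returns a guard that unlocks on scope exit.
    std::unique_lock<std::mutex> Lock() {
        return std::unique_lock<std::mutex>(mutex);
    }

    // Analogous to nv_flinger.FindBufferQueue(id), minus the id lookup.
    SimpleBufferQueue& FindBufferQueue() {
        return queue;
    }
};

// Shape of the patched DequeueBuffer path: the lock is held only for the
// queue access itself, and the caller polls until a slot becomes free
// instead of sleeping while the lock is held.
int DequeueBlocking(SimpleFlinger& flinger) {
    std::optional<int> slot;
    while (!slot) {
        {
            const auto guard = flinger.Lock(); // released at the closing brace
            slot = flinger.FindBufferQueue().DequeueBuffer();
        }
        // The lock is free here, so other threads can queue/release buffers
        // between retries (the patched code likewise retries without waiting).
    }
    return *slot;
}

Keeping the guard inside the inner braces is what lets the retry loop (and, in the real code, the IPC response write) run without the flinger mutex held.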