video_core/dma_pusher: Simplify Step() logic.

As fetching the command list header and the list of command headers is now a fixed 1:1 relation, both can be handled within a single call.
This cleans up the Step() logic quite a bit.
Markus Wick 2019-02-19 10:26:58 +01:00
parent 717394c980
commit 6dd40976d0
2 changed files with 71 additions and 75 deletions
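
As an illustrative aside, here is a minimal, self-contained C++ sketch of the consolidated flow. The names below (CommandListHeader fields, ReadCommandWords, DmaPusherSketch) are simplified stand-ins invented for this example, not the project's actual classes; only the shape of Step() mirrors the change: each IB entry is consumed and the command words it describes are fetched in the same call.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <queue>
#include <vector>

using u32 = std::uint32_t;
using GPUVAddr = std::uint64_t;

// Simplified stand-in for the entry describing one pushbuffer segment.
struct CommandListHeader {
    GPUVAddr addr;    // start address of the segment
    u32 size;         // number of u32 command words in the segment
    bool is_non_main; // non-main pushbuffer flag
};
using CommandList = std::vector<CommandListHeader>;

struct CommandHeader {
    u32 raw;
};

// Stand-in for reading GPU memory: returns `count` dummy command words.
static std::vector<CommandHeader> ReadCommandWords(GPUVAddr addr, u32 count) {
    std::vector<CommandHeader> words(count);
    for (u32 i = 0; i < count; ++i) {
        words[i].raw = static_cast<u32>(addr) + i;
    }
    return words;
}

class DmaPusherSketch {
public:
    void Push(CommandList list) {
        dma_pushbuffer.push(std::move(list));
    }

    // One IB entry now maps 1:1 to one fetch of command words, so a single
    // Step() both pulls the next entry and reads the words it describes.
    bool Step() {
        if (!ib_enable || dma_pushbuffer.empty()) {
            return false; // no IB entries left - nothing to do
        }

        // Copy the header by value so it stays valid even after the queue
        // entry is popped below.
        const CommandList& command_list = dma_pushbuffer.front();
        const CommandListHeader header = command_list[dma_pushbuffer_subindex++];

        if (dma_pushbuffer_subindex >= command_list.size()) {
            // Finished this command list, drop it from the queue.
            dma_pushbuffer.pop();
            dma_pushbuffer_subindex = 0;
        }

        if (header.size == 0) {
            return true; // empty entry, nothing to fetch this time
        }

        // Fetch exactly the words this entry describes and process them.
        for (const CommandHeader& word : ReadCommandWords(header.addr, header.size)) {
            std::printf("word 0x%08x (non_main=%d)\n", word.raw, header.is_non_main ? 1 : 0);
        }
        return true;
    }

private:
    std::queue<CommandList> dma_pushbuffer;
    std::size_t dma_pushbuffer_subindex = 0;
    bool ib_enable = true;
};

int main() {
    DmaPusherSketch pusher;
    pusher.Push({{0x1000, 2, false}, {0x2000, 0, true}});
    while (pusher.Step()) {
    }
    return 0;
}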


@@ -33,16 +33,34 @@ void DmaPusher::DispatchCalls() {
}
bool DmaPusher::Step() {
if (dma_get != dma_put) {
if (!ib_enable || dma_pushbuffer.empty()) {
// pushbuffer empty and IB empty or nonexistent - nothing to do
return false;
}
const CommandList& command_list{dma_pushbuffer.front()};
const CommandListHeader& command_list_header{command_list[dma_pushbuffer_subindex++]};
GPUVAddr dma_get = command_list_header.addr;
GPUVAddr dma_put = dma_get + command_list_header.size * sizeof(u32);
bool non_main = command_list_header.is_non_main;
if (dma_pushbuffer_subindex >= command_list.size()) {
// We've gone through the current list, remove it from the queue
dma_pushbuffer.pop();
dma_pushbuffer_subindex = 0;
}
if (command_list_header.size == 0) {
return true;
}
// Push buffer non-empty, read a word
const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get);
ASSERT_MSG(address, "Invalid GPU address");
GPUVAddr size = dma_put - dma_get;
ASSERT_MSG(size % sizeof(CommandHeader) == 0, "Invalid aligned GPU addresses");
command_headers.resize(size / sizeof(CommandHeader));
command_headers.resize(command_list_header.size);
Memory::ReadBlock(*address, command_headers.data(), size);
Memory::ReadBlock(*address, command_headers.data(), command_list_header.size * sizeof(u32));
for (const CommandHeader& command_header : command_headers) {
@@ -93,28 +111,9 @@ bool DmaPusher::Step() {
}
}
dma_get = dma_put;
if (!non_main) {
// TODO (degasus): This is dead code, as dma_mget is never read.
dma_mget = dma_get;
}
} else if (ib_enable && !dma_pushbuffer.empty()) {
// Current pushbuffer empty, but we have more IB entries to read
const CommandList& command_list{dma_pushbuffer.front()};
const CommandListHeader& command_list_header{command_list[dma_pushbuffer_subindex++]};
dma_get = command_list_header.addr;
dma_put = dma_get + command_list_header.size * sizeof(u32);
non_main = command_list_header.is_non_main;
if (dma_pushbuffer_subindex >= command_list.size()) {
// We've gone through the current list, remove it from the queue
dma_pushbuffer.pop();
dma_pushbuffer_subindex = 0;
}
} else {
// Otherwise, pushbuffer empty and IB empty or nonexistent - nothing to do
return {};
dma_mget = dma_put;
}
return true;
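
Because dma_get, dma_put and non_main are now derived from the current command list header as locals inside Step(), they can be dropped as members, while dma_mget and ib_enable remain. A rough sketch of the state change implied by the header diff below, using illustrative struct names rather than the codebase's own class (all other members are omitted):

#include <cstdint>

using GPUVAddr = std::uint64_t;

// Illustrative before/after of the members touched by this commit.
struct DmaPusherStateBefore {
    GPUVAddr dma_put{};   ///< pushbuffer current end address   -> now a local in Step()
    GPUVAddr dma_get{};   ///< pushbuffer current read address  -> now a local in Step()
    GPUVAddr dma_mget{};  ///< main pushbuffer last read address (kept, noted as dead code)
    bool ib_enable{true}; ///< IB mode enabled
    bool non_main{};      ///< non-main pushbuffer active       -> now a local in Step()
};

struct DmaPusherStateAfter {
    GPUVAddr dma_mget{};  ///< main pushbuffer last read address
    bool ib_enable{true}; ///< IB mode enabled
};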


@@ -91,11 +91,8 @@ private:
DmaState dma_state{};
bool dma_increment_once{};
GPUVAddr dma_put{}; ///< pushbuffer current end address
GPUVAddr dma_get{}; ///< pushbuffer current read address
GPUVAddr dma_mget{}; ///< main pushbuffer last read address
bool ib_enable{true}; ///< IB mode enabled
bool non_main{}; ///< non-main pushbuffer active
};
} // namespace Tegra