pvr: Add initial vkCmdPipelineBarrier skeleton.

Signed-off-by: Karmjit Mahil <Karmjit.Mahil@imgtec.com>
Reviewed-by: Rajnesh Kanwal <rajnesh.kanwal@imgtec.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17683>
This commit is contained in:
Karmjit Mahil 2022-03-04 14:22:42 +00:00 committed by Marge Bot
parent c70924b20e
commit 6d672e0336
3 changed files with 150 additions and 44 deletions

View File

@@ -45,6 +45,7 @@
#include "pvr_private.h"
#include "pvr_types.h"
#include "pvr_winsys.h"
#include "util/bitscan.h"
#include "util/compiler.h"
#include "util/list.h"
#include "util/macros.h"
@@ -4775,10 +4776,95 @@ void pvr_CmdNextSubpass2(VkCommandBuffer commandBuffer,
assert(!"Unimplemented");
}
/* This is just enough to handle vkCmdPipelineBarrier().
 * TODO: Complete?
 */
void pvr_CmdPipelineBarrier2(VkCommandBuffer commandBuffer,
                             const VkDependencyInfo *pDependencyInfo)
{
   PVR_FROM_HANDLE(pvr_cmd_buffer, cmd_buffer, commandBuffer);
   struct pvr_cmd_buffer_state *const state = &cmd_buffer->state;
   const struct pvr_render_pass *const render_pass =
      state->render_pass_info.pass;
   VkPipelineStageFlags vk_src_stage_mask = 0U;
   VkPipelineStageFlags vk_dst_stage_mask = 0U;
   uint32_t required_stage_mask = 0U;
   uint32_t src_stage_mask;
   uint32_t dst_stage_mask;
   bool is_barrier_needed;

   PVR_CHECK_COMMAND_BUFFER_BUILDING_STATE(cmd_buffer);

   /* Union the src/dst stage masks across all three barrier arrays. The
    * per-barrier access masks are not consumed yet (see TODO above).
    */
   for (uint32_t i = 0; i < pDependencyInfo->memoryBarrierCount; i++) {
      vk_src_stage_mask |= pDependencyInfo->pMemoryBarriers[i].srcStageMask;
      vk_dst_stage_mask |= pDependencyInfo->pMemoryBarriers[i].dstStageMask;
   }

   for (uint32_t i = 0; i < pDependencyInfo->bufferMemoryBarrierCount; i++) {
      vk_src_stage_mask |=
         pDependencyInfo->pBufferMemoryBarriers[i].srcStageMask;
      vk_dst_stage_mask |=
         pDependencyInfo->pBufferMemoryBarriers[i].dstStageMask;
   }

   for (uint32_t i = 0; i < pDependencyInfo->imageMemoryBarrierCount; i++) {
      vk_src_stage_mask |=
         pDependencyInfo->pImageMemoryBarriers[i].srcStageMask;
      vk_dst_stage_mask |=
         pDependencyInfo->pImageMemoryBarriers[i].dstStageMask;
   }

   /* Translate Vulkan stage masks into the driver's internal
    * pvr_pipeline_stage_bits representation.
    */
   src_stage_mask = pvr_stage_mask_src(vk_src_stage_mask);
   dst_stage_mask = pvr_stage_mask_dst(vk_dst_stage_mask);

   /* Gather the source stages that the requested destination stages still
    * owe a barrier to, and restrict the source mask to only those.
    */
   for (uint32_t stage = 0U; stage != PVR_NUM_SYNC_PIPELINE_STAGES; stage++) {
      if (!(dst_stage_mask & BITFIELD_BIT(stage)))
         continue;

      required_stage_mask |= state->barriers_needed[stage];
   }

   src_stage_mask &= required_stage_mask;

   /* This barrier satisfies the surviving source stages for every
    * destination stage it covers.
    */
   for (uint32_t stage = 0U; stage != PVR_NUM_SYNC_PIPELINE_STAGES; stage++) {
      if (!(dst_stage_mask & BITFIELD_BIT(stage)))
         continue;

      state->barriers_needed[stage] &= ~src_stage_mask;
   }

   if (src_stage_mask == 0 || dst_stage_mask == 0) {
      is_barrier_needed = false;
   } else if (src_stage_mask == PVR_PIPELINE_STAGE_GEOM_BIT &&
              dst_stage_mask == PVR_PIPELINE_STAGE_FRAG_BIT) {
      /* This is implicit so no need to barrier. */
      is_barrier_needed = false;
   } else if (src_stage_mask == dst_stage_mask &&
              util_bitcount(src_stage_mask) == 1) {
      /* Same single stage on both sides: only fragment->fragment currently
       * requires a real barrier.
       */
      switch (src_stage_mask) {
      case PVR_PIPELINE_STAGE_FRAG_BIT:
         pvr_finishme("Handle fragment stage pipeline barrier.");
         is_barrier_needed = true;
         break;

      case PVR_PIPELINE_STAGE_COMPUTE_BIT:
         pvr_finishme("Handle compute stage pipeline barrier.");
         is_barrier_needed = false;
         break;

      default:
         is_barrier_needed = false;
         break;
      }
   } else {
      is_barrier_needed = true;
   }

   if (render_pass) {
      pvr_finishme("Insert mid fragment stage barrier if needed.");
   } else {
      if (is_barrier_needed)
         pvr_finishme("Insert barrier if needed.");
   }
}
void pvr_CmdResetEvent2KHR(VkCommandBuffer commandBuffer,

View File

@@ -1338,6 +1338,66 @@ to_pvr_graphics_pipeline(struct pvr_pipeline *pipeline)
return container_of(pipeline, struct pvr_graphics_pipeline, base);
}
/* Map a Vulkan pipeline stage mask onto the driver's internal sync
 * pipeline stage bits.
 */
static enum pvr_pipeline_stage_bits
pvr_stage_mask(VkPipelineStageFlags2 stage_mask)
{
   /* Vulkan stages that feed each internal pipeline stage.
    * NOTE(review): DRAW_INDIRECT appears in both the geometry and compute
    * groups — presumably indirect arguments are consumed by either data
    * master; confirm against the HW documentation.
    */
   const VkPipelineStageFlags2 geom_stages =
      VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
      VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
      VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
      VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
   const VkPipelineStageFlags2 frag_stages =
      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
      VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
      VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
      VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   const VkPipelineStageFlags2 compute_stages =
      VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
   enum pvr_pipeline_stage_bits result = 0;

   /* ALL_COMMANDS subsumes everything. */
   if (stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)
      return PVR_PIPELINE_STAGE_ALL_BITS;

   if (stage_mask & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT)
      result |= PVR_PIPELINE_STAGE_ALL_GRAPHICS_BITS;

   if (stage_mask & geom_stages)
      result |= PVR_PIPELINE_STAGE_GEOM_BIT;

   if (stage_mask & frag_stages)
      result |= PVR_PIPELINE_STAGE_FRAG_BIT;

   if (stage_mask & compute_stages)
      result |= PVR_PIPELINE_STAGE_COMPUTE_BIT;

   if (stage_mask & VK_PIPELINE_STAGE_TRANSFER_BIT)
      result |= PVR_PIPELINE_STAGE_TRANSFER_BIT;

   return result;
}
/* Source-side stage translation for barriers. */
static inline enum pvr_pipeline_stage_bits
pvr_stage_mask_src(VkPipelineStageFlags2KHR stage_mask)
{
   /* If the source is bottom of pipe, all stages will need to be waited
    * for.
    */
   return (stage_mask & VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT)
             ? PVR_PIPELINE_STAGE_ALL_BITS
             : pvr_stage_mask(stage_mask);
}
/* Destination-side stage translation for barriers. */
static inline enum pvr_pipeline_stage_bits
pvr_stage_mask_dst(VkPipelineStageFlags2KHR stage_mask)
{
   /* If the destination is top of pipe, all stages should be blocked by
    * prior commands.
    */
   return (stage_mask & VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)
             ? PVR_PIPELINE_STAGE_ALL_BITS
             : pvr_stage_mask(stage_mask);
}
VkResult pvr_pds_fragment_program_create_and_upload(
struct pvr_device *device,
const VkAllocationCallbacks *allocator,

View File

@@ -189,46 +189,6 @@ VkResult pvr_QueueWaitIdle(VkQueue _queue)
return VK_SUCCESS;
}
/* Translate a Vulkan pipeline stage mask into the driver's internal sync
 * pipeline stage bits.
 */
static enum pvr_pipeline_stage_bits
pvr_convert_stage_mask(VkPipelineStageFlags2 stage_mask)
{
   /* Vulkan stages that feed each internal pipeline stage.
    * NOTE(review): DRAW_INDIRECT maps to both geometry and compute —
    * presumably indirect arguments are consumed by either data master;
    * confirm against the HW documentation.
    */
   const VkPipelineStageFlags2 geom_stages =
      VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
      VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
      VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
      VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
   const VkPipelineStageFlags2 frag_stages =
      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
      VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
      VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
      VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   const VkPipelineStageFlags2 compute_stages =
      VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
   enum pvr_pipeline_stage_bits result = 0;

   /* Both top-of-pipe and all-commands cover every stage. */
   if (stage_mask & (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
                     VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
      return PVR_PIPELINE_STAGE_ALL_BITS;
   }

   if (stage_mask & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT)
      result |= PVR_PIPELINE_STAGE_ALL_GRAPHICS_BITS;

   if (stage_mask & geom_stages)
      result |= PVR_PIPELINE_STAGE_GEOM_BIT;

   if (stage_mask & frag_stages)
      result |= PVR_PIPELINE_STAGE_FRAG_BIT;

   if (stage_mask & compute_stages)
      result |= PVR_PIPELINE_STAGE_COMPUTE_BIT;

   if (stage_mask & VK_PIPELINE_STAGE_TRANSFER_BIT)
      result |= PVR_PIPELINE_STAGE_TRANSFER_BIT;

   return result;
}
static VkResult
pvr_process_graphics_cmd(struct pvr_device *device,
struct pvr_queue *queue,
@@ -655,7 +615,7 @@ VkResult pvr_QueueSubmit(VkQueue _queue,
assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
stage_flags[wait_count] =
pvr_convert_stage_mask(desc->pWaitDstStageMask[j]);
pvr_stage_mask_dst(desc->pWaitDstStageMask[j]);
waits[wait_count] = vk_semaphore_get_active_sync(semaphore);
wait_count++;
}