diff --git a/src/imagination/vulkan/pvr_cmd_buffer.c b/src/imagination/vulkan/pvr_cmd_buffer.c index 9f75c98c7b4..d3221d2c181 100644 --- a/src/imagination/vulkan/pvr_cmd_buffer.c +++ b/src/imagination/vulkan/pvr_cmd_buffer.c @@ -2643,11 +2643,11 @@ pvr_setup_vertex_buffers(struct pvr_cmd_buffer *cmd_buffer, static VkResult pvr_setup_descriptor_mappings( struct pvr_cmd_buffer *const cmd_buffer, enum pvr_stage_allocation stage, - const struct pvr_stage_allocation_uniform_state *uniform_state, + const struct pvr_stage_allocation_descriptor_state *descriptor_state, UNUSED const pvr_dev_addr_t *const num_worgroups_buff_addr, - uint32_t *const uniform_data_offset_out) + uint32_t *const descriptor_data_offset_out) { - const struct pvr_pds_info *const pds_info = &uniform_state->pds_info; + const struct pvr_pds_info *const pds_info = &descriptor_state->pds_info; const struct pvr_descriptor_state *desc_state; const uint8_t *entries; uint32_t *dword_buffer; @@ -2695,7 +2695,7 @@ static VkResult pvr_setup_descriptor_mappings( /* TODO: See if instead of reusing the blend constant buffer type entry, * we can setup a new buffer type specifically for num_workgroups or other * built-in variables. The mappings are setup at pipeline creation when - * creating the uniform program. + * creating the descriptor program. */ pvr_finishme("Handle blend constant reuse for compute."); @@ -2833,7 +2833,7 @@ static VkResult pvr_setup_descriptor_mappings( pvr_bo_cpu_unmap(cmd_buffer->device, pvr_bo); - *uniform_data_offset_out = + *descriptor_data_offset_out = pvr_bo->vma->dev_addr.addr - cmd_buffer->device->heaps.pds_heap->base_addr.addr; @@ -2876,18 +2876,18 @@ static void pvr_compute_update_shared(struct pvr_cmd_buffer *cmd_buffer, * allocation of the local/common store shared registers so we repurpose the * deallocation PDS program. 
*/ - if (pipeline->state.uniform.pds_info.code_size_in_dwords) { + if (pipeline->state.descriptor.pds_info.code_size_in_dwords) { uint32_t pds_data_size_in_dwords = - pipeline->state.uniform.pds_info.data_size_in_dwords; + pipeline->state.descriptor.pds_info.data_size_in_dwords; - info.pds_data_offset = state->pds_compute_uniform_data_offset; + info.pds_data_offset = state->pds_compute_descriptor_data_offset; info.pds_data_size = DIV_ROUND_UP(pds_data_size_in_dwords << 2U, PVRX(CDMCTRL_KERNEL0_PDS_DATA_SIZE_UNIT_SIZE)); /* Check that we have upload the code section. */ - assert(pipeline->state.uniform.pds_code.code_size); - info.pds_code_offset = pipeline->state.uniform.pds_code.code_offset; + assert(pipeline->state.descriptor.pds_code.code_size); + info.pds_code_offset = pipeline->state.descriptor.pds_code.code_offset; } else { /* FIXME: There should be a deallocation pds program already uploaded * that we use at this point. @@ -3061,24 +3061,24 @@ void pvr_CmdDispatch(VkCommandBuffer commandBuffer, if (result != VK_SUCCESS) return; - result = - pvr_setup_descriptor_mappings(cmd_buffer, - PVR_STAGE_ALLOCATION_COMPUTE, - &compute_pipeline->state.uniform, - &num_workgroups_bo->vma->dev_addr, - &state->pds_compute_uniform_data_offset); + result = pvr_setup_descriptor_mappings( + cmd_buffer, + PVR_STAGE_ALLOCATION_COMPUTE, + &compute_pipeline->state.descriptor, + &num_workgroups_bo->vma->dev_addr, + &state->pds_compute_descriptor_data_offset); if (result != VK_SUCCESS) return; } else if ((compute_pipeline->base.layout ->per_stage_descriptor_masks[PVR_STAGE_ALLOCATION_COMPUTE] && state->dirty.compute_desc_dirty) || state->dirty.compute_pipeline_binding || push_descriptors_dirty) { - result = - pvr_setup_descriptor_mappings(cmd_buffer, - PVR_STAGE_ALLOCATION_COMPUTE, - &compute_pipeline->state.uniform, - NULL, - &state->pds_compute_uniform_data_offset); + result = pvr_setup_descriptor_mappings( + cmd_buffer, + PVR_STAGE_ALLOCATION_COMPUTE, + 
&compute_pipeline->state.descriptor, + NULL, + &state->pds_compute_descriptor_data_offset); if (result != VK_SUCCESS) return; } @@ -3145,16 +3145,17 @@ static uint32_t pvr_calc_shared_regs_count( static void pvr_emit_dirty_pds_state(const struct pvr_cmd_buffer *const cmd_buffer, struct pvr_sub_cmd_gfx *const sub_cmd, - const uint32_t pds_vertex_uniform_data_offset) + const uint32_t pds_vertex_descriptor_data_offset) { const struct pvr_cmd_buffer_state *const state = &cmd_buffer->state; - const struct pvr_stage_allocation_uniform_state *const vertex_uniform_state = - &state->gfx_pipeline->vertex_shader_state.uniform_state; + const struct pvr_stage_allocation_descriptor_state + *const vertex_descriptor_state = + &state->gfx_pipeline->vertex_shader_state.descriptor_state; const struct pvr_pipeline_stage_state *const vertex_stage_state = &state->gfx_pipeline->vertex_shader_state.stage_state; struct pvr_csb *const csb = &sub_cmd->control_stream; - if (!vertex_uniform_state->pds_info.code_size_in_dwords) + if (!vertex_descriptor_state->pds_info.code_size_in_dwords) return; pvr_csb_emit (csb, VDMCTRL_PDS_STATE0, state0) { @@ -3164,19 +3165,19 @@ pvr_emit_dirty_pds_state(const struct pvr_cmd_buffer *const cmd_buffer, DIV_ROUND_UP(vertex_stage_state->const_shared_reg_count << 2, PVRX(VDMCTRL_PDS_STATE0_USC_COMMON_SIZE_UNIT_SIZE)); - state0.pds_data_size = - DIV_ROUND_UP(vertex_uniform_state->pds_info.data_size_in_dwords << 2, - PVRX(VDMCTRL_PDS_STATE0_PDS_DATA_SIZE_UNIT_SIZE)); + state0.pds_data_size = DIV_ROUND_UP( + vertex_descriptor_state->pds_info.data_size_in_dwords << 2, + PVRX(VDMCTRL_PDS_STATE0_PDS_DATA_SIZE_UNIT_SIZE)); } pvr_csb_emit (csb, VDMCTRL_PDS_STATE1, state1) { - state1.pds_data_addr = PVR_DEV_ADDR(pds_vertex_uniform_data_offset); + state1.pds_data_addr = PVR_DEV_ADDR(pds_vertex_descriptor_data_offset); state1.sd_type = PVRX(VDMCTRL_SD_TYPE_NONE); } pvr_csb_emit (csb, VDMCTRL_PDS_STATE2, state2) { state2.pds_code_addr = - 
PVR_DEV_ADDR(vertex_uniform_state->pds_code.code_offset); + PVR_DEV_ADDR(vertex_descriptor_state->pds_code.code_offset); } } @@ -3636,8 +3637,8 @@ pvr_setup_fragment_state_pointers(struct pvr_cmd_buffer *const cmd_buffer, struct pvr_sub_cmd_gfx *const sub_cmd) { struct pvr_cmd_buffer_state *const state = &cmd_buffer->state; - const struct pvr_stage_allocation_uniform_state *uniform_shader_state = - &state->gfx_pipeline->fragment_shader_state.uniform_state; + const struct pvr_stage_allocation_descriptor_state *descriptor_shader_state = + &state->gfx_pipeline->fragment_shader_state.descriptor_state; const struct pvr_pds_upload *pds_coeff_program = &state->gfx_pipeline->fragment_shader_state.pds_coeff_program; const struct pvr_pipeline_stage_state *fragment_state = @@ -3647,7 +3648,7 @@ pvr_setup_fragment_state_pointers(struct pvr_cmd_buffer *const cmd_buffer, struct pvr_ppp_state *const ppp_state = &state->ppp_state; const uint32_t pds_uniform_size = - DIV_ROUND_UP(uniform_shader_state->pds_info.data_size_in_dwords, + DIV_ROUND_UP(descriptor_shader_state->pds_info.data_size_in_dwords, PVRX(TA_STATE_PDS_SIZEINFO1_PDS_UNIFORMSIZE_UNIT_SIZE)); const uint32_t pds_varying_state_size = @@ -3687,12 +3688,12 @@ pvr_setup_fragment_state_pointers(struct pvr_cmd_buffer *const cmd_buffer, shader_base.addr = PVR_DEV_ADDR(pds_upload->data_offset); } - if (uniform_shader_state->pds_code.pvr_bo) { + if (descriptor_shader_state->pds_code.pvr_bo) { pvr_csb_pack (&ppp_state->pds.texture_uniform_code_base, TA_STATE_PDS_TEXUNICODEBASE, tex_base) { tex_base.addr = - PVR_DEV_ADDR(uniform_shader_state->pds_code.code_offset); + PVR_DEV_ADDR(descriptor_shader_state->pds_code.code_offset); } } else { ppp_state->pds.texture_uniform_code_base = 0U; @@ -3733,7 +3734,7 @@ pvr_setup_fragment_state_pointers(struct pvr_cmd_buffer *const cmd_buffer, pvr_csb_pack (&ppp_state->pds.uniform_state_data_base, TA_STATE_PDS_UNIFORMDATABASE, base) { - base.addr = 
PVR_DEV_ADDR(state->pds_fragment_uniform_data_offset); + base.addr = PVR_DEV_ADDR(state->pds_fragment_descriptor_data_offset); } emit_state->pds_fragment_stateptr0 = true; @@ -4433,9 +4434,9 @@ static VkResult pvr_validate_draw_state(struct pvr_cmd_buffer *cmd_buffer) result = pvr_setup_descriptor_mappings( cmd_buffer, PVR_STAGE_ALLOCATION_FRAGMENT, - &state->gfx_pipeline->fragment_shader_state.uniform_state, + &state->gfx_pipeline->fragment_shader_state.descriptor_state, NULL, - &state->pds_fragment_uniform_data_offset); + &state->pds_fragment_descriptor_data_offset); if (result != VK_SUCCESS) { mesa_loge("Could not setup fragment descriptor mappings."); return result; @@ -4443,14 +4444,14 @@ static VkResult pvr_validate_draw_state(struct pvr_cmd_buffer *cmd_buffer) } if (state->dirty.vertex_descriptors) { - uint32_t pds_vertex_uniform_data_offset; + uint32_t pds_vertex_descriptor_data_offset; result = pvr_setup_descriptor_mappings( cmd_buffer, PVR_STAGE_ALLOCATION_VERTEX_GEOMETRY, - &state->gfx_pipeline->vertex_shader_state.uniform_state, + &state->gfx_pipeline->vertex_shader_state.descriptor_state, NULL, - &pds_vertex_uniform_data_offset); + &pds_vertex_descriptor_data_offset); if (result != VK_SUCCESS) { mesa_loge("Could not setup vertex descriptor mappings."); return result; @@ -4458,7 +4459,7 @@ static VkResult pvr_validate_draw_state(struct pvr_cmd_buffer *cmd_buffer) pvr_emit_dirty_pds_state(cmd_buffer, sub_cmd, - pds_vertex_uniform_data_offset); + pds_vertex_descriptor_data_offset); } pvr_emit_dirty_ppp_state(cmd_buffer, sub_cmd); diff --git a/src/imagination/vulkan/pvr_pipeline.c b/src/imagination/vulkan/pvr_pipeline.c index ade658c0774..d68dd7b92b8 100644 --- a/src/imagination/vulkan/pvr_pipeline.c +++ b/src/imagination/vulkan/pvr_pipeline.c @@ -559,12 +559,12 @@ static size_t pvr_pds_get_max_descriptor_upload_const_map_size_in_bytes() * structs. 
*/ typedef struct pvr_pds_buffer ( - *const pvr_pds_uniform_program_buffer_array_ptr)[PVR_PDS_MAX_BUFFERS]; + *const pvr_pds_descriptor_program_buffer_array_ptr)[PVR_PDS_MAX_BUFFERS]; -static void pvr_pds_uniform_program_setup_buffers( +static void pvr_pds_descriptor_program_setup_buffers( bool robust_buffer_access, const struct rogue_ubo_data *ubo_data, - pvr_pds_uniform_program_buffer_array_ptr buffers_out_ptr, + pvr_pds_descriptor_program_buffer_array_ptr buffers_out_ptr, uint32_t *const buffer_count_out) { struct pvr_pds_buffer *const buffers = *buffers_out_ptr; @@ -594,7 +594,7 @@ static void pvr_pds_uniform_program_setup_buffers( *buffer_count_out = buffer_count; } -static VkResult pvr_pds_uniform_program_create_and_upload( +static VkResult pvr_pds_descriptor_program_create_and_upload( struct pvr_device *const device, const VkAllocationCallbacks *const allocator, const struct rogue_ubo_data *const ubo_data, @@ -617,10 +617,10 @@ static VkResult pvr_pds_uniform_program_create_and_upload( memset(pds_info_out, 0, sizeof(*pds_info_out)); - pvr_pds_uniform_program_setup_buffers(device->features.robustBufferAccess, - ubo_data, - &program.buffers, - &program.buffer_count); + pvr_pds_descriptor_program_setup_buffers(device->features.robustBufferAccess, + ubo_data, + &program.buffers, + &program.buffer_count); for (uint32_t dma = 0; dma < program.buffer_count; dma++) { if (program.buffers[dma].type != PVR_BUFFER_TYPES_COMPILE_TIME) @@ -740,7 +740,7 @@ static VkResult pvr_pds_uniform_program_create_and_upload( return VK_SUCCESS; } -static void pvr_pds_uniform_program_destroy( +static void pvr_pds_descriptor_program_destroy( struct pvr_device *const device, const struct VkAllocationCallbacks *const allocator, struct pvr_pds_upload *const pds_code, @@ -1062,15 +1062,15 @@ static VkResult pvr_compute_pipeline_compile( abort(); }; - result = pvr_pds_uniform_program_create_and_upload( + result = pvr_pds_descriptor_program_create_and_upload( device, allocator, &ubo_data, 
&explicit_const_usage, compute_pipeline->base.layout, PVR_STAGE_ALLOCATION_COMPUTE, - &compute_pipeline->state.uniform.pds_code, - &compute_pipeline->state.uniform.pds_info); + &compute_pipeline->state.descriptor.pds_code, + &compute_pipeline->state.descriptor.pds_info); if (result != VK_SUCCESS) goto err_free_shader; @@ -1085,7 +1085,7 @@ static VkResult pvr_compute_pipeline_compile( &compute_pipeline->state.primary_program, &compute_pipeline->state.primary_program_info); if (result != VK_SUCCESS) - goto err_free_uniform_program; + goto err_free_descriptor_program; /* If the workgroup ID is required, then we require the base workgroup * variant of the PDS compute program as well. @@ -1118,8 +1118,8 @@ err_destroy_compute_program: &compute_pipeline->state.primary_program, &compute_pipeline->state.primary_program_info); -err_free_uniform_program: - pvr_bo_free(device, compute_pipeline->state.uniform.pds_code.pvr_bo); +err_free_descriptor_program: + pvr_bo_free(device, compute_pipeline->state.descriptor.pds_code.pvr_bo); err_free_shader: pvr_bo_free(device, compute_pipeline->state.shader.bo); @@ -1207,10 +1207,11 @@ static void pvr_compute_pipeline_destroy( allocator, &compute_pipeline->state.primary_program, &compute_pipeline->state.primary_program_info); - pvr_pds_uniform_program_destroy(device, - allocator, - &compute_pipeline->state.uniform.pds_code, - &compute_pipeline->state.uniform.pds_info); + pvr_pds_descriptor_program_destroy( + device, + allocator, + &compute_pipeline->state.descriptor.pds_code, + &compute_pipeline->state.descriptor.pds_info); pvr_bo_free(device, compute_pipeline->state.shader.bo); pvr_pipeline_finish(&compute_pipeline->base); @@ -1282,17 +1283,17 @@ pvr_graphics_pipeline_destroy(struct pvr_device *const device, const uint32_t num_vertex_attrib_programs = ARRAY_SIZE(gfx_pipeline->vertex_shader_state.pds_attrib_programs); - pvr_pds_uniform_program_destroy( + pvr_pds_descriptor_program_destroy( device, allocator, - 
&gfx_pipeline->fragment_shader_state.uniform_state.pds_code, - &gfx_pipeline->fragment_shader_state.uniform_state.pds_info); + &gfx_pipeline->fragment_shader_state.descriptor_state.pds_code, + &gfx_pipeline->fragment_shader_state.descriptor_state.pds_info); - pvr_pds_uniform_program_destroy( + pvr_pds_descriptor_program_destroy( device, allocator, - &gfx_pipeline->vertex_shader_state.uniform_state.pds_code, - &gfx_pipeline->vertex_shader_state.uniform_state.pds_info); + &gfx_pipeline->vertex_shader_state.descriptor_state.pds_code, + &gfx_pipeline->vertex_shader_state.descriptor_state.pds_info); for (uint32_t i = 0; i < num_vertex_attrib_programs; i++) { struct pvr_pds_attrib_program *const attrib_program = @@ -1583,15 +1584,15 @@ pvr_graphics_pipeline_compile(struct pvr_device *const device, if (result != VK_SUCCESS) goto err_free_frag_program; - result = pvr_pds_uniform_program_create_and_upload( + result = pvr_pds_descriptor_program_create_and_upload( device, allocator, &ctx->common_data[MESA_SHADER_VERTEX].ubo_data, &vert_explicit_const_usage, gfx_pipeline->base.layout, PVR_STAGE_ALLOCATION_VERTEX_GEOMETRY, - &gfx_pipeline->vertex_shader_state.uniform_state.pds_code, - &gfx_pipeline->vertex_shader_state.uniform_state.pds_info); + &gfx_pipeline->vertex_shader_state.descriptor_state.pds_code, + &gfx_pipeline->vertex_shader_state.descriptor_state.pds_info); if (result != VK_SUCCESS) goto err_free_vertex_attrib_program; @@ -1599,24 +1600,24 @@ pvr_graphics_pipeline_compile(struct pvr_device *const device, * scratch buffer for both vertex and fragment stage. * Figure out the best place to do this. */ - /* assert(pvr_pds_uniform_program_variables.temp_buff_total_size == 0); */ + /* assert(pvr_pds_descriptor_program_variables.temp_buff_total_size == 0); */ /* TODO: Implement spilling with the above. */ - /* TODO: Call pvr_pds_uniform_program_create_and_upload in a loop. */ + /* TODO: Call pvr_pds_descriptor_program_create_and_upload in a loop. 
*/ /* FIXME: For now we pass in the same explicit_const_usage since it contains * all invalid entries. Fix this by hooking it up to the compiler. */ - result = pvr_pds_uniform_program_create_and_upload( + result = pvr_pds_descriptor_program_create_and_upload( device, allocator, &ctx->common_data[MESA_SHADER_FRAGMENT].ubo_data, &frag_explicit_const_usage, gfx_pipeline->base.layout, PVR_STAGE_ALLOCATION_FRAGMENT, - &gfx_pipeline->fragment_shader_state.uniform_state.pds_code, - &gfx_pipeline->fragment_shader_state.uniform_state.pds_info); + &gfx_pipeline->fragment_shader_state.descriptor_state.pds_code, + &gfx_pipeline->fragment_shader_state.descriptor_state.pds_info); if (result != VK_SUCCESS) - goto err_free_vertex_uniform_program; + goto err_free_vertex_descriptor_program; ralloc_free(ctx); @@ -1624,12 +1625,12 @@ pvr_graphics_pipeline_compile(struct pvr_device *const device, return VK_SUCCESS; -err_free_vertex_uniform_program: - pvr_pds_uniform_program_destroy( +err_free_vertex_descriptor_program: + pvr_pds_descriptor_program_destroy( device, allocator, - &gfx_pipeline->vertex_shader_state.uniform_state.pds_code, - &gfx_pipeline->vertex_shader_state.uniform_state.pds_info); + &gfx_pipeline->vertex_shader_state.descriptor_state.pds_code, + &gfx_pipeline->vertex_shader_state.descriptor_state.pds_info); err_free_vertex_attrib_program: for (uint32_t i = 0; i < ARRAY_SIZE(gfx_pipeline->vertex_shader_state.pds_attrib_programs); diff --git a/src/imagination/vulkan/pvr_private.h b/src/imagination/vulkan/pvr_private.h index c33e40908b4..59db32b98cf 100644 --- a/src/imagination/vulkan/pvr_private.h +++ b/src/imagination/vulkan/pvr_private.h @@ -882,8 +882,8 @@ struct pvr_cmd_buffer_state { /* Address of data segment for vertex attrib upload program. 
*/ uint32_t pds_vertex_attrib_offset; - uint32_t pds_fragment_uniform_data_offset; - uint32_t pds_compute_uniform_data_offset; + uint32_t pds_fragment_descriptor_data_offset; + uint32_t pds_compute_descriptor_data_offset; }; static_assert( @@ -962,7 +962,7 @@ struct pvr_pipeline_cache { struct pvr_device *device; }; -struct pvr_stage_allocation_uniform_state { +struct pvr_stage_allocation_descriptor_state { struct pvr_pds_upload pds_code; /* Since we upload the code segment separately from the data segment * pds_code->data_size might be 0 whilst @@ -1017,7 +1017,7 @@ struct pvr_vertex_shader_state { struct pvr_pipeline_stage_state stage_state; /* FIXME: Move this into stage_state? */ - struct pvr_stage_allocation_uniform_state uniform_state; + struct pvr_stage_allocation_descriptor_state descriptor_state; uint32_t vertex_input_size; uint32_t vertex_output_size; uint32_t user_clip_planes_mask; @@ -1030,7 +1030,7 @@ struct pvr_fragment_shader_state { struct pvr_pipeline_stage_state stage_state; /* FIXME: Move this into stage_state? */ - struct pvr_stage_allocation_uniform_state uniform_state; + struct pvr_stage_allocation_descriptor_state descriptor_state; uint32_t pass_type; struct pvr_pds_upload pds_coeff_program; @@ -1072,7 +1072,7 @@ struct pvr_compute_pipeline { uint32_t base_workgroup : 1; } flags; - struct pvr_stage_allocation_uniform_state uniform; + struct pvr_stage_allocation_descriptor_state descriptor; struct pvr_pds_upload primary_program; struct pvr_pds_info primary_program_info;