diff --git a/src/gallium/drivers/iris/iris_query.c b/src/gallium/drivers/iris/iris_query.c
index 6a1ab643bcc..d039f86a6d1 100644
--- a/src/gallium/drivers/iris/iris_query.c
+++ b/src/gallium/drivers/iris/iris_query.c
@@ -712,7 +712,7 @@ iris_get_query_result_resource(struct pipe_context *ctx,
    bool predicated = !wait && !q->stalled;
 
    struct mi_builder b;
-   mi_builder_init(&b, batch);
+   mi_builder_init(&b, &batch->screen->devinfo, batch);
 
    iris_batch_sync_region_start(batch);
 
@@ -783,7 +783,7 @@ set_predicate_for_result(struct iris_context *ice,
    q->stalled = true;
 
    struct mi_builder b;
-   mi_builder_init(&b, batch);
+   mi_builder_init(&b, &batch->screen->devinfo, batch);
 
    struct mi_value result;
 
diff --git a/src/gallium/drivers/iris/iris_state.c b/src/gallium/drivers/iris/iris_state.c
index e805142c1ac..4e354a4ba3d 100644
--- a/src/gallium/drivers/iris/iris_state.c
+++ b/src/gallium/drivers/iris/iris_state.c
@@ -6647,7 +6647,7 @@ iris_upload_render_state(struct iris_context *ice,
 
          if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
             struct mi_builder b;
-            mi_builder_init(&b, batch);
+            mi_builder_init(&b, &batch->screen->devinfo, batch);
 
             /* comparison = draw id < draw count */
             struct mi_value comparison =
@@ -6733,7 +6733,7 @@ iris_upload_render_state(struct iris_context *ice,
                                    PIPE_CONTROL_CS_STALL);
 
       struct mi_builder b;
-      mi_builder_init(&b, batch);
+      mi_builder_init(&b, &batch->screen->devinfo, batch);
 
       struct iris_address addr =
          ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
diff --git a/src/intel/common/mi_builder.h b/src/intel/common/mi_builder.h
index e3d9b62df6a..9e76e11156a 100644
--- a/src/intel/common/mi_builder.h
+++ b/src/intel/common/mi_builder.h
@@ -24,6 +24,7 @@
 #ifndef MI_BUILDER_H
 #define MI_BUILDER_H
 
+#include "dev/gen_device_info.h"
 #include "genxml/genX_bits.h"
 #include "util/bitscan.h"
 #include "util/fast_idiv_by_const.h"
@@ -128,6 +129,7 @@ mi_adjust_reg_num(uint32_t reg)
 #endif
 
 struct mi_builder {
+   const struct gen_device_info *devinfo;
    __gen_user_data *user_data;
 
 #if GEN_VERSIONx10 >= 75
@@ -140,9 +142,12 @@
 };
 
 static inline void
-mi_builder_init(struct mi_builder *b, __gen_user_data *user_data)
+mi_builder_init(struct mi_builder *b,
+                const struct gen_device_info *devinfo,
+                __gen_user_data *user_data)
 {
    memset(b, 0, sizeof(*b));
+   b->devinfo = devinfo;
    b->user_data = user_data;
 
 #if GEN_VERSIONx10 >= 75
@@ -1168,7 +1173,7 @@ mi_self_mod_barrier(struct mi_builder *b)
     * but experiment show it doesn't work properly, so for now just get over
     * the CS prefetch.
     */
-   for (uint32_t i = 0; i < 128; i++)
+   for (uint32_t i = 0; i < (b->devinfo->cs_prefetch_size / 4); i++)
       mi_builder_emit(b, GENX(MI_NOOP), noop);
 }
 
diff --git a/src/intel/common/tests/mi_builder_test.cpp b/src/intel/common/tests/mi_builder_test.cpp
index 0d6d5a072d1..40ad9996736 100644
--- a/src/intel/common/tests/mi_builder_test.cpp
+++ b/src/intel/common/tests/mi_builder_test.cpp
@@ -282,7 +282,7 @@ mi_builder_test::SetUp()
    memset(data_map, 139, DATA_BO_SIZE);
    memset(&canary, 139, sizeof(canary));
 
-   mi_builder_init(&b, this);
+   mi_builder_init(&b, &devinfo, this);
 }
 
 void *
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index e887aa8f97f..22977f2e1f6 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -471,7 +471,7 @@ anv_image_init_aux_tt(struct anv_cmd_buffer *cmd_buffer,
    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
 
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
    for (uint32_t a = 0; a < layer_count; a++) {
       const uint32_t layer = base_layer + a;
@@ -769,7 +769,7 @@ anv_cmd_compute_resolve_predicate(struct anv_cmd_buffer *cmd_buffer,
                                   enum anv_fast_clear_type fast_clear_supported)
 {
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
    const struct mi_value fast_clear_type =
       mi_mem32(anv_image_get_fast_clear_type_addr(cmd_buffer->device,
@@ -852,7 +852,7 @@ anv_cmd_simple_resolve_predicate(struct anv_cmd_buffer *cmd_buffer,
                                  enum anv_fast_clear_type fast_clear_supported)
 {
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
    struct mi_value fast_clear_type_mem =
       mi_mem32(anv_image_get_fast_clear_type_addr(cmd_buffer->device,
@@ -1064,7 +1064,7 @@ genX(copy_fast_clear_dwords)(struct anv_cmd_buffer *cmd_buffer,
 #endif
 
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
    if (copy_from_surface_state) {
       mi_memcpy(&b, entry_addr, ss_clear_addr, copy_size);
@@ -1809,7 +1809,7 @@ genX(CmdExecuteCommands)(
           * regardless of conditional rendering being enabled in primary.
           */
          struct mi_builder b;
-         mi_builder_init(&b, &primary->batch);
+         mi_builder_init(&b, &primary->device->info, &primary->batch);
          mi_store(&b, mi_reg64(ANV_PREDICATE_RESULT_REG),
                       mi_imm(UINT64_MAX));
       }
@@ -3817,7 +3817,7 @@ void genX(CmdDrawIndirectByteCountEXT)(
    instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
 
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
    struct mi_value count =
       mi_mem32(anv_address_add(counter_buffer->address,
                                counterBufferOffset));
@@ -3847,7 +3847,7 @@ load_indirect_parameters(struct anv_cmd_buffer *cmd_buffer,
                          bool indexed)
 {
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
    mi_store(&b, mi_reg32(GEN7_3DPRIM_VERTEX_COUNT),
                 mi_mem32(anv_address_add(addr, 0)));
@@ -4084,7 +4084,7 @@ void genX(CmdDrawIndirectCount)(
    genX(cmd_buffer_flush_state)(cmd_buffer);
 
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
    struct anv_address count_address =
       anv_address_add(count_buffer->address, countBufferOffset);
    struct mi_value max =
@@ -4155,7 +4155,7 @@ void genX(CmdDrawIndexedIndirectCount)(
    genX(cmd_buffer_flush_state)(cmd_buffer);
 
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
    struct anv_address count_address =
       anv_address_add(count_buffer->address, countBufferOffset);
    struct mi_value max =
@@ -4631,7 +4631,7 @@ void genX(CmdDispatchIndirect)(
    genX(cmd_buffer_flush_compute_state)(cmd_buffer);
 
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
    struct mi_value size_x = mi_mem32(anv_address_add(addr, 0));
    struct mi_value size_y = mi_mem32(anv_address_add(addr, 4));
@@ -6073,7 +6073,7 @@ genX(cmd_emit_conditional_render_predicate)(struct anv_cmd_buffer *cmd_buffer)
 {
 #if GEN_VERSIONx10 >= 75
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
    mi_store(&b, mi_reg64(MI_PREDICATE_SRC0),
                 mi_reg32(ANV_PREDICATE_RESULT_REG));
@@ -6106,7 +6106,7 @@ void genX(CmdBeginConditionalRenderingEXT)(
    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
 
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
    /* Section 19.4 of the Vulkan 1.1.85 spec says:
     *
diff --git a/src/intel/vulkan/genX_query.c b/src/intel/vulkan/genX_query.c
index ab7857024b1..89d61857858 100644
--- a/src/intel/vulkan/genX_query.c
+++ b/src/intel/vulkan/genX_query.c
@@ -225,7 +225,7 @@ VkResult genX(CreateQueryPool)(
          };
          batch.next = batch.start;
 
-         mi_builder_init(&b, &batch);
+         mi_builder_init(&b, &device->info, &batch);
          mi_store(&b, mi_reg64(ANV_PERF_QUERY_OFFSET_REG),
                       mi_imm(p * pool->pass_size));
          anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);
@@ -747,7 +747,7 @@ void genX(CmdResetQueryPool)(
    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
       struct mi_builder b;
-      mi_builder_init(&b, &cmd_buffer->batch);
+      mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
       for (uint32_t i = 0; i < queryCount; i++)
          emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
@@ -757,7 +757,7 @@
 #if GEN_GEN >= 8
    case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
       struct mi_builder b;
-      mi_builder_init(&b, &cmd_buffer->batch);
+      mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
       for (uint32_t i = 0; i < queryCount; i++) {
          for (uint32_t p = 0; p < pool->n_passes; p++) {
@@ -773,7 +773,7 @@
 
    case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
       struct mi_builder b;
-      mi_builder_init(&b, &cmd_buffer->batch);
+      mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
       for (uint32_t i = 0; i < queryCount; i++)
          emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
@@ -911,7 +911,7 @@ void genX(CmdBeginQueryIndexedEXT)(
    struct anv_address query_addr = anv_query_address(pool, query);
 
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
    switch (pool->type) {
    case VK_QUERY_TYPE_OCCLUSION:
@@ -1088,7 +1088,7 @@ void genX(CmdEndQueryIndexedEXT)(
    struct anv_address query_addr = anv_query_address(pool, query);
 
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
    switch (pool->type) {
    case VK_QUERY_TYPE_OCCLUSION:
@@ -1252,7 +1252,7 @@ void genX(CmdWriteTimestamp)(
    assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
 
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
 
    switch (pipelineStage) {
    case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
@@ -1369,7 +1369,7 @@ void genX(CmdCopyQueryPoolResults)(
    ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
 
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->batch);
+   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
    struct mi_value result;
 
    /* If render target writes are ongoing, request a render target cache flush
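
Note (not part of the patch): a standalone sketch of the arithmetic behind the
mi_self_mod_barrier hunk above. Each MI_NOOP is a single 4-byte dword, so
padding past the command streamer prefetch takes cs_prefetch_size / 4 NOOPs;
the previously hardcoded 128 corresponds to 128 * 4 = 512 bytes of prefetch.
The struct name and the 512-byte value below are illustrative placeholders,
not Mesa definitions.

/* Standalone illustration (assumed names, not Mesa code): why the MI_NOOP
 * pad count becomes cs_prefetch_size / 4. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fake_devinfo {
   uint32_t cs_prefetch_size;   /* bytes prefetched ahead by the command streamer */
};

static uint32_t
noop_pad_count(const struct fake_devinfo *devinfo)
{
   return devinfo->cs_prefetch_size / 4;   /* one MI_NOOP is a 4-byte dword */
}

int
main(void)
{
   /* 512 is an illustrative value chosen so the result matches the old
    * hardcoded loop bound of 128 (128 NOOPs * 4 bytes = 512 bytes). */
   struct fake_devinfo info = { .cs_prefetch_size = 512 };
   assert(noop_pad_count(&info) == 128);
   printf("pad with %u MI_NOOPs\n", (unsigned)noop_pad_count(&info));
   return 0;
}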