zink: explicitly unset unordered_exec when resource can't be unordered

ensure that subsequent commands can't be promoted

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17667>
Author: Mike Blumenkrantz, 2022-07-14 12:26:55 -04:00 (committed by Marge Bot)
parent e5c4c33fa6
commit f0f0611f40
5 changed files with 36 additions and 6 deletions
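
Every hunk in this commit applies the same pattern: when a resource is consumed by a command that must execute in submission order (a blit, a buffer fill, a descriptor bind, a draw-time barrier, a query copy), its unordered_exec flag is explicitly unset so that later commands touching that resource cannot be promoted ahead of it. As a rough sketch of how such a flag is typically consumed — hypothetical code, not zink's actual promotion logic; every name except unordered_exec is invented:

#include <stdbool.h>

/* hypothetical mirror of the flag this commit clears */
struct object { bool unordered_exec; };
struct resource { struct object *obj; };

/* A reordering pass may hoist a command out of submission order only
 * if every resource it touches still allows unordered execution. */
static bool
can_promote(struct resource **used, unsigned count)
{
   for (unsigned i = 0; i < count; i++) {
      if (!used[i]->obj->unordered_exec)
         return false; /* pinned to ordered execution by a prior use */
   }
   return true;
}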

src/gallium/drivers/zink/zink_blit.c

@@ -372,6 +372,8 @@ zink_blit(struct pipe_context *pctx,
       util_blitter_blit(ctx->blitter, info);
    }
 end:
+   src->obj->unordered_exec = false;
+   dst->obj->unordered_exec = false;
    if (needs_present_readback)
       zink_kopper_present_readback(ctx, src);
 }

src/gallium/drivers/zink/zink_clear.c

@@ -498,6 +498,7 @@ zink_clear_buffer(struct pipe_context *pctx,
       zink_batch_reference_resource_rw(batch, res, true);
       util_range_add(&res->base.b, &res->valid_buffer_range, offset, offset + size);
       zink_resource_buffer_barrier(ctx, res, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
+      res->obj->unordered_exec = false;
       VKCTX(CmdFillBuffer)(batch->state->cmdbuf, res->obj->buffer, offset, size, *(uint32_t*)clear_value);
       return;
    }
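
The fill path above shows the two-step shape this commit repeats everywhere: emit the barrier for the new access, then pin the resource before the command is recorded into the ordered command buffer. A minimal compilable sketch of that shape, with stub helpers standing in for the real zink calls:

#include <stdbool.h>

struct object { bool unordered_exec; };
struct resource { struct object *obj; };
struct context;

/* stubs for illustration only */
static void emit_barrier(struct context *ctx, struct resource *res) { (void)ctx; (void)res; }
static void record_fill(struct context *ctx, struct resource *res) { (void)ctx; (void)res; }

static void
ordered_write(struct context *ctx, struct resource *res)
{
   emit_barrier(ctx, res);           /* synchronize the write access */
   res->obj->unordered_exec = false; /* later commands may not jump ahead */
   record_fill(ctx, res);            /* recorded in submission order */
}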

src/gallium/drivers/zink/zink_context.c

@@ -1144,6 +1144,7 @@ zink_set_vertex_buffers(struct pipe_context *pctx,
          /* always barrier before possible rebind */
          zink_resource_buffer_barrier(ctx, res, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                                       VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);
+         res->obj->unordered_exec = false;
       } else {
          enabled_buffers &= ~BITFIELD_BIT(start_slot + i);
       }
@@ -1295,6 +1296,7 @@ zink_set_constant_buffer(struct pipe_context *pctx,
          zink_batch_resource_usage_set(&ctx->batch, new_res, false);
          zink_resource_buffer_barrier(ctx, new_res, VK_ACCESS_UNIFORM_READ_BIT,
                                       new_res->gfx_barrier);
+         new_res->obj->unordered_exec = false;
       }
       update |= ((index || zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY) && ctx->ubos[shader][index].buffer_offset != offset) ||
                 !!res != !!buffer || (res && res->obj->buffer != new_res->obj->buffer) ||
@@ -1415,6 +1417,7 @@ zink_set_shader_buffers(struct pipe_context *pctx,
          update = true;
          max_slot = MAX2(max_slot, start_slot + i);
          update_descriptor_state_ssbo(ctx, p_stage, start_slot + i, new_res);
+         new_res->obj->unordered_exec = false;
       } else {
          update = !!res;
          ssbo->buffer_offset = 0;
@@ -1643,6 +1646,7 @@ zink_set_shader_images(struct pipe_context *pctx,
                               zink_resource_access_is_write(access));
          update = true;
          update_descriptor_state_image(ctx, p_stage, start_slot + i, res);
+         res->obj->unordered_exec = false;
          res->image_binds[p_stage] |= BITFIELD_BIT(start_slot + i);
       } else if (image_view->base.resource) {
          update = true;
@@ -1765,6 +1769,7 @@ zink_set_sampler_views(struct pipe_context *pctx,
          }
          res->sampler_binds[shader_type] |= BITFIELD_BIT(start_slot + i);
          zink_batch_resource_usage_set(&ctx->batch, res, false);
+         res->obj->unordered_exec = false;
       } else if (a) {
          unbind_samplerview(ctx, shader_type, start_slot + i);
          update = true;
@@ -1929,6 +1934,7 @@ zink_make_texture_handle_resident(struct pipe_context *pctx, uint64_t handle, bo
       util_dynarray_append(&ctx->di.bindless[0].resident, struct zink_bindless_descriptor *, bd);
       uint32_t h = is_buffer ? handle + ZINK_MAX_BINDLESS_HANDLES : handle;
       util_dynarray_append(&ctx->di.bindless[0].updates, uint32_t, h);
+      res->obj->unordered_exec = false;
    } else {
       zero_bindless_descriptor(ctx, handle, is_buffer, false);
       util_dynarray_delete_unordered(&ctx->di.bindless[0].resident, struct zink_bindless_descriptor *, bd);
@@ -2047,6 +2053,7 @@ zink_make_image_handle_resident(struct pipe_context *pctx, uint64_t handle, unsi
       util_dynarray_append(&ctx->di.bindless[1].resident, struct zink_bindless_descriptor *, bd);
       uint32_t h = is_buffer ? handle + ZINK_MAX_BINDLESS_HANDLES : handle;
       util_dynarray_append(&ctx->di.bindless[1].updates, uint32_t, h);
+      res->obj->unordered_exec = false;
    } else {
       zero_bindless_descriptor(ctx, handle, is_buffer, true);
       util_dynarray_delete_unordered(&ctx->di.bindless[1].resident, struct zink_bindless_descriptor *, bd);
@@ -2433,6 +2440,7 @@ zink_prep_fb_attachment(struct zink_context *ctx, struct zink_surface *surf, uns
       layout = zink_render_pass_attachment_get_barrier_info(&rt, i < ctx->fb_state.nr_cbufs, &pipeline, &access);
    }
    zink_resource_image_barrier(ctx, res, layout, access, pipeline);
+   res->obj->unordered_exec = false;
    if (i == ctx->fb_state.nr_cbufs && res->sampler_bind_count[0])
       update_res_sampler_layouts(ctx, res);
    return surf->image_view;
@@ -2563,6 +2571,7 @@ update_resource_refs_for_stage(struct zink_context *ctx, enum pipe_shader_type s
             continue;
          bool is_write = zink_resource_access_is_write(get_access_flags_for_binding(ctx, i, stage, j));
          zink_batch_resource_usage_set(batch, res, is_write);
+         res->obj->unordered_exec = false;
 
          struct zink_sampler_view *sv = zink_sampler_view(ctx->sampler_views[stage][j]);
          struct zink_sampler_state *sampler_state = ctx->sampler_states[stage][j];
@@ -2604,8 +2613,11 @@ zink_update_descriptor_refs(struct zink_context *ctx, bool compute)
       unsigned vertex_buffers_enabled_mask = ctx->gfx_pipeline_state.vertex_buffers_enabled_mask;
       unsigned last_vbo = util_last_bit(vertex_buffers_enabled_mask);
       for (unsigned i = 0; i < last_vbo + 1; i++) {
-         if (ctx->vertex_buffers[i].buffer.resource)
-            zink_batch_resource_usage_set(batch, zink_resource(ctx->vertex_buffers[i].buffer.resource), false);
+         struct zink_resource *res = zink_resource(ctx->vertex_buffers[i].buffer.resource);
+         if (res) {
+            zink_batch_resource_usage_set(batch, res, false);
+            res->obj->unordered_exec = false;
+         }
       }
       if (ctx->curr_program)
          zink_batch_reference_program(batch, &ctx->curr_program->base);
@@ -2616,6 +2628,7 @@ zink_update_descriptor_refs(struct zink_context *ctx, bool compute)
       util_dynarray_foreach(&ctx->di.bindless[i].resident, struct zink_bindless_descriptor*, bd) {
          struct zink_resource *res = zink_descriptor_surface_resource(&(*bd)->ds);
          zink_batch_resource_usage_set(&ctx->batch, res, (*bd)->access & PIPE_IMAGE_ACCESS_WRITE);
+         res->obj->unordered_exec = false;
       }
    }
 }
@@ -3890,6 +3903,7 @@ zink_set_stream_output_targets(struct pipe_context *pctx,
          if (so) {
             so->so_bind_count++;
             update_res_bind_count(ctx, so, false, false);
+            so->obj->unordered_exec = false;
          }
       }
       for (unsigned i = num_targets; i < ctx->num_so_targets; i++) {
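
The zink_context.c hunks above all pin at bind or barrier time: once a buffer or image is visible to ordered work (a descriptor bind, a framebuffer attachment, a resident bindless handle), a later write to it must not be hoisted ahead of the commands that read it. A compilable sketch of the hazard being prevented — every name except unordered_exec is invented:

#include <stdbool.h>

struct object { bool unordered_exec; };
struct resource { struct object *obj; };

static void bind_for_draw(struct resource *res)
{
   /* binding makes res visible to subsequent ordered draws */
   res->obj->unordered_exec = false;
}

static bool may_hoist_write(const struct resource *res)
{
   /* a transfer write may only run early while res is unpinned;
    * after bind_for_draw() it must stay in submission order */
   return res->obj->unordered_exec;
}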

src/gallium/drivers/zink/zink_draw.cpp

@@ -40,6 +40,7 @@ zink_emit_xfb_counter_barrier(struct zink_context *ctx)
          stage |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
       }
       zink_resource_buffer_barrier(ctx, res, access, stage);
+      res->obj->unordered_exec = false;
    }
 }
@ -85,6 +86,7 @@ check_buffer_barrier(struct zink_context *ctx, struct pipe_resource *pres, VkAcc
{
struct zink_resource *res = zink_resource(pres);
zink_resource_buffer_barrier(ctx, res, flags, pipeline);
res->obj->unordered_exec = false;
}
ALWAYS_INLINE static void
@@ -343,6 +345,7 @@ update_barriers(struct zink_context *ctx, bool is_compute,
          if (layout != res->layout)
             zink_resource_image_barrier(ctx, res, layout, res->barrier_access[is_compute], pipeline);
       }
+      res->obj->unordered_exec = false;
       /* always barrier on draw if this resource has either multiple image write binds or
        * image write binds and image read binds
        */
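
Note that in update_barriers the new line lands outside the layout check: the flag is unset even when no barrier needs to be recorded, because the draw still consumes the resource in submission order. Reduced to a sketch with placeholder types:

#include <stdbool.h>

struct object { bool unordered_exec; };
struct resource { struct object *obj; int layout; };

static void image_barrier(struct resource *res, int layout) { res->layout = layout; }

static void
update_one(struct resource *res, int wanted_layout)
{
   if (res->layout != wanted_layout)
      image_barrier(res, wanted_layout);
   res->obj->unordered_exec = false; /* unconditional: the draw reads it in order */
}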
@@ -477,9 +480,12 @@ zink_draw(struct pipe_context *pctx,
        */
       for (unsigned i = 0; i < ctx->num_so_targets; i++) {
          struct zink_so_target *t = (struct zink_so_target *)ctx->so_targets[i];
-         if (t)
-            zink_resource_buffer_barrier(ctx, zink_resource(t->base.buffer),
+         if (t) {
+            struct zink_resource *res = zink_resource(t->base.buffer);
+            zink_resource_buffer_barrier(ctx, res,
                                          VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT, VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT);
+            res->obj->unordered_exec = false;
+         }
       }
    }
 }
@@ -491,10 +497,13 @@ zink_draw(struct pipe_context *pctx,
    /* ensure synchronization between doing streamout with counter buffer
    * and using counter buffer for indirect draw
    */
-   if (so_target && so_target->counter_buffer_valid)
-      zink_resource_buffer_barrier(ctx, zink_resource(so_target->counter_buffer),
+   if (so_target && so_target->counter_buffer_valid) {
+      struct zink_resource *res = zink_resource(so_target->counter_buffer);
+      zink_resource_buffer_barrier(ctx, res,
                                    VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT,
                                    VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT);
+      res->obj->unordered_exec = false;
+   }
 
    zink_query_update_gs_states(ctx, dinfo->was_line_loop);
@@ -766,6 +775,7 @@ zink_draw(struct pipe_context *pctx,
          struct zink_resource *res = zink_resource(t->counter_buffer);
          t->stride = ctx->last_vertex_stage->sinfo.so_info.stride[i] * sizeof(uint32_t);
          zink_batch_reference_resource_rw(batch, res, true);
+         res->obj->unordered_exec = false;
          if (t->counter_buffer_valid) {
             counter_buffers[i] = res->obj->buffer;
             counter_buffer_offsets[i] = t->counter_buffer_offset;
@@ -882,6 +892,7 @@ zink_draw_vertex_state(struct pipe_context *pctx,
    struct zink_resource *res = zink_resource(vstate->input.vbuffer.buffer.resource);
    zink_resource_buffer_barrier(ctx, res, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                                 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);
+   res->obj->unordered_exec = false;
    struct zink_vertex_elements_hw_state *hw_state = ctx->gfx_pipeline_state.element_state;
    ctx->gfx_pipeline_state.element_state = &((struct zink_vertex_state*)vstate)->velems.hw_state;
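
The two stream-output hunks in zink_draw above also turn single-statement if bodies into blocks so the looked-up resource can be pinned next to its barrier. Independent of zink, the shape of that refactor (placeholder names throughout):

#include <stdbool.h>
#include <stddef.h>

struct object { bool unordered_exec; };
struct resource { struct object *obj; };
struct target { struct resource *buffer; };

static void barrier(struct resource *res) { (void)res; }

static void
barrier_and_pin(struct target *t)
{
   if (t) {
      struct resource *res = t->buffer;  /* lookup hoisted into a local */
      barrier(res);
      res->obj->unordered_exec = false;  /* pin shares the NULL check */
   }
}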

src/gallium/drivers/zink/zink_query.c

@@ -732,6 +732,7 @@ copy_pool_results_to_buffer(struct zink_context *ctx, struct zink_query *query,
    zink_resource_buffer_barrier(ctx, res, VK_ACCESS_TRANSFER_WRITE_BIT, 0);
    util_range_add(&res->base.b, &res->valid_buffer_range, offset, offset + result_size);
    assert(query_id < NUM_QUERIES);
+   res->obj->unordered_exec = false;
    VKCTX(CmdCopyQueryPoolResults)(batch->state->cmdbuf, pool, query_id, num_results, res->obj->buffer,
                                   offset, base_result_size, flags);
 }
@@ -1134,6 +1135,7 @@ zink_start_conditional_render(struct zink_context *ctx)
    begin_info.sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT;
    begin_info.buffer = ctx->render_condition.query->predicate->obj->buffer;
    begin_info.flags = begin_flags;
+   ctx->render_condition.query->predicate->obj->unordered_exec = false;
    VKCTX(CmdBeginConditionalRenderingEXT)(batch->state->cmdbuf, &begin_info);
    zink_batch_reference_resource_rw(batch, ctx->render_condition.query->predicate, false);
    ctx->render_condition.active = true;
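
The predicate buffer is read by the conditional-rendering begin command on the ordered command buffer, which is why it gets pinned before recording. For reference, the plain-Vulkan shape of what VKCTX(CmdBeginConditionalRenderingEXT) records — an illustrative sketch, not zink code:

#include <vulkan/vulkan.h>

static void
begin_conditional(VkCommandBuffer cmdbuf, VkBuffer predicate, VkDeviceSize offset)
{
   VkConditionalRenderingBeginInfoEXT info = {
      .sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT,
      .buffer = predicate, /* holds the 32-bit predicate value */
      .offset = offset,
      .flags  = 0,         /* or VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT */
   };
   /* entry point provided by VK_EXT_conditional_rendering */
   vkCmdBeginConditionalRenderingEXT(cmdbuf, &info);
}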