iris: Remove render cache hash table-based synchronization.

The render cache hash table is now *mostly* redundant with the more
general seqno matrix-based cache tracking mechanism.  Most hash table
operations are now gone, except for the format mismatch checks done in
iris_cache_flush_for_render().  The redundant code is removed as a
separate patch for the sake of bisectability.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3875>
commit 74c774dce9
parent aa78d05a23
Author: Francisco Jerez
Date:   2020-02-18 22:39:43 -08:00
Committed by: Marge Bot

5 changed files with 7 additions and 59 deletions
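As background for the mechanism the message refers to, here is a minimal, self-contained C sketch of seqno-based cache tracking. It is an illustration, not the actual iris implementation: the names (sketch_bo, sketch_batch, bump_seqno, barrier_for, the SKETCH_DOMAIN_* enum) are hypothetical. The idea is that every buffer records the last batch sequence number at which each cache domain accessed it, and a flush is emitted only when a conflicting access is newer than that domain's last flush; the real driver entry points visible in the diffs below are iris_bo_bump_seqno() and iris_emit_buffer_barrier_for().

/* Minimal sketch of seqno-based cache tracking; hypothetical names,
 * not the actual iris implementation. */
#include <stdint.h>
#include <stdio.h>

enum sketch_domain {
   SKETCH_DOMAIN_RENDER_WRITE,
   SKETCH_DOMAIN_DEPTH_WRITE,
   SKETCH_DOMAIN_OTHER_READ,
   SKETCH_NUM_DOMAINS,
};

struct sketch_bo {
   /* Last sequence number at which each domain accessed this BO. */
   uint64_t last_seqno[SKETCH_NUM_DOMAINS];
};

struct sketch_batch {
   uint64_t next_seqno;
   /* Seqno up to which each domain's accesses are already flushed. */
   uint64_t coherent_seqno[SKETCH_NUM_DOMAINS];
};

/* Record an access by a domain, analogous to iris_bo_bump_seqno(). */
static void
bump_seqno(struct sketch_bo *bo, struct sketch_batch *batch,
           enum sketch_domain d)
{
   bo->last_seqno[d] = batch->next_seqno++;
}

/* Emit a flush only if another domain touched the BO more recently than
 * that domain was last flushed, then record the new access.  This is the
 * role iris_emit_buffer_barrier_for() plays in the diffs below. */
static void
barrier_for(struct sketch_batch *batch, struct sketch_bo *bo,
            enum sketch_domain access)
{
   for (int d = 0; d < SKETCH_NUM_DOMAINS; d++) {
      if (d != access && bo->last_seqno[d] > batch->coherent_seqno[d]) {
         printf("flush needed: domain %d has unflushed work\n", d);
         batch->coherent_seqno[d] = batch->next_seqno;
      }
   }
   bump_seqno(bo, batch, access);
}

int
main(void)
{
   struct sketch_batch batch = { .next_seqno = 1 };
   struct sketch_bo bo = {{ 0 }};

   bump_seqno(&bo, &batch, SKETCH_DOMAIN_RENDER_WRITE); /* render into bo */
   barrier_for(&batch, &bo, SKETCH_DOMAIN_OTHER_READ);  /* emits a flush */
   barrier_for(&batch, &bo, SKETCH_DOMAIN_OTHER_READ);  /* no flush */
   return 0;
}

Because the stale-write check reduces to a constant-time seqno comparison per domain, no per-BO hash table bookkeeping is needed, which is what lets iris_render_cache_add_bo() and iris_cache_sets_clear() be deleted below.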

src/gallium/drivers/iris/iris_batch.c

@@ -399,8 +399,6 @@ iris_batch_reset(struct iris_batch *batch)
    iris_batch_add_syncobj(batch, syncobj, I915_EXEC_FENCE_SIGNAL);
    iris_syncobj_reference(screen, &syncobj, NULL);
 
-   iris_cache_sets_clear(batch);
-
    assert(!batch->sync_region_depth);
    iris_batch_sync_boundary(batch);
    iris_batch_mark_reset_sync(batch);

src/gallium/drivers/iris/iris_blorp.c

@@ -367,12 +367,6 @@ iris_blorp_exec(struct blorp_batch *blorp_batch,
    ice->state.dirty |= ~skip_bits;
    ice->state.stage_dirty |= ~skip_stage_bits;
 
-   if (params->dst.enabled) {
-      iris_render_cache_add_bo(batch, params->dst.addr.buffer,
-                               params->dst.view.format,
-                               params->dst.aux_usage);
-   }
-
    if (params->src.enabled)
       iris_bo_bump_seqno(params->src.addr.buffer, batch->next_seqno,
                          IRIS_DOMAIN_OTHER_READ);

src/gallium/drivers/iris/iris_context.h

@@ -910,17 +910,12 @@ void iris_predraw_resolve_framebuffer(struct iris_context *ice,
                                       bool *draw_aux_buffer_disabled);
 void iris_postdraw_update_resolve_tracking(struct iris_context *ice,
                                            struct iris_batch *batch);
-void iris_cache_sets_clear(struct iris_batch *batch);
 void iris_flush_depth_and_render_caches(struct iris_batch *batch);
 void iris_cache_flush_for_read(struct iris_batch *batch, struct iris_bo *bo);
 void iris_cache_flush_for_render(struct iris_batch *batch,
                                  struct iris_bo *bo,
                                  enum isl_format format,
                                  enum isl_aux_usage aux_usage);
-void iris_render_cache_add_bo(struct iris_batch *batch,
-                              struct iris_bo *bo,
-                              enum isl_format format,
-                              enum isl_aux_usage aux_usage);
 void iris_cache_flush_for_depth(struct iris_batch *batch, struct iris_bo *bo);
 int iris_get_driver_query_info(struct pipe_screen *pscreen, unsigned index,
                                struct pipe_driver_query_info *info);

src/gallium/drivers/iris/iris_pipe_control.c

@@ -306,8 +306,7 @@ iris_texture_barrier(struct pipe_context *ctx, unsigned flags)
    struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];
    struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE];
 
-   if (render_batch->contains_draw ||
-       render_batch->cache.render->entries) {
+   if (render_batch->contains_draw) {
       iris_batch_maybe_flush(render_batch, 48);
       iris_emit_pipe_control_flush(render_batch,
                                    "API: texture barrier (1/2)",
@@ -353,8 +352,7 @@ iris_memory_barrier(struct pipe_context *ctx, unsigned flags)
    }
 
    for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-      if (ice->batches[i].contains_draw ||
-          ice->batches[i].cache.render->entries) {
+      if (ice->batches[i].contains_draw) {
          iris_batch_maybe_flush(&ice->batches[i], 24);
          iris_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",
                                       bits);

src/gallium/drivers/iris/iris_resolve.c

@@ -319,9 +319,6 @@ iris_postdraw_update_resolve_tracking(struct iris_context *ice,
       struct iris_resource *res = (void *) surf->base.texture;
       enum isl_aux_usage aux_usage = ice->state.draw_aux_usage[i];
 
-      iris_render_cache_add_bo(batch, res->bo, surf->view.format,
-                               aux_usage);
-
       if (may_have_resolved_color) {
          union pipe_surface_desc *desc = &surf->base.u;
          unsigned num_layers =
@@ -333,16 +330,6 @@ iris_postdraw_update_resolve_tracking(struct iris_context *ice,
    }
 }
 
-/**
- * Clear the cache-tracking sets.
- */
-void
-iris_cache_sets_clear(struct iris_batch *batch)
-{
-   hash_table_foreach(batch->cache.render, render_entry)
-      _mesa_hash_table_remove(batch->cache.render, render_entry);
-}
-
 /**
  * Emits an appropriate flush for a BO if it has been rendered to within the
  * same batchbuffer as a read that's about to be emitted.
@@ -368,17 +355,12 @@ iris_flush_depth_and_render_caches(struct iris_batch *batch)
                                 "cache tracker: render-to-texture",
                                 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                 PIPE_CONTROL_CONST_CACHE_INVALIDATE);
-
-   iris_cache_sets_clear(batch);
 }
 
 void
 iris_cache_flush_for_read(struct iris_batch *batch,
                           struct iris_bo *bo)
 {
-   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo))
-      iris_flush_depth_and_render_caches(batch);
-
    iris_emit_buffer_barrier_for(batch, bo, IRIS_DOMAIN_OTHER_READ);
 }
 
@@ -421,38 +403,19 @@ iris_cache_flush_for_render(struct iris_batch *batch,
     */
    struct hash_entry *entry =
       _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
-   if (entry && entry->data != format_aux_tuple(format, aux_usage))
+   if (!entry) {
+      _mesa_hash_table_insert_pre_hashed(batch->cache.render, bo->hash, bo,
+                                         format_aux_tuple(format, aux_usage));
+   } else if (entry->data != format_aux_tuple(format, aux_usage)) {
       iris_flush_depth_and_render_caches(batch);
-}
-
-void
-iris_render_cache_add_bo(struct iris_batch *batch,
-                         struct iris_bo *bo,
-                         enum isl_format format,
-                         enum isl_aux_usage aux_usage)
-{
-#ifndef NDEBUG
-   struct hash_entry *entry =
-      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
-
-   if (entry) {
-      /* Otherwise, someone didn't do a flush_for_render and that would be
-       * very bad indeed.
-       */
-      assert(entry->data == format_aux_tuple(format, aux_usage));
+      entry->data = format_aux_tuple(format, aux_usage);
    }
-#endif
-
-   _mesa_hash_table_insert_pre_hashed(batch->cache.render, bo->hash, bo,
-                                      format_aux_tuple(format, aux_usage));
 }
 
 void
 iris_cache_flush_for_depth(struct iris_batch *batch,
                            struct iris_bo *bo)
 {
-   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo))
-      iris_flush_depth_and_render_caches(batch);
-
    iris_emit_buffer_barrier_for(batch, bo, IRIS_DOMAIN_DEPTH_WRITE);
 }