swr: Handle resource across context changes

SWR caches framebuffer contents in tiles.  Those tiles are stored on a
per-context basis.

When switching between contexts that share resources, we need to make
sure that the tiles of the old context are stored back to the resource
and that the tiles of the new context are invalidated (marked as
invalid, so their contents get reloaded).

The context does not get any dirty bits that identify this case, so it
has to be coordinated through the resources that are shared between the
contexts.

Add a "curr_pipe" hook in swr_resource that will allow us to identify a
MakeCurrent of the above form during swr_update_derived().  At that time,
we invalidate the tiles of the new context.  The old context, will need to
have already store its tiles by that time, which happens during glFlush().
glFlush() is being called at the beginning of MakeCurrent.

So the sequence of operations is (a standalone sketch follows the list):
- At the beginning of glXMakeCurrent(), glFlush() stores the tiles of
  all bound surfaces of the old context.
- After the store, a fence guarantees that all tile stores have reached
  the surface.
- During swr_update_derived(), when the new context is validated, we
  check each bound resource for a context change and, if one occurred,
  invalidate the current tiles.
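
As an illustration only, here is a minimal standalone sketch of the
ownership-tracking idea behind curr_pipe.  The types and names used here
(struct context, struct resource, validate_resource) are hypothetical and
not part of the driver; the real check lives in
swr_invalidate_buffers_after_ctx_change() in the diff below.

   /* Standalone sketch (hypothetical types/names, not driver code): a
    * shared resource remembers the last context that validated it; when a
    * different context validates it while writes are pending, that
    * context's cached tiles are stale and must be invalidated. */
   #include <stdbool.h>
   #include <stddef.h>
   #include <stdio.h>

   struct context { int id; };

   struct resource {
      struct context *curr_owner; /* analogous to swr_resource::curr_pipe */
      bool pending_writes;        /* analogous to SWR_RESOURCE_WRITE */
   };

   /* Analogous to the per-resource check done at validation time: returns
    * true if the calling context must invalidate its cached tiles. */
   static bool
   validate_resource(struct context *ctx, struct resource *res)
   {
      bool invalidate = (res->curr_owner != ctx) && res->pending_writes;
      res->curr_owner = ctx;
      return invalidate;
   }

   int main(void)
   {
      struct context a = { 1 }, b = { 2 };
      struct resource shared = { NULL, false };

      printf("A, first use: invalidate=%d\n", validate_resource(&a, &shared));
      shared.pending_writes = true; /* context A renders into the resource */
      printf("B, after A:   invalidate=%d\n", validate_resource(&b, &shared));
      return 0;
   }

Running this prints invalidate=0 for the first use and invalidate=1 when
the second context takes over a resource with pending writes.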

Fixes rendering problems with CEI/Ensight.

Reviewed-by: Bruce Cherniak <bruce.cherniak@intel.com>
George Kyriazis, 2017-09-13 21:06:44 -05:00
commit b9aa0fa7d6 (parent 016de7e155)
4 changed files with 69 additions and 9 deletions


@@ -365,10 +365,20 @@ swr_destroy(struct pipe_context *pipe)
       util_blitter_destroy(ctx->blitter);
 
    for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
-      pipe_surface_reference(&ctx->framebuffer.cbufs[i], NULL);
+      if (ctx->framebuffer.cbufs[i]) {
+         struct swr_resource *res = swr_resource(ctx->framebuffer.cbufs[i]->texture);
+         /* NULL curr_pipe, so we don't have a reference to a deleted pipe */
+         res->curr_pipe = NULL;
+         pipe_surface_reference(&ctx->framebuffer.cbufs[i], NULL);
+      }
    }
 
-   pipe_surface_reference(&ctx->framebuffer.zsbuf, NULL);
+   if (ctx->framebuffer.zsbuf) {
+      struct swr_resource *res = swr_resource(ctx->framebuffer.zsbuf->texture);
+      /* NULL curr_pipe, so we don't have a reference to a deleted pipe */
+      res->curr_pipe = NULL;
+      pipe_surface_reference(&ctx->framebuffer.zsbuf, NULL);
+   }
 
    for (unsigned i = 0; i < ARRAY_SIZE(ctx->sampler_views[0]); i++) {
       pipe_sampler_view_reference(&ctx->sampler_views[PIPE_SHADER_FRAGMENT][i], NULL);


@@ -239,14 +239,17 @@ swr_flush(struct pipe_context *pipe,
 {
    struct swr_context *ctx = swr_context(pipe);
    struct swr_screen *screen = swr_screen(pipe->screen);
-   struct pipe_surface *cb = ctx->framebuffer.cbufs[0];
 
-   /* If the current renderTarget is the display surface, store tiles back to
-    * the surface, in preparation for present (swr_flush_frontbuffer).
-    * Other renderTargets get stored back when attachment changes or
-    * swr_surface_destroy */
-   if (cb && swr_resource(cb->texture)->display_target)
-      swr_store_dirty_resource(pipe, cb->texture, SWR_TILE_RESOLVED);
+   for (int i=0; i < ctx->framebuffer.nr_cbufs; i++) {
+      struct pipe_surface *cb = ctx->framebuffer.cbufs[i];
+      if (cb) {
+         swr_store_dirty_resource(pipe, cb->texture, SWR_TILE_RESOLVED);
+      }
+   }
+   if (ctx->framebuffer.zsbuf) {
+      swr_store_dirty_resource(pipe, ctx->framebuffer.zsbuf->texture,
+                               SWR_TILE_RESOLVED);
+   }
 
    if (fence)
       swr_fence_reference(pipe->screen, fence, screen->flush_fence);


@@ -54,6 +54,9 @@ struct swr_resource {
    size_t secondary_mip_offsets[PIPE_MAX_TEXTURE_LEVELS];
 
    enum swr_resource_status status;
+
+   /* last pipe that used (validated) this resource */
+   struct pipe_context *curr_pipe;
 };


@@ -953,6 +953,47 @@ swr_change_rt(struct swr_context *ctx,
    return need_fence;
 }
 
+/*
+ * for cases where resources are shared between contexts, invalidate
+ * this ctx's resource. so it can be fetched fresh.  Old ctx's resource
+ * is already stored during a flush
+ */
+static inline void
+swr_invalidate_buffers_after_ctx_change(struct pipe_context *pipe)
+{
+   struct swr_context *ctx = swr_context(pipe);
+
+   for (uint32_t i = 0; i < ctx->framebuffer.nr_cbufs; i++) {
+      struct pipe_surface *cb = ctx->framebuffer.cbufs[i];
+      if (cb) {
+         struct swr_resource *res = swr_resource(cb->texture);
+         if (res->curr_pipe != pipe) {
+            /* if curr_pipe is NULL (first use), status should not be WRITE */
+            assert(res->curr_pipe || !(res->status & SWR_RESOURCE_WRITE));
+            if (res->status & SWR_RESOURCE_WRITE) {
+               swr_invalidate_render_target(pipe, i, cb->width, cb->height);
+            }
+         }
+         res->curr_pipe = pipe;
+      }
+   }
+   if (ctx->framebuffer.zsbuf) {
+      struct pipe_surface *zb = ctx->framebuffer.zsbuf;
+      if (zb) {
+         struct swr_resource *res = swr_resource(zb->texture);
+         if (res->curr_pipe != pipe) {
+            /* if curr_pipe is NULL (first use), status should not be WRITE */
+            assert(res->curr_pipe || !(res->status & SWR_RESOURCE_WRITE));
+            if (res->status & SWR_RESOURCE_WRITE) {
+               swr_invalidate_render_target(pipe, SWR_ATTACHMENT_DEPTH, zb->width, zb->height);
+               swr_invalidate_render_target(pipe, SWR_ATTACHMENT_STENCIL, zb->width, zb->height);
+            }
+         }
+         res->curr_pipe = pipe;
+      }
+   }
+}
+
 static inline void
 swr_user_vbuf_range(const struct pipe_draw_info *info,
                     const struct swr_vertex_element_state *velems,

@@ -1040,6 +1081,9 @@ swr_update_derived(struct pipe_context *pipe,
    /* For example, user_buffer vertex and index buffers. */
    unsigned post_update_dirty_flags = 0;
 
+   /* bring resources that changed context up-to-date */
+   swr_invalidate_buffers_after_ctx_change(pipe);
+
    /* Render Targets */
    if (ctx->dirty & SWR_NEW_FRAMEBUFFER) {
       struct pipe_framebuffer_state *fb = &ctx->framebuffer;