vl: use a separate context for shader based decode v2

This makes VDPAU thread safe again.

v2: fix some memory leaks reported by Aaron Watry.

Signed-off-by: Christian König <christian.koenig@amd.com>
This commit is contained in:
Christian König 2013-11-03 15:19:00 +01:00
parent cb3c57df3a
commit 754eb6a67d
2 changed files with 124 additions and 61 deletions

View File

@ -82,6 +82,65 @@ static const unsigned const_empty_block_mask_420[3][2][2] = {
{ { 0x01, 0x01 }, { 0x01, 0x01 } }
};
/* Per-video-buffer state owned by one decoder instance, attached to the
 * pipe_video_buffer via the associated-data mechanism.  Holds resources
 * created on the decoder's private context so decoding stays independent
 * of the application's context. */
struct video_buffer_private
{
   /* sampler views for each plane, created on the decoder's context */
   struct pipe_sampler_view *sampler_view_planes[VL_NUM_COMPONENTS];
   /* render surfaces for each plane, created on the decoder's context */
   struct pipe_surface *surfaces[VL_MAX_SURFACES];
   /* decode buffer for chunked decode; NULL until first use */
   struct vl_mpeg12_buffer *buffer;
};
static void
vl_mpeg12_destroy_buffer(struct vl_mpeg12_buffer *buf);

/* Associated-data destructor: releases all per-video-buffer resources
 * (sampler views, surfaces and the optional decode buffer) and frees
 * the private struct itself. */
static void
destroy_video_buffer_private(void *private)
{
   struct video_buffer_private *priv = private;
   unsigned plane;

   /* drop the references we took when the views/surfaces were created */
   for (plane = 0; plane < VL_NUM_COMPONENTS; ++plane)
      pipe_sampler_view_reference(&priv->sampler_view_planes[plane], NULL);

   for (plane = 0; plane < VL_MAX_SURFACES; ++plane)
      pipe_surface_reference(&priv->surfaces[plane], NULL);

   /* the decode buffer only exists for chunked decode */
   if (priv->buffer)
      vl_mpeg12_destroy_buffer(priv->buffer);

   FREE(priv);
}
static struct video_buffer_private *
get_video_buffer_private(struct vl_mpeg12_decoder *dec, struct pipe_video_buffer *buf)
{
struct pipe_context *pipe = dec->context;
struct video_buffer_private *priv;
struct pipe_sampler_view **sv;
struct pipe_surface **surf;
unsigned i;
priv = vl_video_buffer_get_associated_data(buf, &dec->base);
if (priv)
return priv;
priv = CALLOC_STRUCT(video_buffer_private);
sv = buf->get_sampler_view_planes(buf);
for (i = 0; i < VL_NUM_COMPONENTS; ++i)
if (sv[i])
priv->sampler_view_planes[i] = pipe->create_sampler_view(pipe, sv[i]->texture, sv[i]);
surf = buf->get_surfaces(buf);
for (i = 0; i < VL_MAX_SURFACES; ++i)
if (surf[i])
priv->surfaces[i] = pipe->create_surface(pipe, surf[i]->texture, surf[i]);
vl_video_buffer_set_associated_data(buf, &dec->base, priv, destroy_video_buffer_private);
return priv;
}
static bool
init_zscan_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
{
@ -103,7 +162,7 @@ init_zscan_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer
res_tmpl.usage = PIPE_USAGE_STREAM;
res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;
res = dec->base.context->screen->resource_create(dec->base.context->screen, &res_tmpl);
res = dec->context->screen->resource_create(dec->context->screen, &res_tmpl);
if (!res)
goto error_source;
@ -111,7 +170,7 @@ init_zscan_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer
memset(&sv_tmpl, 0, sizeof(sv_tmpl));
u_sampler_view_default_template(&sv_tmpl, res, res->format);
sv_tmpl.swizzle_r = sv_tmpl.swizzle_g = sv_tmpl.swizzle_b = sv_tmpl.swizzle_a = PIPE_SWIZZLE_RED;
buffer->zscan_source = dec->base.context->create_sampler_view(dec->base.context, res, &sv_tmpl);
buffer->zscan_source = dec->context->create_sampler_view(dec->context, res, &sv_tmpl);
pipe_resource_reference(&res, NULL);
if (!buffer->zscan_source)
goto error_sampler;
@ -384,9 +443,8 @@ UploadYcbcrBlocks(struct vl_mpeg12_decoder *dec,
}
static void
vl_mpeg12_destroy_buffer(void *buffer)
vl_mpeg12_destroy_buffer(struct vl_mpeg12_buffer *buf)
{
struct vl_mpeg12_buffer *buf = buffer;
assert(buf);
@ -407,11 +465,11 @@ vl_mpeg12_destroy(struct pipe_video_codec *decoder)
assert(decoder);
/* Asserted in softpipe_delete_fs_state() for some reason */
dec->base.context->bind_vs_state(dec->base.context, NULL);
dec->base.context->bind_fs_state(dec->base.context, NULL);
dec->context->bind_vs_state(dec->context, NULL);
dec->context->bind_fs_state(dec->context, NULL);
dec->base.context->delete_depth_stencil_alpha_state(dec->base.context, dec->dsa);
dec->base.context->delete_sampler_state(dec->base.context, dec->sampler_ycbcr);
dec->context->delete_depth_stencil_alpha_state(dec->context, dec->dsa);
dec->context->delete_sampler_state(dec->context, dec->sampler_ycbcr);
vl_mc_cleanup(&dec->mc_y);
vl_mc_cleanup(&dec->mc_c);
@ -426,8 +484,8 @@ vl_mpeg12_destroy(struct pipe_video_codec *decoder)
vl_zscan_cleanup(&dec->zscan_y);
vl_zscan_cleanup(&dec->zscan_c);
dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_mv);
dec->context->delete_vertex_elements_state(dec->context, dec->ves_ycbcr);
dec->context->delete_vertex_elements_state(dec->context, dec->ves_mv);
pipe_resource_reference(&dec->quads.buffer, NULL);
pipe_resource_reference(&dec->pos.buffer, NULL);
@ -440,19 +498,22 @@ vl_mpeg12_destroy(struct pipe_video_codec *decoder)
if (dec->dec_buffers[i])
vl_mpeg12_destroy_buffer(dec->dec_buffers[i]);
dec->context->destroy(dec->context);
FREE(dec);
}
static struct vl_mpeg12_buffer *
vl_mpeg12_get_decode_buffer(struct vl_mpeg12_decoder *dec, struct pipe_video_buffer *target)
{
struct video_buffer_private *priv;
struct vl_mpeg12_buffer *buffer;
assert(dec);
buffer = vl_video_buffer_get_associated_data(target, &dec->base);
if (buffer)
return buffer;
priv = get_video_buffer_private(dec, target);
if (priv->buffer)
return priv->buffer;
buffer = dec->dec_buffers[dec->current_buffer];
if (buffer)
@ -462,7 +523,7 @@ vl_mpeg12_get_decode_buffer(struct vl_mpeg12_decoder *dec, struct pipe_video_buf
if (buffer == NULL)
return NULL;
if (!vl_vb_init(&buffer->vertex_stream, dec->base.context,
if (!vl_vb_init(&buffer->vertex_stream, dec->context,
dec->base.width / VL_MACROBLOCK_WIDTH,
dec->base.height / VL_MACROBLOCK_HEIGHT))
goto error_vertex_buffer;
@ -481,8 +542,7 @@ vl_mpeg12_get_decode_buffer(struct vl_mpeg12_decoder *dec, struct pipe_video_buf
vl_mpg12_bs_init(&buffer->bs, &dec->base);
if (dec->base.expect_chunked_decode)
vl_video_buffer_set_associated_data(target, &dec->base,
buffer, vl_mpeg12_destroy_buffer);
priv->buffer = buffer;
else
dec->dec_buffers[dec->current_buffer] = buffer;
@ -539,17 +599,17 @@ vl_mpeg12_begin_frame(struct pipe_video_codec *decoder,
vl_zscan_upload_quant(zscan, &buf->zscan[i], non_intra_matrix, false);
}
vl_vb_map(&buf->vertex_stream, dec->base.context);
vl_vb_map(&buf->vertex_stream, dec->context);
tex = buf->zscan_source->texture;
rect.width = tex->width0;
rect.height = tex->height0;
buf->texels =
dec->base.context->transfer_map(dec->base.context, tex, 0,
PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_DISCARD_RANGE,
&rect, &buf->tex_transfer);
dec->context->transfer_map(dec->context, tex, 0,
PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_DISCARD_RANGE,
&rect, &buf->tex_transfer);
buf->block_num = 0;
@ -685,23 +745,23 @@ vl_mpeg12_end_frame(struct pipe_video_codec *decoder,
buf = vl_mpeg12_get_decode_buffer(dec, target);
vl_vb_unmap(&buf->vertex_stream, dec->base.context);
vl_vb_unmap(&buf->vertex_stream, dec->context);
dec->base.context->transfer_unmap(dec->base.context, buf->tex_transfer);
dec->context->transfer_unmap(dec->context, buf->tex_transfer);
vb[0] = dec->quads;
vb[1] = dec->pos;
target_surfaces = target->get_surfaces(target);
target_surfaces = get_video_buffer_private(dec, target)->surfaces;
for (i = 0; i < VL_MAX_REF_FRAMES; ++i) {
if (desc->ref[i])
ref_frames[i] = desc->ref[i]->get_sampler_view_planes(desc->ref[i]);
ref_frames[i] = get_video_buffer_private(dec, desc->ref[i])->sampler_view_planes;
else
ref_frames[i] = NULL;
}
dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv);
dec->context->bind_vertex_elements_state(dec->context, dec->ves_mv);
for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
if (!target_surfaces[i]) continue;
@ -711,18 +771,18 @@ vl_mpeg12_end_frame(struct pipe_video_codec *decoder,
if (!ref_frames[j] || !ref_frames[j][i]) continue;
vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);;
dec->base.context->set_vertex_buffers(dec->base.context, 0, 3, vb);
dec->context->set_vertex_buffers(dec->context, 0, 3, vb);
vl_mc_render_ref(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], ref_frames[j][i]);
}
}
dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
dec->context->bind_vertex_elements_state(dec->context, dec->ves_ycbcr);
for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
if (!buf->num_ycbcr_blocks[i]) continue;
vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
dec->base.context->set_vertex_buffers(dec->base.context, 0, 2, vb);
dec->context->set_vertex_buffers(dec->context, 0, 2, vb);
vl_zscan_render(i ? &dec->zscan_c : & dec->zscan_y, &buf->zscan[i] , buf->num_ycbcr_blocks[i]);
@ -741,21 +801,22 @@ vl_mpeg12_end_frame(struct pipe_video_codec *decoder,
if (!buf->num_ycbcr_blocks[plane]) continue;
vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, plane);
dec->base.context->set_vertex_buffers(dec->base.context, 0, 2, vb);
dec->context->set_vertex_buffers(dec->context, 0, 2, vb);
if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
vl_idct_prepare_stage2(i ? &dec->idct_c : &dec->idct_y, &buf->idct[plane]);
else {
dec->base.context->set_sampler_views(dec->base.context,
PIPE_SHADER_FRAGMENT, 0, 1,
&mc_source_sv[plane]);
dec->base.context->bind_sampler_states(dec->base.context,
PIPE_SHADER_FRAGMENT,
0, 1, &dec->sampler_ycbcr);
dec->context->set_sampler_views(dec->context,
PIPE_SHADER_FRAGMENT, 0, 1,
&mc_source_sv[plane]);
dec->context->bind_sampler_states(dec->context,
PIPE_SHADER_FRAGMENT,
0, 1, &dec->sampler_ycbcr);
}
vl_mc_render_ycbcr(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], j, buf->num_ycbcr_blocks[plane]);
}
}
dec->context->flush(dec->context, NULL, 0);
++dec->current_buffer;
dec->current_buffer %= 4;
}
@ -793,8 +854,8 @@ init_pipe_state(struct vl_mpeg12_decoder *dec)
dsa.alpha.enabled = 0;
dsa.alpha.func = PIPE_FUNC_ALWAYS;
dsa.alpha.ref_value = 0;
dec->dsa = dec->base.context->create_depth_stencil_alpha_state(dec->base.context, &dsa);
dec->base.context->bind_depth_stencil_alpha_state(dec->base.context, dec->dsa);
dec->dsa = dec->context->create_depth_stencil_alpha_state(dec->context, &dsa);
dec->context->bind_depth_stencil_alpha_state(dec->context, dec->dsa);
memset(&sampler, 0, sizeof(sampler));
sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
@ -806,7 +867,7 @@ init_pipe_state(struct vl_mpeg12_decoder *dec)
sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
sampler.compare_func = PIPE_FUNC_ALWAYS;
sampler.normalized_coords = 1;
dec->sampler_ycbcr = dec->base.context->create_sampler_state(dec->base.context, &sampler);
dec->sampler_ycbcr = dec->context->create_sampler_state(dec->context, &sampler);
if (!dec->sampler_ycbcr)
return false;
@ -821,7 +882,7 @@ find_format_config(struct vl_mpeg12_decoder *dec, const struct format_config con
assert(dec);
screen = dec->base.context->screen;
screen = dec->context->screen;
for (i = 0; i < num_configs; ++i) {
if (!screen->is_format_supported(screen, configs[i].zscan_source_format, PIPE_TEXTURE_2D,
@ -855,17 +916,17 @@ init_zscan(struct vl_mpeg12_decoder *dec, const struct format_config* format_con
assert(dec);
dec->zscan_source_format = format_config->zscan_source_format;
dec->zscan_linear = vl_zscan_layout(dec->base.context, vl_zscan_linear, dec->blocks_per_line);
dec->zscan_normal = vl_zscan_layout(dec->base.context, vl_zscan_normal, dec->blocks_per_line);
dec->zscan_alternate = vl_zscan_layout(dec->base.context, vl_zscan_alternate, dec->blocks_per_line);
dec->zscan_linear = vl_zscan_layout(dec->context, vl_zscan_linear, dec->blocks_per_line);
dec->zscan_normal = vl_zscan_layout(dec->context, vl_zscan_normal, dec->blocks_per_line);
dec->zscan_alternate = vl_zscan_layout(dec->context, vl_zscan_alternate, dec->blocks_per_line);
num_channels = dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 4 : 1;
if (!vl_zscan_init(&dec->zscan_y, dec->base.context, dec->base.width, dec->base.height,
if (!vl_zscan_init(&dec->zscan_y, dec->context, dec->base.width, dec->base.height,
dec->blocks_per_line, dec->num_blocks, num_channels))
return false;
if (!vl_zscan_init(&dec->zscan_c, dec->base.context, dec->chroma_width, dec->chroma_height,
if (!vl_zscan_init(&dec->zscan_c, dec->context, dec->chroma_width, dec->chroma_height,
dec->blocks_per_line, dec->num_blocks, num_channels))
return false;
@ -881,14 +942,14 @@ init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_conf
struct pipe_sampler_view *matrix = NULL;
nr_of_idct_render_targets = dec->base.context->screen->get_param
nr_of_idct_render_targets = dec->context->screen->get_param
(
dec->base.context->screen, PIPE_CAP_MAX_RENDER_TARGETS
dec->context->screen, PIPE_CAP_MAX_RENDER_TARGETS
);
max_inst = dec->base.context->screen->get_shader_param
max_inst = dec->context->screen->get_shader_param
(
dec->base.context->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS
dec->context->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS
);
// Just assume we need 32 inst per render target, not 100% true, but should work in most cases
@ -905,7 +966,7 @@ init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_conf
templat.chroma_format = dec->base.chroma_format;
dec->idct_source = vl_video_buffer_create_ex
(
dec->base.context, &templat,
dec->context, &templat,
formats, 1, 1, PIPE_USAGE_STATIC
);
@ -919,21 +980,21 @@ init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_conf
templat.chroma_format = dec->base.chroma_format;
dec->mc_source = vl_video_buffer_create_ex
(
dec->base.context, &templat,
dec->context, &templat,
formats, nr_of_idct_render_targets, 1, PIPE_USAGE_STATIC
);
if (!dec->mc_source)
goto error_mc_source;
if (!(matrix = vl_idct_upload_matrix(dec->base.context, format_config->idct_scale)))
if (!(matrix = vl_idct_upload_matrix(dec->context, format_config->idct_scale)))
goto error_matrix;
if (!vl_idct_init(&dec->idct_y, dec->base.context, dec->base.width, dec->base.height,
if (!vl_idct_init(&dec->idct_y, dec->context, dec->base.width, dec->base.height,
nr_of_idct_render_targets, matrix, matrix))
goto error_y;
if(!vl_idct_init(&dec->idct_c, dec->base.context, dec->chroma_width, dec->chroma_height,
if(!vl_idct_init(&dec->idct_c, dec->context, dec->chroma_width, dec->chroma_height,
nr_of_idct_render_targets, matrix, matrix))
goto error_c;
@ -970,7 +1031,7 @@ init_mc_source_widthout_idct(struct vl_mpeg12_decoder *dec, const struct format_
templat.chroma_format = dec->base.chroma_format;
dec->mc_source = vl_video_buffer_create_ex
(
dec->base.context, &templat,
dec->context, &templat,
formats, 1, 1, PIPE_USAGE_STATIC
);
@ -1037,6 +1098,7 @@ vl_create_mpeg12_decoder(struct pipe_context *context,
dec->base = *templat;
dec->base.context = context;
dec->context = context->screen->context_create(context->screen, NULL);
dec->base.destroy = vl_mpeg12_destroy;
dec->base.begin_frame = vl_mpeg12_begin_frame;
@ -1066,15 +1128,15 @@ vl_create_mpeg12_decoder(struct pipe_context *context,
dec->num_blocks = dec->num_blocks * 3;
}
dec->quads = vl_vb_upload_quads(dec->base.context);
dec->quads = vl_vb_upload_quads(dec->context);
dec->pos = vl_vb_upload_pos(
dec->base.context,
dec->context,
dec->base.width / VL_MACROBLOCK_WIDTH,
dec->base.height / VL_MACROBLOCK_HEIGHT
);
dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->base.context);
dec->ves_mv = vl_vb_get_ves_mv(dec->base.context);
dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->context);
dec->ves_mv = vl_vb_get_ves_mv(dec->context);
switch (templat->entrypoint) {
case PIPE_VIDEO_ENTRYPOINT_BITSTREAM:
@ -1111,13 +1173,13 @@ vl_create_mpeg12_decoder(struct pipe_context *context,
goto error_sources;
}
if (!vl_mc_init(&dec->mc_y, dec->base.context, dec->base.width, dec->base.height,
if (!vl_mc_init(&dec->mc_y, dec->context, dec->base.width, dec->base.height,
VL_MACROBLOCK_HEIGHT, format_config->mc_scale,
mc_vert_shader_callback, mc_frag_shader_callback, dec))
goto error_mc_y;
// TODO
if (!vl_mc_init(&dec->mc_c, dec->base.context, dec->base.width, dec->base.height,
if (!vl_mc_init(&dec->mc_c, dec->context, dec->base.width, dec->base.height,
VL_BLOCK_HEIGHT, format_config->mc_scale,
mc_vert_shader_callback, mc_frag_shader_callback, dec))
goto error_mc_c;

View File

@ -44,6 +44,7 @@ struct pipe_context;
struct vl_mpeg12_decoder
{
struct pipe_video_codec base;
struct pipe_context *context;
unsigned chroma_width, chroma_height;