From fcdf50f74befad8d89eb3f9cdfd88b82d1daa98c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?=
Date: Thu, 7 Apr 2011 20:10:55 +0200
Subject: [PATCH] [g3dvl] add support for different decoding entry points

---
 src/gallium/auxiliary/vl/vl_context.c         |   5 +-
 src/gallium/auxiliary/vl/vl_idct.c            |   6 +-
 src/gallium/auxiliary/vl/vl_idct.h            |   2 +-
 src/gallium/auxiliary/vl/vl_mpeg12_decoder.c  | 148 ++++++++++--------
 src/gallium/auxiliary/vl/vl_mpeg12_decoder.h  |   3 +-
 .../auxiliary/vl/vl_mpeg12_mc_renderer.c      |  14 +-
 .../auxiliary/vl/vl_mpeg12_mc_renderer.h      |   3 +-
 src/gallium/include/pipe/p_defines.h          |   7 +
 src/gallium/include/pipe/p_video_context.h    |   2 +
 .../state_trackers/xorg/xvmc/context.c        |   5 +-
 10 files changed, 113 insertions(+), 82 deletions(-)

diff --git a/src/gallium/auxiliary/vl/vl_context.c b/src/gallium/auxiliary/vl/vl_context.c
index 5a55f3b9607..2bc027fedc5 100644
--- a/src/gallium/auxiliary/vl/vl_context.c
+++ b/src/gallium/auxiliary/vl/vl_context.c
@@ -173,6 +173,7 @@ error_map:
 static struct pipe_video_decoder *
 vl_context_create_decoder(struct pipe_video_context *context,
                           enum pipe_video_profile profile,
+                          enum pipe_video_entrypoint entrypoint,
                           enum pipe_video_chroma_format chroma_format,
                           unsigned width, unsigned height)
 {
@@ -187,8 +188,8 @@ vl_context_create_decoder(struct pipe_video_context *context,
 
    switch (u_reduce_video_profile(profile)) {
       case PIPE_VIDEO_CODEC_MPEG12:
-         return vl_create_mpeg12_decoder(context, ctx->pipe, profile, chroma_format,
-                                         buffer_width, buffer_height);
+         return vl_create_mpeg12_decoder(context, ctx->pipe, profile, entrypoint,
+                                         chroma_format, buffer_width, buffer_height);
       default:
          return NULL;
    }
diff --git a/src/gallium/auxiliary/vl/vl_idct.c b/src/gallium/auxiliary/vl/vl_idct.c
index a7b8a18dec3..dc4a9bbb8c9 100644
--- a/src/gallium/auxiliary/vl/vl_idct.c
+++ b/src/gallium/auxiliary/vl/vl_idct.c
@@ -38,8 +38,6 @@
 #include 
 #include "vl_types.h"
 
-#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)
-
 #define NR_RENDER_TARGETS 4
 
 enum VS_OUTPUT
@@ -534,10 +532,8 @@ cleanup_intermediate(struct vl_idct *idct, struct vl_idct_buffer *buffer)
 }
 
 struct pipe_sampler_view *
-vl_idct_upload_matrix(struct pipe_context *pipe)
+vl_idct_upload_matrix(struct pipe_context *pipe, float scale)
 {
-   const float scale = sqrtf(SCALE_FACTOR_16_TO_9);
-
    struct pipe_resource tex_templ, *matrix;
    struct pipe_sampler_view sv_templ, *sv;
    struct pipe_transfer *buf_transfer;
diff --git a/src/gallium/auxiliary/vl/vl_idct.h b/src/gallium/auxiliary/vl/vl_idct.h
index cd62cde449b..5d3784ce6c0 100644
--- a/src/gallium/auxiliary/vl/vl_idct.h
+++ b/src/gallium/auxiliary/vl/vl_idct.h
@@ -69,7 +69,7 @@ struct vl_idct_buffer
 };
 
 /* upload the idct matrix, which can be shared by all idct instances of a pipe */
-struct pipe_sampler_view *vl_idct_upload_matrix(struct pipe_context *pipe);
+struct pipe_sampler_view *vl_idct_upload_matrix(struct pipe_context *pipe, float scale);
 
 /* init an idct instance */
 bool vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe,
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
index 24f385681c2..31163b9d08e 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
@@ -25,17 +25,8 @@
  *
  **************************************************************************/
 
-//#include 
-
-//#include "util/u_inlines.h"
-
-//#include 
-//#include 
-//#include 
-
-//#include 
-//#include 
-//#include 
+#include 
+#include 
 
 #include 
 #include 
@@ -44,6 +35,8 @@
 #include "vl_mpeg12_decoder.h"
 #include "vl_defines.h"
 
+#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)
+
 static const unsigned const_empty_block_mask_420[3][2][2] = {
    { { 0x20, 0x10 },  { 0x08, 0x04 } },
    { { 0x02, 0x02 },  { 0x02, 0x02 } },
@@ -59,7 +52,10 @@ map_buffers(struct vl_mpeg12_decoder *ctx, struct vl_mpeg12_buffer *buffer)
 
    assert(ctx && buffer);
 
-   sampler_views = buffer->idct_source->get_sampler_views(buffer->idct_source);
+   if (ctx->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
+      sampler_views = buffer->idct_source->get_sampler_views(buffer->idct_source);
+   else
+      sampler_views = buffer->mc_source->get_sampler_views(buffer->mc_source);
    assert(sampler_views);
 
    for (i = 0; i < VL_MAX_PLANES; ++i) {
@@ -156,12 +152,14 @@ vl_mpeg12_buffer_destroy(struct pipe_video_decode_buffer *buffer)
    struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)buf->base.decoder;
    assert(buf && dec);
 
-   buf->idct_source->destroy(buf->idct_source);
-   buf->idct_2_mc->destroy(buf->idct_2_mc);
+   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
+      buf->idct_source->destroy(buf->idct_source);
+      vl_idct_cleanup_buffer(&dec->idct_y, &buf->idct[0]);
+      vl_idct_cleanup_buffer(&dec->idct_c, &buf->idct[1]);
+      vl_idct_cleanup_buffer(&dec->idct_c, &buf->idct[2]);
+   }
+   buf->mc_source->destroy(buf->mc_source);
    vl_vb_cleanup(&buf->vertex_stream);
-   vl_idct_cleanup_buffer(&dec->idct_y, &buf->idct[0]);
-   vl_idct_cleanup_buffer(&dec->idct_c, &buf->idct[1]);
-   vl_idct_cleanup_buffer(&dec->idct_c, &buf->idct[2]);
    vl_mpeg12_mc_cleanup_buffer(&buf->mc[0]);
    vl_mpeg12_mc_cleanup_buffer(&buf->mc[1]);
    vl_mpeg12_mc_cleanup_buffer(&buf->mc[2]);
@@ -238,8 +236,10 @@ vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
    dec->pipe->delete_depth_stencil_alpha_state(dec->pipe, dec->dsa);
 
    vl_mpeg12_mc_renderer_cleanup(&dec->mc);
-   vl_idct_cleanup(&dec->idct_y);
-   vl_idct_cleanup(&dec->idct_c);
+   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
+      vl_idct_cleanup(&dec->idct_y);
+      vl_idct_cleanup(&dec->idct_c);
+   }
    dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves[0]);
    dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves[1]);
    dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves[2]);
@@ -257,7 +257,7 @@ vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder)
       PIPE_FORMAT_R16G16B16A16_SNORM
    };
 
-   const enum pipe_format idct_2_mc_formats[3] = {
+   const enum pipe_format mc_source_formats[3] = {
       PIPE_FORMAT_R16_SNORM,
       PIPE_FORMAT_R16_SNORM,
       PIPE_FORMAT_R16_SNORM
@@ -291,43 +291,47 @@ vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder)
    if (!buffer->vertex_bufs.individual.stream.buffer)
       goto error_vertex_stream;
 
-   buffer->idct_source = vl_video_buffer_init(dec->base.context, dec->pipe,
-                                              dec->base.width / 4, dec->base.height, 1,
-                                              dec->base.chroma_format, 3,
-                                              idct_source_formats,
-                                              PIPE_USAGE_STREAM);
-   if (!buffer->idct_source)
-      goto error_idct_source;
-
-   buffer->idct_2_mc = vl_video_buffer_init(dec->base.context, dec->pipe,
+   buffer->mc_source = vl_video_buffer_init(dec->base.context, dec->pipe,
                                             dec->base.width, dec->base.height, 1,
                                             dec->base.chroma_format, 3,
-                                            idct_2_mc_formats,
+                                            mc_source_formats,
                                             PIPE_USAGE_STATIC);
-   if (!buffer->idct_2_mc)
-      goto error_idct_2_mc;
 
-   idct_views = buffer->idct_source->get_sampler_views(buffer->idct_source);
-   if (!idct_views)
-      goto error_idct_views;
+   if (!buffer->mc_source)
+      goto error_mc_source;
 
-   idct_surfaces = buffer->idct_2_mc->get_surfaces(buffer->idct_2_mc);
-   if (!idct_surfaces)
-      goto error_idct_surfaces;
+   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
+      buffer->idct_source = vl_video_buffer_init(dec->base.context, dec->pipe,
+                                                 dec->base.width / 4, dec->base.height, 1,
+                                                 dec->base.chroma_format, 3,
+                                                 idct_source_formats,
+                                                 PIPE_USAGE_STREAM);
+      if (!buffer->idct_source)
+         goto error_idct_source;
 
-   if (!vl_idct_init_buffer(&dec->idct_y, &buffer->idct[0],
-                            idct_views[0], idct_surfaces[0]))
-      goto error_idct_y;
-
-   if (!vl_idct_init_buffer(&dec->idct_c, &buffer->idct[1],
-                            idct_views[1], idct_surfaces[1]))
-      goto error_idct_cb;
+      idct_views = buffer->idct_source->get_sampler_views(buffer->idct_source);
+      if (!idct_views)
+         goto error_idct_views;
 
-   if (!vl_idct_init_buffer(&dec->idct_c, &buffer->idct[2],
-                            idct_views[2], idct_surfaces[2]))
-      goto error_idct_cr;
+      idct_surfaces = buffer->mc_source->get_surfaces(buffer->mc_source);
+      if (!idct_surfaces)
+         goto error_idct_surfaces;
 
-   mc_views = buffer->idct_2_mc->get_sampler_views(buffer->idct_2_mc);
+      if (!vl_idct_init_buffer(&dec->idct_y, &buffer->idct[0],
+                               idct_views[0], idct_surfaces[0]))
+         goto error_idct_y;
+
+      if (!vl_idct_init_buffer(&dec->idct_c, &buffer->idct[1],
+                               idct_views[1], idct_surfaces[1]))
+         goto error_idct_cb;
+
+      if (!vl_idct_init_buffer(&dec->idct_c, &buffer->idct[2],
+                               idct_views[2], idct_surfaces[2]))
+         goto error_idct_cr;
+   }
+
+   mc_views = buffer->mc_source->get_sampler_views(buffer->mc_source);
    if (!mc_views)
      goto error_mc_views;
@@ -350,23 +354,27 @@ error_mc_cb:
 
 error_mc_y:
 error_mc_views:
-   vl_idct_cleanup_buffer(&dec->idct_c, &buffer->idct[2]);
+   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
+      vl_idct_cleanup_buffer(&dec->idct_c, &buffer->idct[2]);
 
 error_idct_cr:
-   vl_idct_cleanup_buffer(&dec->idct_c, &buffer->idct[1]);
+   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
+      vl_idct_cleanup_buffer(&dec->idct_c, &buffer->idct[1]);
 
 error_idct_cb:
-   vl_idct_cleanup_buffer(&dec->idct_y, &buffer->idct[0]);
+   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
+      vl_idct_cleanup_buffer(&dec->idct_y, &buffer->idct[0]);
 
 error_idct_y:
 error_idct_surfaces:
 error_idct_views:
-   buffer->idct_2_mc->destroy(buffer->idct_2_mc);
-
-error_idct_2_mc:
-   buffer->idct_source->destroy(buffer->idct_source);
+   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
+      buffer->idct_source->destroy(buffer->idct_source);
 
 error_idct_source:
+   buffer->mc_source->destroy(buffer->mc_source);
+
+error_mc_source:
    vl_vb_cleanup(&buffer->vertex_stream);
 
 error_vertex_stream:
@@ -408,7 +416,9 @@ vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer,
 
    for (i = 0; i < VL_MAX_PLANES; ++i) {
       dec->pipe->bind_vertex_elements_state(dec->pipe, dec->ves[i]);
-      vl_idct_flush(i == 0 ? &dec->idct_y : &dec->idct_c, &buf->idct[i], ne_num);
+
+      if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
+         vl_idct_flush(i == 0 ? &dec->idct_y : &dec->idct_c, &buf->idct[i], ne_num);
 
       sv_refs[0] = sv_past ? sv_past[i] : NULL;
       sv_refs[1] = sv_future ? sv_future[i] : NULL;
@@ -516,11 +526,7 @@ init_idct(struct vl_mpeg12_decoder *dec, unsigned buffer_width, unsigned buffer_
    unsigned chroma_width, chroma_height, chroma_blocks_x, chroma_blocks_y;
    struct pipe_sampler_view *idct_matrix;
 
-   /* TODO: Implement 422, 444 */
-   assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
-   dec->empty_block_mask = &const_empty_block_mask_420;
-
-   if (!(idct_matrix = vl_idct_upload_matrix(dec->pipe)))
+   if (!(idct_matrix = vl_idct_upload_matrix(dec->pipe, sqrt(SCALE_FACTOR_16_TO_9))))
      goto error_idct_matrix;
 
    if (!vl_idct_init(&dec->idct_y, dec->pipe, buffer_width, buffer_height,
@@ -565,6 +571,7 @@ struct pipe_video_decoder *
 vl_create_mpeg12_decoder(struct pipe_video_context *context,
                          struct pipe_context *pipe,
                          enum pipe_video_profile profile,
+                         enum pipe_video_entrypoint entrypoint,
                          enum pipe_video_chroma_format chroma_format,
                          unsigned width, unsigned height)
 {
@@ -580,6 +587,7 @@ vl_create_mpeg12_decoder(struct pipe_video_context *context,
 
    dec->base.context = context;
    dec->base.profile = profile;
+   dec->base.entrypoint = entrypoint;
   dec->base.chroma_format = chroma_format;
    dec->base.width = width;
    dec->base.height = height;
@@ -598,10 +606,16 @@ vl_create_mpeg12_decoder(struct pipe_video_context *context,
 
    dec->base.width = align(width, MACROBLOCK_WIDTH);
    dec->base.height = align(height, MACROBLOCK_HEIGHT);
 
-   if (!init_idct(dec, dec->base.width, dec->base.height))
-      goto error_idct;
+   /* TODO: Implement 422, 444 */
+   assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
+   dec->empty_block_mask = &const_empty_block_mask_420;
 
-   if (!vl_mpeg12_mc_renderer_init(&dec->mc, dec->pipe, dec->base.width, dec->base.height))
+   if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
+      if (!init_idct(dec, dec->base.width, dec->base.height))
+         goto error_idct;
+
+   if (!vl_mpeg12_mc_renderer_init(&dec->mc, dec->pipe, dec->base.width, dec->base.height,
+                                   entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 1.0f : SCALE_FACTOR_16_TO_9))
      goto error_mc;
 
    if (!init_pipe_state(dec))
@@ -613,8 +627,10 @@ error_pipe_state:
    vl_mpeg12_mc_renderer_cleanup(&dec->mc);
 
 error_mc:
-   vl_idct_cleanup(&dec->idct_y);
-   vl_idct_cleanup(&dec->idct_c);
+   if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
+      vl_idct_cleanup(&dec->idct_y);
+      vl_idct_cleanup(&dec->idct_c);
+   }
 
 error_idct:
    FREE(dec);
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h
index 69d649b179a..e90f8d3880b 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h
@@ -64,7 +64,7 @@ struct vl_mpeg12_buffer
    struct vl_vertex_buffer vertex_stream;
 
    struct pipe_video_buffer *idct_source;
-   struct pipe_video_buffer *idct_2_mc;
+   struct pipe_video_buffer *mc_source;
 
    union
    {
@@ -87,6 +87,7 @@ struct pipe_video_decoder *
 vl_create_mpeg12_decoder(struct pipe_video_context *context,
                          struct pipe_context *pipe,
                          enum pipe_video_profile profile,
+                         enum pipe_video_entrypoint entrypoint,
                          enum pipe_video_chroma_format chroma_format,
                          unsigned width, unsigned height);
 
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c
index 912dea3c57f..455aa52919e 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c
@@ -216,7 +216,7 @@ calc_field(struct ureg_program *shader)
 }
 
 static struct ureg_dst
-fetch_ycbcr(struct vl_mpeg12_mc_renderer *r, struct ureg_program *shader, struct ureg_dst field)
+fetch_ycbcr(struct vl_mpeg12_mc_renderer *r, struct ureg_program *shader, struct ureg_dst field, float scale)
 {
    struct ureg_src tc[2], sampler;
    struct ureg_dst texel, t_tc;
@@ -250,6 +250,9 @@ fetch_ycbcr(struct vl_mpeg12_mc_renderer *r, struct ureg_program *shader, struct
    ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
    ureg_ENDIF(shader);
 
+   if (scale != 1.0f)
+      ureg_MUL(shader, texel, ureg_src(texel), ureg_imm1f(shader, scale));
+
    ureg_release_temporary(shader, t_tc);
 
    return texel;
@@ -308,7 +311,7 @@ fetch_ref(struct ureg_program *shader, struct ureg_dst field)
 }
 
 static void *
-create_frag_shader(struct vl_mpeg12_mc_renderer *r)
+create_frag_shader(struct vl_mpeg12_mc_renderer *r, float scale)
 {
    struct ureg_program *shader;
    struct ureg_dst result;
@@ -322,7 +325,7 @@ create_frag_shader(struct vl_mpeg12_mc_renderer *r)
    fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
 
    field = calc_field(shader);
-   texel = fetch_ycbcr(r, shader, field);
+   texel = fetch_ycbcr(r, shader, field, scale);
 
    result = fetch_ref(shader, field);
 
@@ -424,7 +427,8 @@ bool
 vl_mpeg12_mc_renderer_init(struct vl_mpeg12_mc_renderer *renderer,
                            struct pipe_context *pipe,
                            unsigned buffer_width,
-                           unsigned buffer_height)
+                           unsigned buffer_height,
+                           float scale)
 {
    struct pipe_resource tex_templ, *tex_dummy;
    struct pipe_sampler_view sampler_view;
@@ -445,7 +449,7 @@ vl_mpeg12_mc_renderer_init(struct vl_mpeg12_mc_renderer *renderer,
    if (!renderer->vs)
      goto error_vs_shaders;
 
-   renderer->fs = create_frag_shader(renderer);
+   renderer->fs = create_frag_shader(renderer, scale);
    if (!renderer->fs)
      goto error_fs_shaders;
 
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h
index 052d7d6a30f..c3efda524a8 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h
@@ -71,7 +71,8 @@ struct vl_mpeg12_mc_buffer
 bool vl_mpeg12_mc_renderer_init(struct vl_mpeg12_mc_renderer *renderer,
                                 struct pipe_context *pipe,
                                 unsigned picture_width,
-                                unsigned picture_height);
+                                unsigned picture_height,
+                                float scale);
 
 void vl_mpeg12_mc_renderer_cleanup(struct vl_mpeg12_mc_renderer *renderer);
 
diff --git a/src/gallium/include/pipe/p_defines.h b/src/gallium/include/pipe/p_defines.h
index e9d47983e1b..e3cc28ba476 100644
--- a/src/gallium/include/pipe/p_defines.h
+++ b/src/gallium/include/pipe/p_defines.h
@@ -514,6 +514,13 @@ enum pipe_video_profile
    PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH
 };
 
+enum pipe_video_entrypoint
+{
+   PIPE_VIDEO_ENTRYPOINT_UNKNOWN,
+   PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
+   PIPE_VIDEO_ENTRYPOINT_IDCT,
+   PIPE_VIDEO_ENTRYPOINT_MC
+};
 
 /**
  * Composite query types
diff --git a/src/gallium/include/pipe/p_video_context.h b/src/gallium/include/pipe/p_video_context.h
index 33c64baf7c4..21d0581226d 100644
--- a/src/gallium/include/pipe/p_video_context.h
+++ b/src/gallium/include/pipe/p_video_context.h
@@ -112,6 +112,7 @@ struct pipe_video_context
     */
    struct pipe_video_decoder *(*create_decoder)(struct pipe_video_context *context,
                                                 enum pipe_video_profile profile,
+                                                enum pipe_video_entrypoint entrypoint,
                                                 enum pipe_video_chroma_format chroma_format,
                                                 unsigned width, unsigned height);
 
@@ -137,6 +138,7 @@ struct pipe_video_decoder
    struct pipe_video_context *context;
 
    enum pipe_video_profile profile;
+   enum pipe_video_entrypoint entrypoint;
    enum pipe_video_chroma_format chroma_format;
    unsigned width;
    unsigned height;
diff --git a/src/gallium/state_trackers/xorg/xvmc/context.c b/src/gallium/state_trackers/xorg/xvmc/context.c
index 2690f8046a8..6f136f2b121 100644
--- a/src/gallium/state_trackers/xorg/xvmc/context.c
+++ b/src/gallium/state_trackers/xorg/xvmc/context.c
@@ -209,7 +209,7 @@ Status XvMCCreateContext(Display *dpy, XvPortID port, int surface_type_id,
       XVMC_MSG(XVMC_ERR, "[XvMC] Cannot decode requested surface type. Unsupported chroma format.\n");
       return BadImplementation;
    }
-   if (mc_type != (XVMC_IDCT | XVMC_MOCOMP | XVMC_MPEG_2)) {
+   if ((mc_type & ~XVMC_IDCT) != (XVMC_MOCOMP | XVMC_MPEG_2)) {
       XVMC_MSG(XVMC_ERR, "[XvMC] Cannot decode requested surface type. Non-MPEG2/Mocomp/iDCT acceleration unsupported.\n");
       return BadImplementation;
    }
@@ -241,6 +241,9 @@ Status XvMCCreateContext(Display *dpy, XvPortID port, int surface_type_id,
 
    context_priv->decoder = vctx->vpipe->create_decoder(vctx->vpipe,
                                                        ProfileToPipe(mc_type),
+                                                       (mc_type & XVMC_IDCT) ?
+                                                          PIPE_VIDEO_ENTRYPOINT_IDCT :
+                                                          PIPE_VIDEO_ENTRYPOINT_MC,
                                                        FormatToPipe(chroma_format),
                                                        width, height);
 
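
Note (not part of the patch): the new enum is deliberately ordered, so checks like
"entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT" mean "the decoder still has to run the
IDCT stage itself". A state tracker picks the entry point once, when it creates the
decoder, exactly as the xvmc/context.c hunk above does for XVMC_IDCT vs. plain
mocomp surfaces. The sketch below shows that call pattern in isolation; the helper
name and the app_does_idct flag are hypothetical, and vctx is assumed to be a valid
struct pipe_video_context obtained from the state tracker.

/* Usage sketch only, under the assumptions stated above. */
#include "pipe/p_defines.h"
#include "pipe/p_video_context.h"

static struct pipe_video_decoder *
create_mpeg2_decoder(struct pipe_video_context *vctx, int app_does_idct,
                     unsigned width, unsigned height)
{
   /* If the application runs the IDCT itself, the driver only needs to do
    * motion compensation; otherwise request the IDCT entry point. */
   enum pipe_video_entrypoint entrypoint = app_does_idct ?
      PIPE_VIDEO_ENTRYPOINT_MC : PIPE_VIDEO_ENTRYPOINT_IDCT;

   return vctx->create_decoder(vctx, PIPE_VIDEO_PROFILE_MPEG2_MAIN, entrypoint,
                               PIPE_VIDEO_CHROMA_FORMAT_420, width, height);
}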