[g3dvl] make motion vector buffers a public interface

This commit is contained in:
Christian König 2011-04-20 13:44:26 +02:00
parent 3511780a43
commit b7acf83d52
8 changed files with 185 additions and 126 deletions
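In short: the vertex buffer code no longer keeps motion vectors in its private vl_mv_vertex_stream packing, and pipe_mpeg12_macroblock no longer carries an mv[2] array. Instead each decode buffer exposes one stream of struct pipe_motionvector per reference frame through two new hooks, get_mv_stream_stride() and get_mv_stream(), and the state tracker writes its vectors straight into the mapped buffer. A minimal sketch of the resulting write path for a forward-only, frame-predicted MPEG-2 macroblock (mbs is assumed to be filled elsewhere; the helper name and the mb_x/mb_y/pmv_* parameters are illustrative, not part of this commit):

/* Sketch only, not part of the commit: feeding one forward-predicted
 * frame macroblock through the new decode buffer interface. */
static void
sketch_write_mv(struct pipe_video_decode_buffer *decbuf,
                struct pipe_mpeg12_macroblock *mbs, unsigned num_mbs,
                unsigned mb_x, unsigned mb_y, short pmv_x, short pmv_y)
{
   unsigned stride = decbuf->get_mv_stream_stride(decbuf);
   struct pipe_motionvector *fwd, *bwd;
   unsigned mv_pos = mb_x + mb_y * stride;

   decbuf->map(decbuf);
   fwd = decbuf->get_mv_stream(decbuf, 0);   /* forward reference frame */
   bwd = decbuf->get_mv_stream(decbuf, 1);   /* backward reference frame */

   /* frame prediction: both fields share the same vector */
   fwd[mv_pos].top.x = fwd[mv_pos].bottom.x = pmv_x;
   fwd[mv_pos].top.y = fwd[mv_pos].bottom.y = pmv_y;
   fwd[mv_pos].top.field_select = fwd[mv_pos].bottom.field_select = PIPE_VIDEO_FRAME;
   fwd[mv_pos].top.weight = fwd[mv_pos].bottom.weight = PIPE_VIDEO_MV_WEIGHT_MAX;

   /* forward-only prediction: the backward reference contributes nothing */
   bwd[mv_pos].top.weight = bwd[mv_pos].bottom.weight = PIPE_VIDEO_MV_WEIGHT_MIN;

   decbuf->add_macroblocks(decbuf, num_mbs, &mbs->base);
   decbuf->unmap(decbuf);
}

Because the weights are now expressed on the 0..PIPE_VIDEO_MV_WEIGHT_MAX (= 256) scale, the motion compensation shader divides by PIPE_VIDEO_MV_WEIGHT_MAX instead of 255, as the first hunk below shows.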

View File

@@ -136,7 +136,7 @@ create_ref_vert_shader(struct vl_mc *r)
0.5f / r->buffer_width,
0.5f / r->buffer_height,
1.0f / 4.0f,
1.0f / 255.0f);
1.0f / PIPE_VIDEO_MV_WEIGHT_MAX);
for (i = 0; i < 2; ++i) {
ureg_MAD(shader, ureg_writemask(o_vmv[i], TGSI_WRITEMASK_XY), mv_scale, vmv[i], ureg_src(t_vpos));

View File

@@ -231,6 +231,26 @@ vl_mpeg12_buffer_map(struct pipe_video_decode_buffer *buffer)
map_buffers(dec, buf);
}
static unsigned
vl_mpeg12_buffer_get_mv_stream_stride(struct pipe_video_decode_buffer *buffer)
{
struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
assert(buf);
return vl_vb_get_mv_stream_stride(&buf->vertex_stream);
}
static struct pipe_motionvector *
vl_mpeg12_buffer_get_mv_stream(struct pipe_video_decode_buffer *buffer, int ref_frame)
{
struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
assert(buf);
return vl_vb_get_mv_stream(&buf->vertex_stream, ref_frame);
}
static void
vl_mpeg12_buffer_add_macroblocks(struct pipe_video_decode_buffer *buffer,
unsigned num_macroblocks,
@@ -251,7 +271,6 @@ vl_mpeg12_buffer_add_macroblocks(struct pipe_video_decode_buffer *buffer,
assert(macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
for ( i = 0; i < num_macroblocks; ++i ) {
vl_vb_add_block(&buf->vertex_stream, &mb[i]);
upload_buffer(dec, buf, &mb[i]);
}
}
@@ -389,6 +408,8 @@ vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder)
buffer->base.decoder = decoder;
buffer->base.destroy = vl_mpeg12_buffer_destroy;
buffer->base.map = vl_mpeg12_buffer_map;
buffer->base.get_mv_stream_stride = vl_mpeg12_buffer_get_mv_stream_stride;
buffer->base.get_mv_stream = vl_mpeg12_buffer_get_mv_stream;
buffer->base.add_macroblocks = vl_mpeg12_buffer_add_macroblocks;
buffer->base.unmap = vl_mpeg12_buffer_unmap;

View File

@@ -38,11 +38,6 @@ struct vl_ycbcr_vertex_stream
uint8_t field;
};
struct vl_mv_vertex_stream
{
struct vertex4s mv[2];
};
/* vertices for a quad covering a block */
static const struct vertex2f block_quad[4] = {
{0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}, {0.0f, 1.0f}
@@ -242,7 +237,7 @@ vl_vb_init(struct vl_vertex_buffer *buffer, struct pipe_context *pipe,
pipe->screen,
PIPE_BIND_VERTEX_BUFFER,
PIPE_USAGE_STREAM,
sizeof(struct vl_mv_vertex_stream) * size
sizeof(struct pipe_motionvector) * size
);
}
@@ -270,7 +265,7 @@ vl_vb_get_mv(struct vl_vertex_buffer *buffer, int motionvector)
assert(buffer);
buf.stride = sizeof(struct vl_mv_vertex_stream);
buf.stride = sizeof(struct pipe_motionvector);
buf.buffer_offset = 0;
buf.buffer = buffer->mv[motionvector].resource;
@@ -324,39 +319,21 @@ void vl_vb_add_ycbcr(struct vl_vertex_buffer *buffer,
buffer->ycbcr[component].num_instances++;
}
static void
get_motion_vectors(enum pipe_mpeg12_motion_type mo_type, struct pipe_motionvector *src, struct vertex4s dst[2])
unsigned
vl_vb_get_mv_stream_stride(struct vl_vertex_buffer *buffer)
{
if (mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
dst[0].x = dst[1].x = src->top.x;
dst[0].y = dst[1].y = src->top.y;
dst[0].z = dst[1].z = 0;
assert(buffer);
} else {
dst[0].x = src->top.x;
dst[0].y = src->top.y;
dst[0].z = src->top.field_select ? 3 : 1;
dst[1].x = src->bottom.x;
dst[1].y = src->bottom.y;
dst[1].z = src->bottom.field_select ? 3 : 1;
}
dst[0].w = src->top.wheight;
dst[1].w = src->bottom.wheight;
return buffer->width;
}
void
vl_vb_add_block(struct vl_vertex_buffer *buffer, struct pipe_mpeg12_macroblock *mb)
struct pipe_motionvector *
vl_vb_get_mv_stream(struct vl_vertex_buffer *buffer, int ref_frame)
{
unsigned mv_pos;
assert(buffer);
assert(mb);
assert(ref_frame < VL_MAX_REF_FRAMES);
mv_pos = mb->mbx + mb->mby * buffer->width;
get_motion_vectors(mb->mo_type, &mb->mv[0], buffer->mv[0].vertex_stream[mv_pos].mv);
get_motion_vectors(mb->mo_type, &mb->mv[1], buffer->mv[1].vertex_stream[mv_pos].mv);
return buffer->mv[ref_frame].vertex_stream;
}
void

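On the vertex buffer side the conversion step is gone: vl_vb_get_mv_stream_stride() simply reports the buffer width in macroblocks and vl_vb_get_mv_stream() hands back the mapped pipe_motionvector array for one reference frame, so callers address entries with the same mbx + mby * width scheme the removed vl_vb_add_block() used. A sketch of the implied addressing (the helper name is illustrative):

/* Sketch only: one pipe_motionvector per macroblock, stored row after
 * row; "buffer" must have been mapped with vl_vb_map() first. */
static struct pipe_motionvector *
sketch_mv_at(struct vl_vertex_buffer *buffer, int ref_frame,
             unsigned mbx, unsigned mby)
{
   struct pipe_motionvector *stream = vl_vb_get_mv_stream(buffer, ref_frame);
   return &stream[mbx + mby * vl_vb_get_mv_stream_stride(buffer)];
}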
View File

@@ -62,9 +62,9 @@ struct vl_vertex_buffer
} ycbcr[VL_MAX_PLANES];
struct {
struct pipe_resource *resource;
struct pipe_transfer *transfer;
struct vl_mv_vertex_stream *vertex_stream;
struct pipe_resource *resource;
struct pipe_transfer *transfer;
struct pipe_motionvector *vertex_stream;
} mv[VL_MAX_REF_FRAMES];
};
@@ -80,17 +80,19 @@ void vl_vb_init(struct vl_vertex_buffer *buffer,
struct pipe_context *pipe,
unsigned width, unsigned height);
struct pipe_vertex_buffer vl_vb_get_ycbcr(struct vl_vertex_buffer *buffer, int component);
struct pipe_vertex_buffer vl_vb_get_mv(struct vl_vertex_buffer *buffer, int motionvector);
void vl_vb_map(struct vl_vertex_buffer *buffer, struct pipe_context *pipe);
struct pipe_vertex_buffer vl_vb_get_ycbcr(struct vl_vertex_buffer *buffer, int component);
void vl_vb_add_ycbcr(struct vl_vertex_buffer *buffer,
unsigned component, unsigned x, unsigned y,
bool intra, enum pipe_mpeg12_dct_type type);
void vl_vb_add_block(struct vl_vertex_buffer *buffer, struct pipe_mpeg12_macroblock *mb);
struct pipe_vertex_buffer vl_vb_get_mv(struct vl_vertex_buffer *buffer, int ref_frame);
unsigned vl_vb_get_mv_stream_stride(struct vl_vertex_buffer *buffer);
struct pipe_motionvector *vl_vb_get_mv_stream(struct vl_vertex_buffer *buffer, int ref_frame);
void vl_vb_unmap(struct vl_vertex_buffer *buffer, struct pipe_context *pipe);

View File

@@ -186,6 +186,16 @@ struct pipe_video_decode_buffer
*/
void (*map)(struct pipe_video_decode_buffer *decbuf);
/**
* get the stride of the mv buffer
*/
unsigned (*get_mv_stream_stride)(struct pipe_video_decode_buffer *decbuf);
/**
* get the pointer where to put the motion vectors of a ref frame
*/
struct pipe_motionvector *(*get_mv_stream)(struct pipe_video_decode_buffer *decbuf, int ref_frame);
#if 0
/**
* decode a bitstream

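Contract-wise, get_mv_stream_stride() can be queried right after create_buffer() — the XvMC surface code below does exactly that and caches the result as mv_stride — while the pointers returned by get_mv_stream() reference the mapped vertex buffer and are therefore only meaningful between map() and unmap(). A condensed sketch of the caching pattern the state tracker below uses (struct and function names are illustrative):

/* Sketch only: cache the stride once, re-fetch the stream pointers
 * every time the decode buffer is (re)mapped. */
struct sketch_surface {
   struct pipe_video_decode_buffer *decbuf;
   unsigned mv_stride;                  /* queried once after create_buffer() */
   struct pipe_motionvector *ref_mv[2]; /* valid only while mapped */
};

static void
sketch_map_for_rendering(struct sketch_surface *s)
{
   unsigned i;

   s->decbuf->map(s->decbuf);
   for (i = 0; i < 2; ++i)
      s->ref_mv[i] = s->decbuf->get_mv_stream(s->decbuf, i);
}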
View File

@@ -50,31 +50,43 @@ enum pipe_mpeg12_picture_type
PIPE_MPEG12_PICTURE_TYPE_FRAME
};
enum pipe_mpeg12_motion_type
{
PIPE_MPEG12_MOTION_TYPE_FIELD,
PIPE_MPEG12_MOTION_TYPE_FRAME,
PIPE_MPEG12_MOTION_TYPE_DUALPRIME,
PIPE_MPEG12_MOTION_TYPE_16x8
};
enum pipe_mpeg12_dct_type
{
PIPE_MPEG12_DCT_TYPE_FIELD,
PIPE_MPEG12_DCT_TYPE_FRAME
};
enum pipe_video_field_select
{
PIPE_VIDEO_FRAME = 0,
PIPE_VIDEO_TOP_FIELD = 1,
PIPE_VIDEO_BOTTOM_FIELD = 3,
/* TODO
PIPE_VIDEO_DUALPRIME
PIPE_VIDEO_16x8
*/
};
enum pipe_video_mv_weight
{
PIPE_VIDEO_MV_WEIGHT_MIN = 0,
PIPE_VIDEO_MV_WEIGHT_HALF = 128,
PIPE_VIDEO_MV_WEIGHT_MAX = 256
};
struct pipe_macroblock
{
enum pipe_video_codec codec;
};
/* bitfields because this is used as a vertex buffer element */
struct pipe_motionvector
{
struct {
signed x, y;
bool field_select;
unsigned wheight:8;
signed x:16, y:16;
enum pipe_video_field_select field_select:16;
enum pipe_video_mv_weight weight:16;
} top, bottom;
};
@@ -84,10 +96,8 @@ struct pipe_mpeg12_macroblock
unsigned mbx;
unsigned mby;
enum pipe_mpeg12_motion_type mo_type;
bool dct_intra;
enum pipe_mpeg12_dct_type dct_type;
struct pipe_motionvector mv[2];
unsigned cbp;
short *blocks;
};

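The new pipe_motionvector packs each field's vector into four 16-bit members (x, y, field_select, weight), 16 bytes per macroblock and reference frame, so the stream can be bound directly as a per-instance vertex element. field_select takes over what the old vertex z component encoded (frame vs. top/bottom field), and weight replaces the old 8-bit "wheight" with the PIPE_VIDEO_MV_WEIGHT_MIN..MAX scale. A sketch of how a field-predicted MPEG-2 vector pair maps onto the new layout, assuming fs_top/fs_bottom are the decoded motion_vertical_field_select bits (the helper is illustrative and mirrors the XvMC conversion below):

/* Sketch only: converting one MPEG-2 field-predicted vector pair into
 * the new layout.  Frame prediction would instead copy the same vector
 * into both halves with PIPE_VIDEO_FRAME as field_select. */
static struct pipe_motionvector
sketch_field_mv(short top_x, short top_y, bool fs_top,
                short bot_x, short bot_y, bool fs_bottom,
                enum pipe_video_mv_weight weight)
{
   struct pipe_motionvector mv;

   mv.top.x = top_x;
   mv.top.y = top_y;
   mv.top.field_select = fs_top ? PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
   mv.top.weight = weight;

   mv.bottom.x = bot_x;
   mv.bottom.y = bot_y;
   mv.bottom.field_select = fs_bottom ? PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
   mv.bottom.weight = weight;

   return mv;
}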
View File

@@ -60,30 +60,84 @@ static enum pipe_mpeg12_picture_type PictureToPipe(int xvmc_pic)
return -1;
}
static enum pipe_mpeg12_motion_type MotionToPipe(int xvmc_motion_type, unsigned xvmc_picture_structure)
static inline void
MacroBlockTypeToPipeWeights(const XvMCMacroBlock *xvmc_mb, unsigned weights[2])
{
switch (xvmc_motion_type) {
assert(xvmc_mb);
switch (xvmc_mb->macroblock_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) {
case XVMC_MB_TYPE_MOTION_FORWARD:
weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
break;
case (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD):
weights[0] = PIPE_VIDEO_MV_WEIGHT_HALF;
weights[1] = PIPE_VIDEO_MV_WEIGHT_HALF;
break;
case XVMC_MB_TYPE_MOTION_BACKWARD:
weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
weights[1] = PIPE_VIDEO_MV_WEIGHT_MAX;
break;
default:
/* workaround for xines xxmc video out plugin */
if (!(xvmc_mb->macroblock_type & ~XVMC_MB_TYPE_PATTERN)) {
weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
} else {
weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
}
break;
}
}
static inline struct pipe_motionvector
MotionVectorToPipe(const XvMCMacroBlock *xvmc_mb, unsigned vector,
unsigned field_select_mask, unsigned weight)
{
struct pipe_motionvector mv;
assert(xvmc_mb);
switch (xvmc_mb->motion_type) {
case XVMC_PREDICTION_FRAME:
if (xvmc_picture_structure == XVMC_FRAME_PICTURE)
return PIPE_MPEG12_MOTION_TYPE_FRAME;
else
return PIPE_MPEG12_MOTION_TYPE_16x8;
mv.top.x = xvmc_mb->PMV[0][vector][0];
mv.top.y = xvmc_mb->PMV[0][vector][1];
mv.top.field_select = PIPE_VIDEO_FRAME;
mv.top.weight = weight;
mv.bottom.x = xvmc_mb->PMV[0][vector][0];
mv.bottom.y = xvmc_mb->PMV[0][vector][1];
mv.bottom.weight = weight;
mv.bottom.field_select = PIPE_VIDEO_FRAME;
break;
case XVMC_PREDICTION_FIELD:
return PIPE_MPEG12_MOTION_TYPE_FIELD;
mv.top.x = xvmc_mb->PMV[0][vector][0];
mv.top.y = xvmc_mb->PMV[0][vector][1];
mv.top.field_select = (xvmc_mb->motion_vertical_field_select & field_select_mask) ?
PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
mv.top.weight = weight;
case XVMC_PREDICTION_DUAL_PRIME:
return PIPE_MPEG12_MOTION_TYPE_DUALPRIME;
mv.bottom.x = xvmc_mb->PMV[1][vector][0];
mv.bottom.y = xvmc_mb->PMV[1][vector][1];
mv.bottom.field_select = (xvmc_mb->motion_vertical_field_select & (field_select_mask << 2)) ?
PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
mv.bottom.weight = weight;
break;
default: // TODO: Support DUALPRIME and 16x8
break;
}
XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized motion type 0x%08X (with picture structure 0x%08X).\n", xvmc_motion_type, xvmc_picture_structure);
return -1;
return mv;
}
static void
MacroBlocksToPipe(struct pipe_screen *screen,
MacroBlocksToPipe(XvMCSurfacePrivate *surface,
unsigned int xvmc_picture_structure,
const XvMCMacroBlock *xvmc_mb,
const XvMCBlockArray *xvmc_blocks,
@@ -98,63 +152,33 @@ MacroBlocksToPipe(struct pipe_screen *screen,
assert(num_macroblocks);
for (i = 0; i < num_macroblocks; ++i) {
unsigned mv_pos = xvmc_mb->x + surface->mv_stride * xvmc_mb->y;
unsigned mv_weights[2];
mb->base.codec = PIPE_VIDEO_CODEC_MPEG12;
mb->mbx = xvmc_mb->x;
mb->mby = xvmc_mb->y;
if (!(xvmc_mb->macroblock_type & XVMC_MB_TYPE_INTRA))
mb->mo_type = MotionToPipe(xvmc_mb->motion_type, xvmc_picture_structure);
/* Get rid of Valgrind 'undefined' warnings */
else
mb->mo_type = -1;
mb->dct_intra = xvmc_mb->macroblock_type & XVMC_MB_TYPE_INTRA;
mb->dct_type = xvmc_mb->dct_type == XVMC_DCT_TYPE_FIELD ?
PIPE_MPEG12_DCT_TYPE_FIELD : PIPE_MPEG12_DCT_TYPE_FRAME;
switch (xvmc_mb->macroblock_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) {
case XVMC_MB_TYPE_MOTION_FORWARD:
mb->mv[0].top.wheight = mb->mv[0].bottom.wheight = 255;
mb->mv[1].top.wheight = mb->mv[1].bottom.wheight = 0;
break;
case (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD):
mb->mv[0].top.wheight = mb->mv[0].bottom.wheight = 127;
mb->mv[1].top.wheight = mb->mv[1].bottom.wheight = 127;
break;
case XVMC_MB_TYPE_MOTION_BACKWARD:
mb->mv[0].top.wheight = mb->mv[0].bottom.wheight = 0;
mb->mv[1].top.wheight = mb->mv[1].bottom.wheight = 255;
break;
default:
/* workaround for xines xxmc video out plugin */
if (!(xvmc_mb->macroblock_type & ~XVMC_MB_TYPE_PATTERN)) {
mb->mv[0].top.wheight = mb->mv[0].bottom.wheight = 255;
mb->mv[1].top.wheight = mb->mv[1].bottom.wheight = 0;
} else {
mb->mv[0].top.wheight = mb->mv[0].bottom.wheight = 0;
mb->mv[1].top.wheight = mb->mv[1].bottom.wheight = 0;
}
break;
}
for (j = 0; j < 2; ++j) {
mb->mv[j].top.x = xvmc_mb->PMV[0][j][0];
mb->mv[j].top.y = xvmc_mb->PMV[0][j][1];
mb->mv[j].bottom.x = xvmc_mb->PMV[1][j][0];
mb->mv[j].bottom.y = xvmc_mb->PMV[1][j][1];
}
mb->mv[0].top.field_select = xvmc_mb->motion_vertical_field_select & XVMC_SELECT_FIRST_FORWARD;
mb->mv[1].top.field_select = xvmc_mb->motion_vertical_field_select & XVMC_SELECT_FIRST_BACKWARD;
mb->mv[0].bottom.field_select = xvmc_mb->motion_vertical_field_select & XVMC_SELECT_SECOND_FORWARD;
mb->mv[1].bottom.field_select = xvmc_mb->motion_vertical_field_select & XVMC_SELECT_SECOND_BACKWARD;
mb->cbp = xvmc_mb->coded_block_pattern;
mb->blocks = xvmc_blocks->blocks + xvmc_mb->index * BLOCK_SIZE_SAMPLES;
MacroBlockTypeToPipeWeights(xvmc_mb, mv_weights);
for (j = 0; j < 2; ++j) {
if (!surface->ref[j].mv) continue;
surface->ref[j].mv[mv_pos] = MotionVectorToPipe
(
xvmc_mb, j,
j ? XVMC_SELECT_FIRST_BACKWARD : XVMC_SELECT_FIRST_FORWARD,
mv_weights[j]
);
}
++mb;
++xvmc_mb;
}
@@ -172,13 +196,13 @@ unmap_and_flush_surface(XvMCSurfacePrivate *surface)
context_priv = surface->context->privData;
for ( i = 0; i < 2; ++i ) {
if (surface->ref_surfaces[i]) {
XvMCSurfacePrivate *ref = surface->ref_surfaces[i]->privData;
if (surface->ref[i].surface) {
XvMCSurfacePrivate *ref = surface->ref[i].surface->privData;
assert(ref);
unmap_and_flush_surface(ref);
surface->ref_surfaces[i] = NULL;
surface->ref[i].surface = NULL;
ref_frames[i] = ref->video_buffer;
} else {
ref_frames[i] = NULL;
@@ -225,6 +249,7 @@ Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surfac
return BadAlloc;
surface_priv->decode_buffer = context_priv->decoder->create_buffer(context_priv->decoder);
surface_priv->mv_stride = surface_priv->decode_buffer->get_mv_stream_stride(surface_priv->decode_buffer);
surface_priv->video_buffer = vpipe->create_buffer(vpipe, PIPE_FORMAT_YV12, //TODO
resource_formats,
context_priv->decoder->chroma_format,
@@ -262,6 +287,8 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
XvMCSurfacePrivate *future_surface_priv;
XvMCMacroBlock *xvmc_mb;
unsigned i;
struct pipe_mpeg12_macroblock pipe_macroblocks[num_macroblocks];
XVMC_MSG(XVMC_TRACE, "[XvMC] Rendering to surface %p, with past %p and future %p\n",
@@ -319,23 +346,30 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
/* If the surface we're rendering hasn't changed the ref frames shouldn't change. */
if (target_surface_priv->mapped && (
target_surface_priv->ref_surfaces[0] != past_surface ||
target_surface_priv->ref_surfaces[1] != future_surface ||
target_surface_priv->ref[0].surface != past_surface ||
target_surface_priv->ref[1].surface != future_surface ||
(xvmc_mb->x == 0 && xvmc_mb->y == 0))) {
// If they change anyway we need to clear our surface
unmap_and_flush_surface(target_surface_priv);
}
MacroBlocksToPipe(vpipe->screen, picture_structure, xvmc_mb, blocks, num_macroblocks, pipe_macroblocks);
if (!target_surface_priv->mapped) {
t_buffer->map(t_buffer);
target_surface_priv->ref_surfaces[0] = past_surface;
target_surface_priv->ref_surfaces[1] = future_surface;
for (i = 0; i < 2; ++i) {
target_surface_priv->ref[i].surface = i == 0 ? past_surface : future_surface;
if (target_surface_priv->ref[i].surface)
target_surface_priv->ref[i].mv = t_buffer->get_mv_stream(t_buffer, i);
else
target_surface_priv->ref[i].mv = NULL;
}
target_surface_priv->mapped = 1;
}
MacroBlocksToPipe(target_surface_priv, picture_structure, xvmc_mb, blocks, num_macroblocks, pipe_macroblocks);
t_buffer->add_macroblocks(t_buffer, num_macroblocks, &pipe_macroblocks->base);
XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for rendering.\n", target_surface);

View File

@@ -71,7 +71,12 @@ typedef struct
bool mapped; // are we still mapped to memory?
XvMCSurface *ref_surfaces[2];
unsigned mv_stride;
struct {
XvMCSurface *surface;
struct pipe_motionvector *mv;
} ref[2];
struct pipe_fence_handle *flush_fence;
struct pipe_fence_handle *render_fence;