va: Add support for VPP rotation, flip, alpha blend, crop, scaling

Reviewed-by: Ruijing Dong <ruijing.dong@amd.com>
Reviewed-by: Jesse Natalie <jenatali@microsoft.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17557>
Authored by Sil Vilerino on 2022-07-15 08:01:41 -04:00; committed by Marge Bot.
parent d09cf4333c
commit 345fd92092
5 changed files with 174 additions and 2 deletions

View File

@ -249,7 +249,10 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
if (!context)
return VA_STATUS_ERROR_ALLOCATION_FAILED;
if (is_vpp) {
if (is_vpp && !drv->vscreen->pscreen->get_video_param(drv->vscreen->pscreen,
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_SUPPORTED)) {
context->decoder = NULL;
} else {
if (config->entrypoint != PIPE_VIDEO_ENTRYPOINT_PROCESSING) {

View File

@ -86,6 +86,14 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
context->target->buffer_format != PIPE_FORMAT_P016)
return VA_STATUS_ERROR_UNIMPLEMENTED;
if (drv->pipe->screen->get_video_param(drv->pipe->screen,
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_SUPPORTED)) {
context->needs_begin_frame = true;
context->vpp_needs_flush_on_endpic = true;
}
return VA_STATUS_SUCCESS;
}
@ -822,6 +830,12 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
} else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
context->desc.h265enc.frame_num++;
else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_PROCESSING &&
context->vpp_needs_flush_on_endpic) {
context->decoder->flush(context->decoder);
context->vpp_needs_flush_on_endpic = false;
}
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}

View File

@ -111,6 +111,74 @@ static void vlVaGetBox(struct pipe_video_buffer *buf, unsigned idx,
box->height = height;
}
/**
 * Blit using the dedicated video-engine post-processing path (VPP) instead
 * of the shader-based compositor.
 *
 * Fills context->desc.vidproc from the VA pipeline parameters (regions,
 * rotation, mirroring, global-alpha blend) and submits the frame through the
 * decoder's begin_frame/process_frame hooks.
 *
 * Returns VA_STATUS_ERROR_UNIMPLEMENTED for any request this path cannot
 * handle, so the caller can fall back to the other blit implementations.
 */
static VAStatus vlVaVidEngineBlit(vlVaDriver *drv, vlVaContext *context,
                                  const VARectangle *src_region,
                                  const VARectangle *dst_region,
                                  struct pipe_video_buffer *src,
                                  struct pipe_video_buffer *dst,
                                  enum vl_compositor_deinterlace deinterlace,
                                  VAProcPipelineParameterBuffer* param)
{
   /* Deinterlacing is not supported by the video-engine path. */
   if (deinterlace != VL_COMPOSITOR_NONE)
      return VA_STATUS_ERROR_UNIMPLEMENTED;

   /* Only NV12 -> NV12 is handled for now. */
   if (src->buffer_format != PIPE_FORMAT_NV12 ||
       dst->buffer_format != PIPE_FORMAT_NV12)
      return VA_STATUS_ERROR_UNIMPLEMENTED;

   /* Convert VA rectangles (x, y, width, height) to u_rect corner form. */
   struct u_rect src_rect;
   struct u_rect dst_rect;

   src_rect.x0 = src_region->x;
   src_rect.y0 = src_region->y;
   src_rect.x1 = src_region->x + src_region->width;
   src_rect.y1 = src_region->y + src_region->height;

   dst_rect.x0 = dst_region->x;
   dst_rect.y0 = dst_region->y;
   dst_rect.x1 = dst_region->x + dst_region->width;
   dst_rect.y1 = dst_region->y + dst_region->height;

   context->desc.vidproc.base.input_format = src->buffer_format;
   context->desc.vidproc.base.output_format = dst->buffer_format;

   context->desc.vidproc.src_region = src_rect;
   context->desc.vidproc.dst_region = dst_rect;

   /* Map the VA rotation to a pipe orientation.  Reject rotation values we
    * do not know about rather than silently keeping a stale orientation
    * left in the (reused) context descriptor by a previous frame. */
   if (param->rotation_state == VA_ROTATION_NONE)
      context->desc.vidproc.orientation = PIPE_VIDEO_VPP_ORIENTATION_DEFAULT;
   else if (param->rotation_state == VA_ROTATION_90)
      context->desc.vidproc.orientation = PIPE_VIDEO_VPP_ROTATION_90;
   else if (param->rotation_state == VA_ROTATION_180)
      context->desc.vidproc.orientation = PIPE_VIDEO_VPP_ROTATION_180;
   else if (param->rotation_state == VA_ROTATION_270)
      context->desc.vidproc.orientation = PIPE_VIDEO_VPP_ROTATION_270;
   else
      return VA_STATUS_ERROR_UNIMPLEMENTED;

   /* Mirroring is OR'ed on top of the rotation bits. */
   if (param->mirror_state == VA_MIRROR_HORIZONTAL)
      context->desc.vidproc.orientation |= PIPE_VIDEO_VPP_FLIP_HORIZONTAL;
   if (param->mirror_state == VA_MIRROR_VERTICAL)
      context->desc.vidproc.orientation |= PIPE_VIDEO_VPP_FLIP_VERTICAL;

   /* Reset blend state every call; only global alpha is supported. */
   memset(&context->desc.vidproc.blend, 0, sizeof(context->desc.vidproc.blend));
   context->desc.vidproc.blend.mode = PIPE_VIDEO_VPP_BLEND_MODE_NONE;
   if (param->blend_state != NULL) {
      if (param->blend_state->flags & VA_BLEND_GLOBAL_ALPHA) {
         context->desc.vidproc.blend.mode = PIPE_VIDEO_VPP_BLEND_MODE_GLOBAL_ALPHA;
         context->desc.vidproc.blend.global_alpha = param->blend_state->global_alpha;
      }
   }

   /* Perform begin_frame lazily: only once per frame submission. */
   if (context->needs_begin_frame) {
      context->decoder->begin_frame(context->decoder, dst,
                                    &context->desc.base);
      context->needs_begin_frame = false;
   }
   context->decoder->process_frame(context->decoder, src, &context->desc.vidproc);

   /* EndPicture must flush this engine submission. */
   context->vpp_needs_flush_on_endpic = true;

   return VA_STATUS_SUCCESS;
}
static VAStatus vlVaPostProcBlit(vlVaDriver *drv, vlVaContext *context,
const VARectangle *src_region,
const VARectangle *dst_region,
@ -402,6 +470,26 @@ vlVaHandleVAProcPipelineParameterBufferType(vlVaDriver *drv, vlVaContext *contex
src_region = vlVaRegionDefault(param->surface_region, src_surface, &def_src_region);
dst_region = vlVaRegionDefault(param->output_region, dst_surface, &def_dst_region);
/* If the driver supports video engine post proc, attempt to do that
* if it fails, fallback to the other existing implementations below
*/
if (pscreen->get_video_param(pscreen,
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_SUPPORTED)) {
if (!context->decoder) {
context->decoder = drv->pipe->create_video_codec(drv->pipe, &context->templat);
if (!context->decoder)
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
/* Perform VPBlit, if fail, fallback to other implementations below */
if (VA_STATUS_SUCCESS == vlVaVidEngineBlit(drv, context, src_region, dst_region,
src, context->target, deinterlace, param))
return VA_STATUS_SUCCESS;
}
/* Try other post proc implementations */
if (context->target->buffer_format != PIPE_FORMAT_NV12 &&
context->target->buffer_format != PIPE_FORMAT_P010 &&
context->target->buffer_format != PIPE_FORMAT_P016)

View File

@ -523,7 +523,12 @@ vlVaQuerySurfaceAttributes(VADriverContextP ctx, VAConfigID config_id,
}
#endif
if (config->entrypoint != PIPE_VIDEO_ENTRYPOINT_PROCESSING) {
/* If VPP supported entry, use the max dimensions cap values, if not fallback to this below */
if (config->entrypoint != PIPE_VIDEO_ENTRYPOINT_PROCESSING ||
pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_SUPPORTED))
{
attribs[i].type = VASurfaceAttribMaxWidth;
attribs[i].value.type = VAGenericValueTypeInteger;
attribs[i].flags = VA_SURFACE_ATTRIB_GETTABLE;
@ -1146,6 +1151,66 @@ vlVaQueryVideoProcPipelineCaps(VADriverContextP ctx, VAContextID context,
pipeline_cap->num_output_color_standards = ARRAY_SIZE(vpp_output_color_standards);
pipeline_cap->output_color_standards = vpp_output_color_standards;
struct pipe_screen *pscreen = VL_VA_PSCREEN(ctx);
uint32_t pipe_orientation_flags = pscreen->get_video_param(pscreen,
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_ORIENTATION_MODES);
pipeline_cap->rotation_flags = VA_ROTATION_NONE;
if(pipe_orientation_flags & PIPE_VIDEO_VPP_ROTATION_90)
pipeline_cap->rotation_flags |= (1 << VA_ROTATION_90);
if(pipe_orientation_flags & PIPE_VIDEO_VPP_ROTATION_180)
pipeline_cap->rotation_flags |= (1 << VA_ROTATION_180);
if(pipe_orientation_flags & PIPE_VIDEO_VPP_ROTATION_270)
pipeline_cap->rotation_flags |= (1 << VA_ROTATION_270);
pipeline_cap->mirror_flags = VA_MIRROR_NONE;
if(pipe_orientation_flags & PIPE_VIDEO_VPP_FLIP_HORIZONTAL)
pipeline_cap->mirror_flags |= VA_MIRROR_HORIZONTAL;
if(pipe_orientation_flags & PIPE_VIDEO_VPP_FLIP_VERTICAL)
pipeline_cap->mirror_flags |= VA_MIRROR_VERTICAL;
pipeline_cap->max_input_width = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MAX_INPUT_WIDTH);
pipeline_cap->max_input_height = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MAX_INPUT_HEIGHT);
pipeline_cap->min_input_width = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MIN_INPUT_WIDTH);
pipeline_cap->min_input_height = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MIN_INPUT_HEIGHT);
pipeline_cap->max_output_width = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MAX_OUTPUT_WIDTH);
pipeline_cap->max_output_height = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MAX_OUTPUT_HEIGHT);
pipeline_cap->min_output_width = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MIN_OUTPUT_WIDTH);
pipeline_cap->min_output_height = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MIN_OUTPUT_HEIGHT);
uint32_t pipe_blend_modes = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_BLEND_MODES);
pipeline_cap->blend_flags = 0;
if (pipe_blend_modes & PIPE_VIDEO_VPP_BLEND_MODE_GLOBAL_ALPHA)
pipeline_cap->blend_flags |= VA_BLEND_GLOBAL_ALPHA;
for (i = 0; i < num_filters; i++) {
vlVaBuffer *buf = handle_table_get(VL_VA_DRIVER(ctx)->htab, filters[i]);
VAProcFilterParameterBufferBase *filter;

View File

@ -293,6 +293,7 @@ typedef struct {
struct pipe_av1_picture_desc av1;
struct pipe_h264_enc_picture_desc h264enc;
struct pipe_h265_enc_picture_desc h265enc;
struct pipe_vpp_desc vidproc;
} desc;
struct {
@ -317,6 +318,7 @@ typedef struct {
bool first_single_submitted;
int gop_coeff;
bool needs_begin_frame;
bool vpp_needs_flush_on_endpic;
void *blit_cs;
int packed_header_type;
} vlVaContext;