frontends/va: Move remaining processing functions to postproc.c

Reviewed-by: Leo Liu <leo.liu@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/37545>
This commit is contained in:
David Rosca
2025-10-16 16:51:05 +02:00
committed by Marge Bot
parent 7a5270d4df
commit 09ff0fa005
2 changed files with 224 additions and 224 deletions

View File

@@ -35,9 +35,231 @@
#include "va_private.h"
/* Color standards advertised for both VPP input and output surfaces;
 * shared by vlVaQueryVideoProcPipelineCaps for the in/out capability lists. */
static VAProcColorStandardType vpp_color_standards[] = {
VAProcColorStandardBT601,
VAProcColorStandardBT709,
VAProcColorStandardBT2020,
VAProcColorStandardExplicit
};
VAStatus
vlVaQueryVideoProcPipelineCaps(VADriverContextP ctx, VAContextID context,
VABufferID *filters, unsigned int num_filters,
VAProcPipelineCaps *pipeline_cap)
{
unsigned int i = 0;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
if (!pipeline_cap)
return VA_STATUS_ERROR_INVALID_PARAMETER;
if (num_filters && !filters)
return VA_STATUS_ERROR_INVALID_PARAMETER;
pipeline_cap->pipeline_flags = 0;
pipeline_cap->filter_flags = 0;
pipeline_cap->num_forward_references = 0;
pipeline_cap->num_backward_references = 0;
pipeline_cap->rotation_flags = VA_ROTATION_NONE;
pipeline_cap->mirror_flags = VA_MIRROR_NONE;
struct pipe_screen *pscreen = VL_VA_PSCREEN(ctx);
bool media_only = !pscreen->caps.graphics && !pscreen->caps.compute;
uint32_t pipe_orientation_flags = pscreen->get_video_param(pscreen,
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_ORIENTATION_MODES);
if (!media_only || pipe_orientation_flags & PIPE_VIDEO_VPP_ROTATION_90)
pipeline_cap->rotation_flags |= (1 << VA_ROTATION_90);
if (!media_only || pipe_orientation_flags & PIPE_VIDEO_VPP_ROTATION_180)
pipeline_cap->rotation_flags |= (1 << VA_ROTATION_180);
if (!media_only || pipe_orientation_flags & PIPE_VIDEO_VPP_ROTATION_270)
pipeline_cap->rotation_flags |= (1 << VA_ROTATION_270);
if (!media_only || pipe_orientation_flags & PIPE_VIDEO_VPP_FLIP_HORIZONTAL)
pipeline_cap->mirror_flags |= VA_MIRROR_HORIZONTAL;
if (!media_only || pipe_orientation_flags & PIPE_VIDEO_VPP_FLIP_VERTICAL)
pipeline_cap->mirror_flags |= VA_MIRROR_VERTICAL;
pipeline_cap->input_color_standards = vpp_color_standards;
pipeline_cap->num_input_color_standards = ARRAY_SIZE(vpp_color_standards);
pipeline_cap->output_color_standards = vpp_color_standards;
pipeline_cap->num_output_color_standards = ARRAY_SIZE(vpp_color_standards);
pipeline_cap->max_input_width = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MAX_INPUT_WIDTH);
pipeline_cap->max_input_height = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MAX_INPUT_HEIGHT);
pipeline_cap->min_input_width = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MIN_INPUT_WIDTH);
pipeline_cap->min_input_height = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MIN_INPUT_HEIGHT);
pipeline_cap->max_output_width = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MAX_OUTPUT_WIDTH);
pipeline_cap->max_output_height = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MAX_OUTPUT_HEIGHT);
pipeline_cap->min_output_width = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MIN_OUTPUT_WIDTH);
pipeline_cap->min_output_height = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MIN_OUTPUT_HEIGHT);
uint32_t pipe_blend_modes = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_BLEND_MODES);
pipeline_cap->blend_flags = 0;
if (pipe_blend_modes & PIPE_VIDEO_VPP_BLEND_MODE_GLOBAL_ALPHA)
pipeline_cap->blend_flags |= VA_BLEND_GLOBAL_ALPHA;
vlVaDriver *drv = VL_VA_DRIVER(ctx);
mtx_lock(&drv->mutex);
for (i = 0; i < num_filters; i++) {
vlVaBuffer *buf = handle_table_get(drv->htab, filters[i]);
VAProcFilterParameterBufferBase *filter;
if (!buf || buf->type != VAProcFilterParameterBufferType) {
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
}
filter = buf->data;
switch (filter->type) {
case VAProcFilterDeinterlacing: {
VAProcFilterParameterBufferDeinterlacing *deint = buf->data;
if (deint->algorithm == VAProcDeinterlacingMotionAdaptive) {
pipeline_cap->num_forward_references = 2;
pipeline_cap->num_backward_references = 1;
}
break;
}
default:
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_UNIMPLEMENTED;
}
}
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
/**
 * vaQueryVideoProcFilters entrypoint.
 *
 * Reports the filter types supported by \p context_id. Only deinterlacing
 * is advertised, and only for non-encode, non-bitstream contexts (those
 * get an empty list).
 */
VAStatus
vlVaQueryVideoProcFilters(VADriverContextP ctx, VAContextID context_id,
                          VAProcFilterType *filters, unsigned int *num_filters)
{
   vlVaDriver *drv;
   vlVaContext *context;
   unsigned int num = 0;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!num_filters || !filters)
      return VA_STATUS_ERROR_INVALID_PARAMETER;

   /* Fix: only dereference ctx after the NULL check above; the original
    * initialized drv from ctx before validating it. */
   drv = VL_VA_DRIVER(ctx);

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }

   /* Encode and bitstream contexts do not support VPP filters. */
   if (context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE &&
       context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
      filters[num++] = VAProcFilterDeinterlacing;
   mtx_unlock(&drv->mutex);

   *num_filters = num;

   return VA_STATUS_SUCCESS;
}
/**
 * vaQueryVideoProcFilterCaps entrypoint.
 *
 * Reports the capabilities of one filter \p type. Only deinterlacing is
 * implemented (Bob, Weave, MotionAdaptive); other known filter types
 * return VA_STATUS_ERROR_UNIMPLEMENTED. On success *num_filter_caps is
 * set to the number of entries written into \p filter_caps.
 */
VAStatus
vlVaQueryVideoProcFilterCaps(VADriverContextP ctx, VAContextID context_id,
                             VAProcFilterType type, void *filter_caps,
                             unsigned int *num_filter_caps)
{
   vlVaDriver *drv;
   vlVaContext *context;
   unsigned int i;
   bool supports_filters;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!filter_caps || !num_filter_caps)
      return VA_STATUS_ERROR_INVALID_PARAMETER;

   /* Fix: only dereference ctx after the NULL check above; the original
    * initialized drv from ctx before validating it. */
   drv = VL_VA_DRIVER(ctx);

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }
   /* Encode and bitstream contexts do not support VPP filters. */
   supports_filters = context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE &&
                      context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
   mtx_unlock(&drv->mutex);

   i = 0;
   switch (type) {
   case VAProcFilterNone:
      break;

   case VAProcFilterDeinterlacing: {
      VAProcFilterCapDeinterlacing *deint = filter_caps;

      if (!supports_filters)
         return VA_STATUS_ERROR_UNIMPLEMENTED;

      if (*num_filter_caps < 3) {
         /* Tell the caller how many entries it must provide. */
         *num_filter_caps = 3;
         return VA_STATUS_ERROR_MAX_NUM_EXCEEDED;
      }
      deint[i++].type = VAProcDeinterlacingBob;
      deint[i++].type = VAProcDeinterlacingWeave;
      deint[i++].type = VAProcDeinterlacingMotionAdaptive;
      break;
   }

   case VAProcFilterNoiseReduction:
   case VAProcFilterSharpening:
   case VAProcFilterColorBalance:
   case VAProcFilterSkinToneEnhancement:
      return VA_STATUS_ERROR_UNIMPLEMENTED;

   default:
      /* Unknown filter type: caller bug. In release builds this falls
       * through and reports zero caps, as before. */
      assert(0);
      break;
   }

   *num_filter_caps = i;

   return VA_STATUS_SUCCESS;
}
static const VARectangle *
vlVaRegionDefault(const VARectangle *region, vlVaSurface *surf,
VARectangle *def)
VARectangle *def)
{
if (region)
return region;
@@ -366,7 +588,7 @@ vlVaHandleVAProcPipelineParameterBufferType(vlVaDriver *drv, vlVaContext *contex
case VAProcDeinterlacingMotionAdaptive:
src = vlVaApplyDeint(drv, context, param, src,
!!(deint->flags & VA_DEINTERLACING_BOTTOM_FIELD));
!!(deint->flags & VA_DEINTERLACING_BOTTOM_FIELD));
deinterlace = VL_COMPOSITOR_MOTION_ADAPTIVE;
break;

View File

@@ -1324,228 +1324,6 @@ no_res:
return vaStatus;
}
/**
 * vaQueryVideoProcFilters entrypoint.
 *
 * Reports the filter types supported by \p context_id. Only deinterlacing
 * is advertised, and only for non-encode, non-bitstream contexts (those
 * get an empty list).
 */
VAStatus
vlVaQueryVideoProcFilters(VADriverContextP ctx, VAContextID context_id,
                          VAProcFilterType *filters, unsigned int *num_filters)
{
   vlVaDriver *drv;
   vlVaContext *context;
   unsigned int num = 0;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!num_filters || !filters)
      return VA_STATUS_ERROR_INVALID_PARAMETER;

   /* Fix: only dereference ctx after the NULL check above; the original
    * initialized drv from ctx before validating it. */
   drv = VL_VA_DRIVER(ctx);

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }

   /* Encode and bitstream contexts do not support VPP filters. */
   if (context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE &&
       context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
      filters[num++] = VAProcFilterDeinterlacing;
   mtx_unlock(&drv->mutex);

   *num_filters = num;

   return VA_STATUS_SUCCESS;
}
/**
 * vaQueryVideoProcFilterCaps entrypoint.
 *
 * Reports the capabilities of one filter \p type. Only deinterlacing is
 * implemented (Bob, Weave, MotionAdaptive); other known filter types
 * return VA_STATUS_ERROR_UNIMPLEMENTED. On success *num_filter_caps is
 * set to the number of entries written into \p filter_caps.
 */
VAStatus
vlVaQueryVideoProcFilterCaps(VADriverContextP ctx, VAContextID context_id,
                             VAProcFilterType type, void *filter_caps,
                             unsigned int *num_filter_caps)
{
   vlVaDriver *drv;
   vlVaContext *context;
   unsigned int i;
   bool supports_filters;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!filter_caps || !num_filter_caps)
      return VA_STATUS_ERROR_INVALID_PARAMETER;

   /* Fix: only dereference ctx after the NULL check above; the original
    * initialized drv from ctx before validating it. */
   drv = VL_VA_DRIVER(ctx);

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }
   /* Encode and bitstream contexts do not support VPP filters. */
   supports_filters = context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE &&
                      context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
   mtx_unlock(&drv->mutex);

   i = 0;
   switch (type) {
   case VAProcFilterNone:
      break;

   case VAProcFilterDeinterlacing: {
      VAProcFilterCapDeinterlacing *deint = filter_caps;

      if (!supports_filters)
         return VA_STATUS_ERROR_UNIMPLEMENTED;

      if (*num_filter_caps < 3) {
         /* Tell the caller how many entries it must provide. */
         *num_filter_caps = 3;
         return VA_STATUS_ERROR_MAX_NUM_EXCEEDED;
      }
      deint[i++].type = VAProcDeinterlacingBob;
      deint[i++].type = VAProcDeinterlacingWeave;
      deint[i++].type = VAProcDeinterlacingMotionAdaptive;
      break;
   }

   case VAProcFilterNoiseReduction:
   case VAProcFilterSharpening:
   case VAProcFilterColorBalance:
   case VAProcFilterSkinToneEnhancement:
      return VA_STATUS_ERROR_UNIMPLEMENTED;

   default:
      /* Unknown filter type: caller bug. In release builds this falls
       * through and reports zero caps, as before. */
      assert(0);
      break;
   }

   *num_filter_caps = i;

   return VA_STATUS_SUCCESS;
}
/* Color standards advertised for both VPP input and output surfaces;
 * shared by vlVaQueryVideoProcPipelineCaps for the in/out capability lists. */
static VAProcColorStandardType vpp_color_standards[] = {
VAProcColorStandardBT601,
VAProcColorStandardBT709,
VAProcColorStandardBT2020,
VAProcColorStandardExplicit
};
/**
 * vaQueryVideoProcPipelineCaps entrypoint.
 *
 * Fills pipeline_cap with the driver's video post-processing capabilities:
 * rotation/mirror support, color standards, input/output size limits and
 * blend modes. Also scans the supplied filter buffers to report how many
 * forward/backward references motion-adaptive deinterlacing needs.
 */
VAStatus
vlVaQueryVideoProcPipelineCaps(VADriverContextP ctx, VAContextID context,
VABufferID *filters, unsigned int num_filters,
VAProcPipelineCaps *pipeline_cap)
{
unsigned int i = 0;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
if (!pipeline_cap)
return VA_STATUS_ERROR_INVALID_PARAMETER;
if (num_filters && !filters)
return VA_STATUS_ERROR_INVALID_PARAMETER;
/* Start from a clean slate before reporting capabilities. */
pipeline_cap->pipeline_flags = 0;
pipeline_cap->filter_flags = 0;
pipeline_cap->num_forward_references = 0;
pipeline_cap->num_backward_references = 0;
pipeline_cap->rotation_flags = VA_ROTATION_NONE;
pipeline_cap->mirror_flags = VA_MIRROR_NONE;
struct pipe_screen *pscreen = VL_VA_PSCREEN(ctx);
/* A media-only device (no graphics, no compute) can only do what the
 * fixed-function VPP engine reports; otherwise every orientation is
 * advertised (presumably handled by a shader fallback — NOTE(review):
 * confirm against the compositor path). */
bool media_only = !pscreen->caps.graphics && !pscreen->caps.compute;
uint32_t pipe_orientation_flags = pscreen->get_video_param(pscreen,
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_ORIENTATION_MODES);
if (!media_only || pipe_orientation_flags & PIPE_VIDEO_VPP_ROTATION_90)
pipeline_cap->rotation_flags |= (1 << VA_ROTATION_90);
if (!media_only || pipe_orientation_flags & PIPE_VIDEO_VPP_ROTATION_180)
pipeline_cap->rotation_flags |= (1 << VA_ROTATION_180);
if (!media_only || pipe_orientation_flags & PIPE_VIDEO_VPP_ROTATION_270)
pipeline_cap->rotation_flags |= (1 << VA_ROTATION_270);
if (!media_only || pipe_orientation_flags & PIPE_VIDEO_VPP_FLIP_HORIZONTAL)
pipeline_cap->mirror_flags |= VA_MIRROR_HORIZONTAL;
if (!media_only || pipe_orientation_flags & PIPE_VIDEO_VPP_FLIP_VERTICAL)
pipeline_cap->mirror_flags |= VA_MIRROR_VERTICAL;
/* The same color standard list is exposed for input and output. */
pipeline_cap->input_color_standards = vpp_color_standards;
pipeline_cap->num_input_color_standards = ARRAY_SIZE(vpp_color_standards);
pipeline_cap->output_color_standards = vpp_color_standards;
pipeline_cap->num_output_color_standards = ARRAY_SIZE(vpp_color_standards);
/* Query the screen for the VPP input/output size envelope. */
pipeline_cap->max_input_width = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MAX_INPUT_WIDTH);
pipeline_cap->max_input_height = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MAX_INPUT_HEIGHT);
pipeline_cap->min_input_width = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MIN_INPUT_WIDTH);
pipeline_cap->min_input_height = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MIN_INPUT_HEIGHT);
pipeline_cap->max_output_width = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MAX_OUTPUT_WIDTH);
pipeline_cap->max_output_height = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MAX_OUTPUT_HEIGHT);
pipeline_cap->min_output_width = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MIN_OUTPUT_WIDTH);
pipeline_cap->min_output_height = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_MIN_OUTPUT_HEIGHT);
uint32_t pipe_blend_modes = pscreen->get_video_param(pscreen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_PROCESSING,
PIPE_VIDEO_CAP_VPP_BLEND_MODES);
pipeline_cap->blend_flags = 0;
if (pipe_blend_modes & PIPE_VIDEO_VPP_BLEND_MODE_GLOBAL_ALPHA)
pipeline_cap->blend_flags |= VA_BLEND_GLOBAL_ALPHA;
vlVaDriver *drv = VL_VA_DRIVER(ctx);
mtx_lock(&drv->mutex);
/* Walk the caller's filter buffers to derive reference requirements. */
for (i = 0; i < num_filters; i++) {
vlVaBuffer *buf = handle_table_get(drv->htab, filters[i]);
VAProcFilterParameterBufferBase *filter;
if (!buf || buf->type != VAProcFilterParameterBufferType) {
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
}
filter = buf->data;
switch (filter->type) {
case VAProcFilterDeinterlacing: {
VAProcFilterParameterBufferDeinterlacing *deint = buf->data;
/* Motion-adaptive deinterlacing needs history on both sides. */
if (deint->algorithm == VAProcDeinterlacingMotionAdaptive) {
pipeline_cap->num_forward_references = 2;
pipeline_cap->num_backward_references = 1;
}
break;
}
default:
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_UNIMPLEMENTED;
}
}
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
}
#ifndef _WIN32
static uint32_t pipe_format_to_drm_format(enum pipe_format format)
{