st/va: delay decoder creation until max_references is known

In general max_references cannot be based on num_render_targets.

This patch allows buffers to be allocated with an accurate size,
i.e. no more than necessary. For other codecs it is a fixed
value of 2.

This is behaviour similar to that of the vaapi/vdpau drivers.

For now the HEVC case defaults to num_render_targets as before,
but it could also benefit from this change by setting a more
accurate max_references number in handlePictureParameterBuffer.

Signed-off-by: Julien Isorce <j.isorce@samsung.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Emil Velikov <emil.l.velikov@gmail.com>
This commit is contained in:
Julien Isorce
2015-11-26 08:29:54 +00:00
parent 750393ff7d
commit b4fb6d7616
4 changed files with 66 additions and 32 deletions
+21 -23
View File
@@ -187,7 +187,6 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
int picture_height, int flag, VASurfaceID *render_targets,
int num_render_targets, VAContextID *context_id)
{
struct pipe_video_codec templat = {};
vlVaDriver *drv;
vlVaContext *context;
int is_vpp;
@@ -213,27 +212,22 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
return VA_STATUS_ERROR_INVALID_CONTEXT;
}
} else {
templat.profile = config_id;
templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
templat.width = picture_width;
templat.height = picture_height;
templat.max_references = num_render_targets;
templat.expect_chunked_decode = true;
context->templat.profile = config_id;
context->templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
context->templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
context->templat.width = picture_width;
context->templat.height = picture_height;
context->templat.expect_chunked_decode = true;
if (u_reduce_video_profile(templat.profile) ==
PIPE_VIDEO_FORMAT_MPEG4_AVC)
templat.level = u_get_h264_level(templat.width, templat.height,
&templat.max_references);
switch (u_reduce_video_profile(context->templat.profile)) {
case PIPE_VIDEO_FORMAT_MPEG12:
case PIPE_VIDEO_FORMAT_VC1:
case PIPE_VIDEO_FORMAT_MPEG4:
context->templat.max_references = 2;
break;
context->decoder = drv->pipe->create_video_codec(drv->pipe, &templat);
if (!context->decoder) {
FREE(context);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
if (u_reduce_video_profile(context->decoder->profile) ==
PIPE_VIDEO_FORMAT_MPEG4_AVC) {
case PIPE_VIDEO_FORMAT_MPEG4_AVC:
context->templat.max_references = 0;
context->desc.h264.pps = CALLOC_STRUCT(pipe_h264_pps);
if (!context->desc.h264.pps) {
FREE(context);
@@ -245,10 +239,10 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
FREE(context);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
}
break;
if (u_reduce_video_profile(context->decoder->profile) ==
PIPE_VIDEO_FORMAT_HEVC) {
case PIPE_VIDEO_FORMAT_HEVC:
context->templat.max_references = num_render_targets;
context->desc.h265.pps = CALLOC_STRUCT(pipe_h265_pps);
if (!context->desc.h265.pps) {
FREE(context);
@@ -260,6 +254,10 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
FREE(context);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
break;
default:
break;
}
}
+40 -8
View File
@@ -59,14 +59,17 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
return VA_STATUS_ERROR_INVALID_SURFACE;
context->target = surf->buffer;
if (!context->decoder) {
/* VPP */
if ((context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
if (context->templat.profile == PIPE_VIDEO_PROFILE_UNKNOWN &&
((context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
context->target->buffer_format != PIPE_FORMAT_R8G8B8A8_UNORM &&
context->target->buffer_format != PIPE_FORMAT_B8G8R8X8_UNORM &&
context->target->buffer_format != PIPE_FORMAT_R8G8B8X8_UNORM) ||
context->target->interlaced)
context->target->interlaced))
return VA_STATUS_ERROR_UNIMPLEMENTED;
return VA_STATUS_SUCCESS;
}
@@ -86,13 +89,14 @@ vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
*ref_frame = NULL;
}
static void
static VAStatus
handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
VAPictureParameterBufferHEVC *hevc;
unsigned int i;
VAStatus vaStatus = VA_STATUS_SUCCESS;
switch (u_reduce_video_profile(context->decoder->profile)) {
switch (u_reduce_video_profile(context->templat.profile)) {
case PIPE_VIDEO_FORMAT_MPEG12:
vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);
break;
@@ -263,6 +267,31 @@ handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *
default:
break;
}
/* Create the decoder once max_references is known. */
if (!context->decoder) {
if (!context->target)
return VA_STATUS_ERROR_INVALID_CONTEXT;
if (context->templat.max_references == 0)
return VA_STATUS_ERROR_INVALID_BUFFER;
if (u_reduce_video_profile(context->templat.profile) ==
PIPE_VIDEO_FORMAT_MPEG4_AVC)
context->templat.level = u_get_h264_level(context->templat.width,
context->templat.height, &context->templat.max_references);
context->decoder = drv->pipe->create_video_codec(drv->pipe,
&context->templat);
if (!context->decoder)
return VA_STATUS_ERROR_ALLOCATION_FAILED;
context->decoder->begin_frame(context->decoder, context->target,
&context->desc.base);
}
return vaStatus;
}
static void
@@ -270,7 +299,7 @@ handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
{
VAIQMatrixBufferHEVC *h265;
switch (u_reduce_video_profile(context->decoder->profile)) {
switch (u_reduce_video_profile(context->templat.profile)) {
case PIPE_VIDEO_FORMAT_MPEG12:
vlVaHandleIQMatrixBufferMPEG12(context, buf);
break;
@@ -304,7 +333,7 @@ handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf)
{
VASliceParameterBufferHEVC *h265;
switch (u_reduce_video_profile(context->decoder->profile)) {
switch (u_reduce_video_profile(context->templat.profile)) {
case PIPE_VIDEO_FORMAT_MPEG4_AVC:
vlVaHandleSliceParameterBufferH264(context, buf);
break;
@@ -356,7 +385,7 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
static const uint8_t start_code_h265[] = { 0x00, 0x00, 0x01 };
static const uint8_t start_code_vc1[] = { 0x00, 0x00, 0x01, 0x0d };
format = u_reduce_video_profile(context->decoder->profile);
format = u_reduce_video_profile(context->templat.profile);
switch (format) {
case PIPE_VIDEO_FORMAT_MPEG4_AVC:
if (bufHasStartcode(buf, 0x000001, 24))
@@ -428,7 +457,7 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
switch (buf->type) {
case VAPictureParameterBufferType:
handlePictureParameterBuffer(drv, context, buf);
vaStatus = handlePictureParameterBuffer(drv, context, buf);
break;
case VAIQMatrixBufferType:
@@ -472,6 +501,9 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
return VA_STATUS_ERROR_INVALID_CONTEXT;
if (!context->decoder) {
if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN)
return VA_STATUS_ERROR_INVALID_CONTEXT;
/* VPP */
return VA_STATUS_SUCCESS;
}
@@ -26,6 +26,7 @@
*
**************************************************************************/
#include "util/u_video.h"
#include "va_private.h"
void vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
@@ -90,6 +91,9 @@ void vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context,
h264->pic_fields.bits.redundant_pic_cnt_present_flag;
/*reference_pic_flag*/
context->desc.h264.frame_num = h264->frame_num;
if (!context->decoder && context->desc.h264.num_ref_frames > 0)
context->templat.max_references = MIN2(context->desc.h264.num_ref_frames, 16);
}
void vlVaHandleIQMatrixBufferH264(vlVaContext *context, vlVaBuffer *buf)
+1 -1
View File
@@ -215,7 +215,7 @@ typedef struct {
} vlVaSubpicture;
typedef struct {
struct pipe_video_codec *decoder;
struct pipe_video_codec templat, *decoder;
struct pipe_video_buffer *target;
union {
struct pipe_picture_desc base;