FFmpeg
hw_base_encode.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "libavutil/avassert.h"
20 #include "libavutil/common.h"
21 #include "libavutil/internal.h"
22 #include "libavutil/log.h"
23 #include "libavutil/mem.h"
24 #include "libavutil/pixdesc.h"
25 
26 #include "encode.h"
27 #include "avcodec.h"
28 #include "hw_base_encode.h"
29 
30 static int base_encode_pic_free(FFHWBaseEncodePicture *pic)
31 {
32  av_frame_free(&pic->input_image);
33  av_frame_free(&pic->recon_image);
34 
35  av_buffer_unref(&pic->opaque_ref);
36  av_freep(&pic->codec_priv);
37  av_freep(&pic->priv);
38  av_free(pic);
39 
40  return 0;
41 }
42 
43 static void hw_base_encode_add_ref(FFHWBaseEncodePicture *pic,
44  FFHWBaseEncodePicture *target,
45  int is_ref, int in_dpb, int prev)
46 {
47  int refs = 0;
48 
49  if (is_ref) {
50  av_assert0(pic != target);
51  av_assert0(pic->nb_refs[0] < MAX_PICTURE_REFERENCES &&
52  pic->nb_refs[1] < MAX_PICTURE_REFERENCES);
53  if (target->display_order < pic->display_order)
54  pic->refs[0][pic->nb_refs[0]++] = target;
55  else
56  pic->refs[1][pic->nb_refs[1]++] = target;
57  ++refs;
58  }
59 
60  if (in_dpb) {
61  av_assert0(pic->nb_dpb_pics < MAX_DPB_SIZE);
62  pic->dpb[pic->nb_dpb_pics++] = target;
63  ++refs;
64  }
65 
66  if (prev) {
67  av_assert0(!pic->prev);
68  pic->prev = target;
69  ++refs;
70  }
71 
72  target->ref_count[0] += refs;
73  target->ref_count[1] += refs;
74 }
75 
76 static void hw_base_encode_remove_refs(FFHWBaseEncodePicture *pic, int level)
77 {
78  int i;
79 
80  if (pic->ref_removed[level])
81  return;
82 
83  for (i = 0; i < pic->nb_refs[0]; i++) {
84  av_assert0(pic->refs[0][i]);
85  --pic->refs[0][i]->ref_count[level];
86  av_assert0(pic->refs[0][i]->ref_count[level] >= 0);
87  }
88 
89  for (i = 0; i < pic->nb_refs[1]; i++) {
90  av_assert0(pic->refs[1][i]);
91  --pic->refs[1][i]->ref_count[level];
92  av_assert0(pic->refs[1][i]->ref_count[level] >= 0);
93  }
94 
95  for (i = 0; i < pic->nb_dpb_pics; i++) {
96  av_assert0(pic->dpb[i]);
97  --pic->dpb[i]->ref_count[level];
98  av_assert0(pic->dpb[i]->ref_count[level] >= 0);
99  }
100 
101  av_assert0(pic->prev || pic->type == FF_HW_PICTURE_TYPE_IDR);
102  if (pic->prev) {
103  --pic->prev->ref_count[level];
104  av_assert0(pic->prev->ref_count[level] >= 0);
105  }
106 
107  pic->ref_removed[level] = 1;
108 }
109 
110 static void hw_base_encode_set_b_pictures(FFHWBaseEncodeContext *ctx,
111  FFHWBaseEncodePicture *start,
112  FFHWBaseEncodePicture *end,
113  FFHWBaseEncodePicture *prev,
114  int current_depth,
115  FFHWBaseEncodePicture **last)
116 {
117  FFHWBaseEncodePicture *pic, *next, *ref;
118  int i, len;
119 
120  av_assert0(start && end && start != end && start->next != end);
121 
122  // If we are at the maximum depth then encode all pictures as
123  // non-referenced B-pictures. Also do this if there is exactly one
124  // picture left, since there will be nothing to reference it.
125  if (current_depth == ctx->max_b_depth || start->next->next == end) {
126  for (pic = start->next; pic; pic = pic->next) {
127  if (pic == end)
128  break;
129  pic->type = FF_HW_PICTURE_TYPE_B;
130  pic->b_depth = current_depth;
131 
132  hw_base_encode_add_ref(pic, start, 1, 1, 0);
133  hw_base_encode_add_ref(pic, end, 1, 1, 0);
134  hw_base_encode_add_ref(pic, prev, 0, 0, 1);
135 
136  for (ref = end->refs[1][0]; ref; ref = ref->refs[1][0])
137  hw_base_encode_add_ref(pic, ref, 0, 1, 0);
138  }
139  *last = prev;
140 
141  } else {
142  // Split the current list at the midpoint with a referenced
143  // B-picture, then descend into each side separately.
144  len = 0;
145  for (pic = start->next; pic != end; pic = pic->next)
146  ++len;
147  for (pic = start->next, i = 1; 2 * i < len; pic = pic->next, i++);
148 
149  pic->type = FF_HW_PICTURE_TYPE_B;
150  pic->b_depth = current_depth;
151 
152  pic->is_reference = 1;
153 
154  hw_base_encode_add_ref(pic, pic, 0, 1, 0);
155  hw_base_encode_add_ref(pic, start, 1, 1, 0);
156  hw_base_encode_add_ref(pic, end, 1, 1, 0);
157  hw_base_encode_add_ref(pic, prev, 0, 0, 1);
158 
159  for (ref = end->refs[1][0]; ref; ref = ref->refs[1][0])
160  hw_base_encode_add_ref(pic, ref, 0, 1, 0);
161 
162  if (i > 1)
163  hw_base_encode_set_b_pictures(ctx, start, pic, pic,
164  current_depth + 1, &next);
165  else
166  next = pic;
167 
168  hw_base_encode_set_b_pictures(ctx, pic, end, next,
169  current_depth + 1, last);
170  }
171 }
172 
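A worked example of the split above (the depth and frame counts are assumed, not taken from this file): suppose pictures P0 B1 B2 B3 P4 are queued in display order, max_b_depth is 2, and hw_base_encode_set_b_pictures() is entered with start = P0, end = P4 and current_depth = 1. The midpoint B2 becomes a referenced B-picture at depth 1 with P0 and P4 in its reference lists; each half is then handled at depth 2 (== max_b_depth), so B1 and B3 become non-referenced B-pictures:

    P0 ----------------- P4        top layer (already typed I/P)
          \            /
           B2   (referenced B, depth 1)
          /  \
        B1    B3                   non-referenced B, depth 2

B1 references P0 and B2, B3 references B2 and P4, and B2 also serves as the "prev" link passed down into both halves of the recursion.
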
173 static void hw_base_encode_add_next_prev(FFHWBaseEncodeContext *ctx,
174  FFHWBaseEncodePicture *pic)
175 {
176  int i;
177 
178  if (!pic)
179  return;
180 
181  if (pic->type == FF_HW_PICTURE_TYPE_IDR) {
182  for (i = 0; i < ctx->nb_next_prev; i++) {
183  --ctx->next_prev[i]->ref_count[0];
184  ctx->next_prev[i] = NULL;
185  }
186  ctx->next_prev[0] = pic;
187  ++pic->ref_count[0];
188  ctx->nb_next_prev = 1;
189 
190  return;
191  }
192 
193  if (ctx->nb_next_prev < ctx->ref_l0) {
194  ctx->next_prev[ctx->nb_next_prev++] = pic;
195  ++pic->ref_count[0];
196  } else {
197  --ctx->next_prev[0]->ref_count[0];
198  for (i = 0; i < ctx->ref_l0 - 1; i++)
199  ctx->next_prev[i] = ctx->next_prev[i + 1];
200  ctx->next_prev[i] = pic;
201  ++pic->ref_count[0];
202  }
203 }
204 
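A brief illustration of the sliding window maintained above (ref_l0 = 2 assumed, not taken from this file): after non-IDR pictures A, B and C have been added in that order, next_prev holds { B, C }; when C was added, A was shifted out and the ref_count[0] it had been given here was dropped again. An IDR picture clears the window entirely and leaves only the IDR in it, so the following picture's "prev" link points at the IDR rather than at anything before it.
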
205 static int hw_base_encode_pick_next(AVCodecContext *avctx,
206  FFHWBaseEncodeContext *ctx,
207  FFHWBaseEncodePicture **pic_out)
208 {
209  FFHWBaseEncodePicture *pic = NULL, *prev = NULL, *next, *start;
210  int i, b_counter, closed_gop_end;
211 
212  // If there are any B-frames already queued, the next one to encode
213  // is the earliest not-yet-issued frame for which all references are
214  // available.
215  for (pic = ctx->pic_start; pic; pic = pic->next) {
216  if (pic->encode_issued)
217  continue;
218  if (pic->type != FF_HW_PICTURE_TYPE_B)
219  continue;
220  for (i = 0; i < pic->nb_refs[0]; i++) {
221  if (!pic->refs[0][i]->encode_issued)
222  break;
223  }
224  if (i != pic->nb_refs[0])
225  continue;
226 
227  for (i = 0; i < pic->nb_refs[1]; i++) {
228  if (!pic->refs[1][i]->encode_issued)
229  break;
230  }
231  if (i == pic->nb_refs[1])
232  break;
233  }
234 
235  if (pic) {
236  av_log(avctx, AV_LOG_DEBUG, "Pick B-picture at depth %d to "
237  "encode next.\n", pic->b_depth);
238  *pic_out = pic;
239  return 0;
240  }
241 
242  // Find the B-per-Pth available picture to become the next picture
243  // on the top layer.
244  start = NULL;
245  b_counter = 0;
246  closed_gop_end = ctx->closed_gop ||
247  ctx->idr_counter == ctx->gop_per_idr;
248  for (pic = ctx->pic_start; pic; pic = next) {
249  next = pic->next;
250  if (pic->encode_issued) {
251  start = pic;
252  continue;
253  }
254  // If the next available picture is force-IDR, encode it to start
255  // a new GOP immediately.
256  if (pic->force_idr)
257  break;
258  if (b_counter == ctx->b_per_p)
259  break;
260  // If this picture ends a closed GOP or starts a new GOP then it
261  // needs to be in the top layer.
262  if (ctx->gop_counter + b_counter + closed_gop_end >= ctx->gop_size)
263  break;
264  // If the picture after this one is force-IDR, we need to encode
265  // this one in the top layer.
266  if (next && next->force_idr)
267  break;
268  ++b_counter;
269  }
270 
271  // At the end of the stream the last picture must be in the top layer.
272  if (!pic && ctx->end_of_stream) {
273  --b_counter;
274  pic = ctx->pic_end;
275  if (pic->encode_complete)
276  return AVERROR_EOF;
277  else if (pic->encode_issued)
278  return AVERROR(EAGAIN);
279  }
280 
281  if (!pic) {
282  av_log(avctx, AV_LOG_DEBUG, "Pick nothing to encode next - "
283  "need more input for reference pictures.\n");
284  return AVERROR(EAGAIN);
285  }
286  if (ctx->input_order <= ctx->decode_delay && !ctx->end_of_stream) {
287  av_log(avctx, AV_LOG_DEBUG, "Pick nothing to encode next - "
288  "need more input for timestamps.\n");
289  return AVERROR(EAGAIN);
290  }
291 
292  if (pic->force_idr) {
293  av_log(avctx, AV_LOG_DEBUG, "Pick forced IDR-picture to "
294  "encode next.\n");
295  pic->type = FF_HW_PICTURE_TYPE_IDR;
296  ctx->idr_counter = 1;
297  ctx->gop_counter = 1;
298 
299  } else if (ctx->gop_counter + b_counter >= ctx->gop_size) {
300  if (ctx->idr_counter == ctx->gop_per_idr) {
301  av_log(avctx, AV_LOG_DEBUG, "Pick new-GOP IDR-picture to "
302  "encode next.\n");
303  pic->type = FF_HW_PICTURE_TYPE_IDR;
304  ctx->idr_counter = 1;
305  } else {
306  av_log(avctx, AV_LOG_DEBUG, "Pick new-GOP I-picture to "
307  "encode next.\n");
308  pic->type = FF_HW_PICTURE_TYPE_I;
309  ++ctx->idr_counter;
310  }
311  ctx->gop_counter = 1;
312 
313  } else {
314  if (ctx->gop_counter + b_counter + closed_gop_end == ctx->gop_size) {
315  av_log(avctx, AV_LOG_DEBUG, "Pick group-end P-picture to "
316  "encode next.\n");
317  } else {
318  av_log(avctx, AV_LOG_DEBUG, "Pick normal P-picture to "
319  "encode next.\n");
320  }
321  pic->type = FF_HW_PICTURE_TYPE_P;
322  av_assert0(start);
323  ctx->gop_counter += 1 + b_counter;
324  }
325  pic->is_reference = 1;
326  *pic_out = pic;
327 
328  hw_base_encode_add_ref(pic, pic, 0, 1, 0);
329  if (pic->type != FF_HW_PICTURE_TYPE_IDR) {
330  // TODO: apply both backward and forward multi-reference for all VAAPI encoders.
331  // The number of L0/L1 reference frames could also be set dynamically by
332  // querying the VAConfigAttribEncMaxRefFrames attribute.
333  if (avctx->codec_id == AV_CODEC_ID_AV1) {
334  for (i = 0; i < ctx->nb_next_prev; i++)
335  hw_base_encode_add_ref(pic, ctx->next_prev[i],
336  pic->type == FF_HW_PICTURE_TYPE_P,
337  b_counter > 0, 0);
338  } else
339  hw_base_encode_add_ref(pic, start,
340  pic->type == FF_HW_PICTURE_TYPE_P,
341  b_counter > 0, 0);
342 
343  hw_base_encode_add_ref(pic, ctx->next_prev[ctx->nb_next_prev - 1], 0, 0, 1);
344  }
345 
346  if (b_counter > 0) {
347  hw_base_encode_set_b_pictures(ctx, start, pic, pic, 1,
348  &prev);
349  } else {
350  prev = pic;
351  }
352  hw_base_encode_add_next_prev(ctx, prev);
353 
354  return 0;
355 }
356 
357 static int hw_base_encode_clear_old(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx)
358 {
359  FFHWBaseEncodePicture *pic, *prev, *next;
360 
361  av_assert0(ctx->pic_start);
362 
363  // Remove direct references once each picture is complete.
364  for (pic = ctx->pic_start; pic; pic = pic->next) {
365  if (pic->encode_complete && pic->next)
366  hw_base_encode_remove_refs(pic, 0);
367  }
368 
369  // Remove indirect references once a picture has no direct references.
370  for (pic = ctx->pic_start; pic; pic = pic->next) {
371  if (pic->encode_complete && pic->ref_count[0] == 0)
372  hw_base_encode_remove_refs(pic, 1);
373  }
374 
375  // Clear out all complete pictures with no remaining references.
376  prev = NULL;
377  for (pic = ctx->pic_start; pic; pic = next) {
378  next = pic->next;
379  if (pic->encode_complete && pic->ref_count[1] == 0) {
380  av_assert0(pic->ref_removed[0] && pic->ref_removed[1]);
381  if (prev)
382  prev->next = next;
383  else
384  ctx->pic_start = next;
385  ctx->op->free(avctx, pic);
386  base_encode_pic_free(pic);
387  } else {
388  prev = pic;
389  }
390  }
391 
392  return 0;
393 }
394 
395 static int hw_base_encode_check_frame(FFHWBaseEncodeContext *ctx,
396  const AVFrame *frame)
397 {
398  if ((frame->crop_top || frame->crop_bottom ||
399  frame->crop_left || frame->crop_right) && !ctx->crop_warned) {
400  av_log(ctx->log_ctx, AV_LOG_WARNING, "Cropping information on input "
401  "frames ignored due to lack of API support.\n");
402  ctx->crop_warned = 1;
403  }
404 
405  if (!ctx->roi_allowed) {
406  AVFrameSideData *sd =
407  av_frame_get_side_data(frame, AV_FRAME_DATA_REGIONS_OF_INTEREST);
408 
409  if (sd && !ctx->roi_warned) {
410  av_log(ctx->log_ctx, AV_LOG_WARNING, "ROI side data on input "
411  "frames ignored due to lack of driver support.\n");
412  ctx->roi_warned = 1;
413  }
414  }
415 
416  return 0;
417 }
418 
419 static int hw_base_encode_send_frame(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx,
420  AVFrame *frame)
421 {
422  FFHWBaseEncodePicture *pic;
423  int err;
424 
425  if (frame) {
426  av_log(avctx, AV_LOG_DEBUG, "Input frame: %ux%u (%"PRId64").\n",
427  frame->width, frame->height, frame->pts);
428 
429  err = hw_base_encode_check_frame(ctx, frame);
430  if (err < 0)
431  return err;
432 
433  pic = av_mallocz(sizeof(*pic));
434  if (!pic)
435  return AVERROR(ENOMEM);
436 
437  pic->input_image = av_frame_alloc();
438  if (!pic->input_image) {
439  err = AVERROR(ENOMEM);
440  goto fail;
441  }
442 
443  if (ctx->recon_frames_ref) {
444  pic->recon_image = av_frame_alloc();
445  if (!pic->recon_image) {
446  err = AVERROR(ENOMEM);
447  goto fail;
448  }
449 
450  err = av_hwframe_get_buffer(ctx->recon_frames_ref, pic->recon_image, 0);
451  if (err < 0) {
452  err = AVERROR(ENOMEM);
453  goto fail;
454  }
455  }
456 
457  pic->priv = av_mallocz(ctx->op->priv_size);
458  if (!pic->priv) {
459  err = AVERROR(ENOMEM);
460  goto fail;
461  }
462 
463  if (ctx->input_order == 0 || frame->pict_type == AV_PICTURE_TYPE_I)
464  pic->force_idr = 1;
465 
466  pic->pts = frame->pts;
467  pic->duration = frame->duration;
468 
469  if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
470  err = av_buffer_replace(&pic->opaque_ref, frame->opaque_ref);
471  if (err < 0)
472  goto fail;
473 
474  pic->opaque = frame->opaque;
475  }
476 
477  av_frame_move_ref(pic->input_image, frame);
478 
479  if (ctx->input_order == 0)
480  ctx->first_pts = pic->pts;
481  if (ctx->input_order == ctx->decode_delay)
482  ctx->dts_pts_diff = pic->pts - ctx->first_pts;
483  if (ctx->output_delay > 0)
484  ctx->ts_ring[ctx->input_order %
485  (3 * ctx->output_delay + ctx->async_depth)] = pic->pts;
486 
487  pic->display_order = ctx->input_order;
488  ++ctx->input_order;
489 
490  if (ctx->pic_start) {
491  ctx->pic_end->next = pic;
492  ctx->pic_end = pic;
493  } else {
494  ctx->pic_start = pic;
495  ctx->pic_end = pic;
496  }
497 
498  err = ctx->op->init(avctx, pic);
499  if (err < 0)
500  goto fail;
501  } else {
502  ctx->end_of_stream = 1;
503 
504  // Fix timestamps if we hit end-of-stream before the initial decode
505  // delay has elapsed.
506  if (ctx->input_order <= ctx->decode_delay)
507  ctx->dts_pts_diff = ctx->pic_end->pts - ctx->first_pts;
508  }
509 
510  return 0;
511 
512 fail:
513  ctx->op->free(avctx, pic);
514  base_encode_pic_free(pic);
515  return err;
516 }
517 
518 int ff_hw_base_encode_set_output_property(FFHWBaseEncodeContext *ctx,
519  AVCodecContext *avctx,
520  FFHWBaseEncodePicture *pic,
521  AVPacket *pkt, int flag_no_delay)
522 {
523  if (pic->type == FF_HW_PICTURE_TYPE_IDR)
524  pkt->flags |= AV_PKT_FLAG_KEY;
525 
526  pkt->pts = pic->pts;
527  pkt->duration = pic->duration;
528 
529  // For no-delay encoders this is handled in the generic codec layer.
530  if (avctx->codec->capabilities & AV_CODEC_CAP_DELAY &&
531  avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
532  pkt->opaque = pic->opaque;
533  pkt->opaque_ref = pic->opaque_ref;
534  pic->opaque_ref = NULL;
535  }
536 
537  if (flag_no_delay) {
538  pkt->dts = pkt->pts;
539  return 0;
540  }
541 
542  if (ctx->output_delay == 0) {
543  pkt->dts = pkt->pts;
544  } else if (pic->encode_order < ctx->decode_delay) {
545  if (ctx->ts_ring[pic->encode_order] < INT64_MIN + ctx->dts_pts_diff)
546  pkt->dts = INT64_MIN;
547  else
548  pkt->dts = ctx->ts_ring[pic->encode_order] - ctx->dts_pts_diff;
549  } else {
550  pkt->dts = ctx->ts_ring[(pic->encode_order - ctx->decode_delay) %
551  (3 * ctx->output_delay + ctx->async_depth)];
552  }
553 
554  return 0;
555 }
556 
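To make the ring arithmetic above concrete (decode_delay, output_delay and the input timestamps are assumed here; they are configured outside this function): take decode_delay = output_delay = 1, input frames with pts 0, 10, 20, 30, so ts_ring holds 0, 10, 20, 30 in input order, dts_pts_diff = 10, and the ring is large enough that no wrap-around occurs. Then:

    encode_order 0 (< decode_delay): dts = ts_ring[0] - dts_pts_diff = -10
    encode_order 1:                  dts = ts_ring[(1 - 1) % ring_size] =  0
    encode_order 2:                  dts = ts_ring[(2 - 1) % ring_size] = 10

so in this example dts advances by one input frame per packet and never exceeds the packet's pts, which is what downstream muxers require.
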
557 int ff_hw_base_encode_receive_packet(FFHWBaseEncodeContext *ctx,
558  AVCodecContext *avctx, AVPacket *pkt)
559 {
560  FFHWBaseEncodePicture *pic = NULL;
561  AVFrame *frame = ctx->frame;
562  int err;
563 
564  av_assert0(ctx->op && ctx->op->init && ctx->op->issue &&
565  ctx->op->output && ctx->op->free);
566 
567 start:
568  /** If no B-frame precedes the repeated P-frame, send the repeated P-frame out. */
569  if (ctx->tail_pkt->size) {
570  for (FFHWBaseEncodePicture *tmp = ctx->pic_start; tmp; tmp = tmp->next) {
571  if (tmp->type == FF_HW_PICTURE_TYPE_B && tmp->pts < ctx->tail_pkt->pts)
572  break;
573  else if (!tmp->next) {
574  av_packet_move_ref(pkt, ctx->tail_pkt);
575  goto end;
576  }
577  }
578  }
579 
580  err = ff_encode_get_frame(avctx, frame);
581  if (err == AVERROR_EOF) {
582  frame = NULL;
583  } else if (err < 0)
584  return err;
585 
586  err = hw_base_encode_send_frame(avctx, ctx, frame);
587  if (err < 0)
588  return err;
589 
590  if (!ctx->pic_start) {
591  if (ctx->end_of_stream)
592  return AVERROR_EOF;
593  else
594  return AVERROR(EAGAIN);
595  }
596 
597  if (ctx->async_encode) {
598  if (av_fifo_can_write(ctx->encode_fifo)) {
599  err = hw_base_encode_pick_next(avctx, ctx, &pic);
600  if (!err) {
601  av_assert0(pic);
602  pic->encode_order = ctx->encode_order +
603  av_fifo_can_read(ctx->encode_fifo);
604  err = ctx->op->issue(avctx, pic);
605  if (err < 0) {
606  av_log(avctx, AV_LOG_ERROR, "Encode failed: %d.\n", err);
607  return err;
608  }
609  pic->encode_issued = 1;
610  av_fifo_write(ctx->encode_fifo, &pic, 1);
611  }
612  }
613 
614  if (!av_fifo_can_read(ctx->encode_fifo))
615  return err;
616 
617  // More frames can be buffered
618  if (av_fifo_can_write(ctx->encode_fifo) && !ctx->end_of_stream)
619  return AVERROR(EAGAIN);
620 
621  av_fifo_read(ctx->encode_fifo, &pic, 1);
622  ctx->encode_order = pic->encode_order + 1;
623  } else {
624  err = hw_base_encode_pick_next(avctx, ctx, &pic);
625  if (err < 0)
626  return err;
627  av_assert0(pic);
628 
629  pic->encode_order = ctx->encode_order++;
630 
631  err = ctx->op->issue(avctx, pic);
632  if (err < 0) {
633  av_log(avctx, AV_LOG_ERROR, "Encode failed: %d.\n", err);
634  return err;
635  }
636 
637  pic->encode_issued = 1;
638  }
639 
640  err = ctx->op->output(avctx, pic, pkt);
641  if (err < 0) {
642  av_log(avctx, AV_LOG_ERROR, "Output failed: %d.\n", err);
643  return err;
644  }
645 
646  ctx->output_order = pic->encode_order;
647  hw_base_encode_clear_old(avctx, ctx);
648 
649  /** Loop until an output packet is available while the encoder is flushing. */
650  if (ctx->end_of_stream && !pkt->size)
651  goto start;
652 
653 end:
654  if (pkt->size)
655  av_log(avctx, AV_LOG_DEBUG, "Output packet: pts %"PRId64", dts %"PRId64", "
656  "size %d bytes.\n", pkt->pts, pkt->dts, pkt->size);
657 
658  return 0;
659 }
660 
661 int ff_hw_base_init_gop_structure(FFHWBaseEncodeContext *ctx, AVCodecContext *avctx,
662  uint32_t ref_l0, uint32_t ref_l1,
663  int flags, int prediction_pre_only)
664 {
665  ctx->ref_l0 = FFMIN(ref_l0, MAX_PICTURE_REFERENCES);
666  ctx->ref_l1 = FFMIN(ref_l1, MAX_PICTURE_REFERENCES);
667 
668  if (flags & FF_HW_FLAG_INTRA_ONLY || avctx->gop_size <= 1) {
669  av_log(avctx, AV_LOG_VERBOSE, "Using intra frames only.\n");
670  ctx->gop_size = 1;
671  } else if (ref_l0 < 1) {
672  av_log(avctx, AV_LOG_ERROR, "Driver does not support any "
673  "reference frames.\n");
674  return AVERROR(EINVAL);
675  } else if (!(flags & FF_HW_FLAG_B_PICTURES) || ref_l1 < 1 ||
676  avctx->max_b_frames < 1 || prediction_pre_only) {
677  if (ctx->p_to_gpb)
678  av_log(avctx, AV_LOG_VERBOSE, "Using intra and B-frames "
679  "(supported references: %d / %d).\n",
680  ref_l0, ref_l1);
681  else
682  av_log(avctx, AV_LOG_VERBOSE, "Using intra and P-frames "
683  "(supported references: %d / %d).\n", ref_l0, ref_l1);
684  ctx->gop_size = avctx->gop_size;
685  ctx->p_per_i = INT_MAX;
686  ctx->b_per_p = 0;
687  } else {
688  if (ctx->p_to_gpb)
689  av_log(avctx, AV_LOG_VERBOSE, "Using intra and B-frames "
690  "(supported references: %d / %d).\n",
691  ref_l0, ref_l1);
692  else
693  av_log(avctx, AV_LOG_VERBOSE, "Using intra, P- and B-frames "
694  "(supported references: %d / %d).\n", ref_l0, ref_l1);
695  ctx->gop_size = avctx->gop_size;
696  ctx->p_per_i = INT_MAX;
697  ctx->b_per_p = avctx->max_b_frames;
698  if (flags & FF_HW_FLAG_B_PICTURE_REFERENCES) {
699  ctx->max_b_depth = FFMIN(ctx->desired_b_depth,
700  av_log2(ctx->b_per_p) + 1);
701  } else {
702  ctx->max_b_depth = 1;
703  }
704  }
705 
706  if (flags & FF_HW_FLAG_NON_IDR_KEY_PICTURES) {
707  ctx->closed_gop = !!(avctx->flags & AV_CODEC_FLAG_CLOSED_GOP);
708  ctx->gop_per_idr = ctx->idr_interval + 1;
709  } else {
710  ctx->closed_gop = 1;
711  ctx->gop_per_idr = 1;
712  }
713 
714  return 0;
715 }
716 
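As a concrete reading of the branches above (all input values assumed, not taken from this file): with ref_l0 = ref_l1 = 2, avctx->gop_size = 120, avctx->max_b_frames = 2, ctx->desired_b_depth = 2 and flags containing FF_HW_FLAG_B_PICTURES and FF_HW_FLAG_B_PICTURE_REFERENCES, the function leaves gop_size = 120, b_per_p = 2 and max_b_depth = FFMIN(2, av_log2(2) + 1) = 2. If FF_HW_FLAG_NON_IDR_KEY_PICTURES is also set, closed_gop follows AV_CODEC_FLAG_CLOSED_GOP and gop_per_idr = idr_interval + 1; otherwise every GOP starts with an IDR picture.
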
717 int ff_hw_base_get_recon_format(FFHWBaseEncodeContext *ctx, const void *hwconfig,
718  enum AVPixelFormat *fmt)
719 {
720  AVHWFramesConstraints *constraints = NULL;
721  enum AVPixelFormat recon_format;
722  int err, i;
723 
724  constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
725  hwconfig);
726  if (!constraints) {
727  err = AVERROR(ENOMEM);
728  goto fail;
729  }
730 
731  // Probably we can use the input surface format as the surface format
732  // of the reconstructed frames. If not, we just pick the first (only?)
733  // format in the valid list and hope that it all works.
734  recon_format = AV_PIX_FMT_NONE;
735  if (constraints->valid_sw_formats) {
736  for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
737  if (ctx->input_frames->sw_format ==
738  constraints->valid_sw_formats[i]) {
739  recon_format = ctx->input_frames->sw_format;
740  break;
741  }
742  }
743  if (recon_format == AV_PIX_FMT_NONE) {
744  // No match. Just use the first in the supported list and
745  // hope for the best.
746  recon_format = constraints->valid_sw_formats[0];
747  }
748  } else {
749  // No idea what to use; copy input format.
750  recon_format = ctx->input_frames->sw_format;
751  }
752  av_log(ctx->log_ctx, AV_LOG_DEBUG, "Using %s as format of "
753  "reconstructed frames.\n", av_get_pix_fmt_name(recon_format));
754 
755  if (ctx->surface_width < constraints->min_width ||
756  ctx->surface_height < constraints->min_height ||
757  ctx->surface_width > constraints->max_width ||
758  ctx->surface_height > constraints->max_height) {
759  av_log(ctx->log_ctx, AV_LOG_ERROR, "Hardware does not support encoding at "
760  "size %dx%d (constraints: width %d-%d height %d-%d).\n",
761  ctx->surface_width, ctx->surface_height,
762  constraints->min_width, constraints->max_width,
763  constraints->min_height, constraints->max_height);
764  err = AVERROR(EINVAL);
765  goto fail;
766  }
767 
768  *fmt = recon_format;
769  err = 0;
770 fail:
771  av_hwframe_constraints_free(&constraints);
772  return err;
773 }
774 
775 int ff_hw_base_encode_init(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx)
776 {
777  ctx->log_ctx = (void *)avctx;
778 
779  ctx->frame = av_frame_alloc();
780  if (!ctx->frame)
781  return AVERROR(ENOMEM);
782 
783  if (!avctx->hw_frames_ctx) {
784  av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
785  "required to associate the encoding device.\n");
786  return AVERROR(EINVAL);
787  }
788 
789  ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
790  if (!ctx->input_frames_ref)
791  return AVERROR(ENOMEM);
792 
793  ctx->input_frames = (AVHWFramesContext *)ctx->input_frames_ref->data;
794 
795  ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
796  if (!ctx->device_ref)
797  return AVERROR(ENOMEM);
798 
799  ctx->device = (AVHWDeviceContext *)ctx->device_ref->data;
800 
801  ctx->tail_pkt = av_packet_alloc();
802  if (!ctx->tail_pkt)
803  return AVERROR(ENOMEM);
804 
805  return 0;
806 }
807 
808 int ff_hw_base_encode_close(FFHWBaseEncodeContext *ctx)
809 {
810  for (FFHWBaseEncodePicture *pic = ctx->pic_start, *next_pic = pic; pic; pic = next_pic) {
811  next_pic = pic->next;
812  base_encode_pic_free(pic);
813  }
814 
815  av_fifo_freep2(&ctx->encode_fifo);
816 
817  av_frame_free(&ctx->frame);
818  av_packet_free(&ctx->tail_pkt);
819 
820  av_buffer_unref(&ctx->device_ref);
821  av_buffer_unref(&ctx->input_frames_ref);
822  av_buffer_unref(&ctx->recon_frames_ref);
823 
824  return 0;
825 }
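
For orientation, here is a minimal sketch of how a codec-specific backend might drive this base layer. Only the entry points and the op callbacks exercised above (init/issue/output/free and priv_size) come from this file; the callback-table type name FFHWEncodePictureOperation, the embedding of FFHWBaseEncodeContext shown here and all Dummy* names are assumptions for illustration, not something this file documents.

#include "libavcodec/avcodec.h"
#include "libavcodec/hw_base_encode.h"

/* Hypothetical per-picture and per-encoder backend state. */
typedef struct DummyHWEncodePicture {
    int slot;                       /* e.g. an API surface/slot handle */
} DummyHWEncodePicture;

typedef struct DummyHWEncodeContext {
    FFHWBaseEncodeContext base;     /* assumed to be embedded first */
    /* API-specific state would live here. */
} DummyHWEncodeContext;

/* Stub callbacks: a real backend would build codec parameter sets in
 * init(), submit the picture to the hardware in issue(), copy the
 * resulting bitstream into pkt in output() and release API objects in
 * free(). */
static int dummy_init(AVCodecContext *avctx, FFHWBaseEncodePicture *pic)  { return 0; }
static int dummy_issue(AVCodecContext *avctx, FFHWBaseEncodePicture *pic) { return 0; }
static int dummy_output(AVCodecContext *avctx, FFHWBaseEncodePicture *pic,
                        AVPacket *pkt)                                    { return 0; }
static int dummy_free(AVCodecContext *avctx, FFHWBaseEncodePicture *pic)  { return 0; }

static const FFHWEncodePictureOperation dummy_op = {
    .priv_size = sizeof(DummyHWEncodePicture),  /* allocated per picture in
                                                   hw_base_encode_send_frame() */
    .init      = &dummy_init,
    .issue     = &dummy_issue,
    .output    = &dummy_output,
    .free      = &dummy_free,
};

static int dummy_encode_init(AVCodecContext *avctx)
{
    DummyHWEncodeContext *priv = avctx->priv_data;
    int err;

    priv->base.op = &dummy_op;

    err = ff_hw_base_encode_init(avctx, &priv->base);
    if (err < 0)
        return err;

    /* After probing its API, the backend would call
     * ff_hw_base_init_gop_structure() and ff_hw_base_get_recon_format()
     * with the capabilities it discovered. */
    return 0;
}

static int dummy_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    DummyHWEncodeContext *priv = avctx->priv_data;
    return ff_hw_base_encode_receive_packet(&priv->base, avctx, pkt);
}

static int dummy_encode_close(AVCodecContext *avctx)
{
    DummyHWEncodeContext *priv = avctx->priv_data;
    return ff_hw_base_encode_close(&priv->base);
}

A real backend would additionally register these callbacks in its encoder definition and fill the stubs with its API calls; that wiring is outside the scope of this file.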