FFmpeg
mfenc.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #define COBJMACROS
20 #if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602
21 #undef _WIN32_WINNT
22 #define _WIN32_WINNT 0x0602
23 #endif
24 
25 #include "encode.h"
26 #include "mf_utils.h"
27 #include "libavutil/imgutils.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/time.h"
30 #include "codec_internal.h"
31 #include "internal.h"
32 #include "compat/w32dlfcn.h"
33 
34 typedef struct MFContext {
35  AVClass *av_class;
36  HMODULE library;
37  MFFunctions functions;
38  AVFrame *frame;
39  int is_video, is_audio;
40  GUID main_subtype;
41  IMFTransform *mft;
42  IMFMediaEventGenerator *async_events;
43  DWORD in_stream_id, out_stream_id;
44  MFT_INPUT_STREAM_INFO in_info;
45  MFT_OUTPUT_STREAM_INFO out_info;
46  int out_stream_provides_samples;
47  int draining, draining_done;
48  int sample_sent;
49  int async_need_input, async_have_output, async_marker;
50  int64_t reorder_delay;
51  ICodecAPI *codec_api;
52  // set by AVOption
53  int opt_enc_rc;
54  int opt_enc_quality;
55  int opt_enc_scenario;
56  int opt_enc_hw;
57 } MFContext;
58 
59 static int mf_choose_output_type(AVCodecContext *avctx);
60 static int mf_setup_context(AVCodecContext *avctx);
61 
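// Media Foundation expresses sample times and durations in 100 ns units, i.e.
// a fixed 1/10000000 time base; the helpers below rescale between this unit
// and the AVCodecContext time base.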
62 #define MF_TIMEBASE (AVRational){1, 10000000}
63 // Sentinel value only used by us.
64 #define MF_INVALID_TIME AV_NOPTS_VALUE
65 
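// For asynchronous (hardware) MFTs, input and output are gated by events from
// IMFMediaEventGenerator: METransformNeedInput, METransformHaveOutput,
// METransformDrainComplete and METransformMarker. This helper blocks until one
// of those flags is set; for synchronous MFTs (async_events is NULL) it is a no-op.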
66 static int mf_wait_events(AVCodecContext *avctx)
67 {
68  MFContext *c = avctx->priv_data;
69 
70  if (!c->async_events)
71  return 0;
72 
73  while (!(c->async_need_input || c->async_have_output || c->draining_done || c->async_marker)) {
74  IMFMediaEvent *ev = NULL;
75  MediaEventType ev_id = 0;
76  HRESULT hr = IMFMediaEventGenerator_GetEvent(c->async_events, 0, &ev);
77  if (FAILED(hr)) {
78  av_log(avctx, AV_LOG_ERROR, "IMFMediaEventGenerator_GetEvent() failed: %s\n",
79  ff_hr_str(hr));
80  return AVERROR_EXTERNAL;
81  }
82  IMFMediaEvent_GetType(ev, &ev_id);
83  switch (ev_id) {
84  case ff_METransformNeedInput:
85  if (!c->draining)
86  c->async_need_input = 1;
87  break;
88  case ff_METransformHaveOutput:
89  c->async_have_output = 1;
90  break;
91  case ff_METransformDrainComplete:
92  c->draining_done = 1;
93  break;
94  case ff_METransformMarker:
95  c->async_marker = 1;
96  break;
97  default: ;
98  }
99  IMFMediaEvent_Release(ev);
100  }
101 
102  return 0;
103 }
104 
105 static AVRational mf_get_tb(AVCodecContext *avctx)
106 {
107  if (avctx->time_base.num > 0 && avctx->time_base.den > 0)
108  return avctx->time_base;
109  return MF_TIMEBASE;
110 }
111 
112 static LONGLONG mf_to_mf_time(AVCodecContext *avctx, int64_t av_pts)
113 {
114  if (av_pts == AV_NOPTS_VALUE)
115  return MF_INVALID_TIME;
116  return av_rescale_q(av_pts, mf_get_tb(avctx), MF_TIMEBASE);
117 }
118 
119 static void mf_sample_set_pts(AVCodecContext *avctx, IMFSample *sample, int64_t av_pts)
120 {
121  LONGLONG stime = mf_to_mf_time(avctx, av_pts);
122  if (stime != MF_INVALID_TIME)
123  IMFSample_SetSampleTime(sample, stime);
124 }
125 
126 static int64_t mf_from_mf_time(AVCodecContext *avctx, LONGLONG stime)
127 {
128  return av_rescale_q(stime, MF_TIMEBASE, mf_get_tb(avctx));
129 }
130 
131 static int64_t mf_sample_get_pts(AVCodecContext *avctx, IMFSample *sample)
132 {
133  LONGLONG pts;
134  HRESULT hr = IMFSample_GetSampleTime(sample, &pts);
135  if (FAILED(hr))
136  return AV_NOPTS_VALUE;
137  return mf_from_mf_time(avctx, pts);
138 }
139 
140 static int mf_enca_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
141 {
142  MFContext *c = avctx->priv_data;
143  HRESULT hr;
144  UINT32 sz;
145 
146  if (avctx->codec_id != AV_CODEC_ID_MP3 && avctx->codec_id != AV_CODEC_ID_AC3) {
147  hr = IMFAttributes_GetBlobSize(type, &MF_MT_USER_DATA, &sz);
148  if (!FAILED(hr) && sz > 0) {
149  avctx->extradata = av_mallocz(sz + AV_INPUT_BUFFER_PADDING_SIZE);
150  if (!avctx->extradata)
151  return AVERROR(ENOMEM);
152  avctx->extradata_size = sz;
153  hr = IMFAttributes_GetBlob(type, &MF_MT_USER_DATA, avctx->extradata, sz, NULL);
154  if (FAILED(hr))
155  return AVERROR_EXTERNAL;
156 
157  if (avctx->codec_id == AV_CODEC_ID_AAC && avctx->extradata_size >= 12) {
158  // Get rid of HEAACWAVEINFO (after wfx field, 12 bytes).
159  avctx->extradata_size = avctx->extradata_size - 12;
160  memmove(avctx->extradata, avctx->extradata + 12, avctx->extradata_size);
161  }
162  }
163  }
164 
165  // I don't know where it's documented that we need this. It happens with the
166  // MS mp3 encoder MFT. The idea for the workaround is taken from NAudio.
167  // (Certainly any lossy codec will have frames much smaller than 1 second.)
168  if (!c->out_info.cbSize && !c->out_stream_provides_samples) {
169  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &sz);
170  if (!FAILED(hr)) {
171  av_log(avctx, AV_LOG_VERBOSE, "MFT_OUTPUT_STREAM_INFO.cbSize set to 0, "
172  "assuming %d bytes instead.\n", (int)sz);
173  c->out_info.cbSize = sz;
174  }
175  }
176 
177  return 0;
178 }
179 
180 static int mf_encv_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
181 {
182  HRESULT hr;
183  UINT32 sz;
184 
185  hr = IMFAttributes_GetBlobSize(type, &MF_MT_MPEG_SEQUENCE_HEADER, &sz);
186  if (!FAILED(hr) && sz > 0) {
187  uint8_t *extradata = av_mallocz(sz + AV_INPUT_BUFFER_PADDING_SIZE);
188  if (!extradata)
189  return AVERROR(ENOMEM);
190  hr = IMFAttributes_GetBlob(type, &MF_MT_MPEG_SEQUENCE_HEADER, extradata, sz, NULL);
191  if (FAILED(hr)) {
192  av_free(extradata);
193  return AVERROR_EXTERNAL;
194  }
195  av_freep(&avctx->extradata);
196  avctx->extradata = extradata;
197  avctx->extradata_size = sz;
198  }
199 
200  return 0;
201 }
202 
203 static int mf_output_type_get(AVCodecContext *avctx)
204 {
205  MFContext *c = avctx->priv_data;
206  HRESULT hr;
207  IMFMediaType *type;
208  int ret;
209 
210  hr = IMFTransform_GetOutputCurrentType(c->mft, c->out_stream_id, &type);
211  if (FAILED(hr)) {
212  av_log(avctx, AV_LOG_ERROR, "could not get output type\n");
213  return AVERROR_EXTERNAL;
214  }
215 
216  av_log(avctx, AV_LOG_VERBOSE, "final output type:\n");
217  ff_media_type_dump(avctx, type);
218 
219  ret = 0;
220  if (c->is_video) {
221  ret = mf_encv_output_type_get(avctx, type);
222  } else if (c->is_audio) {
223  ret = mf_enca_output_type_get(avctx, type);
224  }
225 
226  if (ret < 0)
227  av_log(avctx, AV_LOG_ERROR, "output type not supported\n");
228 
229  IMFMediaType_Release(type);
230  return ret;
231 }
232 
233 static int mf_sample_to_avpacket(AVCodecContext *avctx, IMFSample *sample, AVPacket *avpkt)
234 {
235  MFContext *c = avctx->priv_data;
236  HRESULT hr;
237  int ret;
238  DWORD len;
239  IMFMediaBuffer *buffer;
240  BYTE *data;
241  UINT64 t;
242  UINT32 t32;
243 
244  hr = IMFSample_GetTotalLength(sample, &len);
245  if (FAILED(hr))
246  return AVERROR_EXTERNAL;
247 
248  if ((ret = ff_get_encode_buffer(avctx, avpkt, len, 0)) < 0)
249  return ret;
250 
251  hr = IMFSample_ConvertToContiguousBuffer(sample, &buffer);
252  if (FAILED(hr))
253  return AVERROR_EXTERNAL;
254 
255  hr = IMFMediaBuffer_Lock(buffer, &data, NULL, NULL);
256  if (FAILED(hr)) {
257  IMFMediaBuffer_Release(buffer);
258  return AVERROR_EXTERNAL;
259  }
260 
261  memcpy(avpkt->data, data, len);
262 
263  IMFMediaBuffer_Unlock(buffer);
264  IMFMediaBuffer_Release(buffer);
265 
266  avpkt->pts = avpkt->dts = mf_sample_get_pts(avctx, sample);
267 
268  hr = IMFAttributes_GetUINT32(sample, &MFSampleExtension_CleanPoint, &t32);
269  if (c->is_audio || (!FAILED(hr) && t32 != 0))
270  avpkt->flags |= AV_PKT_FLAG_KEY;
271 
272  hr = IMFAttributes_GetUINT64(sample, &MFSampleExtension_DecodeTimestamp, &t);
273  if (!FAILED(hr)) {
274  avpkt->dts = mf_from_mf_time(avctx, t);
275  // At least on Qualcomm's HEVC encoder on SD 835, the output dts
276  // starts from the input pts of the first frame, while the output pts
277  // is shifted forward. Therefore, shift the output values back so that
278  // the output pts matches the input.
279  if (c->reorder_delay == AV_NOPTS_VALUE)
280  c->reorder_delay = avpkt->pts - avpkt->dts;
281  avpkt->dts -= c->reorder_delay;
282  avpkt->pts -= c->reorder_delay;
283  }
284 
285  return 0;
286 }
287 
288 static IMFSample *mf_a_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
289 {
290  MFContext *c = avctx->priv_data;
291  size_t len;
292  size_t bps;
293  IMFSample *sample;
294 
295  bps = av_get_bytes_per_sample(avctx->sample_fmt) * avctx->ch_layout.nb_channels;
296  len = frame->nb_samples * bps;
297 
298  sample = ff_create_memory_sample(&c->functions, frame->data[0], len,
299  c->in_info.cbAlignment);
300  if (sample)
301  IMFSample_SetSampleDuration(sample, mf_to_mf_time(avctx, frame->nb_samples));
302  return sample;
303 }
304 
305 static IMFSample *mf_v_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
306 {
307  MFContext *c = avctx->priv_data;
308  IMFSample *sample;
309  IMFMediaBuffer *buffer;
310  BYTE *data;
311  HRESULT hr;
312  int ret;
313  int size;
314 
315  size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width, avctx->height, 1);
316  if (size < 0)
317  return NULL;
318 
319  sample = ff_create_memory_sample(&c->functions, NULL, size,
320  c->in_info.cbAlignment);
321  if (!sample)
322  return NULL;
323 
324  hr = IMFSample_GetBufferByIndex(sample, 0, &buffer);
325  if (FAILED(hr)) {
326  IMFSample_Release(sample);
327  return NULL;
328  }
329 
330  hr = IMFMediaBuffer_Lock(buffer, &data, NULL, NULL);
331  if (FAILED(hr)) {
332  IMFMediaBuffer_Release(buffer);
333  IMFSample_Release(sample);
334  return NULL;
335  }
336 
337  ret = av_image_copy_to_buffer((uint8_t *)data, size, (void *)frame->data, frame->linesize,
338  avctx->pix_fmt, avctx->width, avctx->height, 1);
339  IMFMediaBuffer_SetCurrentLength(buffer, size);
340  IMFMediaBuffer_Unlock(buffer);
341  IMFMediaBuffer_Release(buffer);
342  if (ret < 0) {
343  IMFSample_Release(sample);
344  return NULL;
345  }
346 
347  IMFSample_SetSampleDuration(sample, mf_to_mf_time(avctx, frame->duration));
348 
349  return sample;
350 }
351 
352 static IMFSample *mf_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
353 {
354  MFContext *c = avctx->priv_data;
355  IMFSample *sample;
356 
357  if (c->is_audio) {
358  sample = mf_a_avframe_to_sample(avctx, frame);
359  } else {
360  sample = mf_v_avframe_to_sample(avctx, frame);
361  }
362 
363  if (sample)
364  mf_sample_set_pts(avctx, sample, frame->pts);
365 
366  return sample;
367 }
368 
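// Feed one IMFSample to the MFT, or request draining when sample is NULL.
// Returns AVERROR(EAGAIN) if the transform currently accepts no input, and
// AVERROR_EOF once draining has already been requested.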
369 static int mf_send_sample(AVCodecContext *avctx, IMFSample *sample)
370 {
371  MFContext *c = avctx->priv_data;
372  HRESULT hr;
373  int ret;
374 
375  if (sample) {
376  if (c->async_events) {
377  if ((ret = mf_wait_events(avctx)) < 0)
378  return ret;
379  if (!c->async_need_input)
380  return AVERROR(EAGAIN);
381  }
382  if (!c->sample_sent)
383  IMFSample_SetUINT32(sample, &MFSampleExtension_Discontinuity, TRUE);
384  c->sample_sent = 1;
385  hr = IMFTransform_ProcessInput(c->mft, c->in_stream_id, sample, 0);
386  if (hr == MF_E_NOTACCEPTING) {
387  return AVERROR(EAGAIN);
388  } else if (FAILED(hr)) {
389  av_log(avctx, AV_LOG_ERROR, "failed processing input: %s\n", ff_hr_str(hr));
390  return AVERROR_EXTERNAL;
391  }
392  c->async_need_input = 0;
393  } else if (!c->draining) {
394  hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_COMMAND_DRAIN, 0);
395  if (FAILED(hr))
396  av_log(avctx, AV_LOG_ERROR, "failed draining: %s\n", ff_hr_str(hr));
397  // Some MFTs (AC3) will send a frame after each drain command (???), so
398  // this is required to make draining actually terminate.
399  c->draining = 1;
400  c->async_need_input = 0;
401  } else {
402  return AVERROR_EOF;
403  }
404  return 0;
405 }
406 
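// Pull one encoded sample via IMFTransform_ProcessOutput(). The output buffer
// is allocated here unless the stream provides its own samples, and the media
// types are renegotiated on MF_E_TRANSFORM_STREAM_CHANGE.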
407 static int mf_receive_sample(AVCodecContext *avctx, IMFSample **out_sample)
408 {
409  MFContext *c = avctx->priv_data;
410  HRESULT hr;
411  DWORD st;
412  MFT_OUTPUT_DATA_BUFFER out_buffers;
413  IMFSample *sample;
414  int ret = 0;
415 
416  while (1) {
417  *out_sample = NULL;
418  sample = NULL;
419 
420  if (c->async_events) {
421  if ((ret = mf_wait_events(avctx)) < 0)
422  return ret;
423  if (!c->async_have_output || c->draining_done) {
424  ret = 0;
425  break;
426  }
427  }
428 
429  if (!c->out_stream_provides_samples) {
430  sample = ff_create_memory_sample(&c->functions, NULL,
431  c->out_info.cbSize,
432  c->out_info.cbAlignment);
433  if (!sample)
434  return AVERROR(ENOMEM);
435  }
436 
437  out_buffers = (MFT_OUTPUT_DATA_BUFFER) {
438  .dwStreamID = c->out_stream_id,
439  .pSample = sample,
440  };
441 
442  st = 0;
443  hr = IMFTransform_ProcessOutput(c->mft, 0, 1, &out_buffers, &st);
444 
445  if (out_buffers.pEvents)
446  IMFCollection_Release(out_buffers.pEvents);
447 
448  if (!FAILED(hr)) {
449  *out_sample = out_buffers.pSample;
450  ret = 0;
451  break;
452  }
453 
454  if (out_buffers.pSample)
455  IMFSample_Release(out_buffers.pSample);
456 
457  if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
458  if (c->draining)
459  c->draining_done = 1;
460  ret = 0;
461  } else if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
462  av_log(avctx, AV_LOG_WARNING, "stream format change\n");
463  ret = mf_choose_output_type(avctx);
464  if (ret == 0) // we don't expect renegotiating the input type
465  ret = mf_choose_input_type(avctx);
466  if (ret > 0) {
467  ret = mf_setup_context(avctx);
468  if (ret >= 0) {
469  c->async_have_output = 0;
470  continue;
471  }
472  }
473  } else {
474  av_log(avctx, AV_LOG_ERROR, "failed processing output: %s\n", ff_hr_str(hr));
475  ret = AVERROR_EXTERNAL;
476  }
477 
478  break;
479  }
480 
481  c->async_have_output = 0;
482 
483  if (ret >= 0 && !*out_sample)
484  ret = c->draining_done ? AVERROR_EOF : AVERROR(EAGAIN);
485 
486  return ret;
487 }
488 
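// FFCodec.receive_packet callback: fetch the next AVFrame from the encode
// queue, wrap it in an IMFSample, feed it to the MFT, then try to retrieve one
// encoded sample and convert it into an AVPacket.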
489 static int mf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
490 {
491  MFContext *c = avctx->priv_data;
492  IMFSample *sample = NULL;
493  int ret;
494 
495  if (!c->frame->buf[0]) {
496  ret = ff_encode_get_frame(avctx, c->frame);
497  if (ret < 0 && ret != AVERROR_EOF)
498  return ret;
499  }
500 
501  if (c->frame->buf[0]) {
502  sample = mf_avframe_to_sample(avctx, c->frame);
503  if (!sample) {
504  av_frame_unref(c->frame);
505  return AVERROR(ENOMEM);
506  }
507  if (c->is_video && c->codec_api) {
508  if (c->frame->pict_type == AV_PICTURE_TYPE_I || !c->sample_sent)
509  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncVideoForceKeyFrame, FF_VAL_VT_UI4(1));
510  }
511  }
512 
513  ret = mf_send_sample(avctx, sample);
514  if (sample)
515  IMFSample_Release(sample);
516  if (ret != AVERROR(EAGAIN))
517  av_frame_unref(c->frame);
518  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
519  return ret;
520 
521  ret = mf_receive_sample(avctx, &sample);
522  if (ret < 0)
523  return ret;
524 
525  ret = mf_sample_to_avpacket(avctx, sample, avpkt);
526  IMFSample_Release(sample);
527 
528  return ret;
529 }
530 
531 // Most encoders seem to enumerate supported audio formats on the output types,
532 // at least as far as channel configuration and sample rate are concerned. Pick
533 // the one which seems to match best.
534 static int64_t mf_enca_output_score(AVCodecContext *avctx, IMFMediaType *type)
535 {
536  MFContext *c = avctx->priv_data;
537  HRESULT hr;
538  UINT32 t;
539  GUID tg;
540  int64_t score = 0;
541 
542  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
543  if (!FAILED(hr) && t == avctx->sample_rate)
544  score |= 1LL << 32;
545 
546  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
547  if (!FAILED(hr) && t == avctx->ch_layout.nb_channels)
548  score |= 2LL << 32;
549 
550  hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
551  if (!FAILED(hr)) {
552  if (IsEqualGUID(&c->main_subtype, &tg))
553  score |= 4LL << 32;
554  }
555 
556  // Select the bitrate (lowest priority).
557  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &t);
558  if (!FAILED(hr)) {
559  int diff = (int)t - avctx->bit_rate / 8;
560  if (diff >= 0) {
561  score |= (1LL << 31) - diff; // prefer lower bitrate
562  } else {
563  score |= (1LL << 30) + diff; // prefer higher bitrate
564  }
565  }
566 
567  hr = IMFAttributes_GetUINT32(type, &MF_MT_AAC_PAYLOAD_TYPE, &t);
568  if (!FAILED(hr) && t != 0)
569  return -1;
570 
571  return score;
572 }
573 
574 static int mf_enca_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
575 {
576 // (some encoders allow adjusting this freely, but it can also cause failure
577  // to set the output type - so it's commented for being too fragile)
578  //IMFAttributes_SetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, avctx->bit_rate / 8);
579  //IMFAttributes_SetUINT32(type, &MF_MT_AVG_BITRATE, avctx->bit_rate);
580 
581  return 0;
582 }
583 
584 static int64_t mf_enca_input_score(AVCodecContext *avctx, IMFMediaType *type)
585 {
586  HRESULT hr;
587  UINT32 t;
588  int64_t score = 0;
589 
590  enum AVSampleFormat sformat = ff_media_type_to_sample_fmt((IMFAttributes *)type);
591  if (sformat == AV_SAMPLE_FMT_NONE)
592  return -1; // can not use
593 
594  if (sformat == avctx->sample_fmt)
595  score |= 1;
596 
597  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
598  if (!FAILED(hr) && t == avctx->sample_rate)
599  score |= 2;
600 
601  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
602  if (!FAILED(hr) && t == avctx->ch_layout.nb_channels)
603  score |= 4;
604 
605  return score;
606 }
607 
608 static int mf_enca_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
609 {
610  HRESULT hr;
611  UINT32 t;
612 
613  enum AVSampleFormat sformat = ff_media_type_to_sample_fmt((IMFAttributes *)type);
614  if (sformat != avctx->sample_fmt) {
615  av_log(avctx, AV_LOG_ERROR, "unsupported input sample format set\n");
616  return AVERROR(EINVAL);
617  }
618 
619  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
620  if (FAILED(hr) || t != avctx->sample_rate) {
621  av_log(avctx, AV_LOG_ERROR, "unsupported input sample rate set\n");
622  return AVERROR(EINVAL);
623  }
624 
625  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
626  if (FAILED(hr) || t != avctx->ch_layout.nb_channels) {
627  av_log(avctx, AV_LOG_ERROR, "unsupported input channel number set\n");
628  return AVERROR(EINVAL);
629  }
630 
631  return 0;
632 }
633 
634 static int64_t mf_encv_output_score(AVCodecContext *avctx, IMFMediaType *type)
635 {
636  MFContext *c = avctx->priv_data;
637  GUID tg;
638  HRESULT hr;
639  int score = -1;
640 
641  hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
642  if (!FAILED(hr)) {
643  if (IsEqualGUID(&c->main_subtype, &tg))
644  score = 1;
645  }
646 
647  return score;
648 }
649 
650 static int mf_encv_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
651 {
652  MFContext *c = avctx->priv_data;
653  AVRational framerate;
654 
655  ff_MFSetAttributeSize((IMFAttributes *)type, &MF_MT_FRAME_SIZE, avctx->width, avctx->height);
656  IMFAttributes_SetUINT32(type, &MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
657 
658  if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
659  framerate = avctx->framerate;
660  } else {
661  framerate = av_inv_q(avctx->time_base);
662 #if FF_API_TICKS_PER_FRAME
663 FF_DISABLE_DEPRECATION_WARNINGS
664  framerate.den *= avctx->ticks_per_frame;
665 FF_ENABLE_DEPRECATION_WARNINGS
666 #endif
667  }
668 
669  ff_MFSetAttributeRatio((IMFAttributes *)type, &MF_MT_FRAME_RATE, framerate.num, framerate.den);
670 
671  // (MS HEVC supports eAVEncH265VProfile_Main_420_8 only.)
672  if (avctx->codec_id == AV_CODEC_ID_H264) {
673  UINT32 profile = ff_eAVEncH264VProfile_Base;
674  switch (avctx->profile) {
675  case AV_PROFILE_H264_MAIN:
676  profile = ff_eAVEncH264VProfile_Main;
677  break;
678  case AV_PROFILE_H264_HIGH:
679  profile = ff_eAVEncH264VProfile_High;
680  break;
681  }
682  IMFAttributes_SetUINT32(type, &MF_MT_MPEG2_PROFILE, profile);
683  }
684 
685  IMFAttributes_SetUINT32(type, &MF_MT_AVG_BITRATE, avctx->bit_rate);
686 
687  // Note that some of the ICodecAPI options must be set before SetOutputType.
688  if (c->codec_api) {
689  if (avctx->bit_rate)
690  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonMeanBitRate, FF_VAL_VT_UI4(avctx->bit_rate));
691 
692  if (c->opt_enc_rc >= 0)
693  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonRateControlMode, FF_VAL_VT_UI4(c->opt_enc_rc));
694 
695  if (c->opt_enc_quality >= 0)
696  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonQuality, FF_VAL_VT_UI4(c->opt_enc_quality));
697 
698  // Always set the number of b-frames. Qualcomm's HEVC encoder on SD835
699  // defaults this to 1, and that setting is buggy with many of the
700  // rate control modes. (0 or 2 b-frames works fine with most rate
701  // control modes, but 2 seems buggy with the u_vbr mode.) Setting
702  // "scenario" to "camera_record" sets it in CFR mode (where the default
703  // is VFR), which makes the encoder avoid dropping frames.
704  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncMPVDefaultBPictureCount, FF_VAL_VT_UI4(avctx->max_b_frames));
705  avctx->has_b_frames = avctx->max_b_frames > 0;
706 
707  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncH264CABACEnable, FF_VAL_VT_BOOL(1));
708 
709  if (c->opt_enc_scenario >= 0)
710  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVScenarioInfo, FF_VAL_VT_UI4(c->opt_enc_scenario));
711  }
712 
713  return 0;
714 }
715 
716 static int64_t mf_encv_input_score(AVCodecContext *avctx, IMFMediaType *type)
717 {
718  enum AVPixelFormat pix_fmt = ff_media_type_to_pix_fmt((IMFAttributes *)type);
719  if (pix_fmt != avctx->pix_fmt)
720  return -1; // can not use
721 
722  return 0;
723 }
724 
725 static int mf_encv_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
726 {
727  enum AVPixelFormat pix_fmt = ff_media_type_to_pix_fmt((IMFAttributes *)type);
728  if (pix_fmt != avctx->pix_fmt) {
729  av_log(avctx, AV_LOG_ERROR, "unsupported input pixel format set\n");
730  return AVERROR(EINVAL);
731  }
732 
733  //ff_MFSetAttributeSize((IMFAttributes *)type, &MF_MT_FRAME_SIZE, avctx->width, avctx->height);
734 
735  return 0;
736 }
737 
738 static int mf_choose_output_type(AVCodecContext *avctx)
739 {
740  MFContext *c = avctx->priv_data;
741  HRESULT hr;
742  int ret;
743  IMFMediaType *out_type = NULL;
744  int64_t out_type_score = -1;
745  int out_type_index = -1;
746  int n;
747 
748  av_log(avctx, AV_LOG_VERBOSE, "output types:\n");
749  for (n = 0; ; n++) {
750  IMFMediaType *type;
751  int64_t score = -1;
752 
753  hr = IMFTransform_GetOutputAvailableType(c->mft, c->out_stream_id, n, &type);
754  if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
755  break;
756  if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
757  av_log(avctx, AV_LOG_VERBOSE, "(need to set input type)\n");
758  ret = 0;
759  goto done;
760  }
761  if (FAILED(hr)) {
762  av_log(avctx, AV_LOG_ERROR, "error getting output type: %s\n", ff_hr_str(hr));
763  ret = AVERROR_EXTERNAL;
764  goto done;
765  }
766 
767  av_log(avctx, AV_LOG_VERBOSE, "output type %d:\n", n);
768  ff_media_type_dump(avctx, type);
769 
770  if (c->is_video) {
771  score = mf_encv_output_score(avctx, type);
772  } else if (c->is_audio) {
773  score = mf_enca_output_score(avctx, type);
774  }
775 
776  if (score > out_type_score) {
777  if (out_type)
778  IMFMediaType_Release(out_type);
779  out_type = type;
780  out_type_score = score;
781  out_type_index = n;
782  IMFMediaType_AddRef(out_type);
783  }
784 
785  IMFMediaType_Release(type);
786  }
787 
788  if (out_type) {
789  av_log(avctx, AV_LOG_VERBOSE, "picking output type %d.\n", out_type_index);
790  } else {
791  hr = c->functions.MFCreateMediaType(&out_type);
792  if (FAILED(hr)) {
793  ret = AVERROR(ENOMEM);
794  goto done;
795  }
796  }
797 
798  ret = 0;
799  if (c->is_video) {
800  ret = mf_encv_output_adjust(avctx, out_type);
801  } else if (c->is_audio) {
802  ret = mf_enca_output_adjust(avctx, out_type);
803  }
804 
805  if (ret >= 0) {
806  av_log(avctx, AV_LOG_VERBOSE, "setting output type:\n");
807  ff_media_type_dump(avctx, out_type);
808 
809  hr = IMFTransform_SetOutputType(c->mft, c->out_stream_id, out_type, 0);
810  if (!FAILED(hr)) {
811  ret = 1;
812  } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
813  av_log(avctx, AV_LOG_VERBOSE, "rejected - need to set input type\n");
814  ret = 0;
815  } else {
816  av_log(avctx, AV_LOG_ERROR, "could not set output type (%s)\n", ff_hr_str(hr));
817  ret = AVERROR_EXTERNAL;
818  }
819  }
820 
821 done:
822  if (out_type)
823  IMFMediaType_Release(out_type);
824  return ret;
825 }
826 
827 static int mf_choose_input_type(AVCodecContext *avctx)
828 {
829  MFContext *c = avctx->priv_data;
830  HRESULT hr;
831  int ret;
832  IMFMediaType *in_type = NULL;
833  int64_t in_type_score = -1;
834  int in_type_index = -1;
835  int n;
836 
837  av_log(avctx, AV_LOG_VERBOSE, "input types:\n");
838  for (n = 0; ; n++) {
839  IMFMediaType *type = NULL;
840  int64_t score = -1;
841 
842  hr = IMFTransform_GetInputAvailableType(c->mft, c->in_stream_id, n, &type);
843  if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
844  break;
845  if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
846  av_log(avctx, AV_LOG_VERBOSE, "(need to set output type 1)\n");
847  ret = 0;
848  goto done;
849  }
850  if (FAILED(hr)) {
851  av_log(avctx, AV_LOG_ERROR, "error getting input type: %s\n", ff_hr_str(hr));
852  ret = AVERROR_EXTERNAL;
853  goto done;
854  }
855 
856  av_log(avctx, AV_LOG_VERBOSE, "input type %d:\n", n);
857  ff_media_type_dump(avctx, type);
858 
859  if (c->is_video) {
860  score = mf_encv_input_score(avctx, type);
861  } else if (c->is_audio) {
862  score = mf_enca_input_score(avctx, type);
863  }
864 
865  if (score > in_type_score) {
866  if (in_type)
867  IMFMediaType_Release(in_type);
868  in_type = type;
869  in_type_score = score;
870  in_type_index = n;
871  IMFMediaType_AddRef(in_type);
872  }
873 
874  IMFMediaType_Release(type);
875  }
876 
877  if (in_type) {
878  av_log(avctx, AV_LOG_VERBOSE, "picking input type %d.\n", in_type_index);
879  } else {
880  // Some buggy MFTs (WMA encoder) fail to return MF_E_TRANSFORM_TYPE_NOT_SET.
881  av_log(avctx, AV_LOG_VERBOSE, "(need to set output type 2)\n");
882  ret = 0;
883  goto done;
884  }
885 
886  ret = 0;
887  if (c->is_video) {
888  ret = mf_encv_input_adjust(avctx, in_type);
889  } else if (c->is_audio) {
890  ret = mf_enca_input_adjust(avctx, in_type);
891  }
892 
893  if (ret >= 0) {
894  av_log(avctx, AV_LOG_VERBOSE, "setting input type:\n");
895  ff_media_type_dump(avctx, in_type);
896 
897  hr = IMFTransform_SetInputType(c->mft, c->in_stream_id, in_type, 0);
898  if (!FAILED(hr)) {
899  ret = 1;
900  } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
901  av_log(avctx, AV_LOG_VERBOSE, "rejected - need to set output type\n");
902  ret = 0;
903  } else {
904  av_log(avctx, AV_LOG_ERROR, "could not set input type (%s)\n", ff_hr_str(hr));
905  ret = AVERROR_EXTERNAL;
906  }
907  }
908 
909 done:
910  if (in_type)
911  IMFMediaType_Release(in_type);
912  return ret;
913 }
914 
915 static int mf_negotiate_types(AVCodecContext *avctx)
916 {
917  // This follows steps 1-5 on:
918  // https://msdn.microsoft.com/en-us/library/windows/desktop/aa965264(v=vs.85).aspx
919  // If every MFT implementer does this correctly, this loop should at worst
920  // be repeated once.
921  int need_input = 1, need_output = 1;
922  int n;
923  for (n = 0; n < 2 && (need_input || need_output); n++) {
924  int ret;
925  ret = mf_choose_input_type(avctx);
926  if (ret < 0)
927  return ret;
928  need_input = ret < 1;
929  ret = mf_choose_output_type(avctx);
930  if (ret < 0)
931  return ret;
932  need_output = ret < 1;
933  }
934  if (need_input || need_output) {
935  av_log(avctx, AV_LOG_ERROR, "format negotiation failed (%d/%d)\n",
936  need_input, need_output);
937  return AVERROR_EXTERNAL;
938  }
939  return 0;
940 }
941 
942 static int mf_setup_context(AVCodecContext *avctx)
943 {
944  MFContext *c = avctx->priv_data;
945  HRESULT hr;
946  int ret;
947 
948  hr = IMFTransform_GetInputStreamInfo(c->mft, c->in_stream_id, &c->in_info);
949  if (FAILED(hr))
950  return AVERROR_EXTERNAL;
951  av_log(avctx, AV_LOG_VERBOSE, "in_info: size=%d, align=%d\n",
952  (int)c->in_info.cbSize, (int)c->in_info.cbAlignment);
953 
954  hr = IMFTransform_GetOutputStreamInfo(c->mft, c->out_stream_id, &c->out_info);
955  if (FAILED(hr))
956  return AVERROR_EXTERNAL;
957  c->out_stream_provides_samples =
958  (c->out_info.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES) ||
959  (c->out_info.dwFlags & MFT_OUTPUT_STREAM_CAN_PROVIDE_SAMPLES);
960  av_log(avctx, AV_LOG_VERBOSE, "out_info: size=%d, align=%d%s\n",
961  (int)c->out_info.cbSize, (int)c->out_info.cbAlignment,
962  c->out_stream_provides_samples ? " (provides samples)" : "");
963 
964  if ((ret = mf_output_type_get(avctx)) < 0)
965  return ret;
966 
967  return 0;
968 }
969 
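// Hardware MFTs are asynchronous and must be unlocked by setting
// MF_TRANSFORM_ASYNC_UNLOCK before ProcessInput/ProcessOutput may be called;
// the event generator interface queried here feeds mf_wait_events().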
970 static int mf_unlock_async(AVCodecContext *avctx)
971 {
972  MFContext *c = avctx->priv_data;
973  HRESULT hr;
974  IMFAttributes *attrs;
975  UINT32 v;
976  int res = AVERROR_EXTERNAL;
977 
978  // For hw encoding we unfortunately need to use async mode, otherwise
979  // play it safe and avoid it.
980  if (!(c->is_video && c->opt_enc_hw))
981  return 0;
982 
983  hr = IMFTransform_GetAttributes(c->mft, &attrs);
984  if (FAILED(hr)) {
985  av_log(avctx, AV_LOG_ERROR, "error retrieving MFT attributes: %s\n", ff_hr_str(hr));
986  goto err;
987  }
988 
989  hr = IMFAttributes_GetUINT32(attrs, &MF_TRANSFORM_ASYNC, &v);
990  if (FAILED(hr)) {
991  av_log(avctx, AV_LOG_ERROR, "error querying async: %s\n", ff_hr_str(hr));
992  goto err;
993  }
994 
995  if (!v) {
996  av_log(avctx, AV_LOG_ERROR, "hardware MFT is not async\n");
997  goto err;
998  }
999 
1000  hr = IMFAttributes_SetUINT32(attrs, &MF_TRANSFORM_ASYNC_UNLOCK, TRUE);
1001  if (FAILED(hr)) {
1002  av_log(avctx, AV_LOG_ERROR, "could not set async unlock: %s\n", ff_hr_str(hr));
1003  goto err;
1004  }
1005 
1006  hr = IMFTransform_QueryInterface(c->mft, &IID_IMFMediaEventGenerator, (void **)&c->async_events);
1007  if (FAILED(hr)) {
1008  av_log(avctx, AV_LOG_ERROR, "could not get async interface\n");
1009  goto err;
1010  }
1011 
1012  res = 0;
1013 
1014 err:
1015  IMFAttributes_Release(attrs);
1016  return res;
1017 }
1018 
1019 static int mf_create(void *log, MFFunctions *f, IMFTransform **mft,
1020  const AVCodec *codec, int use_hw)
1021 {
1022  int is_audio = codec->type == AVMEDIA_TYPE_AUDIO;
1023  const CLSID *subtype = ff_codec_to_mf_subtype(codec->id);
1024  MFT_REGISTER_TYPE_INFO reg = {0};
1025  GUID category;
1026  int ret;
1027 
1028  *mft = NULL;
1029 
1030  if (!subtype)
1031  return AVERROR(ENOSYS);
1032 
1033  reg.guidSubtype = *subtype;
1034 
1035  if (is_audio) {
1036  reg.guidMajorType = MFMediaType_Audio;
1037  category = MFT_CATEGORY_AUDIO_ENCODER;
1038  } else {
1039  reg.guidMajorType = MFMediaType_Video;
1040  category = MFT_CATEGORY_VIDEO_ENCODER;
1041  }
1042 
1043  if ((ret = ff_instantiate_mf(log, f, category, NULL, &reg, use_hw, mft)) < 0)
1044  return ret;
1045 
1046  return 0;
1047 }
1048 
1049 static int mf_init_encoder(AVCodecContext *avctx)
1050 {
1051  MFContext *c = avctx->priv_data;
1052  HRESULT hr;
1053  int ret;
1054  const CLSID *subtype = ff_codec_to_mf_subtype(avctx->codec_id);
1055  int use_hw = 0;
1056 
1057  c->frame = av_frame_alloc();
1058  if (!c->frame)
1059  return AVERROR(ENOMEM);
1060 
1061  c->is_audio = avctx->codec_type == AVMEDIA_TYPE_AUDIO;
1062  c->is_video = !c->is_audio;
1063  c->reorder_delay = AV_NOPTS_VALUE;
1064 
1065  if (c->is_video && c->opt_enc_hw)
1066  use_hw = 1;
1067 
1068  if (!subtype)
1069  return AVERROR(ENOSYS);
1070 
1071  c->main_subtype = *subtype;
1072 
1073  if ((ret = mf_create(avctx, &c->functions, &c->mft, avctx->codec, use_hw)) < 0)
1074  return ret;
1075 
1076  if ((ret = mf_unlock_async(avctx)) < 0)
1077  return ret;
1078 
1079  hr = IMFTransform_QueryInterface(c->mft, &IID_ICodecAPI, (void **)&c->codec_api);
1080  if (!FAILED(hr))
1081  av_log(avctx, AV_LOG_VERBOSE, "MFT supports ICodecAPI.\n");
1082 
1083 
1084  hr = IMFTransform_GetStreamIDs(c->mft, 1, &c->in_stream_id, 1, &c->out_stream_id);
1085  if (hr == E_NOTIMPL) {
1086  c->in_stream_id = c->out_stream_id = 0;
1087  } else if (FAILED(hr)) {
1088  av_log(avctx, AV_LOG_ERROR, "could not get stream IDs (%s)\n", ff_hr_str(hr));
1089  return AVERROR_EXTERNAL;
1090  }
1091 
1092  if ((ret = mf_negotiate_types(avctx)) < 0)
1093  return ret;
1094 
1095  if ((ret = mf_setup_context(avctx)) < 0)
1096  return ret;
1097 
1098  hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);
1099  if (FAILED(hr)) {
1100  av_log(avctx, AV_LOG_ERROR, "could not start streaming (%s)\n", ff_hr_str(hr));
1101  return AVERROR_EXTERNAL;
1102  }
1103 
1104  hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0);
1105  if (FAILED(hr)) {
1106  av_log(avctx, AV_LOG_ERROR, "could not start stream (%s)\n", ff_hr_str(hr));
1107  return AVERROR_EXTERNAL;
1108  }
1109 
1110  if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER && c->async_events &&
1111  c->is_video && !avctx->extradata) {
1112  int sleep = 10000, total = 0;
1113  av_log(avctx, AV_LOG_VERBOSE, "Awaiting extradata\n");
1114  while (total < 70*1000) {
1115  // The Qualcomm H264 encoder on SD835 doesn't provide extradata
1116  // immediately, but it becomes available soon after init (without
1117  // any waitable event). In practice, it's available after less
1118  // than 10 ms, but wait for up to 70 ms before giving up.
1119  // Some encoders (Qualcomm's HEVC encoder on SD835, some versions
1120  // of the QSV H264 encoder at least) don't provide extradata this
1121  // way at all, not even after encoding a frame - it's only
1122  // available prepended to frames.
1123  av_usleep(sleep);
1124  total += sleep;
1125  mf_output_type_get(avctx);
1126  if (avctx->extradata)
1127  break;
1128  sleep *= 2;
1129  }
1130  av_log(avctx, AV_LOG_VERBOSE, "%s extradata in %d ms\n",
1131  avctx->extradata ? "Got" : "Didn't get", total / 1000);
1132  }
1133 
1134  return 0;
1135 }
1136 
1137 #if !HAVE_UWP
1138 #define LOAD_MF_FUNCTION(context, func_name) \
1139  context->functions.func_name = (void *)dlsym(context->library, #func_name); \
1140  if (!context->functions.func_name) { \
1141  av_log(context, AV_LOG_ERROR, "DLL mfplat.dll failed to find function "\
1142  #func_name "\n"); \
1143  return AVERROR_UNKNOWN; \
1144  }
1145 #else
1146 // In UWP (which lacks LoadLibrary), just link directly against
1147 // the functions - this requires building with new/complete enough
1148 // import libraries.
1149 #define LOAD_MF_FUNCTION(context, func_name) \
1150  context->functions.func_name = func_name; \
1151  if (!context->functions.func_name) { \
1152  av_log(context, AV_LOG_ERROR, "Failed to find function " #func_name \
1153  "\n"); \
1154  return AVERROR_UNKNOWN; \
1155  }
1156 #endif
1157 
1158 // Windows N editions do not provide MediaFoundation by default.
1159 // To avoid a DLL loading error, MediaFoundation is loaded dynamically, except
1160 // on UWP builds, where LoadLibrary is not available.
1161 static int mf_load_library(AVCodecContext *avctx)
1162 {
1163  MFContext *c = avctx->priv_data;
1164 
1165 #if !HAVE_UWP
1166  c->library = dlopen("mfplat.dll", 0);
1167 
1168  if (!c->library) {
1169  av_log(c, AV_LOG_ERROR, "DLL mfplat.dll failed to open\n");
1170  return AVERROR_UNKNOWN;
1171  }
1172 #endif
1173 
1174  LOAD_MF_FUNCTION(c, MFStartup);
1175  LOAD_MF_FUNCTION(c, MFShutdown);
1176  LOAD_MF_FUNCTION(c, MFCreateAlignedMemoryBuffer);
1177  LOAD_MF_FUNCTION(c, MFCreateSample);
1178  LOAD_MF_FUNCTION(c, MFCreateMediaType);
1179  // MFTEnumEx is missing in Windows Vista's mfplat.dll.
1180  LOAD_MF_FUNCTION(c, MFTEnumEx);
1181 
1182  return 0;
1183 }
1184 
1185 static int mf_close(AVCodecContext *avctx)
1186 {
1187  MFContext *c = avctx->priv_data;
1188 
1189  if (c->codec_api)
1190  ICodecAPI_Release(c->codec_api);
1191 
1192  if (c->async_events)
1193  IMFMediaEventGenerator_Release(c->async_events);
1194 
1195 #if !HAVE_UWP
1196  if (c->library)
1197  ff_free_mf(&c->functions, &c->mft);
1198 
1199  dlclose(c->library);
1200  c->library = NULL;
1201 #else
1202  ff_free_mf(&c->functions, &c->mft);
1203 #endif
1204 
1205  av_frame_free(&c->frame);
1206 
1207  av_freep(&avctx->extradata);
1208  avctx->extradata_size = 0;
1209 
1210  return 0;
1211 }
1212 
1213 static int mf_init(AVCodecContext *avctx)
1214 {
1215  int ret;
1216  if ((ret = mf_load_library(avctx)) == 0) {
1217  if ((ret = mf_init_encoder(avctx)) == 0) {
1218  return 0;
1219  }
1220  }
1221  return ret;
1222 }
1223 
1224 #define OFFSET(x) offsetof(MFContext, x)
1225 
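// MF_ENCODER() expands to the AVClass and FFCodec definitions for one
// MediaFoundation wrapper; the audio (aac/ac3/mp3) and video (h264/hevc)
// encoders below differ only in options, supported formats and capabilities.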
1226 #define MF_ENCODER(MEDIATYPE, NAME, ID, OPTS, FMTS, CAPS) \
1227  static const AVClass ff_ ## NAME ## _mf_encoder_class = { \
1228  .class_name = #NAME "_mf", \
1229  .item_name = av_default_item_name, \
1230  .option = OPTS, \
1231  .version = LIBAVUTIL_VERSION_INT, \
1232  }; \
1233  const FFCodec ff_ ## NAME ## _mf_encoder = { \
1234  .p.priv_class = &ff_ ## NAME ## _mf_encoder_class, \
1235  .p.name = #NAME "_mf", \
1236  CODEC_LONG_NAME(#ID " via MediaFoundation"), \
1237  .p.type = AVMEDIA_TYPE_ ## MEDIATYPE, \
1238  .p.id = AV_CODEC_ID_ ## ID, \
1239  .priv_data_size = sizeof(MFContext), \
1240  .init = mf_init, \
1241  .close = mf_close, \
1242  FF_CODEC_RECEIVE_PACKET_CB(mf_receive_packet), \
1243  FMTS \
1244  CAPS \
1245  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, \
1246  };
1247 
1248 #define AFMTS \
1249  .p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, \
1250  AV_SAMPLE_FMT_NONE },
1251 #define ACAPS \
1252  .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID | \
1253  AV_CODEC_CAP_DR1 | AV_CODEC_CAP_VARIABLE_FRAME_SIZE,
1254 
1255 MF_ENCODER(AUDIO, aac, AAC, NULL, AFMTS, ACAPS);
1256 MF_ENCODER(AUDIO, ac3, AC3, NULL, AFMTS, ACAPS);
1257 MF_ENCODER(AUDIO, mp3, MP3, NULL, AFMTS, ACAPS);
1258 
1259 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1260 static const AVOption venc_opts[] = {
1261  {"rate_control", "Select rate control mode", OFFSET(opt_enc_rc), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE, .unit = "rate_control"},
1262  { "default", "Default mode", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, .unit = "rate_control"},
1263  { "cbr", "CBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_CBR}, 0, 0, VE, .unit = "rate_control"},
1264  { "pc_vbr", "Peak constrained VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_PeakConstrainedVBR}, 0, 0, VE, .unit = "rate_control"},
1265  { "u_vbr", "Unconstrained VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_UnconstrainedVBR}, 0, 0, VE, .unit = "rate_control"},
1266  { "quality", "Quality mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_Quality}, 0, 0, VE, .unit = "rate_control" },
1267  // The following rate_control modes require Windows 8.
1268  { "ld_vbr", "Low delay VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_LowDelayVBR}, 0, 0, VE, .unit = "rate_control"},
1269  { "g_vbr", "Global VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_GlobalVBR}, 0, 0, VE, .unit = "rate_control" },
1270  { "gld_vbr", "Global low delay VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_GlobalLowDelayVBR}, 0, 0, VE, .unit = "rate_control"},
1271 
1272  {"scenario", "Select usage scenario", OFFSET(opt_enc_scenario), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE, .unit = "scenario"},
1273  { "default", "Default scenario", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, .unit = "scenario"},
1274  { "display_remoting", "Display remoting", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_DisplayRemoting}, 0, 0, VE, .unit = "scenario"},
1275  { "video_conference", "Video conference", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_VideoConference}, 0, 0, VE, .unit = "scenario"},
1276  { "archive", "Archive", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_Archive}, 0, 0, VE, .unit = "scenario"},
1277  { "live_streaming", "Live streaming", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_LiveStreaming}, 0, 0, VE, .unit = "scenario"},
1278  { "camera_record", "Camera record", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_CameraRecord}, 0, 0, VE, .unit = "scenario"},
1279  { "display_remoting_with_feature_map", "Display remoting with feature map", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_DisplayRemotingWithFeatureMap}, 0, 0, VE, .unit = "scenario"},
1280 
1281  {"quality", "Quality", OFFSET(opt_enc_quality), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 100, VE},
1282  {"hw_encoding", "Force hardware encoding", OFFSET(opt_enc_hw), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, VE},
1283  {NULL}
1284 };
1285 
1286 #define VFMTS \
1287  .p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12, \
1288  AV_PIX_FMT_YUV420P, \
1289  AV_PIX_FMT_NONE },
1290 #define VCAPS \
1291  .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID | \
1292  AV_CODEC_CAP_DR1,
1293 
1294 MF_ENCODER(VIDEO, h264, H264, venc_opts, VFMTS, VCAPS);
1295 MF_ENCODER(VIDEO, hevc, HEVC, venc_opts, VFMTS, VCAPS);
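// Typical usage, assuming a Windows build with MediaFoundation enabled:
//   ffmpeg -i in.mp4 -c:v h264_mf -b:v 4M -rate_control cbr out.mp4
// Adding -hw_encoding 1 requests a hardware MFT, which also enables the
// asynchronous code path above.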