/*
 * FFmpeg — mfenc.c: Windows Media Foundation Transform (MFT) encoder wrapper.
 */
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #define COBJMACROS
20 #if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602
21 #undef _WIN32_WINNT
22 #define _WIN32_WINNT 0x0602
23 #endif
24 
25 #include "encode.h"
26 #include "mf_utils.h"
27 #include "libavutil/imgutils.h"
28 #include "libavutil/mem.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/time.h"
31 #include "codec_internal.h"
32 #include "internal.h"
33 #include "compat/w32dlfcn.h"
34 
35 typedef struct MFContext {
37  HMODULE library;
42  IMFTransform *mft;
43  IMFMediaEventGenerator *async_events;
45  MFT_INPUT_STREAM_INFO in_info;
46  MFT_OUTPUT_STREAM_INFO out_info;
52  ICodecAPI *codec_api;
53  // set by AVOption
58 } MFContext;
59 
// Forward declarations: mf_receive_sample() renegotiates the output type and
// re-queries the context on MF_E_TRANSFORM_STREAM_CHANGE, so both are needed
// before their definitions.
static int mf_choose_output_type(AVCodecContext *avctx);
static int mf_setup_context(AVCodecContext *avctx);

// Media Foundation timestamps are expressed in 100-nanosecond units.
#define MF_TIMEBASE (AVRational){1, 10000000}
// Sentinel value only used by us.
#define MF_INVALID_TIME AV_NOPTS_VALUE
66 
67 static int mf_wait_events(AVCodecContext *avctx)
68 {
69  MFContext *c = avctx->priv_data;
70 
71  if (!c->async_events)
72  return 0;
73 
74  while (!(c->async_need_input || c->async_have_output || c->draining_done || c->async_marker)) {
75  IMFMediaEvent *ev = NULL;
76  MediaEventType ev_id = 0;
77  HRESULT hr = IMFMediaEventGenerator_GetEvent(c->async_events, 0, &ev);
78  if (FAILED(hr)) {
79  av_log(avctx, AV_LOG_ERROR, "IMFMediaEventGenerator_GetEvent() failed: %s\n",
80  ff_hr_str(hr));
81  return AVERROR_EXTERNAL;
82  }
83  IMFMediaEvent_GetType(ev, &ev_id);
84  switch (ev_id) {
86  if (!c->draining)
87  c->async_need_input = 1;
88  break;
90  c->async_have_output = 1;
91  break;
93  c->draining_done = 1;
94  break;
96  c->async_marker = 1;
97  break;
98  default: ;
99  }
100  IMFMediaEvent_Release(ev);
101  }
102 
103  return 0;
104 }
105 
107 {
108  if (avctx->time_base.num > 0 && avctx->time_base.den > 0)
109  return avctx->time_base;
110  return MF_TIMEBASE;
111 }
112 
113 static LONGLONG mf_to_mf_time(AVCodecContext *avctx, int64_t av_pts)
114 {
115  if (av_pts == AV_NOPTS_VALUE)
116  return MF_INVALID_TIME;
117  return av_rescale_q(av_pts, mf_get_tb(avctx), MF_TIMEBASE);
118 }
119 
120 static void mf_sample_set_pts(AVCodecContext *avctx, IMFSample *sample, int64_t av_pts)
121 {
122  LONGLONG stime = mf_to_mf_time(avctx, av_pts);
123  if (stime != MF_INVALID_TIME)
124  IMFSample_SetSampleTime(sample, stime);
125 }
126 
127 static int64_t mf_from_mf_time(AVCodecContext *avctx, LONGLONG stime)
128 {
129  return av_rescale_q(stime, MF_TIMEBASE, mf_get_tb(avctx));
130 }
131 
132 static int64_t mf_sample_get_pts(AVCodecContext *avctx, IMFSample *sample)
133 {
134  LONGLONG pts;
135  HRESULT hr = IMFSample_GetSampleTime(sample, &pts);
136  if (FAILED(hr))
137  return AV_NOPTS_VALUE;
138  return mf_from_mf_time(avctx, pts);
139 }
140 
141 static int mf_enca_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
142 {
143  MFContext *c = avctx->priv_data;
144  HRESULT hr;
145  UINT32 sz;
146 
147  if (avctx->codec_id != AV_CODEC_ID_MP3 && avctx->codec_id != AV_CODEC_ID_AC3) {
148  hr = IMFAttributes_GetBlobSize(type, &MF_MT_USER_DATA, &sz);
149  if (!FAILED(hr) && sz > 0) {
151  if (!avctx->extradata)
152  return AVERROR(ENOMEM);
153  avctx->extradata_size = sz;
154  hr = IMFAttributes_GetBlob(type, &MF_MT_USER_DATA, avctx->extradata, sz, NULL);
155  if (FAILED(hr))
156  return AVERROR_EXTERNAL;
157 
158  if (avctx->codec_id == AV_CODEC_ID_AAC && avctx->extradata_size >= 12) {
159  // Get rid of HEAACWAVEINFO (after wfx field, 12 bytes).
160  avctx->extradata_size = avctx->extradata_size - 12;
161  memmove(avctx->extradata, avctx->extradata + 12, avctx->extradata_size);
162  }
163  }
164  }
165 
166  // I don't know where it's documented that we need this. It happens with the
167  // MS mp3 encoder MFT. The idea for the workaround is taken from NAudio.
168  // (Certainly any lossy codec will have frames much smaller than 1 second.)
169  if (!c->out_info.cbSize && !c->out_stream_provides_samples) {
170  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &sz);
171  if (!FAILED(hr)) {
172  av_log(avctx, AV_LOG_VERBOSE, "MFT_OUTPUT_STREAM_INFO.cbSize set to 0, "
173  "assuming %d bytes instead.\n", (int)sz);
174  c->out_info.cbSize = sz;
175  }
176  }
177 
178  return 0;
179 }
180 
181 static int mf_encv_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
182 {
183  HRESULT hr;
184  UINT32 sz;
185 
186  hr = IMFAttributes_GetBlobSize(type, &MF_MT_MPEG_SEQUENCE_HEADER, &sz);
187  if (!FAILED(hr) && sz > 0) {
188  uint8_t *extradata = av_mallocz(sz + AV_INPUT_BUFFER_PADDING_SIZE);
189  if (!extradata)
190  return AVERROR(ENOMEM);
191  hr = IMFAttributes_GetBlob(type, &MF_MT_MPEG_SEQUENCE_HEADER, extradata, sz, NULL);
192  if (FAILED(hr)) {
193  av_free(extradata);
194  return AVERROR_EXTERNAL;
195  }
196  av_freep(&avctx->extradata);
197  avctx->extradata = extradata;
198  avctx->extradata_size = sz;
199  }
200 
201  return 0;
202 }
203 
205 {
206  MFContext *c = avctx->priv_data;
207  HRESULT hr;
208  IMFMediaType *type;
209  int ret;
210 
211  hr = IMFTransform_GetOutputCurrentType(c->mft, c->out_stream_id, &type);
212  if (FAILED(hr)) {
213  av_log(avctx, AV_LOG_ERROR, "could not get output type\n");
214  return AVERROR_EXTERNAL;
215  }
216 
217  av_log(avctx, AV_LOG_VERBOSE, "final output type:\n");
218  ff_media_type_dump(avctx, type);
219 
220  ret = 0;
221  if (c->is_video) {
222  ret = mf_encv_output_type_get(avctx, type);
223  } else if (c->is_audio) {
224  ret = mf_enca_output_type_get(avctx, type);
225  }
226 
227  if (ret < 0)
228  av_log(avctx, AV_LOG_ERROR, "output type not supported\n");
229 
230  IMFMediaType_Release(type);
231  return ret;
232 }
233 
// Copy an encoded IMFSample into a freshly allocated AVPacket: payload,
// pts/dts, keyframe flag, and the Qualcomm dts-shift workaround.
static int mf_sample_to_avpacket(AVCodecContext *avctx, IMFSample *sample, AVPacket *avpkt)
{
    MFContext *c = avctx->priv_data;
    HRESULT hr;
    int ret;
    DWORD len;
    IMFMediaBuffer *buffer;
    BYTE *data;
    UINT64 t;
    UINT32 t32;

    hr = IMFSample_GetTotalLength(sample, &len);
    if (FAILED(hr))
        return AVERROR_EXTERNAL;

    if ((ret = ff_get_encode_buffer(avctx, avpkt, len, 0)) < 0)
        return ret;

    // Merge the sample's (possibly discontiguous) buffers into one and lock
    // it to obtain a readable pointer.
    hr = IMFSample_ConvertToContiguousBuffer(sample, &buffer);
    if (FAILED(hr))
        return AVERROR_EXTERNAL;

    hr = IMFMediaBuffer_Lock(buffer, &data, NULL, NULL);
    if (FAILED(hr)) {
        IMFMediaBuffer_Release(buffer);
        return AVERROR_EXTERNAL;
    }

    memcpy(avpkt->data, data, len);

    IMFMediaBuffer_Unlock(buffer);
    IMFMediaBuffer_Release(buffer);

    avpkt->pts = avpkt->dts = mf_sample_get_pts(avctx, sample);

    // Audio packets are always key frames; for video trust the CleanPoint
    // attribute when the MFT sets it.
    hr = IMFAttributes_GetUINT32(sample, &MFSampleExtension_CleanPoint, &t32);
    if (c->is_audio || (!FAILED(hr) && t32 != 0))
        avpkt->flags |= AV_PKT_FLAG_KEY;

    hr = IMFAttributes_GetUINT64(sample, &MFSampleExtension_DecodeTimestamp, &t);
    if (!FAILED(hr)) {
        avpkt->dts = mf_from_mf_time(avctx, t);
        // At least on Qualcomm's HEVC encoder on SD 835, the output dts
        // starts from the input pts of the first frame, while the output pts
        // is shifted forward. Therefore, shift the output values back so that
        // the output pts matches the input.
        if (c->reorder_delay == AV_NOPTS_VALUE)
            c->reorder_delay = avpkt->pts - avpkt->dts;
        avpkt->dts -= c->reorder_delay;
        avpkt->pts -= c->reorder_delay;
    }

    return 0;
}
288 
289 static IMFSample *mf_a_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
290 {
291  MFContext *c = avctx->priv_data;
292  size_t len;
293  size_t bps;
294  IMFSample *sample;
295 
297  len = frame->nb_samples * bps;
298 
299  sample = ff_create_memory_sample(&c->functions, frame->data[0], len,
300  c->in_info.cbAlignment);
301  if (sample)
302  IMFSample_SetSampleDuration(sample, mf_to_mf_time(avctx, frame->nb_samples));
303  return sample;
304 }
305 
// Copy a video frame into a newly allocated IMFSample, packing the planes
// into one contiguous buffer. Returns NULL on any failure (all COM objects
// are released on the error paths).
static IMFSample *mf_v_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
{
    MFContext *c = avctx->priv_data;
    IMFSample *sample;
    IMFMediaBuffer *buffer;
    BYTE *data;
    HRESULT hr;
    int ret;
    int size;

    size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width, avctx->height, 1);
    if (size < 0)
        return NULL;

    sample = ff_create_memory_sample(&c->functions, NULL, size,
                                     c->in_info.cbAlignment);
    if (!sample)
        return NULL;

    hr = IMFSample_GetBufferByIndex(sample, 0, &buffer);
    if (FAILED(hr)) {
        IMFSample_Release(sample);
        return NULL;
    }

    hr = IMFMediaBuffer_Lock(buffer, &data, NULL, NULL);
    if (FAILED(hr)) {
        IMFMediaBuffer_Release(buffer);
        IMFSample_Release(sample);
        return NULL;
    }

    // Pack all planes into the locked buffer with 1-byte alignment.
    ret = av_image_copy_to_buffer((uint8_t *)data, size, (void *)frame->data, frame->linesize,
                                  avctx->pix_fmt, avctx->width, avctx->height, 1);
    IMFMediaBuffer_SetCurrentLength(buffer, size);
    IMFMediaBuffer_Unlock(buffer);
    IMFMediaBuffer_Release(buffer);
    if (ret < 0) {
        IMFSample_Release(sample);
        return NULL;
    }

    IMFSample_SetSampleDuration(sample, mf_to_mf_time(avctx, frame->duration));

    return sample;
}
352 
353 static IMFSample *mf_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
354 {
355  MFContext *c = avctx->priv_data;
356  IMFSample *sample;
357 
358  if (c->is_audio) {
360  } else {
362  }
363 
364  if (sample)
365  mf_sample_set_pts(avctx, sample, frame->pts);
366 
367  return sample;
368 }
369 
// Submit one input sample to the MFT, or (sample == NULL) issue the drain
// command. Returns AVERROR(EAGAIN) when the MFT cannot accept input yet,
// AVERROR_EOF once draining has already been started.
static int mf_send_sample(AVCodecContext *avctx, IMFSample *sample)
{
    MFContext *c = avctx->priv_data;
    HRESULT hr;
    int ret;

    if (sample) {
        if (c->async_events) {
            // Async MFTs accept input only after signaling need-input.
            if ((ret = mf_wait_events(avctx)) < 0)
                return ret;
            if (!c->async_need_input)
                return AVERROR(EAGAIN);
        }
        // Mark the very first sample as a discontinuity (stream start).
        if (!c->sample_sent)
            IMFSample_SetUINT32(sample, &MFSampleExtension_Discontinuity, TRUE);
        c->sample_sent = 1;
        hr = IMFTransform_ProcessInput(c->mft, c->in_stream_id, sample, 0);
        if (hr == MF_E_NOTACCEPTING) {
            return AVERROR(EAGAIN);
        } else if (FAILED(hr)) {
            av_log(avctx, AV_LOG_ERROR, "failed processing input: %s\n", ff_hr_str(hr));
            return AVERROR_EXTERNAL;
        }
        c->async_need_input = 0;
    } else if (!c->draining) {
        hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_COMMAND_DRAIN, 0);
        if (FAILED(hr))
            av_log(avctx, AV_LOG_ERROR, "failed draining: %s\n", ff_hr_str(hr));
        // Some MFTs (AC3) will send a frame after each drain command (???), so
        // this is required to make draining actually terminate.
        c->draining = 1;
        c->async_need_input = 0;
    } else {
        return AVERROR_EOF;
    }
    return 0;
}
407 
408 static int mf_receive_sample(AVCodecContext *avctx, IMFSample **out_sample)
409 {
410  MFContext *c = avctx->priv_data;
411  HRESULT hr;
412  DWORD st;
413  MFT_OUTPUT_DATA_BUFFER out_buffers;
414  IMFSample *sample;
415  int ret = 0;
416 
417  while (1) {
418  *out_sample = NULL;
419  sample = NULL;
420 
421  if (c->async_events) {
422  if ((ret = mf_wait_events(avctx)) < 0)
423  return ret;
424  if (!c->async_have_output || c->draining_done) {
425  ret = 0;
426  break;
427  }
428  }
429 
430  if (!c->out_stream_provides_samples) {
431  sample = ff_create_memory_sample(&c->functions, NULL,
432  c->out_info.cbSize,
433  c->out_info.cbAlignment);
434  if (!sample)
435  return AVERROR(ENOMEM);
436  }
437 
438  out_buffers = (MFT_OUTPUT_DATA_BUFFER) {
439  .dwStreamID = c->out_stream_id,
440  .pSample = sample,
441  };
442 
443  st = 0;
444  hr = IMFTransform_ProcessOutput(c->mft, 0, 1, &out_buffers, &st);
445 
446  if (out_buffers.pEvents)
447  IMFCollection_Release(out_buffers.pEvents);
448 
449  if (!FAILED(hr)) {
450  *out_sample = out_buffers.pSample;
451  ret = 0;
452  break;
453  }
454 
455  if (out_buffers.pSample)
456  IMFSample_Release(out_buffers.pSample);
457 
458  if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
459  if (c->draining)
460  c->draining_done = 1;
461  ret = 0;
462  } else if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
463  av_log(avctx, AV_LOG_WARNING, "stream format change\n");
464  ret = mf_choose_output_type(avctx);
465  if (ret == 0) // we don't expect renegotiating the input type
467  if (ret > 0) {
468  ret = mf_setup_context(avctx);
469  if (ret >= 0) {
470  c->async_have_output = 0;
471  continue;
472  }
473  }
474  } else {
475  av_log(avctx, AV_LOG_ERROR, "failed processing output: %s\n", ff_hr_str(hr));
477  }
478 
479  break;
480  }
481 
482  c->async_have_output = 0;
483 
484  if (ret >= 0 && !*out_sample)
485  ret = c->draining_done ? AVERROR_EOF : AVERROR(EAGAIN);
486 
487  return ret;
488 }
489 
// FFmpeg receive_packet callback: pull the next input frame, convert and
// submit it to the MFT, then try to retrieve one encoded packet.
static int mf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    MFContext *c = avctx->priv_data;
    IMFSample *sample = NULL;
    int ret;

    if (!c->frame->buf[0]) {
        ret = ff_encode_get_frame(avctx, c->frame);
        // AVERROR_EOF falls through: a NULL sample below starts draining.
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    if (c->frame->buf[0]) {
        sample = mf_avframe_to_sample(avctx, c->frame);
        if (!sample) {
            av_frame_unref(c->frame);
            return AVERROR(ENOMEM);
        }
        if (c->is_video && c->codec_api) {
            // Force a keyframe for explicit I-frame requests and for the
            // very first submitted frame.
            if (c->frame->pict_type == AV_PICTURE_TYPE_I || !c->sample_sent)
                ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncVideoForceKeyFrame, FF_VAL_VT_UI4(1));
        }
    }

    ret = mf_send_sample(avctx, sample);
    if (sample)
        IMFSample_Release(sample);
    // Keep the frame buffered on EAGAIN so it can be resubmitted later.
    if (ret != AVERROR(EAGAIN))
        av_frame_unref(c->frame);
    if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
        return ret;

    ret = mf_receive_sample(avctx, &sample);
    if (ret < 0)
        return ret;

    ret = mf_sample_to_avpacket(avctx, sample, avpkt);
    IMFSample_Release(sample);

    return ret;
}
531 
532 // Most encoders seem to enumerate supported audio formats on the output types,
533 // at least as far as channel configuration and sample rate is concerned. Pick
534 // the one which seems to match best.
535 static int64_t mf_enca_output_score(AVCodecContext *avctx, IMFMediaType *type)
536 {
537  MFContext *c = avctx->priv_data;
538  HRESULT hr;
539  UINT32 t;
540  GUID tg;
541  int64_t score = 0;
542 
543  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
544  if (!FAILED(hr) && t == avctx->sample_rate)
545  score |= 1LL << 32;
546 
547  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
548  if (!FAILED(hr) && t == avctx->ch_layout.nb_channels)
549  score |= 2LL << 32;
550 
551  hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
552  if (!FAILED(hr)) {
553  if (IsEqualGUID(&c->main_subtype, &tg))
554  score |= 4LL << 32;
555  }
556 
557  // Select the bitrate (lowest priority).
558  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &t);
559  if (!FAILED(hr)) {
560  int diff = (int)t - avctx->bit_rate / 8;
561  if (diff >= 0) {
562  score |= (1LL << 31) - diff; // prefer lower bitrate
563  } else {
564  score |= (1LL << 30) + diff; // prefer higher bitrate
565  }
566  }
567 
568  hr = IMFAttributes_GetUINT32(type, &MF_MT_AAC_PAYLOAD_TYPE, &t);
569  if (!FAILED(hr) && t != 0)
570  return -1;
571 
572  return score;
573 }
574 
// Intentionally performs no adjustment on audio output types; see below.
static int mf_enca_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
{
    // (some decoders allow adjusting this freely, but it can also cause failure
    // to set the output type - so it's commented for being too fragile)
    //IMFAttributes_SetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, avctx->bit_rate / 8);
    //IMFAttributes_SetUINT32(type, &MF_MT_AVG_BITRATE, avctx->bit_rate);

    return 0;
}
584 
585 static int64_t mf_enca_input_score(AVCodecContext *avctx, IMFMediaType *type)
586 {
587  HRESULT hr;
588  UINT32 t;
589  int64_t score = 0;
590 
591  enum AVSampleFormat sformat = ff_media_type_to_sample_fmt((IMFAttributes *)type);
592  if (sformat == AV_SAMPLE_FMT_NONE)
593  return -1; // can not use
594 
595  if (sformat == avctx->sample_fmt)
596  score |= 1;
597 
598  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
599  if (!FAILED(hr) && t == avctx->sample_rate)
600  score |= 2;
601 
602  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
603  if (!FAILED(hr) && t == avctx->ch_layout.nb_channels)
604  score |= 4;
605 
606  return score;
607 }
608 
609 static int mf_enca_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
610 {
611  HRESULT hr;
612  UINT32 t;
613 
614  enum AVSampleFormat sformat = ff_media_type_to_sample_fmt((IMFAttributes *)type);
615  if (sformat != avctx->sample_fmt) {
616  av_log(avctx, AV_LOG_ERROR, "unsupported input sample format set\n");
617  return AVERROR(EINVAL);
618  }
619 
620  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
621  if (FAILED(hr) || t != avctx->sample_rate) {
622  av_log(avctx, AV_LOG_ERROR, "unsupported input sample rate set\n");
623  return AVERROR(EINVAL);
624  }
625 
626  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
627  if (FAILED(hr) || t != avctx->ch_layout.nb_channels) {
628  av_log(avctx, AV_LOG_ERROR, "unsupported input channel number set\n");
629  return AVERROR(EINVAL);
630  }
631 
632  return 0;
633 }
634 
635 static int64_t mf_encv_output_score(AVCodecContext *avctx, IMFMediaType *type)
636 {
637  MFContext *c = avctx->priv_data;
638  GUID tg;
639  HRESULT hr;
640  int score = -1;
641 
642  hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
643  if (!FAILED(hr)) {
644  if (IsEqualGUID(&c->main_subtype, &tg))
645  score = 1;
646  }
647 
648  return score;
649 }
650 
651 static int mf_encv_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
652 {
653  MFContext *c = avctx->priv_data;
655 
656  ff_MFSetAttributeSize((IMFAttributes *)type, &MF_MT_FRAME_SIZE, avctx->width, avctx->height);
657  IMFAttributes_SetUINT32(type, &MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
658 
659  if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
660  framerate = avctx->framerate;
661  } else {
662  framerate = av_inv_q(avctx->time_base);
663  }
664 
665  ff_MFSetAttributeRatio((IMFAttributes *)type, &MF_MT_FRAME_RATE, framerate.num, framerate.den);
666 
667  // (MS HEVC supports eAVEncH265VProfile_Main_420_8 only.)
668  if (avctx->codec_id == AV_CODEC_ID_H264) {
670  switch (avctx->profile) {
673  break;
676  break;
677  }
678  IMFAttributes_SetUINT32(type, &MF_MT_MPEG2_PROFILE, profile);
679  }
680 
681  IMFAttributes_SetUINT32(type, &MF_MT_AVG_BITRATE, avctx->bit_rate);
682 
683  // Note that some of the ICodecAPI options must be set before SetOutputType.
684  if (c->codec_api) {
685  if (avctx->bit_rate)
686  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonMeanBitRate, FF_VAL_VT_UI4(avctx->bit_rate));
687 
688  if (c->opt_enc_rc >= 0)
689  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonRateControlMode, FF_VAL_VT_UI4(c->opt_enc_rc));
690 
691  if (c->opt_enc_quality >= 0)
692  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonQuality, FF_VAL_VT_UI4(c->opt_enc_quality));
693 
694  if (avctx->rc_max_rate > 0)
695  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonMaxBitRate, FF_VAL_VT_UI4(avctx->rc_max_rate));
696 
697  if (avctx->gop_size > 0)
698  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncMPVGOPSize, FF_VAL_VT_UI4(avctx->gop_size));
699 
700  if(avctx->rc_buffer_size > 0)
701  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonBufferSize, FF_VAL_VT_UI4(avctx->rc_buffer_size));
702 
703  if(avctx->compression_level >= 0)
704  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonQualityVsSpeed, FF_VAL_VT_UI4(avctx->compression_level));
705 
706  if(avctx->global_quality > 0)
707  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncVideoEncodeQP, FF_VAL_VT_UI4(avctx->global_quality ));
708 
709  // Always set the number of b-frames. Qualcomm's HEVC encoder on SD835
710  // defaults this to 1, and that setting is buggy with many of the
711  // rate control modes. (0 or 2 b-frames works fine with most rate
712  // control modes, but 2 seems buggy with the u_vbr mode.) Setting
713  // "scenario" to "camera_record" sets it in CFR mode (where the default
714  // is VFR), which makes the encoder avoid dropping frames.
715  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncMPVDefaultBPictureCount, FF_VAL_VT_UI4(avctx->max_b_frames));
716  avctx->has_b_frames = avctx->max_b_frames > 0;
717 
718  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncH264CABACEnable, FF_VAL_VT_BOOL(1));
719 
720  if (c->opt_enc_scenario >= 0)
721  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVScenarioInfo, FF_VAL_VT_UI4(c->opt_enc_scenario));
722  }
723 
724  return 0;
725 }
726 
727 static int64_t mf_encv_input_score(AVCodecContext *avctx, IMFMediaType *type)
728 {
729  enum AVPixelFormat pix_fmt = ff_media_type_to_pix_fmt((IMFAttributes *)type);
730  if (pix_fmt != avctx->pix_fmt)
731  return -1; // can not use
732 
733  return 0;
734 }
735 
736 static int mf_encv_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
737 {
738  enum AVPixelFormat pix_fmt = ff_media_type_to_pix_fmt((IMFAttributes *)type);
739  if (pix_fmt != avctx->pix_fmt) {
740  av_log(avctx, AV_LOG_ERROR, "unsupported input pixel format set\n");
741  return AVERROR(EINVAL);
742  }
743 
744  //ff_MFSetAttributeSize((IMFAttributes *)type, &MF_MT_FRAME_SIZE, avctx->width, avctx->height);
745 
746  return 0;
747 }
748 
750 {
751  MFContext *c = avctx->priv_data;
752  HRESULT hr;
753  int ret;
754  IMFMediaType *out_type = NULL;
755  int64_t out_type_score = -1;
756  int out_type_index = -1;
757  int n;
758 
759  av_log(avctx, AV_LOG_VERBOSE, "output types:\n");
760  for (n = 0; ; n++) {
761  IMFMediaType *type;
762  int64_t score = -1;
763 
764  hr = IMFTransform_GetOutputAvailableType(c->mft, c->out_stream_id, n, &type);
765  if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
766  break;
767  if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
768  av_log(avctx, AV_LOG_VERBOSE, "(need to set input type)\n");
769  ret = 0;
770  goto done;
771  }
772  if (FAILED(hr)) {
773  av_log(avctx, AV_LOG_ERROR, "error getting output type: %s\n", ff_hr_str(hr));
775  goto done;
776  }
777 
778  av_log(avctx, AV_LOG_VERBOSE, "output type %d:\n", n);
779  ff_media_type_dump(avctx, type);
780 
781  if (c->is_video) {
782  score = mf_encv_output_score(avctx, type);
783  } else if (c->is_audio) {
784  score = mf_enca_output_score(avctx, type);
785  }
786 
787  if (score > out_type_score) {
788  if (out_type)
789  IMFMediaType_Release(out_type);
790  out_type = type;
791  out_type_score = score;
792  out_type_index = n;
793  IMFMediaType_AddRef(out_type);
794  }
795 
796  IMFMediaType_Release(type);
797  }
798 
799  if (out_type) {
800  av_log(avctx, AV_LOG_VERBOSE, "picking output type %d.\n", out_type_index);
801  } else {
802  hr = c->functions.MFCreateMediaType(&out_type);
803  if (FAILED(hr)) {
804  ret = AVERROR(ENOMEM);
805  goto done;
806  }
807  }
808 
809  ret = 0;
810  if (c->is_video) {
811  ret = mf_encv_output_adjust(avctx, out_type);
812  } else if (c->is_audio) {
813  ret = mf_enca_output_adjust(avctx, out_type);
814  }
815 
816  if (ret >= 0) {
817  av_log(avctx, AV_LOG_VERBOSE, "setting output type:\n");
818  ff_media_type_dump(avctx, out_type);
819 
820  hr = IMFTransform_SetOutputType(c->mft, c->out_stream_id, out_type, 0);
821  if (!FAILED(hr)) {
822  ret = 1;
823  } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
824  av_log(avctx, AV_LOG_VERBOSE, "rejected - need to set input type\n");
825  ret = 0;
826  } else {
827  av_log(avctx, AV_LOG_ERROR, "could not set output type (%s)\n", ff_hr_str(hr));
829  }
830  }
831 
832 done:
833  if (out_type)
834  IMFMediaType_Release(out_type);
835  return ret;
836 }
837 
839 {
840  MFContext *c = avctx->priv_data;
841  HRESULT hr;
842  int ret;
843  IMFMediaType *in_type = NULL;
844  int64_t in_type_score = -1;
845  int in_type_index = -1;
846  int n;
847 
848  av_log(avctx, AV_LOG_VERBOSE, "input types:\n");
849  for (n = 0; ; n++) {
850  IMFMediaType *type = NULL;
851  int64_t score = -1;
852 
853  hr = IMFTransform_GetInputAvailableType(c->mft, c->in_stream_id, n, &type);
854  if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
855  break;
856  if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
857  av_log(avctx, AV_LOG_VERBOSE, "(need to set output type 1)\n");
858  ret = 0;
859  goto done;
860  }
861  if (FAILED(hr)) {
862  av_log(avctx, AV_LOG_ERROR, "error getting input type: %s\n", ff_hr_str(hr));
864  goto done;
865  }
866 
867  av_log(avctx, AV_LOG_VERBOSE, "input type %d:\n", n);
868  ff_media_type_dump(avctx, type);
869 
870  if (c->is_video) {
871  score = mf_encv_input_score(avctx, type);
872  } else if (c->is_audio) {
873  score = mf_enca_input_score(avctx, type);
874  }
875 
876  if (score > in_type_score) {
877  if (in_type)
878  IMFMediaType_Release(in_type);
879  in_type = type;
880  in_type_score = score;
881  in_type_index = n;
882  IMFMediaType_AddRef(in_type);
883  }
884 
885  IMFMediaType_Release(type);
886  }
887 
888  if (in_type) {
889  av_log(avctx, AV_LOG_VERBOSE, "picking input type %d.\n", in_type_index);
890  } else {
891  // Some buggy MFTs (WMA encoder) fail to return MF_E_TRANSFORM_TYPE_NOT_SET.
892  av_log(avctx, AV_LOG_VERBOSE, "(need to set output type 2)\n");
893  ret = 0;
894  goto done;
895  }
896 
897  ret = 0;
898  if (c->is_video) {
899  ret = mf_encv_input_adjust(avctx, in_type);
900  } else if (c->is_audio) {
901  ret = mf_enca_input_adjust(avctx, in_type);
902  }
903 
904  if (ret >= 0) {
905  av_log(avctx, AV_LOG_VERBOSE, "setting input type:\n");
906  ff_media_type_dump(avctx, in_type);
907 
908  hr = IMFTransform_SetInputType(c->mft, c->in_stream_id, in_type, 0);
909  if (!FAILED(hr)) {
910  ret = 1;
911  } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
912  av_log(avctx, AV_LOG_VERBOSE, "rejected - need to set output type\n");
913  ret = 0;
914  } else {
915  av_log(avctx, AV_LOG_ERROR, "could not set input type (%s)\n", ff_hr_str(hr));
917  }
918  }
919 
920 done:
921  if (in_type)
922  IMFMediaType_Release(in_type);
923  return ret;
924 }
925 
927 {
928  // This follows steps 1-5 on:
929  // https://msdn.microsoft.com/en-us/library/windows/desktop/aa965264(v=vs.85).aspx
930  // If every MFT implementer does this correctly, this loop should at worst
931  // be repeated once.
932  int need_input = 1, need_output = 1;
933  int n;
934  for (n = 0; n < 2 && (need_input || need_output); n++) {
935  int ret;
936  ret = mf_choose_input_type(avctx);
937  if (ret < 0)
938  return ret;
939  need_input = ret < 1;
940  ret = mf_choose_output_type(avctx);
941  if (ret < 0)
942  return ret;
943  need_output = ret < 1;
944  }
945  if (need_input || need_output) {
946  av_log(avctx, AV_LOG_ERROR, "format negotiation failed (%d/%d)\n",
947  need_input, need_output);
948  return AVERROR_EXTERNAL;
949  }
950  return 0;
951 }
952 
954 {
955  MFContext *c = avctx->priv_data;
956  HRESULT hr;
957  int ret;
958 
959  hr = IMFTransform_GetInputStreamInfo(c->mft, c->in_stream_id, &c->in_info);
960  if (FAILED(hr))
961  return AVERROR_EXTERNAL;
962  av_log(avctx, AV_LOG_VERBOSE, "in_info: size=%d, align=%d\n",
963  (int)c->in_info.cbSize, (int)c->in_info.cbAlignment);
964 
965  hr = IMFTransform_GetOutputStreamInfo(c->mft, c->out_stream_id, &c->out_info);
966  if (FAILED(hr))
967  return AVERROR_EXTERNAL;
968  c->out_stream_provides_samples =
969  (c->out_info.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES) ||
970  (c->out_info.dwFlags & MFT_OUTPUT_STREAM_CAN_PROVIDE_SAMPLES);
971  av_log(avctx, AV_LOG_VERBOSE, "out_info: size=%d, align=%d%s\n",
972  (int)c->out_info.cbSize, (int)c->out_info.cbAlignment,
973  c->out_stream_provides_samples ? " (provides samples)" : "");
974 
975  if ((ret = mf_output_type_get(avctx)) < 0)
976  return ret;
977 
978  return 0;
979 }
980 
981 static int mf_unlock_async(AVCodecContext *avctx)
982 {
983  MFContext *c = avctx->priv_data;
984  HRESULT hr;
985  IMFAttributes *attrs;
986  UINT32 v;
987  int res = AVERROR_EXTERNAL;
988 
989  // For hw encoding we unfortunately need to use async mode, otherwise
990  // play it safe and avoid it.
991  if (!(c->is_video && c->opt_enc_hw))
992  return 0;
993 
994  hr = IMFTransform_GetAttributes(c->mft, &attrs);
995  if (FAILED(hr)) {
996  av_log(avctx, AV_LOG_ERROR, "error retrieving MFT attributes: %s\n", ff_hr_str(hr));
997  goto err;
998  }
999 
1000  hr = IMFAttributes_GetUINT32(attrs, &MF_TRANSFORM_ASYNC, &v);
1001  if (FAILED(hr)) {
1002  av_log(avctx, AV_LOG_ERROR, "error querying async: %s\n", ff_hr_str(hr));
1003  goto err;
1004  }
1005 
1006  if (!v) {
1007  av_log(avctx, AV_LOG_ERROR, "hardware MFT is not async\n");
1008  goto err;
1009  }
1010 
1011  hr = IMFAttributes_SetUINT32(attrs, &MF_TRANSFORM_ASYNC_UNLOCK, TRUE);
1012  if (FAILED(hr)) {
1013  av_log(avctx, AV_LOG_ERROR, "could not set async unlock: %s\n", ff_hr_str(hr));
1014  goto err;
1015  }
1016 
1017  hr = IMFTransform_QueryInterface(c->mft, &IID_IMFMediaEventGenerator, (void **)&c->async_events);
1018  if (FAILED(hr)) {
1019  av_log(avctx, AV_LOG_ERROR, "could not get async interface\n");
1020  goto err;
1021  }
1022 
1023  res = 0;
1024 
1025 err:
1026  IMFAttributes_Release(attrs);
1027  return res;
1028 }
1029 
1030 static int mf_create(void *log, MFFunctions *f, IMFTransform **mft,
1031  const AVCodec *codec, int use_hw)
1032 {
1033  int is_audio = codec->type == AVMEDIA_TYPE_AUDIO;
1034  const CLSID *subtype = ff_codec_to_mf_subtype(codec->id);
1035  MFT_REGISTER_TYPE_INFO reg = {0};
1036  GUID category;
1037  int ret;
1038 
1039  *mft = NULL;
1040 
1041  if (!subtype)
1042  return AVERROR(ENOSYS);
1043 
1044  reg.guidSubtype = *subtype;
1045 
1046  if (is_audio) {
1047  reg.guidMajorType = MFMediaType_Audio;
1048  category = MFT_CATEGORY_AUDIO_ENCODER;
1049  } else {
1050  reg.guidMajorType = MFMediaType_Video;
1051  category = MFT_CATEGORY_VIDEO_ENCODER;
1052  }
1053 
1054  if ((ret = ff_instantiate_mf(log, f, category, NULL, &reg, use_hw, mft)) < 0)
1055  return ret;
1056 
1057  return 0;
1058 }
1059 
{
    MFContext *c = avctx->priv_data;
    HRESULT hr;
    int ret;
    const CLSID *subtype = ff_codec_to_mf_subtype(avctx->codec_id);
    int use_hw = 0;

    // Scratch frame reused for each input (see mf_receive_packet path).
    c->frame = av_frame_alloc();
    if (!c->frame)
        return AVERROR(ENOMEM);

    c->is_audio = avctx->codec_type == AVMEDIA_TYPE_AUDIO;
    c->is_video = !c->is_audio;
    c->reorder_delay = AV_NOPTS_VALUE;

    // Hardware encoding is only attempted for video, and only if the user
    // asked for it via the hw_encoding option.
    if (c->is_video && c->opt_enc_hw)
        use_hw = 1;

    if (!subtype)
        return AVERROR(ENOSYS);

    c->main_subtype = *subtype;

    if ((ret = mf_create(avctx, &c->functions, &c->mft, avctx->codec, use_hw)) < 0)
        return ret;

    // Switch async (hardware) MFTs into unlocked mode and grab the event
    // generator; a no-op for synchronous software MFTs.
    if ((ret = mf_unlock_async(avctx)) < 0)
        return ret;

    // ICodecAPI is optional; without it the rate control/scenario/quality
    // options simply cannot be applied.
    hr = IMFTransform_QueryInterface(c->mft, &IID_ICodecAPI, (void **)&c->codec_api);
    if (!FAILED(hr))
        av_log(avctx, AV_LOG_VERBOSE, "MFT supports ICodecAPI.\n");


    hr = IMFTransform_GetStreamIDs(c->mft, 1, &c->in_stream_id, 1, &c->out_stream_id);
    if (hr == E_NOTIMPL) {
        // E_NOTIMPL means the MFT uses fixed stream identifiers (0/0).
        c->in_stream_id = c->out_stream_id = 0;
    } else if (FAILED(hr)) {
        av_log(avctx, AV_LOG_ERROR, "could not get stream IDs (%s)\n", ff_hr_str(hr));
        return AVERROR_EXTERNAL;
    }

    if ((ret = mf_negotiate_types(avctx)) < 0)
        return ret;

    if ((ret = mf_setup_context(avctx)) < 0)
        return ret;

    hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);
    if (FAILED(hr)) {
        av_log(avctx, AV_LOG_ERROR, "could not start streaming (%s)\n", ff_hr_str(hr));
        return AVERROR_EXTERNAL;
    }

    hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0);
    if (FAILED(hr)) {
        av_log(avctx, AV_LOG_ERROR, "could not start stream (%s)\n", ff_hr_str(hr));
        return AVERROR_EXTERNAL;
    }

    // Best-effort extradata retrieval for async video MFTs when the caller
    // requested global headers: poll with exponential backoff (10/20/40 ms).
    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER && c->async_events &&
        c->is_video && !avctx->extradata) {
        int sleep = 10000, total = 0;
        av_log(avctx, AV_LOG_VERBOSE, "Awaiting extradata\n");
        while (total < 70*1000) {
            // The Qualcomm H264 encoder on SD835 doesn't provide extradata
            // immediately, but it becomes available soon after init (without
            // any waitable event). In practice, it's available after less
            // than 10 ms, but wait for up to 70 ms before giving up.
            // Some encoders (Qualcomm's HEVC encoder on SD835, some versions
            // of the QSV H264 encoder at least) don't provide extradata this
            // way at all, not even after encoding a frame - it's only
            // available prepended to frames.
            av_usleep(sleep);
            total += sleep;
            mf_output_type_get(avctx);
            if (avctx->extradata)
                break;
            sleep *= 2;
        }
        av_log(avctx, AV_LOG_VERBOSE, "%s extradata in %d ms\n",
               avctx->extradata ? "Got" : "Didn't get", total / 1000);
    }

    return 0;
}
1147 
#if !HAVE_UWP
// Resolve one mfplat.dll export into the function table at runtime; on a
// missing export, log and make the enclosing function return AVERROR_UNKNOWN.
#define LOAD_MF_FUNCTION(context, func_name) \
    context->functions.func_name = (void *)dlsym(context->library, #func_name); \
    if (!context->functions.func_name) { \
        av_log(context, AV_LOG_ERROR, "DLL mfplat.dll failed to find function "\
               #func_name "\n"); \
        return AVERROR_UNKNOWN; \
    }
#else
// In UWP (which lacks LoadLibrary), just link directly against
// the functions - this requires building with new/complete enough
// import libraries.
#define LOAD_MF_FUNCTION(context, func_name) \
    context->functions.func_name = func_name; \
    if (!context->functions.func_name) { \
        av_log(context, AV_LOG_ERROR, "Failed to find function " #func_name \
               "\n"); \
        return AVERROR_UNKNOWN; \
    }
#endif
1168 
1169 // Windows N editions does not provide MediaFoundation by default.
1170 // So to avoid DLL loading error, MediaFoundation is dynamically loaded except
1171 // on UWP build since LoadLibrary is not available on it.
{
    MFContext *c = avctx->priv_data;

#if !HAVE_UWP
    c->library = dlopen("mfplat.dll", 0);

    if (!c->library) {
        av_log(c, AV_LOG_ERROR, "DLL mfplat.dll failed to open\n");
        return AVERROR_UNKNOWN;
    }
#endif

    // Each LOAD_MF_FUNCTION expansion may return AVERROR_UNKNOWN from this
    // function if the corresponding symbol cannot be resolved.
    LOAD_MF_FUNCTION(c, MFStartup);
    LOAD_MF_FUNCTION(c, MFShutdown);
    LOAD_MF_FUNCTION(c, MFCreateAlignedMemoryBuffer);
    LOAD_MF_FUNCTION(c, MFCreateSample);
    LOAD_MF_FUNCTION(c, MFCreateMediaType);
    // MFTEnumEx is missing in Windows Vista's mfplat.dll.
    LOAD_MF_FUNCTION(c, MFTEnumEx);

    return 0;
}
1195 
1196 static int mf_close(AVCodecContext *avctx)
1197 {
1198  MFContext *c = avctx->priv_data;
1199 
1200  if (c->codec_api)
1201  ICodecAPI_Release(c->codec_api);
1202 
1203  if (c->async_events)
1204  IMFMediaEventGenerator_Release(c->async_events);
1205 
1206 #if !HAVE_UWP
1207  if (c->library)
1208  ff_free_mf(&c->functions, &c->mft);
1209 
1210  dlclose(c->library);
1211  c->library = NULL;
1212 #else
1213  ff_free_mf(&c->functions, &c->mft);
1214 #endif
1215 
1216  av_frame_free(&c->frame);
1217 
1218  av_freep(&avctx->extradata);
1219  avctx->extradata_size = 0;
1220 
1221  return 0;
1222 }
1223 
1224 static av_cold int mf_init(AVCodecContext *avctx)
1225 {
1226  int ret;
1227  if ((ret = mf_load_library(avctx)) == 0) {
1228  if ((ret = mf_init_encoder(avctx)) == 0) {
1229  return 0;
1230  }
1231  }
1232  return ret;
1233 }
1234 
#define OFFSET(x) offsetof(MFContext, x)

// Expands to the full AVClass + FFCodec registration for one MediaFoundation
// encoder. MEDIATYPE is AUDIO or VIDEO, NAME the short codec name used in
// "<NAME>_mf", ID the AV_CODEC_ID_* suffix; OPTS/FMTS/CAPS/DEFAULTS plug in
// the per-media-type option table, formats, capabilities and defaults below.
#define MF_ENCODER(MEDIATYPE, NAME, ID, OPTS, FMTS, CAPS, DEFAULTS) \
    static const AVClass ff_ ## NAME ## _mf_encoder_class = { \
        .class_name = #NAME "_mf", \
        .item_name = av_default_item_name, \
        .option = OPTS, \
        .version = LIBAVUTIL_VERSION_INT, \
    }; \
    const FFCodec ff_ ## NAME ## _mf_encoder = { \
        .p.priv_class = &ff_ ## NAME ## _mf_encoder_class, \
        .p.name = #NAME "_mf", \
        CODEC_LONG_NAME(#ID " via MediaFoundation"), \
        .p.type = AVMEDIA_TYPE_ ## MEDIATYPE, \
        .p.id = AV_CODEC_ID_ ## ID, \
        .priv_data_size = sizeof(MFContext), \
        .init = mf_init, \
        .close = mf_close, \
        FF_CODEC_RECEIVE_PACKET_CB(mf_receive_packet), \
        FMTS \
        CAPS \
        .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, \
        .defaults = DEFAULTS, \
    };

// Audio encoders accept only interleaved s16 input.
#define AFMTS \
        CODEC_SAMPLEFMTS(AV_SAMPLE_FMT_S16),
#define ACAPS \
        .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID | \
                          AV_CODEC_CAP_DR1 | AV_CODEC_CAP_VARIABLE_FRAME_SIZE,

MF_ENCODER(AUDIO, aac, AAC, NULL, AFMTS, ACAPS, NULL);
MF_ENCODER(AUDIO, ac3, AC3, NULL, AFMTS, ACAPS, NULL);
MF_ENCODER(AUDIO, mp3, MP3, NULL, AFMTS, ACAPS, NULL);
1269 
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
// Video-encoder private options; the named constants map directly onto the
// ICodecAPI eAVEncCommonRateControlMode / eAVScenarioInfo enumerations, and
// -1 everywhere means "leave the MFT's own default in place".
static const AVOption venc_opts[] = {
    {"rate_control",  "Select rate control mode", OFFSET(opt_enc_rc), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE, .unit = "rate_control"},
    { "default",      "Default mode", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, .unit = "rate_control"},
    { "cbr",          "CBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_CBR}, 0, 0, VE, .unit = "rate_control"},
    { "pc_vbr",       "Peak constrained VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_PeakConstrainedVBR}, 0, 0, VE, .unit = "rate_control"},
    { "u_vbr",        "Unconstrained VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_UnconstrainedVBR}, 0, 0, VE, .unit = "rate_control"},
    { "quality",      "Quality mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_Quality}, 0, 0, VE, .unit = "rate_control" },
    // The following rate_control modes require Windows 8.
    { "ld_vbr",       "Low delay VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_LowDelayVBR}, 0, 0, VE, .unit = "rate_control"},
    { "g_vbr",        "Global VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_GlobalVBR}, 0, 0, VE, .unit = "rate_control" },
    { "gld_vbr",      "Global low delay VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_GlobalLowDelayVBR}, 0, 0, VE, .unit = "rate_control"},

    {"scenario",          "Select usage scenario", OFFSET(opt_enc_scenario), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE, .unit = "scenario"},
    { "default",          "Default scenario", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, .unit = "scenario"},
    { "display_remoting", "Display remoting", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_DisplayRemoting}, 0, 0, VE, .unit = "scenario"},
    { "video_conference", "Video conference", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_VideoConference}, 0, 0, VE, .unit = "scenario"},
    { "archive",          "Archive", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_Archive}, 0, 0, VE, .unit = "scenario"},
    { "live_streaming",   "Live streaming", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_LiveStreaming}, 0, 0, VE, .unit = "scenario"},
    { "camera_record",    "Camera record", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_CameraRecord}, 0, 0, VE, .unit = "scenario"},
    { "display_remoting_with_feature_map", "Display remoting with feature map", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_DisplayRemotingWithFeatureMap}, 0, 0, VE, .unit = "scenario"},

    {"quality",       "Quality", OFFSET(opt_enc_quality), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 100, VE},
    {"hw_encoding",   "Force hardware encoding", OFFSET(opt_enc_hw), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, VE},
    {NULL}
};
1296 
// Per-codec defaults layered on top of the generic libavcodec defaults.
// "g" = 0 presumably means "let the MFT pick the GOP size" — confirm against
// the output-type setup code.
static const FFCodecDefault defaults[] = {
    { "g", "0" },
    { NULL },
};

// Video encoders accept NV12 and planar YUV 4:2:0 input.
#define VFMTS \
        CODEC_PIXFMTS(AV_PIX_FMT_NV12, AV_PIX_FMT_YUV420P),
#define VCAPS \
        .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID | \
                          AV_CODEC_CAP_DR1,

MF_ENCODER(VIDEO, h264, H264, venc_opts, VFMTS, VCAPS, defaults);
MF_ENCODER(VIDEO, hevc, HEVC, venc_opts, VFMTS, VCAPS, defaults);
MF_ENCODER(VIDEO, av1, AV1, venc_opts, VFMTS, VCAPS, defaults);
AVCodec
AVCodec.
Definition: codec.h:172
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
ff_hr_str
#define ff_hr_str(hr)
Definition: mf_utils.h:162
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
VCAPS
#define VCAPS
Definition: mfenc.c:1304
AV_CODEC_ID_AC3
@ AV_CODEC_ID_AC3
Definition: codec_id.h:451
FF_VAL_VT_UI4
#define FF_VAL_VT_UI4(v)
Definition: mf_utils.h:167
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1027
venc_opts
static const AVOption venc_opts[]
Definition: mfenc.c:1271
mf_v_avframe_to_sample
static IMFSample * mf_v_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
Definition: mfenc.c:306
mf_choose_input_type
static int mf_choose_input_type(AVCodecContext *avctx)
Definition: mfenc.c:838
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
LOAD_MF_FUNCTION
#define LOAD_MF_FUNCTION(context, func_name)
Definition: mfenc.c:1149
AV_PROFILE_H264_MAIN
#define AV_PROFILE_H264_MAIN
Definition: defs.h:112
int64_t
long long int64_t
Definition: coverity.c:34
normalize.log
log
Definition: normalize.py:21
ff_codec_to_mf_subtype
const CLSID * ff_codec_to_mf_subtype(enum AVCodecID codec)
Definition: mf_utils.c:508
mf_enca_output_score
static int64_t mf_enca_output_score(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:535
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:63
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
MFContext::opt_enc_hw
int opt_enc_hw
Definition: mfenc.c:57
MFContext::av_class
AVClass * av_class
Definition: mfenc.c:36
mf_receive_sample
static int mf_receive_sample(AVCodecContext *avctx, IMFSample **out_sample)
Definition: mfenc.c:408
MF_ENCODER
#define MF_ENCODER(MEDIATYPE, NAME, ID, OPTS, FMTS, CAPS, DEFAULTS)
Definition: mfenc.c:1237
mf_enca_output_type_get
static int mf_enca_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:141
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:535
MFContext::sample_sent
int sample_sent
Definition: mfenc.c:49
ff_eAVEncCommonRateControlMode_Quality
@ ff_eAVEncCommonRateControlMode_Quality
Definition: mf_utils.h:124
ff_eAVEncCommonRateControlMode_CBR
@ ff_eAVEncCommonRateControlMode_CBR
Definition: mf_utils.h:121
AVOption
AVOption.
Definition: opt.h:429
encode.h
data
const char data[16]
Definition: mxf.c:149
mf_encv_output_score
static int64_t mf_encv_output_score(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:635
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
category
category
Definition: openal-dec.c:249
MFContext::draining_done
int draining_done
Definition: mfenc.c:48
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
ff_METransformDrainComplete
@ ff_METransformDrainComplete
Definition: mf_utils.h:147
ff_MFSetAttributeSize
HRESULT ff_MFSetAttributeSize(IMFAttributes *pattr, REFGUID guid, UINT32 uw, UINT32 uh)
Definition: mf_utils.c:40
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:590
MFContext::functions
MFFunctions functions
Definition: mfenc.c:38
ff_eAVEncH264VProfile_High
@ ff_eAVEncH264VProfile_High
Definition: mf_utils.h:158
AV_CODEC_FLAG_GLOBAL_HEADER
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
Definition: avcodec.h:318
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:554
MFContext::out_stream_id
DWORD out_stream_id
Definition: mfenc.c:44
MFContext::async_marker
int async_marker
Definition: mfenc.c:50
FFCodecDefault
Definition: codec_internal.h:96
mf_avframe_to_sample
static IMFSample * mf_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
Definition: mfenc.c:353
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:1042
ff_media_type_to_sample_fmt
enum AVSampleFormat ff_media_type_to_sample_fmt(IMFAttributes *type)
Definition: mf_utils.c:114
MFContext::async_need_input
int async_need_input
Definition: mfenc.c:50
OFFSET
#define OFFSET(x)
Definition: mfenc.c:1235
mf_receive_packet
static int mf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Definition: mfenc.c:490
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
MFContext::is_audio
int is_audio
Definition: mfenc.c:40
mf_utils.h
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
VFMTS
#define VFMTS
Definition: mfenc.c:1302
pts
static int64_t pts
Definition: transcode_aac.c:644
AV_CODEC_ID_MP3
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: codec_id.h:449
ff_eAVEncCommonRateControlMode_GlobalVBR
@ ff_eAVEncCommonRateControlMode_GlobalVBR
Definition: mf_utils.h:126
AVRational::num
int num
Numerator.
Definition: rational.h:59
ff_instantiate_mf
int ff_instantiate_mf(void *log, MFFunctions *f, GUID category, MFT_REGISTER_TYPE_INFO *in_type, MFT_REGISTER_TYPE_INFO *out_type, int use_hw, IMFTransform **res)
Definition: mf_utils.c:552
ff_free_mf
void ff_free_mf(MFFunctions *f, IMFTransform **mft)
Definition: mf_utils.c:645
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:51
mf_setup_context
static int mf_setup_context(AVCodecContext *avctx)
Definition: mfenc.c:953
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
av_cold
#define av_cold
Definition: attributes.h:90
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:518
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:700
MFContext::opt_enc_rc
int opt_enc_rc
Definition: mfenc.c:54
AVCodecContext::global_quality
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1220
MFContext::reorder_delay
int64_t reorder_delay
Definition: mfenc.c:51
mf_encv_output_adjust
static int mf_encv_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:651
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
MFContext::opt_enc_scenario
int opt_enc_scenario
Definition: mfenc.c:56
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
MFContext::codec_api
ICodecAPI * codec_api
Definition: mfenc.c:52
MFContext::in_info
MFT_INPUT_STREAM_INFO in_info
Definition: mfenc.c:45
MFContext::out_stream_provides_samples
int out_stream_provides_samples
Definition: mfenc.c:47
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1273
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
MFContext::library
HMODULE library
Definition: mfenc.c:37
MFContext::frame
AVFrame * frame
Definition: mfenc.c:39
if
if(ret)
Definition: filter_design.txt:179
ff_METransformNeedInput
@ ff_METransformNeedInput
Definition: mf_utils.h:145
ff_eAVScenarioInfo_LiveStreaming
@ ff_eAVScenarioInfo_LiveStreaming
Definition: mf_utils.h:135
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1258
ff_MFSetAttributeRatio
#define ff_MFSetAttributeRatio
Definition: mf_utils.c:47
framerate
float framerate
Definition: av1_levels.c:29
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
NULL
#define NULL
Definition: coverity.c:32
ff_eAVEncCommonRateControlMode_PeakConstrainedVBR
@ ff_eAVEncCommonRateControlMode_PeakConstrainedVBR
Definition: mf_utils.h:122
AVCodec::type
enum AVMediaType type
Definition: codec.h:185
mf_enca_input_score
static int64_t mf_enca_input_score(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:585
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
MF_INVALID_TIME
#define MF_INVALID_TIME
Definition: mfenc.c:65
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:481
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
mf_enca_input_adjust
static int mf_enca_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:609
time.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_METransformMarker
@ ff_METransformMarker
Definition: mf_utils.h:148
AV_CODEC_ID_AAC
@ AV_CODEC_ID_AAC
Definition: codec_id.h:450
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:538
ff_media_type_to_pix_fmt
enum AVPixelFormat ff_media_type_to_pix_fmt(IMFAttributes *type)
Definition: mf_utils.c:158
MFFunctions
Definition: mf_utils.h:48
f
f
Definition: af_crystalizer.c:122
mf_a_avframe_to_sample
static IMFSample * mf_a_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
Definition: mfenc.c:289
ff_eAVScenarioInfo_Archive
@ ff_eAVScenarioInfo_Archive
Definition: mf_utils.h:134
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1008
codec_internal.h
bps
unsigned bps
Definition: movenc.c:1912
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1034
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
sample
#define sample
Definition: flacdsp_template.c:44
MFContext::is_video
int is_video
Definition: mfenc.c:40
size
int size
Definition: twinvq_data.h:10344
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
ff_eAVEncH264VProfile_Base
@ ff_eAVEncH264VProfile_Base
Definition: mf_utils.h:156
ff_eAVScenarioInfo_DisplayRemoting
@ ff_eAVScenarioInfo_DisplayRemoting
Definition: mf_utils.h:132
ACAPS
#define ACAPS
Definition: mfenc.c:1262
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
MFContext::opt_enc_quality
int opt_enc_quality
Definition: mfenc.c:55
MFContext::async_events
IMFMediaEventGenerator * async_events
Definition: mfenc.c:43
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:534
ff_eAVEncCommonRateControlMode_UnconstrainedVBR
@ ff_eAVEncCommonRateControlMode_UnconstrainedVBR
Definition: mf_utils.h:123
MF_TIMEBASE
#define MF_TIMEBASE
Definition: mfenc.c:63
av_image_get_buffer_size
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
Definition: imgutils.c:466
ff_eAVScenarioInfo_DisplayRemotingWithFeatureMap
@ ff_eAVScenarioInfo_DisplayRemotingWithFeatureMap
Definition: mf_utils.h:137
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:541
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
mf_get_tb
static AVRational mf_get_tb(AVCodecContext *avctx)
Definition: mfenc.c:106
mf_load_library
static int mf_load_library(AVCodecContext *avctx)
Definition: mfenc.c:1172
ff_METransformHaveOutput
@ ff_METransformHaveOutput
Definition: mf_utils.h:146
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:528
mf_send_sample
static int mf_send_sample(AVCodecContext *avctx, IMFSample *sample)
Definition: mfenc.c:370
MFContext::in_stream_id
DWORD in_stream_id
Definition: mfenc.c:44
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:108
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:517
MFContext::async_have_output
int async_have_output
Definition: mfenc.c:50
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
VE
#define VE
Definition: mfenc.c:1270
MFContext::out_info
MFT_OUTPUT_STREAM_INFO out_info
Definition: mfenc.c:46
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:498
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
len
int len
Definition: vorbis_enc_data.h:426
profile
int profile
Definition: mxfenc.c:2250
MFContext
Definition: mfenc.c:35
AVCodecContext::height
int height
Definition: avcodec.h:595
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:634
mf_negotiate_types
static int mf_negotiate_types(AVCodecContext *avctx)
Definition: mfenc.c:926
mf_enca_output_adjust
static int mf_enca_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:575
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
mf_sample_set_pts
static void mf_sample_set_pts(AVCodecContext *avctx, IMFSample *sample, int64_t av_pts)
Definition: mfenc.c:120
mf_to_mf_time
static LONGLONG mf_to_mf_time(AVCodecContext *avctx, int64_t av_pts)
Definition: mfenc.c:113
mf_create
static int mf_create(void *log, MFFunctions *f, IMFTransform **mft, const AVCodec *codec, int use_hw)
Definition: mfenc.c:1030
mf_from_mf_time
static int64_t mf_from_mf_time(AVCodecContext *avctx, LONGLONG stime)
Definition: mfenc.c:127
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AV_PROFILE_H264_HIGH
#define AV_PROFILE_H264_HIGH
Definition: defs.h:114
mf_init
static av_cold int mf_init(AVCodecContext *avctx)
Definition: mfenc.c:1224
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
ff_get_encode_buffer
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
Definition: encode.c:106
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1621
mf_init_encoder
static int mf_init_encoder(AVCodecContext *avctx)
Definition: mfenc.c:1060
mf_close
static int mf_close(AVCodecContext *avctx)
Definition: mfenc.c:1196
mf_encv_input_adjust
static int mf_encv_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:736
MFContext::draining
int draining
Definition: mfenc.c:48
ff_eAVScenarioInfo_CameraRecord
@ ff_eAVScenarioInfo_CameraRecord
Definition: mf_utils.h:136
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:439
FF_VAL_VT_BOOL
#define FF_VAL_VT_BOOL(v)
Definition: mf_utils.h:168
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:772
ff_encode_get_frame
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
Called by encoders to get the next frame for encoding.
Definition: encode.c:205
mf_choose_output_type
static int mf_choose_output_type(AVCodecContext *avctx)
Definition: mfenc.c:749
ff_eAVScenarioInfo_VideoConference
@ ff_eAVScenarioInfo_VideoConference
Definition: mf_utils.h:133
AFMTS
#define AFMTS
Definition: mfenc.c:1260
ff_media_type_dump
void ff_media_type_dump(void *log, IMFMediaType *type)
Definition: mf_utils.c:503
defaults
static const FFCodecDefault defaults[]
Definition: mfenc.c:1297
mf_encv_input_score
static int64_t mf_encv_input_score(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:727
mf_output_type_get
static int mf_output_type_get(AVCodecContext *avctx)
Definition: mfenc.c:204
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
av_image_copy_to_buffer
int av_image_copy_to_buffer(uint8_t *dst, int dst_size, const uint8_t *const src_data[4], const int src_linesize[4], enum AVPixelFormat pix_fmt, int width, int height, int align)
Copy image data from an image into a buffer.
Definition: imgutils.c:501
AVPacket
This structure stores compressed data.
Definition: packet.h:512
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
mf_sample_to_avpacket
static int mf_sample_to_avpacket(AVCodecContext *avctx, IMFSample *sample, AVPacket *avpkt)
Definition: mfenc.c:234
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:595
imgutils.h
MFContext::mft
IMFTransform * mft
Definition: mfenc.c:42
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
mf_unlock_async
static int mf_unlock_async(AVCodecContext *avctx)
Definition: mfenc.c:981
ff_eAVEncCommonRateControlMode_GlobalLowDelayVBR
@ ff_eAVEncCommonRateControlMode_GlobalLowDelayVBR
Definition: mf_utils.h:127
mf_wait_events
static int mf_wait_events(AVCodecContext *avctx)
Definition: mfenc.c:67
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
ff_eAVEncH264VProfile_Main
@ ff_eAVEncH264VProfile_Main
Definition: mf_utils.h:157
mf_sample_get_pts
static int64_t mf_sample_get_pts(AVCodecContext *avctx, IMFSample *sample)
Definition: mfenc.c:132
MFContext::main_subtype
GUID main_subtype
Definition: mfenc.c:41
ff_eAVEncCommonRateControlMode_LowDelayVBR
@ ff_eAVEncCommonRateControlMode_LowDelayVBR
Definition: mf_utils.h:125
w32dlfcn.h
mf_encv_output_type_get
static int mf_encv_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:181
AVCodecContext::compression_level
int compression_level
Definition: avcodec.h:1226
ff_create_memory_sample
IMFSample * ff_create_memory_sample(MFFunctions *f, void *fill_data, size_t size, size_t align)
Definition: mf_utils.c:76