20 #if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602
22 #define _WIN32_WINNT 0x0602
62 #define MF_TIMEBASE (AVRational){1, 10000000}
64 #define MF_INVALID_TIME AV_NOPTS_VALUE
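MF_TIMEBASE pins Media Foundation's clock to 100-nanosecond ticks, and MF_INVALID_TIME marks a missing timestamp. A minimal sketch of the conversion built on these defines, using av_rescale_q(); the helper names and the choice of time-base argument are assumptions, not the file's literal code:

/* Sketch: map an FFmpeg timestamp into MF's 100 ns sample-time units. */
static LONGLONG sketch_to_mf_time(AVRational tb, int64_t av_pts)
{
    if (av_pts == AV_NOPTS_VALUE)
        return MF_INVALID_TIME;          /* caller then skips IMFSample_SetSampleTime() */
    return av_rescale_q(av_pts, tb, MF_TIMEBASE);
}

/* Sketch: the reverse mapping, used when reading IMFSample times back. */
static int64_t sketch_from_mf_time(AVRational tb, LONGLONG stime)
{
    return av_rescale_q(stime, MF_TIMEBASE, tb);
}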
73 while (!(c->async_need_input || c->async_have_output || c->draining_done || c->async_marker)) {
74 IMFMediaEvent *ev = NULL;
75 MediaEventType ev_id = 0;
76 HRESULT hr = IMFMediaEventGenerator_GetEvent(c->async_events, 0, &ev);
82 IMFMediaEvent_GetType(ev, &ev_id);
86 c->async_need_input = 1;
89 c->async_have_output = 1;
99 IMFMediaEvent_Release(ev);
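Lines 73-99 belong to mf_wait_events(): the loop pulls events from the IMFMediaEventGenerator until one of the bookkeeping flags flips. A hedged sketch of that pattern (error handling trimmed; the ff_METransformMarker name is assumed by analogy with the other event constants listed below):

/* Sketch: drain pending MFT events and update the state flags. */
while (!(c->async_need_input || c->async_have_output ||
         c->draining_done || c->async_marker)) {
    IMFMediaEvent *ev = NULL;
    MediaEventType ev_id = 0;
    HRESULT hr = IMFMediaEventGenerator_GetEvent(c->async_events, 0, &ev);
    if (FAILED(hr))
        return AVERROR_EXTERNAL;
    IMFMediaEvent_GetType(ev, &ev_id);
    switch (ev_id) {
    case ff_METransformNeedInput:     c->async_need_input  = 1; break;
    case ff_METransformHaveOutput:    c->async_have_output = 1; break;
    case ff_METransformDrainComplete: c->draining_done     = 1; break;
    case ff_METransformMarker:        c->async_marker      = 1; break;
    }
    IMFMediaEvent_Release(ev);
}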
123 IMFSample_SetSampleTime(sample, stime);
134 HRESULT hr = IMFSample_GetSampleTime(sample, &pts);
147 hr = IMFAttributes_GetBlobSize(type, &MF_MT_USER_DATA, &sz);
148 if (!FAILED(hr) && sz > 0) {
168 if (!c->out_info.cbSize && !c->out_stream_provides_samples) {
169 hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &sz);
172 "assuming %d bytes instead.\n", (int)sz);
173 c->out_info.cbSize = sz;
185 hr = IMFAttributes_GetBlobSize(type, &MF_MT_MPEG_SEQUENCE_HEADER, &sz);
186 if (!FAILED(hr) && sz > 0) {
190 hr = IMFAttributes_GetBlob(type, &MF_MT_MPEG_SEQUENCE_HEADER, extradata, sz, NULL);
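Lines 185-190 read the codec's sequence header (e.g. SPS/PPS) out of the negotiated output type. A hedged sketch of how such a blob typically ends up in avctx->extradata; the surrounding allocation and error handling here are assumptions:

UINT32 sz = 0;
hr = IMFAttributes_GetBlobSize(type, &MF_MT_MPEG_SEQUENCE_HEADER, &sz);
if (!FAILED(hr) && sz > 0) {
    uint8_t *extradata = av_mallocz(sz + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!extradata)
        return AVERROR(ENOMEM);
    hr = IMFAttributes_GetBlob(type, &MF_MT_MPEG_SEQUENCE_HEADER, extradata, sz, NULL);
    if (FAILED(hr)) {
        av_free(extradata);
        return AVERROR_EXTERNAL;
    }
    avctx->extradata      = extradata;
    avctx->extradata_size = sz;
}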
210 hr = IMFTransform_GetOutputCurrentType(c->mft, c->out_stream_id, &type);
222 } else if (c->is_audio) {
229 IMFMediaType_Release(type);
244 hr = IMFSample_GetTotalLength(sample, &len);
251 hr = IMFSample_ConvertToContiguousBuffer(sample, &buffer);
257 IMFMediaBuffer_Release(buffer);
263 IMFMediaBuffer_Unlock(buffer);
264 IMFMediaBuffer_Release(buffer);
268 hr = IMFAttributes_GetUINT32(sample, &MFSampleExtension_CleanPoint, &t32);
269 if (c->is_audio || (!FAILED(hr) && t32 != 0))
272 hr = IMFAttributes_GetUINT64(sample, &MFSampleExtension_DecodeTimestamp, &t);
280 c->reorder_delay = avpkt->pts - avpkt->dts;
281 avpkt->dts -= c->reorder_delay;
282 avpkt->pts -= c->reorder_delay;
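The fragments from lines 244-282 are part of mf_sample_to_avpacket(): copy the sample payload into the packet, derive the keyframe flag from MFSampleExtension_CleanPoint, and rebase dts/pts by the initial reordering gap. A condensed, hedged sketch (declarations and error paths abbreviated; mf_sample_get_pts/mf_from_mf_time are the helpers named in the symbol list below):

DWORD len;
IMFMediaBuffer *buffer = NULL;
BYTE *data;
UINT32 t32 = 0;
UINT64 t = 0;
int ret;

hr = IMFSample_GetTotalLength(sample, &len);
if (FAILED(hr))
    return AVERROR_EXTERNAL;
if ((ret = ff_get_encode_buffer(avctx, avpkt, len, 0)) < 0)
    return ret;

hr = IMFSample_ConvertToContiguousBuffer(sample, &buffer);
if (FAILED(hr))
    return AVERROR_EXTERNAL;
if (FAILED(IMFMediaBuffer_Lock(buffer, &data, NULL, NULL))) {
    IMFMediaBuffer_Release(buffer);
    return AVERROR_EXTERNAL;
}
memcpy(avpkt->data, data, len);
IMFMediaBuffer_Unlock(buffer);
IMFMediaBuffer_Release(buffer);

/* Audio packets are always sync points; video relies on CleanPoint. */
hr = IMFAttributes_GetUINT32(sample, &MFSampleExtension_CleanPoint, &t32);
if (c->is_audio || (!FAILED(hr) && t32 != 0))
    avpkt->flags |= AV_PKT_FLAG_KEY;

avpkt->pts = avpkt->dts = mf_sample_get_pts(avctx, sample);

hr = IMFAttributes_GetUINT64(sample, &MFSampleExtension_DecodeTimestamp, &t);
if (!FAILED(hr)) {
    avpkt->dts = mf_from_mf_time(avctx, t);
    /* Remember the first pts-dts gap, then shift both timestamps back by it
     * (mirrors lines 280-282 above). */
    if (c->reorder_delay == AV_NOPTS_VALUE)
        c->reorder_delay = avpkt->pts - avpkt->dts;
    avpkt->dts -= c->reorder_delay;
    avpkt->pts -= c->reorder_delay;
}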
299 c->in_info.cbAlignment);
320 c->in_info.cbAlignment);
326 IMFSample_Release(sample);
332 IMFMediaBuffer_Release(buffer);
333 IMFSample_Release(sample);
339 IMFMediaBuffer_SetCurrentLength(buffer, size);
340 IMFMediaBuffer_Unlock(buffer);
341 IMFMediaBuffer_Release(buffer);
343 IMFSample_Release(sample);
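Lines 299-343 cover the AVFrame-to-IMFSample helpers. For video, the usual pattern is to allocate a memory-backed sample aligned to cbAlignment and copy the picture into it; a hedged sketch assuming the ff_create_memory_sample() and mf_sample_set_pts() signatures shown in the symbol list below (passing NULL fill data is assumed to mean allocate-only):

IMFSample *sample;
IMFMediaBuffer *buffer = NULL;
BYTE *data;
int size;

size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width, avctx->height, 1);
if (size < 0)
    return NULL;

sample = ff_create_memory_sample(&c->functions, NULL, size, c->in_info.cbAlignment);
if (!sample)
    return NULL;

if (FAILED(IMFSample_GetBufferByIndex(sample, 0, &buffer)) ||
    FAILED(IMFMediaBuffer_Lock(buffer, &data, NULL, NULL))) {
    if (buffer)
        IMFMediaBuffer_Release(buffer);
    IMFSample_Release(sample);
    return NULL;
}

av_image_copy_to_buffer(data, size, (const uint8_t * const *)frame->data,
                        frame->linesize, avctx->pix_fmt,
                        avctx->width, avctx->height, 1);

IMFMediaBuffer_SetCurrentLength(buffer, size);
IMFMediaBuffer_Unlock(buffer);
IMFMediaBuffer_Release(buffer);

mf_sample_set_pts(avctx, sample, frame->pts);
return sample;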
376 if (c->async_events) {
379 if (!c->async_need_input)
383 IMFSample_SetUINT32(sample, &MFSampleExtension_Discontinuity, TRUE);
385 hr = IMFTransform_ProcessInput(c->mft, c->in_stream_id, sample, 0);
386 if (hr == MF_E_NOTACCEPTING) {
388 } else if (FAILED(hr)) {
392 c->async_need_input = 0;
393 } else if (!c->draining) {
394 hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_COMMAND_DRAIN, 0);
400 c->async_need_input = 0;
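Lines 376-400 form mf_send_sample(): in async mode input is only legal after a METransformNeedInput event, MF_E_NOTACCEPTING maps to EAGAIN, and a NULL sample starts a drain. A hedged sketch of that control flow (the sample_sent bookkeeping field and the exact error mapping are assumptions):

if (c->async_events) {
    if ((ret = mf_wait_events(avctx)) < 0)
        return ret;
    if (!c->async_need_input)
        return AVERROR(EAGAIN);
}
if (sample) {
    if (!c->sample_sent)
        IMFSample_SetUINT32(sample, &MFSampleExtension_Discontinuity, TRUE);
    c->sample_sent = 1;
    hr = IMFTransform_ProcessInput(c->mft, c->in_stream_id, sample, 0);
    if (hr == MF_E_NOTACCEPTING) {
        return AVERROR(EAGAIN);
    } else if (FAILED(hr)) {
        return AVERROR_EXTERNAL;
    }
    c->async_need_input = 0;
} else if (!c->draining) {
    /* NULL sample: ask the MFT to flush everything it still buffers. */
    hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_COMMAND_DRAIN, 0);
    if (FAILED(hr))
        return AVERROR_EXTERNAL;
    c->draining = 1;
    c->async_need_input = 0;
}
return 0;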
412 MFT_OUTPUT_DATA_BUFFER out_buffers;
420 if (c->async_events) {
423 if (!c->async_have_output || c->draining_done) {
429 if (!c->out_stream_provides_samples) {
432 c->out_info.cbAlignment);
437 out_buffers = (MFT_OUTPUT_DATA_BUFFER) {
438 .dwStreamID = c->out_stream_id,
443 hr = IMFTransform_ProcessOutput(c->mft, 0, 1, &out_buffers, &st);
445 if (out_buffers.pEvents)
446 IMFCollection_Release(out_buffers.pEvents);
449 *out_sample = out_buffers.pSample;
454 if (out_buffers.pSample)
455 IMFSample_Release(out_buffers.pSample);
457 if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
459 c->draining_done = 1;
461 } else if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
469 c->async_have_output = 0;
481 c->async_have_output = 0;
483 if (ret >= 0 && !*out_sample)
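Lines 412-483 are the core of mf_receive_sample(): allocate an output sample unless the stream provides its own, call ProcessOutput, and translate the two well-known HRESULTs into retry paths. A hedged, simplified sketch (the full function appears to retry after a stream change; the error mapping is an assumption):

MFT_OUTPUT_DATA_BUFFER out_buffers;
IMFSample *sample = NULL;
DWORD st = 0;

if (!c->out_stream_provides_samples) {
    sample = ff_create_memory_sample(&c->functions, NULL,
                                     c->out_info.cbSize, c->out_info.cbAlignment);
    if (!sample)
        return AVERROR(ENOMEM);
}

out_buffers = (MFT_OUTPUT_DATA_BUFFER) {
    .dwStreamID = c->out_stream_id,
    .pSample    = sample,
};

hr = IMFTransform_ProcessOutput(c->mft, 0, 1, &out_buffers, &st);

if (out_buffers.pEvents)
    IMFCollection_Release(out_buffers.pEvents);

if (!FAILED(hr)) {
    *out_sample = out_buffers.pSample;   /* caller owns the returned sample */
    return 0;
}
if (out_buffers.pSample)
    IMFSample_Release(out_buffers.pSample);

if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
    if (c->draining)
        c->draining_done = 1;            /* drain finished; caller reports EOF */
    return AVERROR(EAGAIN);
}
if (hr == MF_E_TRANSFORM_STREAM_CHANGE)
    return AVERROR(EAGAIN);              /* re-read the output type, then retry */
return AVERROR_EXTERNAL;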
495 if (!c->frame->buf[0]) {
501 if (c->frame->buf[0]) {
507 if (c->is_video && c->codec_api) {
509 ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncVideoForceKeyFrame, FF_VAL_VT_UI4(1));
515 IMFSample_Release(sample);
526 IMFSample_Release(sample);
542 hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
546 hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
550 hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
552 if (IsEqualGUID(&c->main_subtype, &tg))
557 hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &t);
561 score |= (1LL << 31) - diff;
563 score |= (1LL << 30) + diff;
567 hr = IMFAttributes_GetUINT32(type, &MF_MT_AAC_PAYLOAD_TYPE, &t);
568 if (!FAILED(hr) && t != 0)
597 hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
601 hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
619 hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
625 hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
641 hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
643 if (IsEqualGUID(&c->main_subtype, &tg))
656 IMFAttributes_SetUINT32(type, &MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
662 #if FF_API_TICKS_PER_FRAME
682 IMFAttributes_SetUINT32(type, &MF_MT_MPEG2_PROFILE, profile);
685 IMFAttributes_SetUINT32(type, &MF_MT_AVG_BITRATE, avctx->bit_rate);
692 if (c->opt_enc_rc >= 0)
693 ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonRateControlMode, FF_VAL_VT_UI4(c->opt_enc_rc));
695 if (c->opt_enc_quality >= 0)
696 ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonQuality, FF_VAL_VT_UI4(c->opt_enc_quality));
707 ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncH264CABACEnable, FF_VAL_VT_BOOL(1));
709 if (c->opt_enc_scenario >= 0)
710 ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVScenarioInfo, FF_VAL_VT_UI4(c->opt_enc_scenario));
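The FF_VAL_VT_UI4/FF_VAL_VT_BOOL macros wrap the VARIANT plumbing that ICodecAPI::SetValue() expects. For reference, a hedged sketch of the equivalent call written out by hand against the stock Windows headers (assumes COBJMACROS, codecapi.h and the OLE automation runtime are available; this is not code from this file):

static HRESULT sketch_set_cbr(ICodecAPI *api)
{
    VARIANT v;
    VariantInit(&v);
    v.vt    = VT_UI4;
    v.ulVal = eAVEncCommonRateControlMode_CBR;
    /* Same shape as ICodecAPI_SetValue(api, &..., FF_VAL_VT_UI4(mode)) above. */
    return ICodecAPI_SetValue(api, &CODECAPI_AVEncCommonRateControlMode, &v);
}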
743 IMFMediaType *out_type = NULL;
745 int out_type_index = -1;
753 hr = IMFTransform_GetOutputAvailableType(c->mft, c->out_stream_id, n, &type);
754 if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
756 if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
772 } else if (c->is_audio) {
776 if (score > out_type_score) {
778 IMFMediaType_Release(out_type);
780 out_type_score = score;
782 IMFMediaType_AddRef(out_type);
785 IMFMediaType_Release(type);
791 hr = c->functions.MFCreateMediaType(&out_type);
801 } else if (c->is_audio) {
809 hr = IMFTransform_SetOutputType(c->mft, c->out_stream_id, out_type, 0);
812 } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
823 IMFMediaType_Release(out_type);
832 IMFMediaType *in_type = NULL;
834 int in_type_index = -1;
842 hr = IMFTransform_GetInputAvailableType(c->mft, c->in_stream_id, n, &type);
843 if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
845 if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
861 } else if (c->is_audio) {
865 if (score > in_type_score) {
867 IMFMediaType_Release(in_type);
869 in_type_score = score;
871 IMFMediaType_AddRef(in_type);
874 IMFMediaType_Release(type);
889 } else if (c->is_audio) {
897 hr = IMFTransform_SetInputType(c->mft, c->in_stream_id, in_type, 0);
900 } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
911 IMFMediaType_Release(in_type);
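Lines 743-911 implement the two negotiation loops of mf_choose_output_type() and mf_choose_input_type(): enumerate the candidate media types, score each one, keep the best, and hand it to the MFT. A condensed, hedged sketch of the output-side pattern (error handling and the fallback that builds a type via MFCreateMediaType are omitted):

IMFMediaType *out_type = NULL;
int64_t out_type_score = -1;
int n;

for (n = 0; ; n++) {
    IMFMediaType *type = NULL;
    int64_t score = -1;

    hr = IMFTransform_GetOutputAvailableType(c->mft, c->out_stream_id, n, &type);
    if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
        break;
    if (hr == MF_E_TRANSFORM_TYPE_NOT_SET)
        break;                           /* input type has to be set first */
    if (FAILED(hr))
        return AVERROR_EXTERNAL;

    if (c->is_video)
        score = mf_encv_output_score(avctx, type);
    else if (c->is_audio)
        score = mf_enca_output_score(avctx, type);

    if (score > out_type_score) {
        if (out_type)
            IMFMediaType_Release(out_type);
        out_type       = type;
        out_type_score = score;
        IMFMediaType_AddRef(out_type);
    }
    IMFMediaType_Release(type);
}

if (out_type) {
    hr = IMFTransform_SetOutputType(c->mft, c->out_stream_id, out_type, 0);
    IMFMediaType_Release(out_type);
    if (FAILED(hr))
        return AVERROR_EXTERNAL;
}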
921 int need_input = 1, need_output = 1;
923 for (n = 0; n < 2 && (need_input || need_output); n++) {
928 need_input = ret < 1;
932 need_output = ret < 1;
934 if (need_input || need_output) {
936 need_input, need_output);
948 hr = IMFTransform_GetInputStreamInfo(c->mft, c->in_stream_id, &c->in_info);
952 (int)c->in_info.cbSize, (int)c->in_info.cbAlignment);
954 hr = IMFTransform_GetOutputStreamInfo(c->mft, c->out_stream_id, &c->out_info);
957 c->out_stream_provides_samples =
958 (c->out_info.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES) ||
959 (c->out_info.dwFlags & MFT_OUTPUT_STREAM_CAN_PROVIDE_SAMPLES);
961 (int)c->out_info.cbSize, (int)c->out_info.cbAlignment,
962 c->out_stream_provides_samples ? " (provides samples)" : "");
974 IMFAttributes *attrs;
980 if (!(c->is_video && c->opt_enc_hw))
983 hr = IMFTransform_GetAttributes(c->mft, &attrs);
989 hr = IMFAttributes_GetUINT32(attrs, &MF_TRANSFORM_ASYNC, &v);
1000 hr = IMFAttributes_SetUINT32(attrs, &MF_TRANSFORM_ASYNC_UNLOCK, TRUE);
1006 hr = IMFTransform_QueryInterface(c->mft, &IID_IMFMediaEventGenerator, (void **)&c->async_events);
1015 IMFAttributes_Release(attrs);
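Lines 974-1015 correspond to mf_unlock_async(): hardware MFTs are asynchronous and must be unlocked via MF_TRANSFORM_ASYNC_UNLOCK before ProcessInput/ProcessOutput may be used, and the event generator obtained here drives the flags polled in mf_wait_events(). A hedged sketch (the release placement and error mapping are assumptions):

IMFAttributes *attrs;
UINT32 v = 0;

if (!(c->is_video && c->opt_enc_hw))
    return 0;                            /* only hardware video MFTs need this */

hr = IMFTransform_GetAttributes(c->mft, &attrs);
if (FAILED(hr))
    return AVERROR_EXTERNAL;

hr = IMFAttributes_GetUINT32(attrs, &MF_TRANSFORM_ASYNC, &v);
if (FAILED(hr) || !v) {
    IMFAttributes_Release(attrs);
    return AVERROR_EXTERNAL;             /* expected an async transform */
}

hr = IMFAttributes_SetUINT32(attrs, &MF_TRANSFORM_ASYNC_UNLOCK, TRUE);
IMFAttributes_Release(attrs);
if (FAILED(hr))
    return AVERROR_EXTERNAL;

hr = IMFTransform_QueryInterface(c->mft, &IID_IMFMediaEventGenerator,
                                 (void **)&c->async_events);
if (FAILED(hr))
    return AVERROR_EXTERNAL;
return 0;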
1020 const AVCodec *codec, int use_hw)
1024 MFT_REGISTER_TYPE_INFO reg = {0};
1033 reg.guidSubtype = *subtype;
1036 reg.guidMajorType = MFMediaType_Audio;
1037 category = MFT_CATEGORY_AUDIO_ENCODER;
1039 reg.guidMajorType = MFMediaType_Video;
1040 category = MFT_CATEGORY_VIDEO_ENCODER;
1062 c->is_video = !c->is_audio;
1065 if (c->is_video && c->opt_enc_hw)
1071 c->main_subtype = *subtype;
1079 hr = IMFTransform_QueryInterface(c->mft, &IID_ICodecAPI, (void **)&c->codec_api);
1084 hr = IMFTransform_GetStreamIDs(c->mft, 1, &c->in_stream_id, 1, &c->out_stream_id);
1085 if (hr == E_NOTIMPL) {
1086 c->in_stream_id = c->out_stream_id = 0;
1087 } else if (FAILED(hr)) {
1098 hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);
1104 hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0);
1112 int sleep = 10000, total = 0;
1114 while (total < 70*1000) {
1131 avctx->extradata ? "Got" : "Didn't get", total / 1000);
1138 #define LOAD_MF_FUNCTION(context, func_name) \
1139 context->functions.func_name = (void *)dlsym(context->library, #func_name); \
1140 if (!context->functions.func_name) { \
1141 av_log(context, AV_LOG_ERROR, "DLL mfplat.dll failed to find function "\
1143 return AVERROR_UNKNOWN; \
1149 #define LOAD_MF_FUNCTION(context, func_name) \
1150 context->functions.func_name = func_name; \
1151 if (!context->functions.func_name) { \
1152 av_log(context, AV_LOG_ERROR, "Failed to find function " #func_name \
1154 return AVERROR_UNKNOWN; \
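Both LOAD_MF_FUNCTION variants behave the same way from the caller's perspective: resolve one mfplat entry point into context->functions or fail initialization. Presumably mf_load_library() invokes it once the library handle is open, along these lines (the exact list of resolved functions is an assumption based on the calls visible in this file):

LOAD_MF_FUNCTION(c, MFStartup);
LOAD_MF_FUNCTION(c, MFShutdown);
LOAD_MF_FUNCTION(c, MFCreateSample);
LOAD_MF_FUNCTION(c, MFCreateMediaType);
LOAD_MF_FUNCTION(c, MFCreateAlignedMemoryBuffer);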
1166 c->library = dlopen("mfplat.dll", 0);
1190 ICodecAPI_Release(c->codec_api);
1192 if (c->async_events)
1193 IMFMediaEventGenerator_Release(c->async_events);
1199 dlclose(c->library);
1224 #define OFFSET(x) offsetof(MFContext, x)
1226 #define MF_ENCODER(MEDIATYPE, NAME, ID, OPTS, FMTS, CAPS) \
1227 static const AVClass ff_ ## NAME ## _mf_encoder_class = { \
1228 .class_name = #NAME "_mf", \
1229 .item_name = av_default_item_name, \
1231 .version = LIBAVUTIL_VERSION_INT, \
1233 const FFCodec ff_ ## NAME ## _mf_encoder = { \
1234 .p.priv_class = &ff_ ## NAME ## _mf_encoder_class, \
1235 .p.name = #NAME "_mf", \
1236 CODEC_LONG_NAME(#ID " via MediaFoundation"), \
1237 .p.type = AVMEDIA_TYPE_ ## MEDIATYPE, \
1238 .p.id = AV_CODEC_ID_ ## ID, \
1239 .priv_data_size = sizeof(MFContext), \
1241 .close = mf_close, \
1242 FF_CODEC_RECEIVE_PACKET_CB(mf_receive_packet), \
1245 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, \
1249 .p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, \
1250 AV_SAMPLE_FMT_NONE },
1252 .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID | \
1253 AV_CODEC_CAP_DR1 | AV_CODEC_CAP_VARIABLE_FRAME_SIZE,
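Given the macro's parameter list (MEDIATYPE, NAME, ID, OPTS, FMTS, CAPS) and the format/capability fragments above, the concrete encoders are presumably instantiated at the end of the file along these lines (a hedged reconstruction, not the literal invocations; the AFMTS/ACAPS/VFMTS/VCAPS helper macro names are assumptions):

MF_ENCODER(AUDIO, aac,  AAC,  NULL,      AFMTS, ACAPS);
MF_ENCODER(AUDIO, mp3,  MP3,  NULL,      AFMTS, ACAPS);
MF_ENCODER(VIDEO, h264, H264, venc_opts, VFMTS, VCAPS);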
1259 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1261 {"rate_control", "Select rate control mode", OFFSET(opt_enc_rc), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE, .unit = "rate_control"},
1262 {"default", "Default mode", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, .unit = "rate_control"},
1272 {"scenario", "Select usage scenario", OFFSET(opt_enc_scenario), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE, .unit = "scenario"},
1273 {"default", "Default scenario", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, .unit = "scenario"},
1287 .p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12, \
1288 AV_PIX_FMT_YUV420P, \
1291 .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID | \
#define FF_ENABLE_DEPRECATION_WARNINGS
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
int sample_rate
samples per second
static const AVOption venc_opts[]
static IMFSample * mf_v_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
int64_t duration
Duration of the frame, in the same units as pts.
static int mf_choose_input_type(AVCodecContext *avctx)
#define AVERROR_EOF
End of file.
#define LOAD_MF_FUNCTION(context, func_name)
#define AV_PROFILE_H264_MAIN
const CLSID * ff_codec_to_mf_subtype(enum AVCodecID codec)
static int64_t mf_enca_output_score(AVCodecContext *avctx, IMFMediaType *type)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
AVFrame
This structure describes decoded (raw) audio or video data.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
static int mf_receive_sample(AVCodecContext *avctx, IMFSample **out_sample)
static int mf_enca_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
@ ff_eAVEncCommonRateControlMode_Quality
@ ff_eAVEncCommonRateControlMode_CBR
static int64_t mf_encv_output_score(AVCodecContext *avctx, IMFMediaType *type)
#define AV_LOG_VERBOSE
Detailed information.
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
int nb_channels
Number of channels in this layout.
HRESULT ff_MFSetAttributeSize(IMFAttributes *pattr, REFGUID guid, UINT32 uw, UINT32 uh)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
@ ff_eAVEncH264VProfile_High
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
static IMFSample * mf_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
const struct AVCodec * codec
AVChannelLayout ch_layout
Audio channel layout.
enum AVSampleFormat ff_media_type_to_sample_fmt(IMFAttributes *type)
static int mf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
int flags
AV_CODEC_FLAG_*.
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
@ ff_eAVEncCommonRateControlMode_GlobalVBR
int ff_instantiate_mf(void *log, MFFunctions *f, GUID category, MFT_REGISTER_TYPE_INFO *in_type, MFT_REGISTER_TYPE_INFO *out_type, int use_hw, IMFTransform **res)
void ff_free_mf(MFFunctions *f, IMFTransform **mft)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static int mf_setup_context(AVCodecContext *avctx)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define MF_ENCODER(MEDIATYPE, NAME, ID, OPTS, FMTS, CAPS)
int has_b_frames
Size of the frame reordering buffer in the decoder.
static int mf_encv_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
static enum AVPixelFormat pix_fmt
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
MFT_INPUT_STREAM_INFO in_info
int out_stream_provides_samples
int av_usleep(unsigned usec)
Sleep for a period of time.
@ ff_eAVScenarioInfo_LiveStreaming
#define ff_MFSetAttributeRatio
AVClass
Describe the class of an AVClass context structure.
@ ff_eAVEncCommonRateControlMode_PeakConstrainedVBR
static int64_t mf_enca_input_score(AVCodecContext *avctx, IMFMediaType *type)
AVRational
Rational number (pair of numerator and denominator).
int64_t bit_rate
the average bitrate
@ AV_PICTURE_TYPE_I
Intra.
@ ff_METransformNeedInput
static int mf_enca_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
enum AVPixelFormat ff_media_type_to_pix_fmt(IMFAttributes *type)
static IMFSample * mf_a_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
@ ff_eAVScenarioInfo_Archive
enum AVSampleFormat sample_fmt
audio sample format
#define AV_NOPTS_VALUE
Undefined timestamp value.
@ ff_eAVEncH264VProfile_Base
@ ff_eAVScenarioInfo_DisplayRemoting
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
IMFMediaEventGenerator * async_events
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
@ ff_eAVEncCommonRateControlMode_UnconstrainedVBR
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
@ ff_eAVScenarioInfo_DisplayRemotingWithFeatureMap
#define AVERROR_EXTERNAL
Generic error in an external library.
int flags
A combination of AV_PKT_FLAG values.
@ ff_METransformHaveOutput
static AVRational mf_get_tb(AVCodecContext *avctx)
int nb_samples
number of audio samples (per channel) described by this frame
static int mf_load_library(AVCodecContext *avctx)
@ ff_METransformDrainComplete
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
static int mf_send_sample(AVCodecContext *avctx, IMFSample *sample)
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
AVSampleFormat
Audio sample formats.
MFT_OUTPUT_STREAM_INFO out_info
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU).
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static int mf_negotiate_types(AVCodecContext *avctx)
static int mf_enca_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
static void mf_sample_set_pts(AVCodecContext *avctx, IMFSample *sample, int64_t av_pts)
static LONGLONG mf_to_mf_time(AVCodecContext *avctx, int64_t av_pts)
static int mf_create(void *log, MFFunctions *f, IMFTransform **mft, const AVCodec *codec, int use_hw)
static int64_t mf_from_mf_time(AVCodecContext *avctx, LONGLONG stime)
static int mf_init(AVCodecContext *avctx)
#define AV_INPUT_BUFFER_PADDING_SIZE
AVCodecContext
main external API structure.
#define AV_PROFILE_H264_HIGH
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
static int mf_init_encoder(AVCodecContext *avctx)
static int mf_close(AVCodecContext *avctx)
attribute_deprecated int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
static int mf_encv_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
@ ff_eAVScenarioInfo_CameraRecord
#define FF_DISABLE_DEPRECATION_WARNINGS
enum AVMediaType codec_type
#define FF_VAL_VT_BOOL(v)
int max_b_frames
maximum number of B-frames between non-B-frames. Note: the output will be delayed by max_b_frames+1 relative to the input.
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
Called by encoders to get the next frame for encoding.
static int mf_choose_output_type(AVCodecContext *avctx)
@ ff_eAVScenarioInfo_VideoConference
void ff_media_type_dump(void *log, IMFMediaType *type)
static int64_t mf_encv_input_score(AVCodecContext *avctx, IMFMediaType *type)
static int mf_output_type_get(AVCodecContext *avctx)
int av_image_copy_to_buffer(uint8_t *dst, int dst_size, const uint8_t *const src_data[4], const int src_linesize[4], enum AVPixelFormat pix_fmt, int width, int height, int align)
Copy image data from an image into a buffer.
AVPacket
This structure stores compressed data.
static int mf_sample_to_avpacket(AVCodecContext *avctx, IMFSample *sample, AVPacket *avpkt)
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
static int mf_unlock_async(AVCodecContext *avctx)
@ ff_eAVEncCommonRateControlMode_GlobalLowDelayVBR
static int mf_wait_events(AVCodecContext *avctx)
@ ff_eAVEncH264VProfile_Main
static int64_t mf_sample_get_pts(AVCodecContext *avctx, IMFSample *sample)
@ ff_eAVEncCommonRateControlMode_LowDelayVBR
static int mf_encv_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
IMFSample * ff_create_memory_sample(MFFunctions *f, void *fill_data, size_t size, size_t align)