Go to the documentation of this file.
26 #include <media/NdkMediaFormat.h>
27 #include <media/NdkMediaCodec.h>
28 #include <android/native_window_jni.h>
66 {
"android/media/MediaCodecList",
"findDecoderForFormat",
"(Landroid/media/MediaFormat;)Ljava/lang/String;",
FF_JNI_METHOD, offsetof(
struct JNIAMediaCodecListFields, find_decoder_for_format_id), 0 },
73 {
"android/media/MediaCodecInfo",
"getCapabilitiesForType",
"(Ljava/lang/String;)Landroid/media/MediaCodecInfo$CodecCapabilities;",
FF_JNI_METHOD, offsetof(
struct JNIAMediaCodecListFields, get_codec_capabilities_id), 1 },
80 {
"android/media/MediaCodecInfo$CodecCapabilities",
"profileLevels",
"[Landroid/media/MediaCodecInfo$CodecProfileLevel;",
FF_JNI_FIELD, offsetof(
struct JNIAMediaCodecListFields, profile_levels_id), 1 },
123 {
"android/media/MediaFormat",
"getByteBuffer",
"(Ljava/lang/String;)Ljava/nio/ByteBuffer;",
FF_JNI_METHOD, offsetof(
struct JNIAMediaFormatFields, get_bytebuffer_id), 1 },
129 {
"android/media/MediaFormat",
"setByteBuffer",
"(Ljava/lang/String;Ljava/nio/ByteBuffer;)V",
FF_JNI_METHOD, offsetof(
struct JNIAMediaFormatFields, set_bytebuffer_id), 1 },
224 {
"android/media/MediaCodec",
"configure",
"(Landroid/media/MediaFormat;Landroid/view/Surface;Landroid/media/MediaCrypto;I)V",
FF_JNI_METHOD, offsetof(
struct JNIAMediaCodecFields, configure_id), 1 },
230 {
"android/media/MediaCodec",
"getOutputFormat",
"()Landroid/media/MediaFormat;",
FF_JNI_METHOD, offsetof(
struct JNIAMediaCodecFields, get_output_format_id), 1 },
237 {
"android/media/MediaCodec",
"dequeueOutputBuffer",
"(Landroid/media/MediaCodec$BufferInfo;J)I",
FF_JNI_METHOD, offsetof(
struct JNIAMediaCodecFields, dequeue_output_buffer_id), 1 },
289 #define JNI_GET_ENV_OR_RETURN(env, log_ctx, ret) do { \
290 (env) = ff_jni_get_env(log_ctx); \
296 #define JNI_GET_ENV_OR_RETURN_VOID(env, log_ctx) do { \
297 (env) = ff_jni_get_env(log_ctx); \
306 static const int AVCProfileBaseline = 0x01;
307 static const int AVCProfileMain = 0x02;
308 static const int AVCProfileExtended = 0x04;
309 static const int AVCProfileHigh = 0x08;
310 static const int AVCProfileHigh10 = 0x10;
311 static const int AVCProfileHigh422 = 0x20;
312 static const int AVCProfileHigh444 = 0x40;
313 static const int AVCProfileConstrainedBaseline = 0x10000;
314 static const int AVCProfileConstrainedHigh = 0x80000;
316 static const int HEVCProfileMain = 0x01;
317 static const int HEVCProfileMain10 = 0x02;
318 static const int HEVCProfileMainStill = 0x04;
319 static const int HEVCProfileMain10HDR10 = 0x1000;
320 static const int HEVCProfileMain10HDR10Plus = 0x2000;
323 (void)AVCProfileConstrainedHigh;
324 (void)HEVCProfileMain10HDR10;
325 (void)HEVCProfileMain10HDR10Plus;
330 return AVCProfileBaseline;
332 return AVCProfileConstrainedBaseline;
334 return AVCProfileMain;
337 return AVCProfileExtended;
339 return AVCProfileHigh;
342 return AVCProfileHigh10;
345 return AVCProfileHigh422;
349 return AVCProfileHigh444;
354 return HEVCProfileMain;
356 return HEVCProfileMainStill;
358 return HEVCProfileMain10;
372 char *supported_type =
NULL;
378 jobject codec_name =
NULL;
382 jobjectArray types =
NULL;
384 jobject capabilities =
NULL;
385 jobject profile_level =
NULL;
386 jobjectArray profile_levels =
NULL;
403 for(
i = 0;
i < codec_count;
i++) {
423 if (is_encoder != encoder) {
433 if (is_software_only) {
449 (*env)->DeleteLocalRef(env, codec_name);
455 strstr(
name,
"OMX.google") ||
456 strstr(
name,
"OMX.ffmpeg") ||
457 (strstr(
name,
"OMX.SEC") && strstr(
name,
".sw.")) ||
458 !strcmp(
name,
"OMX.qcom.video.decoder.hevcswvdec")) {
462 type_count = (*env)->GetArrayLength(env, types);
463 for (j = 0; j < type_count; j++) {
467 type = (*env)->GetObjectArrayElement(env, types, j);
473 if (!supported_type) {
486 profile_levels = (*env)->GetObjectField(env, capabilities, jfields.
profile_levels_id);
491 profile_count = (*env)->GetArrayLength(env, profile_levels);
492 if (!profile_count) {
495 for (k = 0; k < profile_count; k++) {
496 int supported_profile = 0;
503 profile_level = (*env)->GetObjectArrayElement(env, profile_levels, k);
508 supported_profile = (*env)->GetIntField(env, profile_level, jfields.
profile_id);
513 found_codec =
profile == supported_profile;
516 (*env)->DeleteLocalRef(env, profile_level);
517 profile_level =
NULL;
526 if (profile_levels) {
527 (*env)->DeleteLocalRef(env, profile_levels);
528 profile_levels =
NULL;
532 (*env)->DeleteLocalRef(env, capabilities);
537 (*env)->DeleteLocalRef(env,
type);
550 (*env)->DeleteLocalRef(env,
info);
555 (*env)->DeleteLocalRef(env, types);
568 (*env)->DeleteLocalRef(env, codec_name);
572 (*env)->DeleteLocalRef(env,
info);
576 (*env)->DeleteLocalRef(env,
type);
580 (*env)->DeleteLocalRef(env, types);
584 (*env)->DeleteLocalRef(env, capabilities);
588 (*env)->DeleteLocalRef(env, profile_level);
591 if (profile_levels) {
592 (*env)->DeleteLocalRef(env, profile_levels);
611 jobject
object =
NULL;
629 object = (*env)->NewObject(env,
format->jfields.mediaformat_class,
format->jfields.init_id);
634 format->object = (*env)->NewGlobalRef(env,
object);
641 (*env)->DeleteLocalRef(env,
object);
673 format->object = (*env)->NewGlobalRef(env,
object);
699 (*env)->DeleteGlobalRef(env,
format->object);
740 jboolean contains_key;
752 contains_key = (*env)->CallBooleanMethod(env,
format->object,
format->jfields.contains_key_id,
key);
758 *
out = (*env)->CallIntMethod(env,
format->object,
format->jfields.get_integer_id,
key);
767 (*env)->DeleteLocalRef(env,
key);
779 jboolean contains_key;
791 contains_key = (*env)->CallBooleanMethod(env,
format->object,
format->jfields.contains_key_id,
key);
806 (*env)->DeleteLocalRef(env,
key);
818 jboolean contains_key;
830 contains_key = (*env)->CallBooleanMethod(env,
format->object,
format->jfields.contains_key_id,
key);
836 *
out = (*env)->CallFloatMethod(env,
format->object,
format->jfields.get_float_id,
key);
845 (*env)->DeleteLocalRef(env,
key);
857 jboolean contains_key;
870 contains_key = (*env)->CallBooleanMethod(env,
format->object,
format->jfields.contains_key_id,
key);
882 *
data = (*env)->GetDirectBufferAddress(env,
result);
883 *
size = (*env)->GetDirectBufferCapacity(env,
result);
899 (*env)->DeleteLocalRef(env,
key);
903 (*env)->DeleteLocalRef(env,
result);
915 jboolean contains_key;
928 contains_key = (*env)->CallBooleanMethod(env,
format->object,
format->jfields.contains_key_id,
key);
949 (*env)->DeleteLocalRef(env,
key);
953 (*env)->DeleteLocalRef(env,
result);
981 (*env)->DeleteLocalRef(env,
key);
1007 (*env)->DeleteLocalRef(env,
key);
1033 (*env)->DeleteLocalRef(env,
key);
1041 jstring
string =
NULL;
1058 (*env)->CallVoidMethod(env,
format->object,
format->jfields.set_string_id,
key,
string);
1065 (*env)->DeleteLocalRef(env,
key);
1069 (*env)->DeleteLocalRef(env,
string);
1113 (*env)->DeleteLocalRef(env,
key);
1117 (*env)->DeleteLocalRef(env,
buffer);
1175 #define CREATE_CODEC_BY_NAME 0
1176 #define CREATE_DECODER_BY_TYPE 1
1177 #define CREATE_ENCODER_BY_TYPE 2
1184 jstring jarg =
NULL;
1185 jobject
object =
NULL;
1186 jobject buffer_info =
NULL;
1187 jmethodID create_id =
NULL;
1218 object = (*env)->CallStaticObjectMethod(env,
1226 codec->
object = (*env)->NewGlobalRef(env,
object);
1244 codec->
buffer_info = (*env)->NewGlobalRef(env, buffer_info);
1252 (*env)->DeleteLocalRef(env, jarg);
1256 (*env)->DeleteLocalRef(env,
object);
1260 (*env)->DeleteLocalRef(env, buffer_info);
1265 (*env)->DeleteGlobalRef(env, codec->
object);
1279 #define DECLARE_FF_AMEDIACODEC_CREATE_FUNC(name, method) \
1280 static FFAMediaCodec *mediacodec_jni_##name(const char *arg) \
1282 return codec_create(method, arg); \
1312 (*env)->DeleteGlobalRef(env, codec->
object);
1343 (*env)->DeleteLocalRef(env,
name);
1560 jobject input_buffers =
NULL;
1576 codec->
input_buffers = (*env)->NewGlobalRef(env, input_buffers);
1588 ret = (*env)->GetDirectBufferAddress(env,
buffer);
1592 (*env)->DeleteLocalRef(env,
buffer);
1595 if (input_buffers) {
1596 (*env)->DeleteLocalRef(env, input_buffers);
1608 jobject output_buffers =
NULL;
1624 codec->
output_buffers = (*env)->NewGlobalRef(env, output_buffers);
1636 ret = (*env)->GetDirectBufferAddress(env,
buffer);
1640 (*env)->DeleteLocalRef(env,
buffer);
1643 if (output_buffers) {
1644 (*env)->DeleteLocalRef(env, output_buffers);
1656 jobject mediaformat =
NULL;
1668 (*env)->DeleteLocalRef(env, mediaformat);
1781 .createCodecByName = mediacodec_jni_createCodecByName,
1782 .createDecoderByType = mediacodec_jni_createDecoderByType,
1783 .createEncoderByType = mediacodec_jni_createEncoderByType,
1822 AMediaFormat *(*new)(void);
1823 media_status_t (*
delete)(AMediaFormat*);
1825 const char* (*toString)(AMediaFormat*);
1852 AMediaCodec* (*createCodecByName)(
const char *
name);
1853 AMediaCodec* (*createDecoderByType)(
const char *mime_type);
1854 AMediaCodec* (*createEncoderByType)(
const char *mime_type);
1855 media_status_t (*
delete)(AMediaCodec*);
1858 const AMediaFormat *
format,
1859 ANativeWindow *surface,
1860 AMediaCrypto *crypto,
1863 media_status_t (*
stop)(AMediaCodec*);
1866 uint8_t* (*getInputBuffer)(AMediaCodec*,
size_t idx,
size_t *
out_size);
1867 uint8_t* (*getOutputBuffer)(AMediaCodec*,
size_t idx,
size_t *
out_size);
1872 uint64_t time, uint32_t
flags);
1875 AMediaFormat* (*getOutputFormat)(AMediaCodec*);
1881 media_status_t (*
getName)(AMediaCodec*,
char** out_name);
1912 format->libmedia = dlopen(
"libmediandk.so", RTLD_NOW);
1916 #define GET_OPTIONAL_SYMBOL(sym) \
1917 format->sym = dlsym(format->libmedia, "AMediaFormat_" #sym);
1919 #define GET_SYMBOL(sym) \
1920 GET_OPTIONAL_SYMBOL(sym) \
1945 #undef GET_OPTIONAL_SYMBOL
1959 dlclose(
format->libmedia);
1981 dlclose(
format->libmedia);
2101 const char *lib_name =
"libmediandk.so";
2107 codec->
libmedia = dlopen(lib_name, RTLD_NOW);
2111 #define GET_SYMBOL(sym, required) \
2112 codec->sym = dlsym(codec->libmedia, "AMediaCodec_" #sym); \
2113 if (!codec->sym) { \
2114 av_log(codec, required ? AV_LOG_ERROR : AV_LOG_INFO, \
2115 #sym "() unavailable from %s\n", lib_name); \
2175 #define DECLARE_NDK_AMEDIACODEC_CREATE_FUNC(name, method) \
2176 static FFAMediaCodec *mediacodec_ndk_##name(const char *arg) \
2178 return ndk_codec_create(method, arg); \
2198 ANativeWindow_release(codec->
window);
2215 ANativeWindow *native_window =
NULL;
2221 native_window = ANativeWindow_fromSurface(env,
window->surface);
2223 codec->
window = native_window;
2224 }
else if (
window->native_window) {
2225 native_window =
window->native_window;
2234 if (
flags & AMEDIACODEC_CONFIGURE_FLAG_ENCODE) {
2241 if (
status != AMEDIA_OK) {
2250 if (
status != AMEDIA_OK) {
2256 if (
status != AMEDIA_OK) {
2265 #define MEDIACODEC_NDK_WRAPPER(method) \
2266 static int mediacodec_ndk_ ## method(FFAMediaCodec* ctx) \
2268 FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx; \
2269 media_status_t status = codec->method(codec->impl); \
2271 if (status != AMEDIA_OK) { \
2272 av_log(codec, AV_LOG_ERROR, #method " failed, %d\n", status); \
2273 return AVERROR_EXTERNAL; \
2303 uint64_t time, uint32_t
flags)
2312 AMediaCodecBufferInfo buf_info = {0};
2316 info->offset = buf_info.offset;
2317 info->size = buf_info.size;
2318 info->presentationTimeUs = buf_info.presentationTimeUs;
2319 info->flags = buf_info.flags;
2340 if (
status != AMEDIA_OK) {
2354 if (
status != AMEDIA_OK) {
2364 return idx == AMEDIACODEC_INFO_TRY_AGAIN_LATER;
2369 return idx == AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED;
2374 return idx == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED;
2379 return AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG;
2384 return AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM;
2394 return AMEDIACODEC_CONFIGURE_FLAG_ENCODE;
2413 if (
status != AMEDIA_OK) {
2450 .createCodecByName = mediacodec_ndk_createCodecByName,
2451 .createDecoderByType = mediacodec_ndk_createDecoderByType,
2452 .createEncoderByType = mediacodec_ndk_createEncoderByType,
2456 .start = mediacodec_ndk_start,
2457 .stop = mediacodec_ndk_stop,
2458 .flush = mediacodec_ndk_flush,
2517 #if __ANDROID_API__ >= 24
2529 ret = android_get_device_api_level();
2532 jclass versionClass;
2533 jfieldID sdkIntFieldID;
2536 versionClass = (*env)->FindClass(env,
"android/os/Build$VERSION");
2537 sdkIntFieldID = (*env)->GetStaticFieldID(env, versionClass,
"SDK_INT",
"I");
2538 ret = (*env)->GetStaticIntField(env, versionClass, sdkIntFieldID);
2539 (*env)->DeleteLocalRef(env, versionClass);
static void error(const char *err)
#define AV_LOG_WARNING
Something somehow does not look correct.
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this — just let it be. vf: default, minimum, maximum, flags; name is the option name.
They must not be accessed directly. The fifo field contains the frames that are queued in the input for processing by the filter. The status_in and status_out fields contain the queued status (EOF or error) of the link.
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout, sample format and sample rate. The lists are not just lists; they are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
#define FF_PROFILE_H264_BASELINE
int av_strcasecmp(const char *a, const char *b)
Locale-independent case-insensitive compare.
#define FF_PROFILE_H264_CONSTRAINED_BASELINE
#define FF_PROFILE_H264_HIGH_444_PREDICTIVE
int ff_jni_reset_jfields(JNIEnv *env, void *jfields, const struct FFJniField *jfields_mapping, int global, void *log_ctx)
static SDL_Window * window
#define FF_PROFILE_H264_HIGH
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this — just let it be. vf: type.
jstring ff_jni_utf_chars_to_jstring(JNIEnv *env, const char *utf_chars, void *log_ctx)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_PROFILE_HEVC_MAIN
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout, sample format (the sample packing is implied by the sample format) and sample rate. The lists are not just lists
#define FF_PROFILE_H264_EXTENDED
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
int ff_jni_init_jfields(JNIEnv *env, void *jfields, const struct FFJniField *jfields_mapping, int global, void *log_ctx)
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
and forward the result (frame or status change) to the corresponding input. If nothing is possible
const char * av_default_item_name(void *ptr)
Return the context name.
#define FF_PROFILE_HEVC_MAIN_10
JNIEnv * ff_jni_get_env(void *log_ctx)
#define FF_PROFILE_H264_HIGH_422
#define AVERROR_EXTERNAL
Generic error in an external library.
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this — just let it be. vf: offset.
char * ff_jni_jstring_to_utf_chars(JNIEnv *env, jstring string, void *log_ctx)
#define i(width, name, range_min, range_max)
#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE
int ff_jni_exception_check(JNIEnv *env, int log, void *log_ctx)
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this — just let it be. vf: default value.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
#define FF_PROFILE_H264_HIGH_10_INTRA
#define FF_PROFILE_H264_HIGH_444_INTRA
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
#define FF_PROFILE_H264_HIGH_444
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
#define FF_PROFILE_H264_HIGH_422_INTRA
main external API structure.
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
#define FF_PROFILE_H264_MAIN
char * av_strdup(const char *s)
Duplicate a string.
void(* flush)(AVBSFContext *ctx)
#define FF_PROFILE_H264_HIGH_10
#define flags(name, subs,...)