Go to the documentation of this file.
50 #define OFFSET(x) offsetof(ShowInfoContext, x)
51 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
63 double yaw, pitch, roll;
66 if (sd->
size <
sizeof(*spherical)) {
82 yaw = ((double)spherical->
yaw) / (1 << 16);
83 pitch = ((double)spherical->
pitch) / (1 << 16);
84 roll = ((double)spherical->
roll) / (1 << 16);
104 if (sd->
size <
sizeof(*stereo)) {
125 if (!roi_size || sd->
size % roi_size != 0) {
129 nb_rois = sd->
size / roi_size;
132 for (
int i = 0;
i < nb_rois;
i++) {
144 if (sd->
size <
sizeof(*mastering_display)) {
152 "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f) "
153 "min_luminance=%f, max_luminance=%f",
170 "MaxCLL=%d, MaxFALL=%d",
184 int delta_qp = par->
delta_qp[plane][acdc];
187 plane, acdc, delta_qp);
230 for (
i = 0;
i <
len;
i++) {
238 const uint16_t *
src1 = (
const uint16_t *)
src;
241 for (
i = 0;
i <
len / 2;
i++) {
242 if ((HAVE_BIGENDIAN && !
be) || (!HAVE_BIGENDIAN &&
be)) {
247 *sum2 += (uint32_t)
src1[
i] * (uint32_t)
src1[
i];
265 uint32_t plane_checksum[4] = {0},
checksum = 0;
266 int64_t sum[4] = {0}, sum2[4] = {0};
268 int bitdepth =
desc->comp[0].depth;
270 int i, plane, vsub =
desc->log2_chroma_h;
272 for (plane = 0; plane < 4 &&
s->calculate_checksums &&
frame->data[plane] &&
frame->linesize[plane]; plane++) {
276 int width = linesize >> (bitdepth > 8);
281 for (
i = 0;
i <
h;
i++) {
286 pixelcount[plane] +=
width;
292 "n:%4"PRId64
" pts:%7s pts_time:%-7s pos:%9"PRId64
" "
293 "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c ",
297 frame->sample_aspect_ratio.num,
frame->sample_aspect_ratio.den,
299 !
frame->interlaced_frame ?
'P' :
300 frame->top_field_first ?
'T' :
'B',
304 if (
s->calculate_checksums) {
306 "checksum:%08"PRIX32
" plane_checksum:[%08"PRIX32,
309 for (plane = 1; plane < 4 &&
frame->data[plane] &&
frame->linesize[plane]; plane++)
312 for (plane = 0; plane < 4 &&
frame->data[plane] &&
frame->linesize[plane]; plane++)
315 for (plane = 0; plane < 4 &&
frame->data[plane] &&
frame->linesize[plane]; plane++)
317 sqrt((sum2[plane] - sum[plane]*(
double)sum[plane]/pixelcount[plane])/pixelcount[plane]));
322 for (
i = 0;
i <
frame->nb_side_data;
i++) {
340 uint32_t *
tc = (uint32_t*)sd->
data;
342 if (sd->
size != 16) {
346 for (
int j = 1; j <= m; j++) {
396 is_out ?
"out" :
"in",
397 link->time_base.num,
link->time_base.den,
398 link->frame_rate.num,
link->frame_rate.den);
440 .priv_class = &showinfo_class,
int32_t qp
Base quantisation parameter for the frame.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be (in the first position) for now. Options ------- Then comes the options array. This is what will define the user-accessible options. For example
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_TIMECODE_STR_SIZE
enum AVSphericalProjection projection
Projection type.
char * av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df)
Get the timecode string from the SMPTE timecode format.
static void update_sample_stats(int depth, int be, const uint8_t *src, int len, int64_t *sum, int64_t *sum2)
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
static int config_props_out(AVFilterLink *link)
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
unsigned MaxCLL
Max content light level (cd/m^2).
This structure describes decoded (raw) audio or video data.
static const AVFilterPad avfilter_vf_showinfo_inputs[]
void av_spherical_tile_bounds(const AVSphericalMapping *map, size_t width, size_t height, size_t *left, size_t *top, size_t *right, size_t *bottom)
Convert the bounding fields from an AVSphericalVideo from 0.32 fixed point to pixels.
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
@ AV_SPHERICAL_EQUIRECTANGULAR_TILE
Video represents a portion of a sphere mapped on a flat surface using equirectangular projection.
const char * name
Filter name.
static void dump_color_property(AVFilterContext *ctx, AVFrame *frame)
@ AV_SPHERICAL_EQUIRECTANGULAR
Video represents a sphere mapped on a flat surface using equirectangular projection.
A link between two filters.
static void dump_spherical(AVFilterContext *ctx, AVFrame *frame, AVFrameSideData *sd)
Content light level needed to transmit HDR over HDMI (CTA-861.3).
const char * av_color_space_name(enum AVColorSpace space)
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Video encoding parameters for a given frame.
AVFILTER_DEFINE_CLASS(showinfo)
A filter pad used for either input or output.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int config_props_in(AVFilterLink *link)
Structure describing a single Region Of Interest.
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
#define AV_CEIL_RSHIFT(a, b)
static void dump_stereo3d(AVFilterContext *ctx, AVFrameSideData *sd)
static double av_q2d(AVRational a)
Convert an AVRational to a double.
enum AVVideoEncParamsType type
Type of the parameters (the codec they are used with).
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
int flags
Additional information about the frame packing.
const char * av_color_range_name(enum AVColorRange range)
static int config_props(AVFilterContext *ctx, AVFilterLink *link, int is_out)
@ AV_FRAME_DATA_SPHERICAL
The data represents the AVSphericalMapping structure defined in libavutil/spherical....
Describe the class of an AVClass context structure.
uint32_t self_size
Must be set to the size of this data structure (that is, sizeof(AVRegionOfInterest)).
@ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA
Mastering display metadata associated with a video frame.
const char * av_color_primaries_name(enum AVColorPrimaries primaries)
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
@ AVCOL_RANGE_UNSPECIFIED
static void dump_roi(AVFilterContext *ctx, AVFrameSideData *sd)
@ AV_SPHERICAL_CUBEMAP
Video frame is split into 6 faces of a cube, and arranged on a 3x2 layout.
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
static const AVOption showinfo_options[]
unsigned int nb_blocks
Number of blocks in the array.
uint32_t padding
Number of pixels to pad from the edge of each cube face.
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
static void update_sample_stats_16(int be, const uint8_t *src, int len, int64_t *sum, int64_t *sum2)
#define AV_LOG_INFO
Standard information.
int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane)
Compute the size of an image line with format pix_fmt and width width for the plane plane.
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
@ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL
Content light level (based on CTA-861.3).
int32_t roll
Rotation around the forward vector [-180, 180].
static void dump_content_light_metadata(AVFilterContext *ctx, AVFrameSideData *sd)
#define i(width, name, range_min, range_max)
int top
Distance in pixels from the top edge of the frame to the top and bottom edges and from the left edge ...
@ AV_FRAME_DATA_STEREO3D
Stereoscopic 3d metadata.
#define AV_PIX_FMT_FLAG_BE
Pixel format is big-endian.
const char * name
Pad name.
static const AVFilterPad avfilter_vf_showinfo_outputs[]
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
int32_t pitch
Rotation around the right vector [-90, 90].
enum AVStereo3DType type
How views are packed within the video.
static volatile int checksum
#define FF_ARRAY_ELEMS(a)
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf, unsigned int len)
Calculate the Adler32 checksum of a buffer.
static void update_sample_stats_8(const uint8_t *src, int len, int64_t *sum, int64_t *sum2)
@ AV_FRAME_DATA_VIDEO_ENC_PARAMS
Encoding parameters for a video frame, as described by AVVideoEncParams.
enum AVFrameSideDataType type
Structure to hold side data for an AVFrame.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
unsigned MaxFALL
Max average light level per frame (cd/m^2).
@ AV_FRAME_DATA_REGIONS_OF_INTEREST
Regions Of Interest, the data is an array of AVRegionOfInterest type, the number of array element is ...
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
const char * av_stereo3d_type_name(unsigned int type)
Provide a human-readable name of a given stereo3d type.
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
static void dump_mastering_display(AVFilterContext *ctx, AVFrameSideData *sd)
AVRational qoffset
Quantisation offset.
This structure describes how to handle spherical videos, outlining information about projection,...
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
int32_t yaw
Rotation around the up vector [-180, 180].
double av_display_rotation_get(const int32_t matrix[9])
Extract the rotation component of the transformation matrix.
static void dump_video_enc_params(AVFilterContext *ctx, AVFrameSideData *sd)