/* Excerpts from libavcodec/frame_thread_encoder.c (elided code is marked). */

#include <stdatomic.h>

#define MAX_THREADS 64
#define BUFFER_SIZE (MAX_THREADS + 2)

#define OFF(member) offsetof(ThreadContext, member)
DEFINE_OFFSET_ARRAY(ThreadContext, thread_ctx, pthread_init_cnt,
                    (OFF(buffer_mutex), OFF(task_fifo_mutex), OFF(finished_task_mutex)),
                    (OFF(task_fifo_cond), OFF(finished_task_cond)));

/* worker thread: takes tasks out of the circular task FIFO and encodes them */
static void * attribute_align_arg worker(void *v){
    /* ... */
        int got_packet = 0, ret;
        /* ... */
        /* sleep until a task is queued or exit is requested */
        while (c->next_task_index == c->task_index || atomic_load(&c->exit)) {
            /* ... */
        }
        task_index         = c->next_task_index;
        c->next_task_index = (c->next_task_index + 1) % c->max_tasks;
        /* ... */
        task = &c->tasks[task_index];
        /* ... */
        if (ret >= 0 && ret2 < 0)
        /* ... */
}

/* ff_frame_thread_encoder_init(): warnings for codecs that interact badly
 * with frame-threaded encoding */
               "Forcing thread count to 1 for MJPEG encoding, use -thread_type slice "
               "or a constant quantizer if you want to use multiple cpu cores\n");
        /* ... */
               "MJPEG CBR encoding works badly with frame multi-threading, consider "
               "using -threads 1, -thread_type slice or a constant quantizer.\n");
        /* ... */
               "Forcing thread count to 1 for huffyuv encoding with first pass or context 1\n");
    /* ... */
    c->parent_avctx = avctx;
    /* ... */
    /* one encoder context per task slot */
    for (unsigned j = 0; j < c->max_tasks; j++) {
        /* ... */
        *thread_avctx = *avctx;
        /* ... */

/* ff_frame_thread_encoder_free(): release the per-task frames and packets */
    for (unsigned i = 0; i < c->max_tasks; i++) {
        /* ... */

/* ff_thread_video_encode_frame(): queue the new frame, then possibly return
 * an already finished packet */
    c->task_index = (c->task_index + 1) % c->max_tasks;
    /* ... */
    outtask = &c->tasks[c->finished_task_index];
    /* ... */
    if (c->task_index == c->finished_task_index ||
        /* condition partially elided in this excerpt */
        (c->task_index - c->finished_task_index + c->max_tasks) % c->max_tasks <= avctx->thread_count)) {
        /* ... */
    c->finished_task_index = (c->finished_task_index + 1) % c->max_tasks;
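The excerpt above boils down to a producer/consumer ring: ff_thread_video_encode_frame() advances task_index when it queues a frame, a worker advances next_task_index when it picks a task up, finished_task_index trails behind as results are collected, and all three wrap modulo max_tasks, so (task_index - finished_task_index + max_tasks) % max_tasks counts the tasks still in flight. The following standalone sketch (not FFmpeg code; the queue layout, the exit_requested flag and the single consumer are illustrative simplifications) shows the same wait/advance pattern with one producer and one worker:

/* Standalone demonstration (not FFmpeg code) of the wait/advance pattern.
 * Compile with: cc fifo_demo.c -pthread */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define MAX_TASKS 4   /* illustrative ring size */

static struct {
    pthread_mutex_t fifo_mutex;
    pthread_cond_t  fifo_cond;
    unsigned        task_index;       /* producer side: next slot to fill */
    unsigned        next_task_index;  /* consumer side: next slot to take */
    int             tasks[MAX_TASKS];
    atomic_int      exit_requested;   /* plays the role of c->exit        */
} q = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };

static void *consumer(void *arg)
{
    (void)arg;
    for (;;) {
        int task;

        pthread_mutex_lock(&q.fifo_mutex);
        while (q.next_task_index == q.task_index &&
               !atomic_load(&q.exit_requested))
            pthread_cond_wait(&q.fifo_cond, &q.fifo_mutex);
        if (q.next_task_index == q.task_index) {        /* empty and exiting */
            pthread_mutex_unlock(&q.fifo_mutex);
            return NULL;
        }
        task              = q.tasks[q.next_task_index];
        q.next_task_index = (q.next_task_index + 1) % MAX_TASKS;
        pthread_mutex_unlock(&q.fifo_mutex);

        printf("worker got task %d\n", task);           /* "encode" the task */
    }
}

int main(void)
{
    pthread_t thread;
    pthread_create(&thread, NULL, consumer, NULL);

    /* queue three tasks; a real submitter would also bound how many are
     * in flight, e.g. via (task_index - finished + MAX_TASKS) % MAX_TASKS */
    for (int i = 0; i < 3; i++) {
        pthread_mutex_lock(&q.fifo_mutex);
        q.tasks[q.task_index] = i;
        q.task_index = (q.task_index + 1) % MAX_TASKS;  /* wrap around */
        pthread_cond_signal(&q.fifo_cond);
        pthread_mutex_unlock(&q.fifo_mutex);
    }

    pthread_mutex_lock(&q.fifo_mutex);
    atomic_store(&q.exit_requested, 1);                 /* ask the worker to stop */
    pthread_cond_signal(&q.fifo_cond);
    pthread_mutex_unlock(&q.fifo_mutex);
    pthread_join(thread, NULL);
    return 0;
}

Unlike this sketch, which drains the ring before stopping, the c->exit check in the wait condition above lets the real worker shut down even while the FIFO is empty.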
av_cold int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext itself).
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
#define AV_LOG_WARNING
Something somehow does not look correct.
#define atomic_store(object, desired)
const AVClass * priv_class
AVClass for the private context.
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
AVFrame
This structure describes decoded (raw) audio or video data.
int capabilities
Codec capabilities.
void * frame_thread_encoder
pthread_mutex_t buffer_mutex
av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
const struct AVCodec * codec
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
AVCodecContext * parent_avctx
int flags
AV_CODEC_FLAG_*.
unsigned finished_task_index
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
av_cold int ff_frame_thread_encoder_init(AVCodecContext *avctx)
Initialize frame thread encoder.
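Taken together with ff_thread_video_encode_frame() and ff_frame_thread_encoder_free() listed elsewhere on this page, this declaration implies a simple lifecycle: create the worker pool once, submit one frame per call, tear the pool down at close time. A hedged sketch, assuming an in-tree build where the internal header frame_thread_encoder.h is available; demo_frame_threaded_flow and its single-function shape are illustrative, not the actual libavcodec call sites (those live in the generic encode path):

#include "avcodec.h"
#include "frame_thread_encoder.h"

static int demo_frame_threaded_flow(AVCodecContext *avctx,
                                    AVPacket *pkt, AVFrame *frame)
{
    int got_packet = 0, ret;

    ret = ff_frame_thread_encoder_init(avctx);   /* once, after the codec is opened */
    if (ret < 0)
        return ret;

    /* per input frame (NULL drains): queue it, maybe collect a finished packet */
    ret = ff_thread_video_encode_frame(avctx, pkt, frame, &got_packet);

    ff_frame_thread_encoder_free(avctx);         /* once, when the encoder is closed */
    return ret < 0 ? ret : got_packet;
}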
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int thread_type
Which multithreading methods to use.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
DEFINE_OFFSET_ARRAY(ThreadContext, thread_ctx, pthread_init_cnt,(OFF(buffer_mutex), OFF(task_fifo_mutex), OFF(finished_task_mutex)),(OFF(task_fifo_cond), OFF(finished_task_cond)))
#define atomic_load(object)
static av_always_inline int pthread_cond_broadcast(pthread_cond_t *cond)
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
int ff_thread_video_encode_frame(AVCodecContext *avctx, AVPacket *pkt, AVFrame *frame, int *got_packet_ptr)
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
pthread_mutex_t task_fifo_mutex
struct AVCodecInternal * internal
Private context used for internal data.
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
int av_opt_get_int(void *obj, const char *name, int search_flags, int64_t *out_val)
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
#define pthread_mutex_unlock(a)
int(* encode2)(struct AVCodecContext *avctx, struct AVPacket *avpkt, const struct AVFrame *frame, int *got_packet_ptr)
Encode data to an AVPacket.
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
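A minimal sketch of how the packet helpers listed here combine when a result is handed from one context to another: make the payload reference counted once, then transfer ownership with a cheap move instead of a copy. It assumes FFmpeg's public headers; demo_hand_over_packet is an illustrative name, not a library function.

#include <libavcodec/avcodec.h>

static int demo_hand_over_packet(AVPacket *dst, AVPacket *src)
{
    int ret = av_packet_make_refcounted(src);  /* ensure src owns refcounted data */
    if (ret < 0)
        return ret;
    av_packet_move_ref(dst, src);              /* dst takes ownership; src is reset */
    return 0;
}

The receiving side would typically obtain dst from av_packet_alloc() and release it later with av_packet_free().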
#define FF_THREAD_FRAME
Decode more than one frame at once.
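The public knobs above gate whether frame threading is considered at all: the codec must advertise AV_CODEC_CAP_FRAME_THREADS, the user must leave FF_THREAD_FRAME enabled in thread_type, and more than one thread must be requested. A rough sketch of that check, assuming only the public avcodec headers; it mirrors the idea, not the exact condition used inside libavcodec.

#include <libavcodec/avcodec.h>

static int demo_wants_frame_threads(const AVCodecContext *avctx)
{
    return avctx->thread_count != 1 &&
           (avctx->thread_type & FF_THREAD_FRAME) &&
           (avctx->codec->capabilities & AV_CODEC_CAP_FRAME_THREADS);
}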
pthread_cond_t finished_task_cond
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
static void *attribute_align_arg worker(void *v)
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
pthread_mutex_t finished_task_mutex
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
AVCodecContext
main external API structure.
int active_thread_type
Which multithreading methods are in use by the codec.
unsigned pthread_init_cnt
av_cold void ff_frame_thread_encoder_free(AVCodecContext *avctx)
av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
Initialize/destroy a list of mutexes/conditions contained in a structure.
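The DEFINE_OFFSET_ARRAY() invocation shown near the top of the file builds the offsets table these helpers consume. The following standalone sketch does not use the libavcodec helpers; it illustrates the underlying offsetof-based idea with a hypothetical sentinel-terminated table (DemoContext, demo_mutex_offsets and the (unsigned)-1 sentinel are my own conventions). The in-tree version additionally threads a counter (pthread_init_cnt) through the table so a partially failed init can be unwound.

#include <pthread.h>
#include <stddef.h>

typedef struct DemoContext {
    pthread_mutex_t buffer_mutex;
    pthread_mutex_t task_fifo_mutex;
    pthread_mutex_t finished_task_mutex;
} DemoContext;

#define DEMO_OFF(member) offsetof(DemoContext, member)

/* offsets of every mutex member, terminated by a sentinel */
static const unsigned demo_mutex_offsets[] = {
    DEMO_OFF(buffer_mutex),
    DEMO_OFF(task_fifo_mutex),
    DEMO_OFF(finished_task_mutex),
    (unsigned)-1,
};

static int demo_mutexes_init(void *obj, const unsigned offsets[])
{
    for (int i = 0; offsets[i] != (unsigned)-1; i++) {
        pthread_mutex_t *m = (pthread_mutex_t *)((char *)obj + offsets[i]);
        int ret = pthread_mutex_init(m, NULL);
        if (ret)
            return ret;  /* a fuller version would record how many succeeded */
    }
    return 0;
}

static void demo_mutexes_destroy(void *obj, const unsigned offsets[])
{
    for (int i = 0; offsets[i] != (unsigned)-1; i++)
        pthread_mutex_destroy((pthread_mutex_t *)((char *)obj + offsets[i]));
}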
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
int av_opt_copy(void *dst, const void *src)
Copy options from src object into dest object.
AVPacket
This structure stores compressed data.
#define atomic_init(obj, value)
pthread_cond_t task_fifo_cond
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
#define pthread_mutex_lock(a)