0%

ffmpeg_sourcecode

ffmpeg 源码简介

ffmpeg源代码中,有很多函数钩子挂载点,类似linux内核。因为ffmpeg也是用c语言写的,所以采用这种实现方式,有利于接入新类型,相当于泛型。
也使得代码结构更清晰更容易阅读。

ffmpeg源码结构图

ffmpeg.c分析:

为了了解ffmpeg的框架结构,先看看ffmpeg工具是如何调用ffmpeg中的接口的。

总体:

ffmpeg.c:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
int main(int argc, char **argv)
{
int i, ret;
BenchmarkTimeStamps ti;

init_dynload();//win下设置 dll动态库路径(SetDllDirectory("");),非关键逻辑。

register_exit(ffmpeg_cleanup);//注册或者说挂载退出函数 programe_exit = ffmpeg_cleanup

setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

av_log_set_flags(AV_LOG_SKIP_REPEATED);//设置log flag,skip repeated messages
parse_loglevel(argc, argv, options);//根据输入参数调整log等级

if(argc>1 && !strcmp(argv[1], "-d")){//是否有-d,则作为后台程序。
run_as_daemon=1;
av_log_set_callback(log_callback_null);
argc--;
argv++;
}
//注册组件
#if CONFIG_AVDEVICE
avdevice_register_all();
#endif
avformat_network_init();//win32下需要

show_banner(argc, argv, options);//显示打印参数。

/* parse options and open all input/output files */
ret = ffmpeg_parse_options(argc, argv);//解析输入参数,包括input,output,--关键函数1
if (ret < 0)
exit_program(1);

if (nb_output_files <= 0 && nb_input_files == 0) {
show_usage();
av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
exit_program(1);
}

/* file converter / grab */
if (nb_output_files <= 0) {
av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
exit_program(1);
}

for (i = 0; i < nb_output_files; i++) {
if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
want_sdp = 0;
}

current_time = ti = get_benchmark_time_stamps();
if (transcode() < 0)//读取接收的媒体数据,进行处理: 主循环,也就这个循环了。--关键函数2
exit_program(1);
if (do_benchmark) {
int64_t utime, stime, rtime;
current_time = get_benchmark_time_stamps();
utime = current_time.user_usec - ti.user_usec;
stime = current_time.sys_usec - ti.sys_usec;
rtime = current_time.real_usec - ti.real_usec;
av_log(NULL, AV_LOG_INFO,
"bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
}
av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
decode_error_stat[0], decode_error_stat[1]);
if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
exit_program(69);

exit_program(received_nb_signals ? 255 : main_return_code);
return main_return_code;
}

解析参数,初始化输入输出

ffmpeg_parse_options(argc, argv);//解析输入参数,包括input,output,--关键函数1
分析:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
int ffmpeg_parse_options(int argc, char **argv)
{
OptionParseContext octx;
uint8_t error[128];
int ret;

memset(&octx, 0, sizeof(octx));

/* split the commandline into an internal representation */
ret = split_commandline(&octx, argc, argv, options, groups,
FF_ARRAY_ELEMS(groups));//解析参数和初始化octx
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Error splitting the argument list: ");
goto fail;
}

/* apply global options */
ret = parse_optgroup(NULL, &octx.global_opts);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Error parsing global options: ");
goto fail;
}

/* configure terminal and setup signal handlers */
term_init();

/* open input files */ //关键函数1_1 初始化输入:
ret = open_files(&octx.groups[GROUP_INFILE], "input", open_input_file);
/*{
ffmpeg_opt.c static int open_input_file(OptionsContext *o, const char *filename)
err = avformat_open_input(&ic, filename, file_iformat, &o->g->format_opts);
init_input(s, filename, &tmp))
if ((ret = s->io_open(s, &s->pb, filename, AVIO_FLAG_READ | s->avio_flags, options)) < 0)
}
*/
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Error opening input files: ");
goto fail;
}

/* create the complex filtergraphs */
ret = init_complex_filters();
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Error initializing complex filters.\n");
goto fail;
}

/* open output files */ //关键函数1_2 初始化输出
ret = open_files(&octx.groups[GROUP_OUTFILE], "output", open_output_file);
/*{
ffmpeg_opt.c int open_output_file(OptionsContext *o, const char *filename)
err = avformat_alloc_output_context2(&oc, NULL, o->format, filename);
codec = avcodec_find_encoder_by_name(codec_name);
c = avcodec_alloc_context3(codec);
}
*/
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Error opening output files: ");
goto fail;
}

check_filter_outputs();

fail:
uninit_parse_context(&octx);
if (ret < 0) {
av_strerror(ret, error, sizeof(error));
av_log(NULL, AV_LOG_FATAL, "%s\n", error);
}
return ret;
}
初始化输入文件:
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
static int open_input_file(OptionsContext *o, const char *filename)
1) 分配AVFormatContext结构;
ic = avformat_alloc_context();
avformat_get_context_defaults(ic);
static void avformat_get_context_defaults(AVFormatContext *s)
{
memset(s, 0, sizeof(AVFormatContext));

s->av_class = &av_format_context_class;

s->io_open = io_open_default;//挂载默认io开启函数,可能是从文件读取或者是url
s->io_close = io_close_default;//默认io关闭函数

av_opt_set_defaults(s);
}
2) /* open the input file with generic avformat function */
//初始化读取io, avformat容器格式相关结构,并分配和初始化 AVFormatContext
err = avformat_open_input(&ic, filename, file_iformat, &o->g->format_opts);
init_input(s, filename, &tmp))
s->io_open(s, &s->pb, filename, AVIO_FLAG_READ | s->avio_flags, options))
io_open_default
av_probe_input_buffer2
/* Guess file format. */
*fmt = av_probe_input_format2(&pd, 1, &score);

3) avformat_find_stream_info //初始化AVFormatContext中的AVStream **stream,创建stream
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {//解码器相关。 codec = find_probe_decoder(ic, st, st->codecpar->codec_id);去找解码器;
关于如何创建nb_streams:
是被赋值初始化:
{
avformat_find_stream_info
-> read_frame_internal(AVFormatContext *s, AVPacket *pkt)
-> ff_read_packet(s, pkt);
-> err = s->iformat->read_packet(s, pkt);中

//测试时是读取flv文件,所以这里走到:flv_read_packet//因为要先解出头
if (i == s->nb_streams) {
static const enum AVMediaType stream_types[] = {AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_SUBTITLE, AVMEDIA_TYPE_DATA};
st = create_stream(s, stream_types[stream_type]);//这里面创建stream,并为nb_stream累加、
if (!st)
return AVERROR(ENOMEM);
}
try_decode_frame(ic, st, pkt,。。) //头的解码?
}
4) 循环每个输入流:
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
AVStream *stream = ifmt_ctx->streams[i];
avcodec_find_decoder ////根据id寻找解码器;返回对应的实例;
codec_ctx = avcodec_alloc_context3(dec); ////分配一个AVCodecContext;
ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);//填充参数
/* Open decoder */
ret = avcodec_open2(codec_ctx, dec, NULL); //调用AVCodec的init函数进行初始化,注意此时avcodec成员已被赋值的

-----------------------------------------------
{ trace io_open_default函数:
options.c:io_open_default
aviobuf.c: ffio_open_whitelist(pb, url, flags, &s->interrupt_callback, options, s->protocol_whitelist, s->protocol_blacklist);
err = ffurl_open_whitelist(&h, filename, flags, int_cb, options, whitelist, blacklist, NULL);
int ret = ffurl_alloc(puc, filename, flags, int_cb);//根据filename在url_protocol列表中找对应的结构
ret = ffurl_connect(*puc, options);//调用该结构的 初始化函数。
err = ffio_fdopen(s, h);
*s = avio_alloc_context(...//分配AVIOContext
}

初始化输出:
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
//输出:
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename); //初始化和分配输出相关context,即初始化AVFormatContext中的oformat


//编码:
/* find the mpeg1video encoder */ //可以通过文件名或编码名;
codec = avcodec_find_encoder_by_name(codec_name);

//分配编码上下文:
c = avcodec_alloc_context3(codec);


//编码: /* encode the image */
encode(c, frame, pkt, f); //传入AVFrame和要填充的pkt
ret = avcodec_send_frame(enc_ctx, frame);
while (ret >= 0) {
ret = avcodec_receive_packet(enc_ctx, pkt);



分析主循环:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
if (transcode() < 0)//读取接收的媒体数据,进行处理: 主循环,也就这个循环了。--关键函数2
分析:

读取接收的媒体数据,进行处理: 主循环,也就这个循环了。
transcode
transcode_init
transcode_step(void)
process_input
get_input_packet(ifile, &pkt);
av_read_frame//一次只读一个包,所以需要循环读;
read_frame_internal
ff_read_packet(s, pkt);
{
for (;;) {
PacketList *pktl = s->internal->raw_packet_buffer;
const AVPacket *pkt1;
err = s->iformat->read_packet(s, pkt);
//调用原始的ffurl_read读,所以应该是边读边处理;
//从format中读取数据,会调用其 read_packet函数,
//这个函数在初始化时被赋值为ffurl_read:
//ffio_fdopen中做处理aviobuf.c 填充流的解码相关结构。
}

for (;;) {
PacketList *pktl = s->internal->packet_buffer; //
process_input_packet(ist, NULL, 1);
flush_encoders
term_exit();

分析输入url的结构框架构建:

对应结构:AVIOContext *s, 重要数据结构:
以rtmp输入为例:
可以用ffmpeg做推拉流,但是主要是client端,传入的filename携带 “rtmp” 关键字;
从上面的分析可以看到:
open_input_file的时候:会调用:io_open_default
接着去找一个URLProtocol结构的实例,然后挂载,那么具体是什么样的?
ffmpeg在编译后会生成一个 "libavformat/protocol_list.c" 文件,其中包含一个URLProtocol类型
的数组:
如:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
static const URLProtocol * const url_protocols[] = {
&ff_async_protocol,
&ff_cache_protocol,
&ff_concat_protocol,
&ff_concatf_protocol,
&ff_crypto_protocol,
&ff_data_protocol,
&ff_ffrtmphttp_protocol,
&ff_file_protocol,
&ff_ftp_protocol,
&ff_gopher_protocol,
&ff_hls_protocol,
&ff_http_protocol,
&ff_httpproxy_protocol,
&ff_icecast_protocol,
&ff_mmsh_protocol,
&ff_mmst_protocol,
&ff_md5_protocol,
&ff_pipe_protocol,
&ff_prompeg_protocol,
&ff_rtmp_protocol,
&ff_rtmpt_protocol,
&ff_rtp_protocol,
&ff_srtp_protocol,
&ff_subfile_protocol,
&ff_tee_protocol,
&ff_tcp_protocol,
&ff_udp_protocol,
&ff_udplite_protocol,
&ff_unix_protocol,
NULL };

看下ff_rtmp_protocol:如何被定义:rtmpproto.c:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
#define RTMP_PROTOCOL_0(flavor)
#define RTMP_PROTOCOL_1(flavor) \
static const AVClass flavor##_class = { \
.class_name = #flavor, \
.item_name = av_default_item_name, \
.option = rtmp_options, \
.version = LIBAVUTIL_VERSION_INT, \
}; \
\
const URLProtocol ff_##flavor##_protocol = { \
.name = #flavor, \
.url_open2 = rtmp_open, \
.url_read = rtmp_read, \
.url_read_seek = rtmp_seek, \
.url_read_pause = rtmp_pause, \
.url_write = rtmp_write, \
.url_close = rtmp_close, \
.priv_data_size = sizeof(RTMPContext), \
.flags = URL_PROTOCOL_FLAG_NETWORK, \
.priv_data_class= &flavor##_class, \
};
#define RTMP_PROTOCOL_2(flavor, enabled) \
RTMP_PROTOCOL_ ## enabled(flavor)
#define RTMP_PROTOCOL_3(flavor, config) \
RTMP_PROTOCOL_2(flavor, config)
#define RTMP_PROTOCOL(flavor, uppercase) \
RTMP_PROTOCOL_3(flavor, CONFIG_ ## uppercase ## _PROTOCOL)

RTMP_PROTOCOL(rtmp, RTMP)
RTMP_PROTOCOL(rtmpe, RTMPE)
RTMP_PROTOCOL(rtmps, RTMPS)
RTMP_PROTOCOL(rtmpt, RTMPT)
RTMP_PROTOCOL(rtmpte, RTMPTE)
RTMP_PROTOCOL(rtmpts, RTMPTS)

在protocols.c 支持各种网络协议: rtmp,rtp等等
extern const URLProtocol ff_async_protocol;
extern const URLProtocol ff_bluray_protocol;
extern const URLProtocol ff_cache_protocol;

跟踪如何找到URLProtocol 实例:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
io_open_default->
ffio_open_whitelist->
ffurl_open_whitelist->
ffurl_alloc(puc, filename, flags, int_cb);->
p = url_find_protocol(filename);//从url_protocol数组中返回一组protocol后,匹配.name和filename挑选一个
url_alloc_for_protocol //填充

ffurl_connect
uc->prot->url_open2 ? uc->prot->url_open2(...)
uc->prot->url_open(...)
->int rtmp_open(URLContext *s,
//把s->stream->prot 设置为tcp_protocol 的,这样在发送和接收时都是用的tcp_write和tcp_send
{
else {
/* open the tcp connection */
if (port < 0)
port = RTMP_DEFAULT_PORT;
if (rt->listen)
ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port,
"?listen&listen_timeout=%d&tcp_nodelay=%d",
rt->listen_timeout * 1000, rt->tcp_nodelay);
else
ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, "?tcp_nodelay=%d", rt->tcp_nodelay);
}
具体连接的图,见上面;

实际运行bt:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
推流到url 或者说输出的场景:
Breakpoint 4, tcp_write (h=0x55555715fc80, buf=0x55555713d0c0 "\002", size=387) at libavformat/tcp.c:253
253 static int tcp_write(URLContext *h, const uint8_t *buf, int size)
(gdb) bt
#0 tcp_write (h=0x55555715fc80, buf=0x55555713d0c0 "\002", size=387) at libavformat/tcp.c:253
#1 tcp_write (h=h@entry=0x55555715fc80, buf=buf@entry=0x55555713d0c0 "\002", size=size@entry=387) at libavformat/tcp.c:253
#2 0x0000555555954baa in retry_transfer_wrapper (transfer_func=0x555555a77f30 <tcp_write>, size_min=387, size=387, buf=0x55555713d0c0 "\002", h=0x55555715fc80) at libavformat/avio.c:370
#3 ffurl_write (h=0x55555715fc80, buf=0x55555713d0c0 "\002", size=387) at libavformat/avio.c:423
#4 0x0000555555ac2bf0 in ff_rtmp_packet_write (h=0x55555715fc80, pkt=0x5555570e13b8, chunk_size=60000, prev_pkt_ptr=<optimized out>, nb_prev_pkt=<optimized out>) at libavformat/rtmppkt.c:388
#5 0x0000555555a4af67 in rtmp_send_packet (track=0, pkt=0x5555570e13b8, rt=0x5555570e1340) at libavformat/rtmpproto.c:3050
#6 rtmp_write (s=s@entry=0x555557163c40, buf=buf@entry=0x5555570cc100 "FLV\001\005", size=size@entry=479) at libavformat/rtmpproto.c:3050
#7 0x0000555555954baa in retry_transfer_wrapper (transfer_func=0x555555a4aca0 <rtmp_write>, size_min=479, size=479, buf=0x5555570cc100 "FLV\001\005", h=0x555557163c40) at libavformat/avio.c:370
#8 ffurl_write (h=0x555557163c40, buf=0x5555570cc100 "FLV\001\005", size=479) at libavformat/avio.c:423
#9 0x00005555559550e7 in writeout (s=s@entry=0x55555713d380, data=<optimized out>, len=479) at libavformat/aviobuf.c:163
#10 0x0000555555955e73 in flush_buffer (s=0x55555713d380) at libavformat/aviobuf.c:184
#11 avio_flush (s=0x55555713d380) at libavformat/aviobuf.c:241
#12 0x00005555559569a0 in avio_write_marker (s=<optimized out>, time=<optimized out>, type=<optimized out>) at libavformat/aviobuf.c:480
#13 0x0000555555a086a8 in flush_if_needed (s=0x555557178c00) at libavformat/mux.c:437
#14 avformat_write_header (s=0x555557178c00, options=<optimized out>) at libavformat/mux.c:487
#15 0x00005555556f5783 in check_init_output_file (of=0x5555571944c0, file_index=0) at fftools/ffmpeg.c:3049
#16 0x00005555556f698d in init_output_stream (ost=<optimized out>, frame=<optimized out>, error=<optimized out>, error_len=1024) at fftools/ffmpeg.c:3724
#17 0x00005555556f8b5d in init_output_stream_wrapper (fatal=0, frame=0x0, ost=0x555557163080) at fftools/ffmpeg.c:992
#18 transcode_init () at fftools/ffmpeg.c:3802
#19 0x00005555556fd46e in transcode () at fftools/ffmpeg.c:4801
#20 0x00005555556d6e6b in main (argc=10, argv=0x7fffffffe108) at fftools/ffmpeg.c:5035

分析读取文件解析容器格式框架:

对应:avformat_open_input ,对应上面调用init_input下子过程
读取文件内容解析成flv或其他容器格式: AVPacket的过程:
对应结构:AVInputFormat
相关结构:猜测容器格式?通过扩展名。 allformats.c 中罗列了所有的扩展名下的容器格式处理结构;

1
2
3
4
5
extern const AVOutputFormat ff_flac_muxer;
extern const AVInputFormat ff_flic_demuxer;
extern const AVInputFormat ff_flv_demuxer;
extern const AVOutputFormat ff_flv_muxer;
。。。。

demuxer_list结构,在编译后生成:libavformat/demuxer_list.c:

1
2
3
4
5
6
7
8
9
10
static const AVInputFormat * const demuxer_list[] = {
&ff_aa_demuxer,
&ff_aac_demuxer,
&ff_aax_demuxer,
&ff_ac3_demuxer,
&ff_ace_demuxer,
...
&ff_flic_demuxer,
&ff_flv_demuxer,
...

相关逻辑深入:

1
2
3
4
5
6
7
8
9
10
11
const AVInputFormat *av_probe_input_format2(const AVProbeData *pd,
int is_opened, int *score_max) //用来匹配扩展名找到对应的处理结构 AVInputFormat,这样在后面调用 av_probe_input_buffer2 中的avio_read 时可以对应到具体格式的函数,填充好格式。
{
int score_ret;
const AVInputFormat *fmt = av_probe_input_format3(pd, is_opened, &score_ret);
if (score_ret > *score_max) {
*score_max = score_ret;
return fmt;
} else
return NULL;
}

寻找解码器的过程:

对应open_input_file的子过程:avformat_find_stream_info
关键结构:AVCodec
allcodecs.c

1
2
3
4
5
extern const AVCodec ff_h264_amf_encoder;
extern const AVCodec ff_h264_cuvid_decoder;
extern const AVCodec ff_aac_mf_encoder;
extern const AVCodec ff_ac3_mf_encoder;
...一堆解码器编码器

codec_list数组,libavcodec/codec_list.c

1
2
3
4
5
6
7
8
static const AVCodec * const codec_list[] = {
&ff_a64multi_encoder,
&ff_a64multi5_encoder,
&ff_alias_pix_encoder,
&ff_amv_encoder,
...
&ff_h264_decoder,
&ff_h264_v4l2m2m_decoder

关键逻辑:

1
2
3
4
codec = find_probe_decoder(ic, st, st->codecpar->codec_id);
codec = find_decoder(s, st, codec_id);
avcodec_find_decoder(codec_id);
从而在codec_list中找到合适的解码器,返回。

一些结构解释:

AVStream 是流的结构,一个流对应一个实例。这个结构中有AVStreamInternal,其中有AVCodecContext,
即含有编码解码相关结构;

一个场景例子:

输入一个url rtmp/其他的网上的流, 转码,转封装后,推流到网上。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
-re -i source.flv  -c copy -f flv -y  rtmp://localhost:1936/live/livestream

-re (input)
Read input at native frame rate. This is equivalent to setting -readrate 1.
-i input
-c copy
copy all stream
-f fmt (input/output)
Force input or output file format. The format is normally auto detected for input files and guessed from the file extension for output files, so this option is not needed in most cases.

-i url (input)
input file url

-y (global)
Overwrite output files without asking.

关于封装,更多:

封装和头: mux.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
int avformat_write_header(AVFormatContext *s, AVDictionary **options)
{
int ret = 0;
int already_initialized = s->internal->initialized;
int streams_already_initialized = s->internal->streams_initialized;

if (!already_initialized)
if ((ret = avformat_init_output(s, options)) < 0)
return ret;

if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_HEADER);
if (s->oformat->write_header) {
ret = s->oformat->write_header(s); //找到对应的实例然后调用对应函数
if (ret >= 0 && s->pb && s->pb->error < 0)
ret = s->pb->error;
if (ret < 0)
goto fail;
flush_if_needed(s);
}
if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_UNKNOWN);

if (!s->internal->streams_initialized) {
if ((ret = init_pts(s)) < 0)
goto fail;
}

return streams_already_initialized;

fail:
deinit_muxer(s);
return ret;
}

音频格式中的头怎么封装的? 其实是 AVFormatContext 中的 AVOutputFormat (oformat) 成员,
在识别文件名或输出的格式后,去找对应的AVOutputFormat ,预定义的:
如: adtsenc.c:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
const AVOutputFormat ff_adts_muxer = {
.name = "adts",
.long_name = NULL_IF_CONFIG_SMALL("ADTS AAC (Advanced Audio Coding)"),
.mime_type = "audio/aac",
.extensions = "aac,adts",
.priv_data_size = sizeof(ADTSContext),
.audio_codec = AV_CODEC_ID_AAC,
.video_codec = AV_CODEC_ID_NONE,
.init = adts_init,
.write_header = adts_write_header,
.write_packet = adts_write_packet,
.write_trailer = adts_write_trailer,
.priv_class = &adts_muxer_class,
.flags = AVFMT_NOTIMESTAMPS,
};
adts_write_packet-->
static int adts_write_frame_header(ADTSContext *ctx,
uint8_t *buf, int size, int pce_size)
{
PutBitContext pb;

unsigned full_frame_size = (unsigned)ADTS_HEADER_SIZE + size + pce_size;
if (full_frame_size > ADTS_MAX_FRAME_BYTES) {
av_log(NULL, AV_LOG_ERROR, "ADTS frame size too large: %u (max %d)\n",
full_frame_size, ADTS_MAX_FRAME_BYTES);
return AVERROR_INVALIDDATA;
}

init_put_bits(&pb, buf, ADTS_HEADER_SIZE);

/* adts_fixed_header */
put_bits(&pb, 12, 0xfff); /* syncword */
put_bits(&pb, 1, ctx->mpeg_id); /* ID */
put_bits(&pb, 2, 0); /* layer */
put_bits(&pb, 1, 1); /* protection_absent */
put_bits(&pb, 2, ctx->objecttype); /* profile_objecttype */
put_bits(&pb, 4, ctx->sample_rate_index);
put_bits(&pb, 1, 0); /* private_bit */
put_bits(&pb, 3, ctx->channel_conf); /* channel_configuration */
put_bits(&pb, 1, 0); /* original_copy */
put_bits(&pb, 1, 0); /* home */

/* adts_variable_header */
put_bits(&pb, 1, 0); /* copyright_identification_bit */
put_bits(&pb, 1, 0); /* copyright_identification_start */
put_bits(&pb, 13, full_frame_size); /* aac_frame_length */
put_bits(&pb, 11, 0x7ff); /* adts_buffer_fullness */
put_bits(&pb, 2, 0); /* number_of_raw_data_blocks_in_frame */

flush_put_bits(&pb);

return 0;
}

更多格式:
allformats.c:
还有类似flac等,都可以在这里查到解封装和封装,主要是头,和编解码无关。


编解码更多:

MPEG2编码器对应的AVCodec结构体ff_mpeg2video_encoder:

1
2
3
init() -> encode_init()
encode2() -> ff_mpv_encode_picture()
close() -> ff_mpv_encode_end()

mpeg12enc.c:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
const AVCodec ff_mpeg1video_encoder = {
.name = "mpeg1video",
.long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_MPEG1VIDEO,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
.encode2 = ff_mpv_encode_picture,
.close = ff_mpv_encode_end,
.supported_framerates = ff_mpeg12_frame_rate_tab + 1,
.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE },
.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.priv_class = &mpeg1_class,
};

const AVCodec ff_mpeg2video_encoder = {
.name = "mpeg2video",
.long_name = NULL_IF_CONFIG_SMALL("MPEG-2 video"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_MPEG2VIDEO,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
.encode2 = ff_mpv_encode_picture,
.close = ff_mpv_encode_end,
.supported_framerates = ff_mpeg2_frame_rate_tab,
.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV422P,
AV_PIX_FMT_NONE },
.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.priv_class = &mpeg2_class,
};

more: 见源码和官网