After prepareAsync() is called, IjkMediaPlayer starts loading and decoding data; rendering begins once start() is called.
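
As a quick orientation, the native call chain behind those two Java calls looks roughly like this (a sketch based on this walkthrough; argument lists are abbreviated):

/*
 * Rough native call chain (sketch; argument lists abbreviated):
 *
 * IjkMediaPlayer.prepareAsync()              // Java
 *   -> ijkmp_prepare_async() -> ffp_prepare_async_l()
 *     -> stream_open()                       // creates the ff_vout / ff_read threads below
 *
 * IjkMediaPlayer.start()                     // Java
 *   -> ijkmp_start() -> ffp_start_l()        // un-pauses; ff_vout starts rendering
 */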

stream_open

static VideoState *stream_open(FFPlayer *ffp, const char *filename, AVInputFormat *iformat)
{
    // ..........
    /* start video display */
    if (frame_queue_init(&is->pictq, &is->videoq, ffp->pictq_size, 1) < 0)
        goto fail;
    if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
        goto fail;
    if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
        goto fail;

    if (packet_queue_init(&is->videoq) < 0 ||
        packet_queue_init(&is->audioq) < 0 ||
        packet_queue_init(&is->subtitleq) < 0)
        goto fail;
    // ..........
    is->video_refresh_tid = SDL_CreateThreadEx(&is->_video_refresh_tid, video_refresh_thread, ffp, "ff_vout");
    if (!is->video_refresh_tid) {
        av_freep(&ffp->is);
        return NULL;
    }

    is->initialized_decoder = 0;
    is->read_tid = SDL_CreateThreadEx(&is->_read_tid, read_thread, ffp, "ff_read");
    if (!is->read_tid) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
        goto fail;
    }
}
  • frame_queue_init() initializes a frame queue; the first argument is the decoded-frame queue (FrameQueue) and the second is the pre-decode packet queue (PacketQueue) it is paired with — see the sketch after this list
  • packet_queue_init() creates the mutex (and condition variable) for each PacketQueue
  • Two threads are created:
    • ff_vout
    • ff_read
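
For reference, frame_queue_init() (essentially the same in ffplay and ijkplayer; slightly simplified here) pre-allocates the AVFrame slots, creates the lock/condition pair, and remembers which PacketQueue it is paired with:

static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
{
    int i;
    memset(f, 0, sizeof(FrameQueue));
    if (!(f->mutex = SDL_CreateMutex()))
        return AVERROR(ENOMEM);
    if (!(f->cond = SDL_CreateCond()))
        return AVERROR(ENOMEM);
    f->pktq      = pktq;                        // the paired PacketQueue
    f->max_size  = FFMIN(max_size, FRAME_QUEUE_SIZE);
    f->keep_last = !!keep_last;
    for (i = 0; i < f->max_size; i++)           // pre-allocate the decoded-frame slots
        if (!(f->queue[i].frame = av_frame_alloc()))
            return AVERROR(ENOMEM);
    return 0;
}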

Among them, ff_read is the read thread, which runs the read_thread() function.

read_thread

/* this thread gets the stream from the disk or the network */
static int read_thread(void *arg)
{
    // ....
    ic = avformat_alloc_context();
    // ....
    err = avformat_open_input(&ic, is->filename, is->iformat, &ffp->format_opts);
    // ....
    err = avformat_find_stream_info(ic, opts);
    // ....
    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(ffp, st_index[AVMEDIA_TYPE_AUDIO]);
    } else {
        ffp->av_sync_type = AV_SYNC_VIDEO_MASTER;
        is->av_sync_type  = ffp->av_sync_type;
    }

    ret = -1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret = stream_component_open(ffp, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    if (is->show_mode == SHOW_MODE_NONE)
        is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(ffp, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }
    // ....
    for (;;) {
        ret = av_read_frame(ic, pkt);
        // ...
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
                   && !(is->video_st && (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_packet_unref(pkt);
        }
        // ....
    }
}

The overall flow of read_thread() is similar to ffplay: packets are read inside the for(;;) loop and pushed into the matching queue via packet_queue_put(); meanwhile, stream_component_open() spawns separate threads to do the decoding.
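
For context, packet_queue_put() is just a lock-protected append, shown here roughly in its ffplay form (ijkplayer's version additionally recycles list nodes):

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    int ret;

    SDL_LockMutex(q->mutex);
    ret = packet_queue_put_private(q, pkt);  // append a node, update nb_packets/size/duration,
                                             // then SDL_CondSignal() to wake a waiting decoder
    SDL_UnlockMutex(q->mutex);

    if (pkt != &flush_pkt && ret < 0)
        av_packet_unref(pkt);

    return ret;
}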

av_read_frame

For details on av_read_frame() itself, refer to ijkplayer框架简析 — av_read_frame.

stream_component_open

/* open a given stream. Return 0 if OK */
static int stream_component_open(FFPlayer *ffp, int stream_index)
{
    // ...
    codec = avcodec_find_decoder(avctx->codec_id);

    switch (avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; forced_codec_name = ffp->audio_codec_name; break;
        case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = ffp->subtitle_codec_name; break;
        case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; forced_codec_name = ffp->video_codec_name; break;
        default: break;
    }
    if (forced_codec_name)
        codec = avcodec_find_decoder_by_name(forced_codec_name);
    // ...
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* prepare audio output */
        if ((ret = audio_open(ffp, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
            goto fail;
        // ...
        decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
        // ...
        if ((ret = decoder_start(&is->auddec, audio_thread, ffp, "ff_audio_dec")) < 0)
            goto out;
        SDL_AoutPauseAudio(ffp->aout, 0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        if (ffp->async_init_decoder) {
            while (!is->initialized_decoder) {
                SDL_Delay(5);
            }
            if (ffp->node_vdec) {
                is->viddec.avctx = avctx;
                ret = ffpipeline_config_video_decoder(ffp->pipeline, ffp);
            }
            if (ret || !ffp->node_vdec) {
                decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
                ffp->node_vdec = ffpipeline_open_video_decoder(ffp->pipeline, ffp);
                if (!ffp->node_vdec)
                    goto fail;
            }
        } else {
            decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
            ffp->node_vdec = ffpipeline_open_video_decoder(ffp->pipeline, ffp);
            if (!ffp->node_vdec)
                goto fail;
        }
        if ((ret = decoder_start(&is->viddec, video_thread, ffp, "ff_video_dec")) < 0)
            goto out;
        // ...
}

It first finds a decoder via avcodec_find_decoder() (or avcodec_find_decoder_by_name() if a codec name is forced), then branches on whether the stream is audio or video to set up decoding.
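
decoder_init(), called in both branches above, simply binds the codec context, the packet queue, and the read thread's wake-up condition to the Decoder struct (same shape as in ffplay):

static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
{
    memset(d, 0, sizeof(Decoder));
    d->avctx = avctx;                        // codec context to decode with
    d->queue = queue;                        // PacketQueue to pull packets from
    d->empty_queue_cond = empty_queue_cond;  // signaled to wake read_thread when the queue runs dry
    d->start_pts = AV_NOPTS_VALUE;
}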

AVMEDIA_TYPE_AUDIO

audio_open

For audio, the audio_open() function is called first:

static int audio_open(FFPlayer *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
{
    FFPlayer *ffp = opaque;
    VideoState *is = ffp->is;
    SDL_AudioSpec wanted_spec, spec;
    const char *env;
    static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
#ifdef FFP_MERGE
    static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
#endif
    static const int next_sample_rates[] = {0, 44100, 48000};
    int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;

    env = SDL_getenv("SDL_AUDIO_CHANNELS");
    if (env) {
        wanted_nb_channels = atoi(env);
        wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
    }
    if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
        wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
        wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
    }
    wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
    wanted_spec.channels = wanted_nb_channels;
    wanted_spec.freq = wanted_sample_rate;
    if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
        av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
        return -1;
    }
    while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
        next_sample_rate_idx--;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.silence = 0;
    wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AoutGetAudioPerSecondCallBacks(ffp->aout)));
    wanted_spec.callback = sdl_audio_callback;
    wanted_spec.userdata = opaque;
    while (SDL_AoutOpenAudio(ffp->aout, &wanted_spec, &spec) < 0) {
        /* avoid infinity loop on exit. --by bbcallen */
        if (is->abort_request)
            return -1;
        av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
               wanted_spec.channels, wanted_spec.freq, SDL_GetError());
        wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
        if (!wanted_spec.channels) {
            wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
            wanted_spec.channels = wanted_nb_channels;
            if (!wanted_spec.freq) {
                av_log(NULL, AV_LOG_ERROR,
                       "No more combinations to try, audio open failed\n");
                return -1;
            }
        }
        wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
    }
    if (spec.format != AUDIO_S16SYS) {
        av_log(NULL, AV_LOG_ERROR,
               "SDL advised audio format %d is not supported!\n", spec.format);
        return -1;
    }
    if (spec.channels != wanted_spec.channels) {
        wanted_channel_layout = av_get_default_channel_layout(spec.channels);
        if (!wanted_channel_layout) {
            av_log(NULL, AV_LOG_ERROR,
                   "SDL advised channel count %d is not supported!\n", spec.channels);
            return -1;
        }
    }

    audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
    audio_hw_params->freq = spec.freq;
    audio_hw_params->channel_layout = wanted_channel_layout;
    audio_hw_params->channels = spec.channels;
    audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
    audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
    if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
        av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
        return -1;
    }

    SDL_AoutSetDefaultLatencySeconds(ffp->aout, ((double)(2 * spec.size)) / audio_hw_params->bytes_per_sec);
    return spec.size;
}

audio_open() negotiates the output format with the device: if opening fails, it retries with fewer channels (the next_nb_channels table) and then falls back through the sample rates in next_sample_rates. The actual open is delegated to SDL_AoutOpenAudio():

int SDL_AoutOpenAudio(SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained)
{
    if (aout && desired && aout->open_audio)
        return aout->open_audio(aout, desired, obtained);

    return -1;
}

On Android, aout->open_audio points to aout_open_audio(); the function pointers are wired up when the SDL_Aout is created in SDL_AoutAndroid_CreateForAudioTrack():

SDL_Aout *SDL_AoutAndroid_CreateForAudioTrack()
{
    SDL_Aout *aout = SDL_Aout_CreateInternal(sizeof(SDL_Aout_Opaque));
    if (!aout)
        return NULL;

    SDL_Aout_Opaque *opaque = aout->opaque;
    opaque->wakeup_cond  = SDL_CreateCond();
    opaque->wakeup_mutex = SDL_CreateMutex();
    opaque->speed        = 1.0f;

    aout->opaque_class = &g_audiotrack_class;
    aout->free_l       = aout_free_l;
    aout->open_audio   = aout_open_audio;
    aout->pause_audio  = aout_pause_audio;
    aout->flush_audio  = aout_flush_audio;
    aout->set_volume   = aout_set_volume;
    aout->close_audio  = aout_close_audio;
    aout->func_get_audio_session_id = aout_get_audio_session_id;
    aout->func_set_playback_rate    = func_set_playback_rate;

    return aout;
}

Let's look at what aout_open_audio() does:

static int aout_open_audio(SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained)
{
    // SDL_Aout_Opaque *opaque = aout->opaque;
    JNIEnv *env = NULL;
    if (JNI_OK != SDL_JNI_SetupThreadEnv(&env)) {
        ALOGE("aout_open_audio: AttachCurrentThread: failed");
        return -1;
    }

    return aout_open_audio_n(env, aout, desired, obtained);
}

static int aout_open_audio_n(JNIEnv *env, SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained)
{
    assert(desired);
    SDL_Aout_Opaque *opaque = aout->opaque;

    opaque->spec = *desired;
    opaque->atrack = SDL_Android_AudioTrack_new_from_sdl_spec(env, desired);
    if (!opaque->atrack) {
        ALOGE("aout_open_audio_n: failed to new AudioTrcak()");
        return -1;
    }

    opaque->buffer_size = SDL_Android_AudioTrack_get_min_buffer_size(opaque->atrack);
    if (opaque->buffer_size <= 0) {
        ALOGE("aout_open_audio_n: failed to getMinBufferSize()");
        SDL_Android_AudioTrack_free(env, opaque->atrack);
        opaque->atrack = NULL;
        return -1;
    }

    opaque->buffer = malloc(opaque->buffer_size);
    if (!opaque->buffer) {
        ALOGE("aout_open_audio_n: failed to allocate buffer");
        SDL_Android_AudioTrack_free(env, opaque->atrack);
        opaque->atrack = NULL;
        return -1;
    }

    if (obtained) {
        SDL_Android_AudioTrack_get_target_spec(opaque->atrack, obtained);
        SDLTRACE("audio target format fmt:0x%x, channel:0x%x", (int)obtained->format, (int)obtained->channels);
    }

    opaque->audio_session_id = SDL_Android_AudioTrack_getAudioSessionId(env, opaque->atrack);
    ALOGI("audio_session_id = %d\n", opaque->audio_session_id);

    opaque->pause_on = 1;
    opaque->abort_request = 0;
    opaque->audio_tid = SDL_CreateThreadEx(&opaque->_audio_tid, aout_thread, aout, "ff_aout_android");
    if (!opaque->audio_tid) {
        ALOGE("aout_open_audio_n: failed to create audio thread");
        SDL_Android_AudioTrack_free(env, opaque->atrack);
        opaque->atrack = NULL;
        return -1;
    }

    return 0;
}

In the eventual aout_open_audio_n(), the output backend is a Java AudioTrack (wrapped via JNI), and an audio playback thread named ff_aout_android is started. Note pause_on is initialized to 1; playback only starts once stream_component_open() calls SDL_AoutPauseAudio(ffp->aout, 0).
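
The ff_aout_android thread is essentially a pull loop: it asks the SDL-style callback (sdl_audio_callback, registered back in audio_open()) to fill a PCM buffer, then writes that buffer to the Java AudioTrack over JNI. A simplified sketch (pause/flush/playback-rate handling omitted):

static int aout_thread_n(JNIEnv *env, SDL_Aout *aout)
{
    SDL_Aout_Opaque *opaque = aout->opaque;
    SDL_AudioCallback audio_cblk = opaque->spec.callback;  // sdl_audio_callback
    void *userdata = opaque->spec.userdata;

    SDL_Android_AudioTrack_play(env, opaque->atrack);      // AudioTrack.play()
    while (!opaque->abort_request) {
        // pull decoded/resampled PCM from ff_ffplay ...
        audio_cblk(userdata, opaque->buffer, opaque->buffer_size);
        // ... and push it to AudioTrack.write() through JNI
        SDL_Android_AudioTrack_write(env, opaque->atrack, opaque->buffer, opaque->buffer_size);
    }
    SDL_Android_AudioTrack_flush(env, opaque->atrack);
    return 0;
}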

decoder_start

Right after audio_open() returns, decoder_start() is called:

static int decoder_start(Decoder *d, int (*fn)(void *), void *arg, const char *name)
{
    packet_queue_start(d->queue);
    d->decoder_tid = SDL_CreateThreadEx(&d->_decoder_tid, fn, arg, name);
    if (!d->decoder_tid) {
        av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    return 0;
}

The const char *name parameter is the name of the thread being created; the call decoder_start(&is->auddec, audio_thread, ffp, "ff_audio_dec") creates the audio decoding thread.
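
audio_thread() is the consumer side of audioq: it decodes packets into frames and hands them to the sampq frame queue. A rough sketch of its shape (signatures follow ijkplayer's ff_ffplay.c; pts/serial bookkeeping and ffplay's filter branch omitted):

static int audio_thread(void *arg)
{
    FFPlayer *ffp = arg;
    VideoState *is = ffp->is;
    AVFrame *frame = av_frame_alloc();
    Frame *af;
    int got_frame;

    do {
        // pulls from is->audioq internally; blocks while the queue is empty
        if ((got_frame = decoder_decode_frame(ffp, &is->auddec, frame, NULL)) < 0)
            break;
        if (got_frame) {
            if (!(af = frame_queue_peek_writable(&is->sampq)))
                break;
            av_frame_move_ref(af->frame, frame);  // hand the decoded frame to sampq
            frame_queue_push(&is->sampq);
        }
    } while (1);

    av_frame_free(&frame);
    return 0;
}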

AVMEDIA_TYPE_VIDEO

ffpipeline_open_video_decoder

The video flow is much like the audio one: ffpipeline_open_video_decoder() is called to open the decoder.

IJKFF_Pipenode* ffpipeline_open_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    return pipeline->func_open_video_decoder(pipeline, ffp);
}

The IJKFF_Pipeline it dispatches through was initialized when the player was first created:

IjkMediaPlayer *ijkmp_android_create(int(*msg_loop)(void*))
{
    IjkMediaPlayer *mp = ijkmp_create(msg_loop);
    if (!mp)
        goto fail;

    mp->ffplayer->vout = SDL_VoutAndroid_CreateForAndroidSurface();
    if (!mp->ffplayer->vout)
        goto fail;

    mp->ffplayer->pipeline = ffpipeline_create_from_android(mp->ffplayer);
    if (!mp->ffplayer->pipeline)
        goto fail;

    ffpipeline_set_vout(mp->ffplayer->pipeline, mp->ffplayer->vout);

    return mp;

fail:
    ijkmp_dec_ref_p(&mp);
    return NULL;
}

So the call ends up in ffpipeline_android.c:

static IJKFF_Pipenode *func_open_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
    IJKFF_Pipenode        *node = NULL;

    if (ffp->mediacodec_all_videos || ffp->mediacodec_avc || ffp->mediacodec_hevc || ffp->mediacodec_mpeg2)
        node = ffpipenode_create_video_decoder_from_android_mediacodec(ffp, pipeline, opaque->weak_vout);
    if (!node) {
        node = ffpipenode_create_video_decoder_from_ffplay(ffp);
    }

    return node;
}

As the code shows, hardware decoding (MediaCodec) is tried first when any of the mediacodec options are enabled; if it is disabled or fails to open, the player falls back to software decoding.
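
For reference, the mediacodec_* flags checked above come from the player options. From native code they would be set roughly like this (a hedged sketch — the option names follow ijkplayer's player-option table, and mp is assumed to be an IjkMediaPlayer*):

// Enable H.264 hardware decoding (maps to ffp->mediacodec_avc)
ijkmp_set_option_int(mp, IJKMP_OPT_CATEGORY_PLAYER, "mediacodec", 1);
// Enable HEVC hardware decoding (maps to ffp->mediacodec_hevc)
ijkmp_set_option_int(mp, IJKMP_OPT_CATEGORY_PLAYER, "mediacodec-hevc", 1);
// Try MediaCodec for all video codecs (maps to ffp->mediacodec_all_videos)
ijkmp_set_option_int(mp, IJKMP_OPT_CATEGORY_PLAYER, "mediacodec-all-videos", 1);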

decoder_start

decoder_start(&is->viddec, video_thread, ffp, "ff_video_dec") likewise creates a video decoding thread, which does the following (see the sketch after this list):

  • Software decoding: fetch video packets from the videoq queue, decode them, and put the resulting frames into pictq
  • Hardware decoding: fetch video packets from the videoq queue, feed them to MediaCodec, and store the returned output-buffer index in pictq
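
For the software path, the ff_video_dec loop (ffplay_video_thread) looks roughly like this (a sketch; timing/serial details and error handling trimmed): get_video_frame() decodes a packet from videoq, and queue_picture() hands the frame to pictq for the ff_vout thread to render.

static int ffplay_video_thread(void *arg)
{
    FFPlayer *ffp = arg;
    VideoState *is = ffp->is;
    AVFrame *frame = av_frame_alloc();
    double pts, duration;
    int ret;

    for (;;) {
        ret = get_video_frame(ffp, frame);   // decoder_decode_frame() on is->videoq inside
        if (ret < 0)
            break;                           // aborted / fatal error
        if (!ret)
            continue;                        // no frame produced yet

        // pts/duration are derived from the stream time base and frame rate (omitted here)
        pts = duration = 0;
        ret = queue_picture(ffp, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
        av_frame_unref(frame);
        if (ret < 0)
            break;
    }

    av_frame_free(&frame);
    return 0;
}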

read_thread

Back in read_thread(), the for(;;) loop keeps putting data into these queues. The excerpt below is the seek-handling part of that loop, where each queue is flushed and a flush_pkt sentinel is inserted:

/* this thread gets the stream from the disk or the network */
static int read_thread(void *arg)
{
    // ....
    for (;;) {
        // ....
        if (is->audio_stream >= 0) {
            packet_queue_flush(&is->audioq);
            packet_queue_put(&is->audioq, &flush_pkt);
            // TODO: clear invaild audio data
            // SDL_AoutFlushAudio(ffp->aout);
        }
        if (is->subtitle_stream >= 0) {
            packet_queue_flush(&is->subtitleq);
            packet_queue_put(&is->subtitleq, &flush_pkt);
        }
        if (is->video_stream >= 0) {
            if (ffp->node_vdec) {
                ffpipenode_flush(ffp->node_vdec);
            }
            packet_queue_flush(&is->videoq);
            packet_queue_put(&is->videoq, &flush_pkt);
        }
        // ....
    }
}
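
packet_queue_flush() drops everything still waiting in a queue, and the flush_pkt put in right afterwards acts as a sentinel: when a decoder thread dequeues it, it calls avcodec_flush_buffers() and bumps the queue serial so stale frames can be discarded. The flush itself is straightforward, shown here roughly in its ffplay form (ijkplayer recycles the list nodes instead of freeing them):

static void packet_queue_flush(PacketQueue *q)
{
    MyAVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for (pkt = q->first_pkt; pkt; pkt = pkt1) {   // walk and release every queued packet
        pkt1 = pkt->next;
        av_packet_unref(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt   = NULL;
    q->first_pkt  = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    q->duration   = 0;
    SDL_UnlockMutex(q->mutex);
}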