NDK -- Audio/Video Synchronization for Native Streaming Playback

2020-11-13 14:53:34

Previously we implemented decoding a video stream with FFmpeg and drawing it natively to the screen, as well as decoding an audio stream and playing it with OpenSL. Today we combine the two to implement real video playback.
The idea: to play video smoothly, the video stream and the audio stream obviously need to play at the same time, i.e. one thread plays the video stream and another plays the audio stream, while reading and demuxing runs in a separate thread as the producer, continuously supplying packets to the video thread and the audio thread. With this plan, let's write the code.
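Sketched very roughly (the names here are illustrative placeholders, not the final code), the thread layout we are aiming for looks like this:

Code (C++):
//Illustrative thread layout only; the real classes are built step by step below.
extern "C" {
#include "libavformat/avformat.h"
}

extern AVFormatContext *fmtCtx;   //opened elsewhere (hypothetical globals)
extern int videoIndex, audioIndex;
extern bool running;
void putVideo(AVPacket *pkt);     //enqueue for the video decoding thread
void putAudio(AVPacket *pkt);     //enqueue for the audio decoding thread

//Producer: demux packets and route them to the two consumer threads.
void *readThread(void *arg) {
    AVPacket pkt;
    av_init_packet(&pkt);
    while (running && av_read_frame(fmtCtx, &pkt) == 0) {
        if (pkt.stream_index == videoIndex)      putVideo(&pkt);
        else if (pkt.stream_index == audioIndex) putAudio(&pkt);
        av_packet_unref(&pkt);//the queues keep their own references
    }
    return NULL;
}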
1. First, a thread that continuously reads packets

The custom SurfaceView class:

Code (Java):
package com.aruba.ffmpegsyncapplication;

import android.content.Context;
import android.graphics.PixelFormat;
import android.util.AttributeSet;
import android.view.Surface;
import android.view.SurfaceHolder;
import android.view.SurfaceView;

/**
 * Created by aruba on 2020/6/30.
 */
public class FFmpegPlayView extends SurfaceView implements SurfaceHolder.Callback {
    static {
        System.loadLibrary("native-lib");
    }

    public FFmpegPlayView(Context context) {
        this(context, null);
    }

    public FFmpegPlayView(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public FFmpegPlayView(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
        init();
    }

    private void init() {
        setKeepScreenOn(true);
        //each pixel takes 4 bytes (RGBA_8888)
        getHolder().setFormat(PixelFormat.RGBA_8888);

        getHolder().addCallback(this);
    }

    /**
     * Start playback.
     */
    public synchronized void play(String path) {
        stop();

        //call the native method to start decoding and playing
        render(path);
    }

    public synchronized void stop() {
        if (isPlaying()) {
            //call the native method to stop playback
            stopPlay();
        }
    }

    public native boolean isPlaying();

    @Override
    protected void onDetachedFromWindow() {
        super.onDetachedFromWindow();
        stop();
    }

    @Override
    public void surfaceCreated(SurfaceHolder surfaceHolder) {

    }

    @Override
    public void surfaceChanged(SurfaceHolder surfaceHolder, int i, int i1, int i2) {
        display(surfaceHolder.getSurface());
    }

    @Override
    public void surfaceDestroyed(SurfaceHolder surfaceHolder) {

    }

    /**
     * Pass the surface to the native layer for native rendering.
     *
     * @param surface
     */
    public native void display(Surface surface);

    /**
     * Start playback.
     *
     * @param filePath
     * @return
     */
    public native int render(String filePath);

    public native void stopPlay();
}

Below is the layout file: a play button, a stop button, and the custom SurfaceView.

Code (XML):
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:app="http://schemas.android.com/apk/res-auto"
    xmlns:tools="http://schemas.android.com/tools"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:orientation="vertical"
    tools:context=".MainActivity">

    <Button
        android:layout_width="match_parent"
        android:layout_height="wrap_content"
        android:onClick="play"
        android:text="播放" />

    <Button
        android:layout_width="match_parent"
        android:layout_height="wrap_content"
        android:onClick="stop"
        android:text="停止" />

    <com.aruba.ffmpegsyncapplication.FFmpegPlayView
        android:id="@ id/sv_video"
        android:layout_width="match_parent"
        android:layout_height="0dp"
        android:layout_weight="1" />

</LinearLayout>

Here is the Activity code, which is very simple:

Code (Java):
package com.aruba.ffmpegsyncapplication;

import android.os.Bundle;
import android.view.View;

import androidx.appcompat.app.AppCompatActivity;

public class MainActivity extends AppCompatActivity {
    private FFmpegPlayView sv_video;
    
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        sv_video = findViewById(R.id.sv_video);
    }

    public void play(View view) {
        sv_video.play("http://ivi.bupt.edu.cn/hls/jstv.m3u8");
    }

    public void stop(View view) {
        sv_video.stop();
    }
    
}
With this groundwork in place, let's get to the main topic: the native code.

The plan is for FFmpegPlayView's render method to do nothing but read packets, handing each packet it reads to the two decoding threads. So we first define a decoder base class that receives this data.

Code (C++):
//
// Created by aruba on 2020/10/21.
//

#ifndef FFMPEGSYNCAPPLICATION_FFMPEG_DECODER_H
#define FFMPEGSYNCAPPLICATION_FFMPEG_DECODER_H

#include "queue"
#include "pthread.h"
#include <android/log.h>

extern "C" {
//codecs
#include "libavcodec/avcodec.h"
//container format handling (demuxing)
#include "libavformat/avformat.h"
#include "libswresample/swresample.h"
//pixel processing (scaling)
#include "libswscale/swscale.h"
#include "_opensl_helper.h"
}

#define  LOG_TAG_FFMPEG_DECODER    "_ffmpeg_decoder"

class FFmpegDecoder {
public:
    //whether playback is running
    bool isPlay;
    //stream index
    int stream_idx;
    //decoder context
    AVCodecContext *codecContext;
    //packet queue
    std::queue<AVPacket *> queue;

    //playback thread
    pthread_t p_playid;
    //mutex
    pthread_mutex_t mutex;
    //condition variable
    pthread_cond_t cond;

    FFmpegDecoder();

    //set the decoder context
    void setAvCodecContext(AVCodecContext *codecContext);

    //take one packet from the queue (consumer)
    int get(AVPacket *packet);

    //put one packet into the queue (producer)
    int put(AVPacket *packet);

    void stop();

    //pure virtual: derived classes implement this to start playback
    virtual void play() = 0;

    ~FFmpegDecoder();

protected:
    //start the playback thread; called by subclasses
    void startPlay(void *play(void *), void *arg);
};


#endif //FFMPEGSYNCAPPLICATION_FFMPEG_DECODER_H

This uses thread synchronization: put is the producer and get is the consumer.

Code (C++):
//
// Created by aruba on 2020/10/21.
//

#include "_ffmpeg_decoder.h"

FFmpegDecoder::FFmpegDecoder() {
    pthread_mutex_init(&mutex, NULL);
    pthread_cond_init(&cond, NULL);
}

//set the decoder context
void FFmpegDecoder::setAvCodecContext(AVCodecContext *codecContext) {
    this->codecContext = codecContext;
}

//take one packet from the queue (consumer)
int FFmpegDecoder::get(AVPacket *packet) {
    int ret = 0;
    pthread_mutex_lock(&mutex);

    while (isPlay) {
        if (queue.empty()) {//queue is empty: wait for the producer
            pthread_cond_wait(&cond, &mutex);
        } else {
            //clone the packet at the head of the queue into the caller's packet
            if (av_packet_ref(packet, queue.front())) {
                //clone failed
                break;
            }

            //clone succeeded: pop the packet and free its memory
            AVPacket *pkt = queue.front();
            queue.pop();
            av_free_packet(pkt);
            ret = 1;
            break;//one packet obtained, hand it to the caller
        }
    }

    pthread_mutex_unlock(&mutex);
    return ret;
}

//put one packet into the queue (producer)
int FFmpegDecoder::put(AVPacket *packet) {
    AVPacket *avPacketClone = (AVPacket *) (av_malloc(sizeof(AVPacket)));
    if (av_packet_ref(avPacketClone, packet)) {
        //clone failed
        av_free(avPacketClone);
        return 0;
    }

    __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_DECODER, "queued one packet %d", stream_idx);
    pthread_mutex_lock(&mutex);
    queue.push(avPacketClone);
    //wake up the consumer
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&mutex);
    return 1;
}

//start the playback thread
void FFmpegDecoder::startPlay(void *play(void *), void *arg) {
    isPlay = true;
    pthread_create(&this->p_playid, NULL, play, arg);
}

void FFmpegDecoder::stop() {
    isPlay = false;
}

FFmpegDecoder::~FFmpegDecoder() {
    avcodec_close(codecContext);
    codecContext = NULL;
    pthread_cond_destroy(&cond);
    pthread_mutex_destroy(&mutex);
}
With the decoder base class mostly done, we define two derived classes, FFmpegVideoDecoder and FFmpegAudioDecoder, to decode video and audio respectively.
Let's handle packet reading first: the render method starts a thread that keeps reading the audio and video streams and feeding the two decoding threads.
Code (C++):
#include <jni.h>
#include <string>
#include <cstring> //for strdup
#include <unistd.h>
#include <android/log.h>
#include <android/native_window_jni.h>
#include "_ffmpeg_video_decoder.h"
#include "_ffmpeg_audio_decoder.h"

extern "C" {
//codecs
#include "libavcodec/avcodec.h"
//container format handling (demuxing)
#include "libavformat/avformat.h"
#include "libswresample/swresample.h"
//pixel processing (scaling)
#include "libswscale/swscale.h"
#include "_opensl_helper.h"
}

#define  LOG_TAG    "aruba"
#define  LOGE(...)  __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)

//FFmpeg context
AVFormatContext *formatContext;
const char *filePath;
//whether playback has started
bool is_playing;
//whether the read thread should keep running
bool is_running;
FFmpegVideoDecoder *videoDecoder;
FFmpegAudioDecoder *audioDecoder;

//packet-reading thread
pthread_t pid;

//packet-reading thread entry
void *getPacket(void *arg) {
    //register all FFmpeg components
    av_register_all();

    //required for network streams
    avformat_network_init();

    //open the input
    formatContext = avformat_alloc_context();
    if (avformat_open_input(&formatContext, filePath, NULL, NULL) != 0) {
        LOGE("failed to open input");
        avformat_free_context(formatContext);
        is_playing = false;
        pthread_exit(0);
    }

    //fill AVFormatContext with the stream info
    if (avformat_find_stream_info(formatContext, NULL) < 0) {
        LOGE("failed to read stream info");
        avformat_free_context(formatContext);
        is_playing = false;
        pthread_exit(0);
    }

    //find the codec context of each stream
    AVCodecContext *videoCodecContext = NULL;
    AVCodecContext *audioCodecContext = NULL;
    int video_stream_idx = -1;
    int audio_stream_idx = -1;
    for (int i = 0; i < formatContext->nb_streams; ++i) {
        if (formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {//video stream
            videoCodecContext = formatContext->streams[i]->codec;
            video_stream_idx = i;
        } else if (formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {//audio stream
            audioCodecContext = formatContext->streams[i]->codec;
            audio_stream_idx = i;
        }
    }

    if (videoCodecContext == NULL || audioCodecContext == NULL) {
        LOGE("failed to find codec contexts");
        avformat_free_context(formatContext);
        is_playing = false;
        pthread_exit(0);
    }

    //find the video decoder by the codec id in its context
    AVCodec *videoCodec = avcodec_find_decoder(videoCodecContext->codec_id);
    //open the decoder
    if (avcodec_open2(videoCodecContext, videoCodec, NULL) < 0) {
        LOGE("failed to open video decoder");
        avformat_free_context(formatContext);
        is_playing = false;
        pthread_exit(0);
    }

    //find the audio decoder by the codec id in its context
    AVCodec *audioCodec = avcodec_find_decoder(audioCodecContext->codec_id);
    //open the decoder
    if (avcodec_open2(audioCodecContext, audioCodec, NULL) < 0) {
        LOGE("failed to open audio decoder");
        avformat_free_context(formatContext);
        is_playing = false;
        pthread_exit(0);
    }

    videoDecoder->stream_idx = video_stream_idx;
    videoDecoder->setAvCodecContext(videoCodecContext);
    audioDecoder->stream_idx = audio_stream_idx;
    audioDecoder->setAvCodecContext(audioCodecContext);

    //holds compressed packet data
    AVPacket *pkt = (AVPacket *) (av_malloc(sizeof(AVPacket)));
    av_init_packet(pkt);

    while (av_read_frame(formatContext, pkt) == 0 && is_running) {//each read fills the AVPacket with one packet of compressed data
        if (videoDecoder && videoDecoder->isPlay &&
            pkt->stream_index == videoDecoder->stream_idx) {//video packet
            videoDecoder->put(pkt);
        } else if (audioDecoder && audioDecoder->isPlay &&
                   pkt->stream_index == audioDecoder->stream_idx) {//audio packet
            audioDecoder->put(pkt);
        }
        av_packet_unref(pkt);
    }

    //stop decoding
    LOGE("stop decoding");
    videoDecoder->stop();
    audioDecoder->stop();
    LOGE("free the decoders");
    delete (videoDecoder);
    delete (audioDecoder);
    videoDecoder = NULL;
    audioDecoder = NULL;
    //release
    LOGE("free the packet");
    av_free_packet(pkt);
    pkt = NULL;
    LOGE("free the FFmpeg context");
    avformat_free_context(formatContext);
    formatContext = NULL;

    is_playing = false;
    pthread_exit(0);
};

extern "C"
JNIEXPORT jint JNICALL
Java_com_aruba_ffmpegsyncapplication_FFmpegPlayView_render(JNIEnv *env, jobject instance,
                                                           jstring filePath_) {
    //copy the path before releasing the JNI string: the read thread uses it after this method returns
    const char *path = env->GetStringUTFChars(filePath_, 0);
    filePath = strdup(path);
    env->ReleaseStringUTFChars(filePath_, path);

    videoDecoder = new FFmpegVideoDecoder;
    audioDecoder = new FFmpegAudioDecoder;

    //start reading packets
    is_playing = true;
    is_running = true;
    pthread_create(&pid, NULL, getPacket, NULL);
    videoDecoder->play();
    audioDecoder->play();
    return 0;
}

extern "C"
JNIEXPORT void JNICALL
Java_com_aruba_ffmpegsyncapplication_FFmpegPlayView_display(JNIEnv *env, jobject instance,
                                                            jobject surface) {

}

extern "C"
JNIEXPORT void JNICALL
Java_com_aruba_ffmpegsyncapplication_FFmpegPlayView_stopPlay(JNIEnv *env, jobject instance) {

    is_running = false;
    pthread_join(pid, 0);//wait for the read thread to really stop
}

extern "C"
JNIEXPORT jboolean JNICALL
Java_com_aruba_ffmpegsyncapplication_FFmpegPlayView_isPlaying(JNIEnv *env, jobject instance) {
    return is_playing;
}

The result at this stage:

2. Starting the video decoding thread and the audio decoding thread
First, some background: human hearing is more sensitive than human vision. As mentioned in the audio decoding post, hearing spans 20 Hz to 20 kHz, which is why audio is usually sampled 44100 times per second, while, as mentioned in the video decoding post, the eye can only distinguish about 60 frames per second.
Audio playback and video playback run on two different threads, and either audio or video falling behind degrades the viewing experience, so to keep playback natural we have to synchronize the threads. Since hearing is more sensitive than vision, we take the audio as the master clock and synchronize the video stream to it. When the offset is between -90 ms (audio behind video) and +20 ms (audio ahead of video), people cannot perceive a change in audiovisual quality, so this range can be treated as the in-sync region.
As we saw when using OpenSL, the speaker keeps invoking a callback in which we refill the buffer to play audio. In that callback we record the current audio frame's time and use it to synchronize the video frames, lengthening or shortening the video thread's sleep (previously we slept a fixed 16 ms per frame) until audio and video line up.
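Before writing the real code, here is a minimal sketch of that pacing rule (a simplified version of the full implementation near the end of this post; audio_clock and video_clock stand for the playback positions, in seconds, recorded by the two threads):

Code (C++):
//Simplified pacing decision for the video thread (illustration only).
//delay is the nominal interval between two video frames, in seconds.
double pace(double delay, double video_clock, double audio_clock) {
    double diff = video_clock - audio_clock;      //>0 means video is ahead of audio
    double sync_threshold = (delay > 0.01 ? 0.01 : delay);
    if (diff <= -sync_threshold) return 0;        //video is behind: show the next frame immediately
    if (diff >= sync_threshold) return 2 * delay; //video is ahead: sleep longer
    return delay;                                 //inside the sync region: keep the nominal delay
}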
So audio playback works much as before. We already declared the subclass FFmpegAudioDecoder; let's implement audio decoding and playback first.
Code (C++):
//
// Created by aruba on 2020/10/21.
//

#ifndef FFMPEGSYNCAPPLICATION_FFMPEG_ADUIO_DECODER_H
#define FFMPEGSYNCAPPLICATION_FFMPEG_ADUIO_DECODER_H

#include "_ffmpeg_decoder.h"

#define  LOG_TAG_FFMPEG_AUDIO_DECODER    "_ffmpeg_audio_decoder"

class FFmpegAudioDecoder : public FFmpegDecoder {
public:
    OpenslHelper helper;

    uint8_t *out;
    int buff_size;
    AVPacket *avPacket;
    AVFrame *picture;
    SwrContext *swrContext;
    int channel_count;
    int out_size;

    //start playback
    void play();

    //release resources
    void release();

    //stop playback
    void stop();
};


#endif //FFMPEGSYNCAPPLICATION_FFMPEG_ADUIO_DECODER_H
Code (C++):
//
// Created by aruba on 2020/10/21.
//

#include "_ffmpeg_audio_decoder.h"

/**
 * The player keeps invoking this callback; we refill the buffer with data here.
 * @param bq
 * @param pContext
 */
void playerCallback(SLAndroidSimpleBufferQueueItf bq, void *pContext) {
    __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER,
                        "preparing to play");
    //prepare for playback
    FFmpegAudioDecoder *audioDecoder = (FFmpegAudioDecoder *) (pContext);

    __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER,
                        "helper.playState");
    if (audioDecoder->helper.playState == SL_PLAYSTATE_PLAYING) {
        __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER,
                            "start decoding");
        //decode
        int picture_ptr = 0;

        while (audioDecoder->isPlay) {
            __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER,
                                "about to take a packet");
            audioDecoder->get(audioDecoder->avPacket);//take compressed data from the queue
            __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER,
                                "took one packet");
            int ret = avcodec_decode_audio4(audioDecoder->codecContext, audioDecoder->picture,
                                            &picture_ptr,
                                            audioDecoder->avPacket);
            if (ret < 0) {
                __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER,
                                    "Error while decoding. len = %d",
                                    ret);
            }

            if (picture_ptr) {
                //resample
                swr_convert(audioDecoder->swrContext, &audioDecoder->out, audioDecoder->out_size,
                            (const uint8_t **) (audioDecoder->picture->data),
                            audioDecoder->picture->nb_samples);

                //actual size of the decoded buffer
                audioDecoder->buff_size = av_samples_get_buffer_size(NULL,
                                                                     audioDecoder->channel_count,
                                                                     audioDecoder->picture->nb_samples,
                                                                     AV_SAMPLE_FMT_S16, 1);
                break;
            }
        }

        //play
        if (audioDecoder->out != NULL && audioDecoder->buff_size != 0 && audioDecoder->isPlay) {
            (*bq)->Enqueue(bq, audioDecoder->out, (SLuint32) (audioDecoder->buff_size));
        }
        __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER,
                            "playing %d", picture_ptr);
    } else if (audioDecoder->helper.playState == SL_PLAYSTATE_STOPPED || !audioDecoder->isPlay) {
        __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER,
                            "stopping playback");
        audioDecoder->release();
        audioDecoder->helper.~OpenslHelper();
        delete (audioDecoder);
    }
}

//audio decoding thread
void *decodeAudio(void *arg) {
    FFmpegAudioDecoder *audioDecoder = (FFmpegAudioDecoder *) (arg);

    //initialize OpenSL
    SLresult result = audioDecoder->helper.createEngine();
    if (!audioDecoder->helper.isSuccess(result)) {
        __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER, "createEngine failed");
        audioDecoder->isPlay = false;
        pthread_exit(0);
    }
    result = audioDecoder->helper.createMix();
    if (!audioDecoder->helper.isSuccess(result)) {
        __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER, "createMix failed");
        audioDecoder->isPlay = false;
        pthread_exit(0);
    }

    //create the player: stereo, 44.1 kHz, 16-bit samples
    result = audioDecoder->helper.createPlayer(2, SL_SAMPLINGRATE_44_1, SL_PCMSAMPLEFORMAT_FIXED_16,
                                               SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT);
    if (!audioDecoder->helper.isSuccess(result)) {
        __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER, "createPlayer failed");
        audioDecoder->isPlay = false;
        pthread_exit(0);
    }

    //initialize decoding state
    audioDecoder->avPacket = (AVPacket *) (av_malloc(sizeof(AVPacket)));
    av_init_packet(audioDecoder->avPacket);
    //holds decoded (raw) data
    audioDecoder->picture = av_frame_alloc();
    //audio resampling context
    audioDecoder->swrContext = swr_alloc();
    //AV_CH_LAYOUT_STEREO: stereo; AV_SAMPLE_FMT_S16: 16-bit samples; codecContext->sample_rate: sample rate in Hz
    swr_alloc_set_opts(audioDecoder->swrContext, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,
                       audioDecoder->codecContext->sample_rate,//output sample rate should match the input
                       audioDecoder->codecContext->channel_layout,
                       audioDecoder->codecContext->sample_fmt,
                       audioDecoder->codecContext->sample_rate, 0, NULL
    );
    swr_init(audioDecoder->swrContext);

    //number of output channels
    audioDecoder->channel_count = av_get_channel_layout_nb_channels(
            AV_CH_LAYOUT_STEREO);
    //max bytes of resampled data per channel per second = sample rate * bits per sample / 8
    audioDecoder->out_size = 44100 * 16 / 8;
    audioDecoder->out = (uint8_t *) (av_malloc(audioDecoder->out_size));

    //start the actual audio playback
    audioDecoder->helper.registerCallback(playerCallback, audioDecoder);
    audioDecoder->helper.play();
    playerCallback(audioDecoder->helper.bufferQueueItf, audioDecoder);

    //exit this thread; from now on OpenSL keeps driving the callback
    pthread_exit(0);
}

//start playback
void FFmpegAudioDecoder::play() {
    //start the thread
    startPlay(decodeAudio, this);
}

//stop playback
void FFmpegAudioDecoder::stop() {
    //call the base class method
    FFmpegDecoder::stop();
    //stop the player
    helper.stop();
    helper.~OpenslHelper();
    release();
    delete (this);
}

//release resources
void FFmpegAudioDecoder::release() {
    __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER,
                        "releasing audio resources");
    //free everything
    av_packet_unref(avPacket);
    av_free_packet(avPacket);
    av_free(out);
    swr_free(&swrContext);
    av_frame_free(&picture);
    avcodec_close(codecContext);
}
Using the OpenslHelper class we wrapped earlier, playerCallback keeps pulling packets, decoding and resampling them, and handing the result to OpenSL for playback.
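Note how the loop is driven: OpenSL invokes the callback again each time an enqueued buffer finishes playing, so the three lines at the end of decodeAudio are all it takes to keep audio flowing:

Code (C++):
//inside decodeAudio, after the decoder state is set up:
audioDecoder->helper.registerCallback(playerCallback, audioDecoder); //invoked after each buffer finishes
audioDecoder->helper.play();                                         //switch to SL_PLAYSTATE_PLAYING
playerCallback(audioDecoder->helper.bufferQueueItf, audioDecoder);   //prime the queue once by hand;
                                                                     //each Enqueue() then re-triggers the callback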
Next, the video decoding thread:
Code (C++):
//
// Created by aruba on 2020/10/21.
//

#ifndef FFMPEGSYNCAPPLICATION_FFMPEG_VIDEO_DECODER_H
#define FFMPEGSYNCAPPLICATION_FFMPEG_VIDEO_DECODER_H

#include <android/native_window.h>
#include <android/native_window_jni.h>
#include "_ffmpeg_decoder.h"
#include <unistd.h>

#define  LOG_TAG_FFMPEG_VIDEO_DECODER    "_ffmpeg_video_decoder"

class FFmpegVideoDecoder : public FFmpegDecoder {
public:
    //window buffer
    ANativeWindow_Buffer out_buff;
    //holds compressed data
    AVPacket *pkt;
    //holds decoded data
    AVFrame *picture;
    //holds converted (RGBA) data
    AVFrame *picture_rgb;
    //memory allocated for the converted data
    uint8_t *data_size;
    //conversion (swscale) context
    SwsContext *swsContext;
    //native drawing surface
    ANativeWindow *aNativeWindow;

public:
    //initialize the window
    void initWindow(ANativeWindow *aNativeWindow);

    //configure the window buffer
    void setWindowBuffer();

    //start playback
    void play();

    //release resources
    void release();
};


#endif //FFMPEGSYNCAPPLICATION_FFMPEG_VIDEO_DECODER_H
Code (C++):
//
// Created by aruba on 2020/10/21.
//

#include "_ffmpeg_video_decoder.h"

//video decoding thread
void *decodeVideo(void *arg) {
    FFmpegVideoDecoder *videoDecoder = (FFmpegVideoDecoder *) (arg);
    //initialization
    //holds compressed data
    videoDecoder->pkt = (AVPacket *) (av_malloc(sizeof(AVPacket)));
    av_init_packet(videoDecoder->pkt);

    //holds decoded data
    videoDecoder->picture = av_frame_alloc();

    //holds converted (RGBA) data
    videoDecoder->picture_rgb = av_frame_alloc();
    //allocate memory for the converted data
    videoDecoder->data_size = (uint8_t *) (av_malloc(
            (size_t) avpicture_get_size(AV_PIX_FMT_RGBA, videoDecoder->codecContext->width,
                                        videoDecoder->codecContext->height)));
    avpicture_fill((AVPicture *) videoDecoder->picture_rgb, videoDecoder->data_size,
                   AV_PIX_FMT_RGBA,
                   videoDecoder->codecContext->width,
                   videoDecoder->codecContext->height);

    //configure the window's size and pixel format
    videoDecoder->setWindowBuffer();

    //conversion context: the first three arguments are the source width/height/pixel format, the next three the destination's; a filter could also be passed to post-process the video, which we skip here
    videoDecoder->swsContext = sws_getContext(videoDecoder->codecContext->width,
                                              videoDecoder->codecContext->height,
                                              videoDecoder->codecContext->pix_fmt,
                                              videoDecoder->codecContext->width,
                                              videoDecoder->codecContext->height,
                                              AV_PIX_FMT_RGBA, SWS_BILINEAR, NULL, NULL, NULL
    );


    //start decoding
    int picture_ptr = 0;
    while (videoDecoder->isPlay) {
        videoDecoder->get(videoDecoder->pkt);//take compressed data from the queue
        //decode
        avcodec_decode_video2(videoDecoder->codecContext, videoDecoder->picture, &picture_ptr,
                              videoDecoder->pkt);

        if (picture_ptr > 0) {
            //convert: data holds the actual pixels, linesize is the bytes per row, 0 is the starting row, the height covers the whole frame
            sws_scale(videoDecoder->swsContext, videoDecoder->picture->data,
                      videoDecoder->picture->linesize, 0, videoDecoder->picture->height,
                      videoDecoder->picture_rgb->data, videoDecoder->picture_rgb->linesize);
            //lock the window
            ANativeWindow_lock(videoDecoder->aNativeWindow, &videoDecoder->out_buff, NULL);

            //copy each row of the converted frame (picture_rgb) into the corresponding row of the window's buffer (out_buff)
            //start address of picture_rgb's pixel data
            uint8_t *frame_data_p = videoDecoder->picture_rgb->data[0];
            //start address of the window buffer's pixel data
            uint8_t *buff_data_p = (uint8_t *) (videoDecoder->out_buff.bits);
            //bytes per row of the window buffer: RGBA_8888 is 4 bytes per pixel
            int destStride = videoDecoder->out_buff.stride * 4;
            for (int i = 0; i < videoDecoder->codecContext->height; i++) {
                memcpy(buff_data_p, frame_data_p, videoDecoder->picture_rgb->linesize[0]);
                buff_data_p += destStride;
                frame_data_p += videoDecoder->picture_rgb->linesize[0];
            }

            ANativeWindow_unlockAndPost(videoDecoder->aNativeWindow);
            //one frame every 16 ms
            usleep(16 * 1000);
        }
    }

    videoDecoder->release();
    //free memory
    delete (videoDecoder);
    pthread_exit(0);
}

//configure the window buffer
void FFmpegVideoDecoder::setWindowBuffer() {
    if (codecContext && aNativeWindow) {
        ANativeWindow_setBuffersGeometry(aNativeWindow, codecContext->width,
                                         codecContext->height,
                                         WINDOW_FORMAT_RGBA_8888);
    }
}

void FFmpegVideoDecoder::initWindow(ANativeWindow *aNativeWindow) {
    if (aNativeWindow == NULL) {
        return;
    }

    this->aNativeWindow = aNativeWindow;
    //configure the buffer
    setWindowBuffer();
}

void FFmpegVideoDecoder::play() {
    //start the thread
    startPlay(decodeVideo, this);
}

//release resources
void FFmpegVideoDecoder::release() {
    __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_VIDEO_DECODER,
                        "releasing video resources");
    av_packet_unref(pkt);
    av_free_packet(pkt);
    sws_freeContext(swsContext);
    aNativeWindow = NULL;
    av_frame_free(&picture_rgb);
    av_frame_free(&picture);
    av_free(data_size);//allocated with av_malloc, so free with av_free
    avcodec_close(codecContext);
}
In native-lib.cpp, pass the surface to the video decoder object:
Code (C++):
extern "C"
JNIEXPORT void JNICALL
Java_com_aruba_ffmpegsyncapplication_FFmpegPlayView_display(JNIEnv *env, jobject instance,
                                                            jobject surface) {
    if (aNativeWindow) {
        ANativeWindow_release(aNativeWindow);
    }

    //get the native drawing surface
    aNativeWindow = ANativeWindow_fromSurface(env, surface);
    if (videoDecoder) {
        videoDecoder->initWindow(aNativeWindow);
    }
}
The result:
So far we have implemented video playback and audio playback, but next we have to tackle audio/video synchronization: because the video thread sleeps a fixed 16 ms per frame, video and audio drift apart over time (the picture stops matching the sound). For example, a 25 fps stream should show a frame every 40 ms; at 16 ms per frame the video runs 2.5 times too fast relative to the audio.
As mentioned above, synchronization should start from the video stream by controlling its sleep time. AVStream has a member time_base, which describes how one second is subdivided into timestamp units; during decoding, av_frame_get_best_effort_timestamp returns the current frame's timestamp in those units, so the playback time of a video frame can be computed.
Add a time_base member to the decoder base class to receive the corresponding stream's time_base (the video decoder will also need a clock member, a pointer to the audio decoder, and the synchronize method used below).
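Concretely, the conversion is just the timestamp multiplied by the time base. A quick illustration with made-up numbers:

Code (C++):
extern "C" {
#include "libavutil/rational.h"
}

//Convert a stream timestamp to seconds. With a 90 kHz time base,
//common in MPEG-TS streams, time_base = {1, 90000}, so a frame with
//pts = 180000 plays at 180000 * (1 / 90000.0) = 2.0 seconds.
static double ptsToSeconds(int64_t pts, AVRational time_base) {
    return pts * av_q2d(time_base);
}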

native-lib.cpp

Code (C++):
//packet-reading thread
void *getPacket(void *arg) {
  ...
    //find the codec context of each stream
    AVCodecContext *videoCodecContext = NULL;
    AVCodecContext *audioCodecContext = NULL;
    int video_stream_idx = -1;
    int audio_stream_idx = -1;
    //the timestamp unit of each stream, i.e. how a second is subdivided
    //(AVRational is a plain struct, so give it a neutral initial value)
    AVRational video_time_base = {0, 1};
    AVRational audio_time_base = {0, 1};
    for (int i = 0; i < formatContext->nb_streams; ++i) {
        if (formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {//video stream
            videoCodecContext = formatContext->streams[i]->codec;
            video_stream_idx = i;
            video_time_base = formatContext->streams[i]->time_base;
        } else if (formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {//audio stream
            audioCodecContext = formatContext->streams[i]->codec;
            audio_stream_idx = i;
            audio_time_base = formatContext->streams[i]->time_base;
        }
    }

 ...

    videoDecoder->stream_idx = video_stream_idx;
    videoDecoder->time_base = video_time_base;
    videoDecoder->setAvCodecContext(videoCodecContextClone);
    audioDecoder->stream_idx = audio_stream_idx;
    audioDecoder->time_base = audio_time_base;
    audioDecoder->setAvCodecContext(audioCodecContextClone);

...
};
Next, compute the audio playback time. Because an audio packet contains multiple frames, the calculation differs from the video case: first compute the time of the packet's first frame, then add the playing time of all the frames in the packet; the sum is the actual playback position once this packet has finished playing.
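A worked example with made-up numbers (16-bit stereo PCM at 44100 Hz, matching the swr output configured above):

Code (C++):
//One second of output PCM = 44100 samples * 2 bytes * 2 channels = 176400 bytes.
const double data_size   = 44100 * 2 * 2;     //bytes of PCM per second
const double packet_time = 4096 / data_size;  //a packet decoding to 4096 bytes lasts about 0.0232 s
const double clock_now   = 2.0 + packet_time; //first-frame time (say 2.0 s) + packet duration, about 2.023 s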

_ffmpeg_audio_decoder.cpp

Code (C++):
/**
 * The player keeps invoking this callback; we refill the buffer with data here.
 * @param bq
 * @param pContext
 */
void playerCallback(SLAndroidSimpleBufferQueueItf bq, void *pContext) {
...
      while (audioDecoder->isPlay) {
            audioDecoder->get(audioDecoder->avPacket);//take compressed data from the queue

            //compute the playback time of the first frame in this packet
            if (audioDecoder->avPacket->pts != AV_NOPTS_VALUE) {
                clockTemp = av_q2d(audioDecoder->time_base) * audioDecoder->avPacket->pts;
            }

            int ret = avcodec_decode_audio4(audioDecoder->codecContext, audioDecoder->picture,
                                            &picture_ptr,
                                            audioDecoder->avPacket);
            if (ret < 0) {
                __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER,
                                    "Error while decoding. len = %d",
                                    ret);
            }

            if (picture_ptr) {
                //resample
                swr_convert(audioDecoder->swrContext, &audioDecoder->out, audioDecoder->out_size,
                            (const uint8_t **) (audioDecoder->picture->data),
                            audioDecoder->picture->nb_samples);

                //actual size of the decoded buffer
                audioDecoder->buff_size = av_samples_get_buffer_size(NULL,
                                                                     audioDecoder->channel_count,
                                                                     audioDecoder->picture->nb_samples,
                                                                     AV_SAMPLE_FMT_S16, 1);
                break;
            }
        }

        //correct to the actual playback time
        if (audioDecoder->buff_size > 0) {
            //bytes of data per second: per-channel bytes * channel count
            double data_size = audioDecoder->out_size * audioDecoder->channel_count;
            //how long the frames in this packet take to play
            double time = audioDecoder->buff_size / data_size;
            __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER,
                                "packet byte length %d, playing time %f", audioDecoder->buff_size, time);
            audioDecoder->clock = clockTemp + time;
            __android_log_print(ANDROID_LOG_ERROR, LOG_TAG_FFMPEG_AUDIO_DECODER,
                                "actual playback time %f", audioDecoder->clock);
        }
...
}
With the audio playback time obtained above, we compare it against the video playback time and correct the video thread's sleep to achieve audio/video sync.

_ffmpeg_video_decoder.cpp

Code (C++):
//video decoding thread
void *decodeVideo(void *arg) {
    FFmpegVideoDecoder *videoDecoder = (FFmpegVideoDecoder *) (arg);
    //initialization
    //holds compressed data
    videoDecoder->pkt = (AVPacket *) (av_malloc(sizeof(AVPacket)));
    av_init_packet(videoDecoder->pkt);

    //holds decoded data
    videoDecoder->picture = av_frame_alloc();

    //holds converted (RGBA) data
    videoDecoder->picture_rgb = av_frame_alloc();
    //allocate memory for the converted data
    videoDecoder->data_size = (uint8_t *) (av_malloc(
            (size_t) avpicture_get_size(AV_PIX_FMT_RGBA, videoDecoder->codecContext->width,
                                        videoDecoder->codecContext->height)));
    avpicture_fill((AVPicture *) videoDecoder->picture_rgb, videoDecoder->data_size,
                   AV_PIX_FMT_RGBA,
                   videoDecoder->codecContext->width,
                   videoDecoder->codecContext->height);

    //configure the window's size and pixel format
    videoDecoder->setWindowBuffer();

    //conversion context: the first three arguments are the source width/height/pixel format, the next three the destination's; a filter could also be passed to post-process the video, which we skip here
    videoDecoder->swsContext = sws_getContext(videoDecoder->codecContext->width,
                                              videoDecoder->codecContext->height,
                                              videoDecoder->codecContext->pix_fmt,
                                              videoDecoder->codecContext->width,
                                              videoDecoder->codecContext->height,
                                              AV_PIX_FMT_RGBA, SWS_BILINEAR, NULL, NULL, NULL
    );


    //start decoding
    int picture_ptr = 0;
    double last_play      //playback time of the previous frame
    , play                //playback time of the current frame
    , last_delay          //frame interval used for the previous frame
    , delay               //interval between two video frames
    , audio_clock         //actual playback time of the audio track
    , diff                //difference between the video and audio clocks
    , sync_threshold      //tolerated difference between the two clocks
    , start_time          //absolute time since the first frame, in seconds
    , pts
    , actual_delay        //the delay we actually sleep
    ;
    start_time = av_gettime() / 1000000.0;

    while (videoDecoder->isPlay) {
        videoDecoder->get(videoDecoder->pkt);//take compressed data from the queue
        //decode
        avcodec_decode_video2(videoDecoder->codecContext, videoDecoder->picture, &picture_ptr,
                              videoDecoder->pkt);

        if (picture_ptr > 0) {
            //convert: data holds the actual pixels, linesize is the bytes per row, 0 is the starting row, the height covers the whole frame
            sws_scale(videoDecoder->swsContext, videoDecoder->picture->data,
                      videoDecoder->picture->linesize, 0, videoDecoder->picture->height,
                      videoDecoder->picture_rgb->data, videoDecoder->picture_rgb->linesize);

            if ((pts = av_frame_get_best_effort_timestamp(videoDecoder->picture)) ==
                AV_NOPTS_VALUE) {
                pts = 0;
            }
            //current playback time
            play = pts * av_q2d(videoDecoder->time_base);
            //correct the time
            play = videoDecoder->synchronize(videoDecoder->picture, play);
            delay = play - last_play;
            if (delay <= 0 || delay > 1) {//implausible: fall back to the previous frame's delay
                delay = last_delay;
            }
            audio_clock = videoDecoder->audioDecoder->clock;
            last_delay = delay;
            last_play = play;
            //difference between the video and audio clocks
            diff = videoDecoder->clock - audio_clock;
            //only outside this tolerance do we slow down or speed up
            sync_threshold = (delay > 0.01 ? 0.01 : delay);

            if (fabs(diff) < 10) {//a difference beyond 10 s means the clocks are unusable
                if (diff <= -sync_threshold) {//video is behind: don't sleep, show the next frame right away
                    delay = 0;
                } else if (diff >= sync_threshold) {//video is ahead: sleep longer
                    delay = 2 * delay;
                }
            }
            start_time += delay;
            //the real sleep time: subtract the time the code above took to run
            actual_delay = start_time - av_gettime() / 1000000.0;
            if (actual_delay < 0.01) {
                actual_delay = 0.01;
            }
            //plus an empirical 6 ms
            av_usleep(actual_delay * 1000000.0 + 6000);

            //========= draw =========
            //lock the window
            ANativeWindow_lock(videoDecoder->aNativeWindow, &videoDecoder->out_buff, NULL);

            //copy each row of the converted frame (picture_rgb) into the corresponding row of the window's buffer (out_buff)
            //start address of picture_rgb's pixel data
            uint8_t *frame_data_p = videoDecoder->picture_rgb->data[0];
            //start address of the window buffer's pixel data
            uint8_t *buff_data_p = (uint8_t *) (videoDecoder->out_buff.bits);
            //bytes per row of the window buffer: RGBA_8888 is 4 bytes per pixel
            int destStride = videoDecoder->out_buff.stride * 4;
            for (int i = 0; i < videoDecoder->codecContext->height; i++) {
                memcpy(buff_data_p, frame_data_p, videoDecoder->picture_rgb->linesize[0]);
                buff_data_p += destStride;
                frame_data_p += videoDecoder->picture_rgb->linesize[0];
            }

            ANativeWindow_unlockAndPost(videoDecoder->aNativeWindow);
        }
    }

    videoDecoder->release();
    //free memory
    delete (videoDecoder);
    pthread_exit(0);
}

double FFmpegVideoDecoder::synchronize(AVFrame *frame, double play) {
    //clock is the current playback position
    if (play != 0)
        clock = play;
    else //pts is 0: reuse the previous frame's time
        play = clock;
    //pts may be 0, so advance clock ourselves
    //frame->repeat_pict: how much longer this picture must be shown when decoding
    //the extra delay is: extra_delay = repeat_pict / (2*fps)
    double repeat_pict = frame->repeat_pict;
    //use the AVCodecContext's time_base, not the stream's
    double frame_delay = av_q2d(codecContext->time_base);
    //if time_base is 1/25, a second is split into 25 parts, so fps is 25
    //fps = 1/(1/25)
    double fps = 1 / frame_delay;
    //pts plus this delay is the display time
    double extra_delay = repeat_pict / (2 * fps);
    double delay = extra_delay + frame_delay;
    //advance clock to the next frame's playback time
    clock += delay;
    return play;
}
That completes audio/video synchronization. The actual result:
Project address: https://gitee.com/aruba/ffmpeg-sync-application.git
