整个工程代码下载地址
http://download.csdn.net/download/gongluck93/10175326
Code
//#define WIN32_LEAN_AND_MEAN // exclude rarely-used material from Windows headers
#include <WinSock2.h>
#include <Windows.h>
#include <stdio.h>
#include <queue>
#include "VzLPRClientSDK.h"
using namespace std;
// ffmpeg (C libraries: must be wrapped in extern "C" when compiled as C++)
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#ifdef __cplusplus
extern "C"
{
#endif
// NOTE(review): the '/' path separators were lost in extraction; restored to
// the standard FFmpeg include layout (libavcodec/avcodec.h, ...).
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/error.h"
#include "libswresample/swresample.h"
#ifdef __cplusplus
}
#endif
// MinGW-built static FFmpeg libraries plus their codec dependencies
#pragma comment(lib, "libgcc.a")
#pragma comment(lib, "libmingwex.a")
#pragma comment(lib, "libcoldname.a")
#pragma comment(lib, "libavcodec.a")
#pragma comment(lib, "libavformat.a")
#pragma comment(lib, "libavutil.a")
#pragma comment(lib, "libswscale.a")
#pragma comment(lib, "libz.a")
#pragma comment(lib, "libfaac.a")
#pragma comment(lib, "libgsm.a")
#pragma comment(lib, "libmp3lame.a")
#pragma comment(lib, "libogg.a")
#pragma comment(lib, "libspeex.a")
#pragma comment(lib, "libtheora.a")
#pragma comment(lib, "libvorbis.a")
#pragma comment(lib, "libvorbisenc.a")
#pragma comment(lib, "libx264.a")
#pragma comment(lib, "xvidcore.a")
#pragma comment(lib, "libswresample.a")
// LPR camera SDK and WinSock
#pragma comment(lib, "VzLPRSDK.lib")
#pragma comment(lib, "WS2_32.lib")
// Free `ptr` with `func`, then null it to guard against double-free.
// do/while(0) makes the macro safe as a single statement in if/else bodies.
#define SAFEFREE(ptr, func) do { if ((ptr) != NULL) { func(ptr); (ptr) = NULL; } } while (0)
#define STOPNUM 150             // stop the demo after this many muxed frames
#define IO_BUFFER_SIZE (32768)  // size of the AVIOContext read buffer

// One encoded-video packet handed over from the SDK callback to the muxer.
typedef struct st_package
{
    unsigned char* buf;
    unsigned int len;
    unsigned int pre; // number of bytes of buf already consumed by read_buf
} package;

// Producer: GetRealDataCB; consumer: read_buf. Deliberately unlocked (demo).
queue<package*> g_que_pkg;

// fix: the original modes were "wb " — a trailing space is not a valid
// fopen() mode string and can make the open fail.
static FILE* g_fp_out = fopen("test.h264", "wb");  // frames re-encoded by GetFrameCB
static FILE* g_fp_H264 = fopen("save.h264", "wb"); // raw stream from GetRealDataCB
static FILE* g_fp_yuv = fopen("save.yuv", "wb");   // raw YUV planes
static AVPacket g_packet;
static AVFrame* g_pFra = avcodec_alloc_frame();
static AVCodec* g_pCodec;
static AVCodecContext* g_pCodecCtx;
static int g_frame_index = 0;  // frames muxed so far, shared across callbacks
static bool g_stopCB1 = false; // tells GetFrameCB to stop
static bool g_stopCB2 = false; // tells GetRealDataCB to stop
// Custom AVIOContext read callback: feeds FFmpeg from the in-memory packet
// queue filled by GetRealDataCB.
// Returns the number of bytes copied into buf, or -1 (treated as EOF) once
// STOPNUM frames have been muxed.
int read_buf(void *opaque, uint8_t *buf, int buf_size)
{
    static package* pkg = NULL; // packet currently being drained across calls
    unsigned int len = 0;
    // 故意不加锁的 -> deliberately unlocked single-producer demo; a real
    // implementation needs a mutex + condition variable around the queue.
    if (g_frame_index > STOPNUM)
        return -1;
    if (pkg == NULL)
    {
        // Busy-wait until the producer pushes a packet (demo only).
        while (g_que_pkg.size() == 0)
            ;
        pkg = g_que_pkg.front();
        g_que_pkg.pop();
    }
    // Copy at most buf_size bytes of the unread remainder of this packet.
    len = pkg->len - pkg->pre > (unsigned int)buf_size ? (unsigned int)buf_size : pkg->len - pkg->pre;
    memcpy(buf, pkg->buf + pkg->pre, len); // fix: '+' was lost in the listing
    pkg->pre += len; // fix: must accumulate; '=' would re-read from a wrong offset
    if (pkg->pre >= pkg->len)
    {
        // Packet fully consumed: release it and fetch the next one on demand.
        free(pkg->buf);
        free(pkg);
        pkg = NULL;
    }
    return len;
}
// Camera YUV420P frame callback: encodes each raw frame to H.264 with the
// global encoder context and appends the encoded bytes to test.h264 and the
// raw YUV planes to save.yuv.
void __stdcall GetFrameCB(VzLPRClientHandle handle, void *pUserData, const VzYUV420P *pFrame)
{
    static int i = 0; // pts counter for the encoder
    if (g_stopCB1)
        return;
    // YUV420P ---->> h264
    // Option 1: compute the frame size and bind buf+frame with avpicture_fill
    // (which also fills frame->linesize automatically):
    // static int picture_size = avpicture_get_size(g_pCodecCtx->pix_fmt, g_pCodecCtx->width, g_pCodecCtx->height);
    // static uint8_t* picture_buf = (uint8_t *)malloc(picture_size);
    // avpicture_fill((AVPicture *)g_pFra, picture_buf, g_pCodecCtx->pix_fmt, g_pCodecCtx->width, g_pCodecCtx->height);
    // then copy the planes into the frame-bound memory (YYYY...U...V):
    // memcpy(g_pFra->data[0], pFrame->pY, pFrame->widthStepY * pFrame->height);
    // memcpy(g_pFra->data[1], pFrame->pU, pFrame->widthStepU * pFrame->height/2);
    // memcpy(g_pFra->data[2], pFrame->pV, pFrame->widthStepV * pFrame->height/2);
    // Option 2: fill each plane's line stride by hand.
    g_pFra->linesize[0] = g_pCodecCtx->width;
    g_pFra->linesize[1] = g_pCodecCtx->width / 2;
    g_pFra->linesize[2] = g_pCodecCtx->width / 2;
    // Reuse the plane buffers already owned by the SDK callback (no copy).
    g_pFra->data[0] = pFrame->pY;
    g_pFra->data[1] = pFrame->pU;
    g_pFra->data[2] = pFrame->pV;
    g_pFra->pts = i;
    av_init_packet(&g_packet);
    g_packet.data = NULL; // let the encoder allocate the output buffer
    g_packet.size = 0;
    int got_output;
    int ret = avcodec_encode_video2(g_pCodecCtx, &g_packet, g_pFra, &got_output);
    if (ret < 0)
    {
        printf("function _encode_video2 error !\n"); // fix: '\n' lost in listing
        return;
    }
    if (got_output)
    {
        // fix: '%d' conversions were garbled to ']' and '\n' to 'n'
        printf("Succeed to encode frame: %d\tsize:%d\n", i, g_packet.size);
        fwrite(g_packet.data, 1, g_packet.size, g_fp_out);
        fwrite(pFrame->pY, pFrame->widthStepY * pFrame->height, 1, g_fp_yuv);
        fwrite(pFrame->pU, pFrame->widthStepU * pFrame->height / 2, 1, g_fp_yuv);
        fwrite(pFrame->pV, pFrame->widthStepV * pFrame->height / 2, 1, g_fp_yuv);
    }
    i++; // fix: the counter was never incremented, so every frame got pts 0
    av_free_packet(&g_packet);
    if (g_frame_index >= STOPNUM)
        g_stopCB1 = true;
}
// Camera real-data callback: for encoded-video packets, append the bytes to
// save.h264 and queue a private copy for read_buf / the muxer to consume.
void __stdcall GetRealDataCB(VzLPRClientHandle handle, void *pUserData, VZ_LPRC_DATA_TYPE eDataType, const VZ_LPRC_DATA_INFO *pData)
{
    static int i = 0; // received-frame counter (logging only)
    package* pkg = NULL;
    if (g_stopCB2)
        return;
    switch (eDataType)
    {
    case VZ_LPRC_DATA_ENC_VIDEO:
        // fix: conversions were garbled to ']' and '\n' to 'n' in the listing;
        // the counter was also never advanced.
        printf("Succeed to get a frame: %d\tsize:%u K:%d\n", i++, pData->uDataSize, pData->uIsKeyFrame);
        fwrite(pData->pBuffer, pData->uDataSize, 1, g_fp_H264);
        // Copy the packet: the SDK owns pBuffer only for the callback's duration.
        pkg = (package*)malloc(sizeof(package));
        if (pkg == NULL) // fix: unchecked malloc
            break;
        pkg->buf = (uint8_t*)malloc(pData->uDataSize);
        if (pkg->buf == NULL)
        {
            free(pkg);
            break;
        }
        memcpy(pkg->buf, pData->pBuffer, pData->uDataSize);
        pkg->len = pData->uDataSize;
        pkg->pre = 0;
        g_que_pkg.push(pkg);
        break;
    default:
        break;
    }
    if (g_frame_index >= STOPNUM)
        g_stopCB2 = true;
}
// Demo entry point: connects to the LPR camera, pulls its H.264 stream
// through an in-memory AVIOContext (fed by GetRealDataCB via read_buf), and
// remuxes the first STOPNUM frames into test.flv.
int main()
{
    if (VzLPRClient_Setup() == -1)
    {
        printf("function _Setup error !\n"); // fix: '\n' lost in the listing
        return -1;
    }
    av_register_all();
    // Configure the H.264 encoder context used by GetFrameCB.
    g_pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    g_pCodecCtx = avcodec_alloc_context3(g_pCodec);
    g_pCodecCtx->bit_rate = 4000000;
    g_pCodecCtx->width = 1280;
    g_pCodecCtx->height = 720;
    g_pCodecCtx->time_base.num = 1;
    g_pCodecCtx->time_base.den = 10;
    g_pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
    avcodec_open2(g_pCodecCtx, g_pCodec, NULL);
    g_pFra->format = g_pCodecCtx->pix_fmt;
    g_pFra->width = g_pCodecCtx->width;
    g_pFra->height = g_pCodecCtx->height;
    // Connect to the camera and register both callbacks.
    VzLPRClientHandle hClient = VzLPRClient_Open("192.168.3.87", 80, "admin", "admin");
    int framerate;
    VzLPRClient_GetVideoFrameRate(hClient, &framerate);
    VzLPRClient_SetVideoFrameCallBack(hClient, GetFrameCB, NULL);
    VzLPRClient_SetRealDataCallBack(hClient, GetRealDataCB, NULL);
    // Input side: raw H.264 read from memory through a custom AVIOContext.
    AVFormatContext* ifmt_ctx = avformat_alloc_context();
    unsigned char* inbuffer = (unsigned char*)malloc(IO_BUFFER_SIZE);
    AVIOContext* avio_in = avio_alloc_context(inbuffer, IO_BUFFER_SIZE, 0, NULL, read_buf, NULL, NULL);
    ifmt_ctx->pb = avio_in;
    AVInputFormat* ifmt_v = av_find_input_format("h264");
    avformat_open_input(&ifmt_ctx, NULL, ifmt_v, NULL);
    avformat_find_stream_info(ifmt_ctx, NULL); // starts pulling data via read_buf
    // Output side: test.flv.
    AVFormatContext* ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, "test.flv");
    AVOutputFormat* ofmt = ofmt_ctx->oformat;
    AVStream* out_stream = avformat_new_stream(ofmt_ctx, NULL);
    AVStream* in_stream = ifmt_ctx->streams[0];
    avcodec_copy_context(out_stream->codec, in_stream->codec);
    out_stream->codec->codec_tag = 0;
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    av_dump_format(ofmt_ctx, 0, "test.flv", 1);
    if (!(ofmt->flags & AVFMT_NOFILE))
    {
        if (avio_open(&ofmt_ctx->pb, "test.flv", AVIO_FLAG_WRITE) < 0)
        {
            printf("Could not open output file '%s'", "test.flv");
        }
    }
    avformat_write_header(ofmt_ctx, NULL);
    AVPacket pkt;
    AVStream* inin_stream;
    while (av_read_frame(ifmt_ctx, &pkt) >= 0)
    {
        // In case the input ever carries more than one stream.
        inin_stream = ifmt_ctx->streams[pkt.stream_index];
        if (pkt.stream_index == in_stream->index)
        {
            if (pkt.pts == AV_NOPTS_VALUE) // raw H.264 has no timestamps
            {
                // The elementary stream has no reliable pts/timebase, so
                // synthesize timestamps from the configured frame rate.
                double calc_duration = (double)1 / framerate;
                pkt.pts = (double)(g_frame_index * calc_duration) / (av_q2d(out_stream->time_base));
                pkt.dts = pkt.pts;
                pkt.duration = calc_duration / av_q2d(out_stream->time_base);
                g_frame_index++; // fix: '++' was lost; timestamps never advanced
            }
        }
        pkt.pos = -1;
        pkt.stream_index = out_stream->index;
        // fix: conversions garbled to ']' and '\n' to 'n' in the listing
        printf("Write 1 Packet. size:%d\tpts:%lld\n", pkt.size, pkt.pts);
        if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0)
        {
            printf("Error muxing packet\n"); // fix: '\n' lost
            break;
        }
        av_free_packet(&pkt);
    }
    av_write_trailer(ofmt_ctx);
    // Wait for both callbacks to observe the stop condition.
    while (!g_stopCB1 || !g_stopCB2)
        Sleep(100);
    // fix: the output AVIOContext was never closed (fd/buffer leak).
    if (!(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    SAFEFREE(ofmt_ctx, avformat_free_context);
    av_close_input_file(ifmt_ctx);
    // NOTE(review): avio may internally replace its buffer; freeing both
    // avio_in and inbuffer mirrors the original code — verify against the
    // FFmpeg version in use.
    SAFEFREE(avio_in, av_free);
    SAFEFREE(inbuffer, free);
    SAFEFREE(g_pCodecCtx, av_free);
    SAFEFREE(g_pFra, av_free);
    SAFEFREE(g_fp_H264, fclose);
    SAFEFREE(g_fp_out, fclose);
    SAFEFREE(g_fp_yuv, fclose);
    VzLPRClient_Cleanup();
    //system("pause");
    return 0;
}
由于这个只是测试的例子,所以是一步贯穿的做法,以后把视频流推送到服务器的话,要使用多线程。