YUV420 displayed correctly in a virtual machine but not on the 945 (D525) module. A DirectDraw YUV420 test also failed on that hardware, which pointed to the graphics chipset not supporting YUV420 overlays, so the decoded 420 output was converted to 422 for display.
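(Not from the original post: a minimal sketch of how the overlay format could be probed at runtime instead of assuming YV12 works. It uses the SDL 1.2 API; "screen" is assumed to be an already-created display surface, and create_overlay is a hypothetical helper.)

#include "SDL/SDL.h"

/* Try a planar YV12 overlay first; if the driver refuses it or only offers a
 * software path (as on the 945/D525 module), fall back to packed YUY2. */
static SDL_Overlay *create_overlay(SDL_Surface *screen, int w, int h, Uint32 *fmt_out)
{
    SDL_Overlay *ov = SDL_CreateYUVOverlay(w, h, SDL_YV12_OVERLAY, screen);
    if (ov == NULL || !ov->hw_overlay) {
        if (ov)
            SDL_FreeYUVOverlay(ov);
        ov = SDL_CreateYUVOverlay(w, h, SDL_YUY2_OVERLAY, screen);
        if (fmt_out) *fmt_out = SDL_YUY2_OVERLAY;
    } else {
        if (fmt_out) *fmt_out = SDL_YV12_OVERLAY;
    }
    return ov;
}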

The YUV420 display version is as follows:

/*
 * Build commands:
 *   arm-linux-gcc -o show2642 264showyuv2.c -I/usr/local/ffmpeg_arm/include/ -L/usr/local/ffmpeg_arm/lib/ -lswresample -lavformat -lavutil -lavcodec -lswscale -lx264 libSDL.a
 *   gcc -o test test.c -I/usr/local/ffmpeg/include/ -L/usr/local/ffmpeg/lib/ -lswresample -lavformat -lavutil -lavcodec -lswscale -lx264 -lSDL
 */
#include "stdio.h"
#include "stdlib.h"#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswresample/swresample.h"
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/fifo.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/timestamp.h"
#include "libavutil/bprint.h"
#include "libavutil/time.h"
#include "libavutil/threadmessage.h"
#include "SDL/SDL.h"//#include "libavfilter/avcodec.h"
#include "libavcodec/avcodec.h"#if HAVE_SYS_RESOURCE_H
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#elif HAVE_GETPROCESSTIMES
#include <windows.h>
#endif
#if HAVE_GETPROCESSMEMORYINFO
#include <windows.h>
#include <psapi.h>
#endif

#if HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#if HAVE_TERMIOS_H
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <termios.h>
#elif HAVE_KBHIT
#include <conio.h>
#endif

#if HAVE_PTHREADS
#include <pthread.h>
#endif

#include <time.h>
#include "libavutil/avassert.h"

#define MAX_LEN  (1024 * 50)

/* pgm_save below follows the example from the official FFmpeg documentation. */
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize, FILE *f)
{
    /* Writing the plane to disk is disabled here; re-enable the fwrite() to dump raw YUV. */
    int i;
    for (i = 0; i < ysize; i++) {
        /* fwrite(buf + i * wrap, 1, xsize, f); */
    }
}

int main()
{
    int w = 720;
    int h = 576;
    SDL_Rect rect;

    av_register_all();

    AVFrame *pFrame_ = NULL;

    /* Find the H.264 decoder. */
    AVCodec *videoCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!videoCodec) {
        printf("avcodec_find_decoder error\n");
        return -1;
    }

    /* Parser context, used below to locate frame boundaries in the raw stream. */
    AVCodecParserContext *avParserContext = av_parser_init(AV_CODEC_ID_H264);
    if (!avParserContext) {
        printf("av_parser_init error\n");
        return -1;
    }

    /* Decoder context. */
    AVCodecContext *codec_ = avcodec_alloc_context3(videoCodec);
    if (!codec_) {
        printf("avcodec_alloc_context3 error\n");
        return -1;
    }

    /* These parameters should come from the actual stream/application. */
    codec_->time_base.num = 1;
    codec_->frame_number  = 1;         /* one video frame per packet */
    codec_->codec_type    = AVMEDIA_TYPE_VIDEO;
    codec_->bit_rate      = 0;
    codec_->time_base.den = 25;        /* frame rate */
    codec_->width  = 720;              /* video width */
    codec_->height = 576;              /* video height */

    if (avcodec_open2(codec_, videoCodec, NULL) >= 0) {
        /* Allocate the frame once after the decoder is opened; it could also be
           allocated and freed per frame, but one allocation is enough here. */
        pFrame_ = av_frame_alloc();
        if (!pFrame_) {
            fprintf(stderr, "Could not allocate video frame\n");
            exit(1);
        }
    } else {
        printf("avcodec_open2 error\n");
        return -1;
    }

    AVPacket packet = {0};
    int frameFinished = 0;
    av_init_packet(&packet);
    packet.data = NULL;                /* will point at one complete H.264 frame */
    packet.size = 0;                   /* size of that frame */

    FILE *myH264 = fopen("1.264", "rb");        /* raw H.264 input file */
    if (myH264 == NULL) {
        perror("cant open 264 file\n");
        return -1;
    }
    FILE *yuvfile = fopen("my264.yuv", "wb");   /* decoded YUV output, viewable with a YUV tool */
    if (yuvfile == NULL) {
        perror("cant open YUV file\n");
        return -1;
    }

    int readFileLen = 1;
    char readBuf[MAX_LEN];
    /* The parse buffer must live on the heap; a stack array of this size
       (unsigned char parseBuf[20*MAX_LEN]) kept crashing the decoder. */
    unsigned char *parseBuf = malloc(20 * MAX_LEN);
    int parseBufLen = 0;
    int frameCount = 0;

    printf("begin...\n");
    printf("readBuf address is %p\n", (void *)readBuf);

    /* SDL init: planar YV12 overlay. */
    SDL_Surface *screen = NULL;
    SDL_Init(SDL_INIT_VIDEO);
    screen = SDL_SetVideoMode(1024, 768, 32, SDL_SWSURFACE);
    SDL_Overlay *overlay = SDL_CreateYUVOverlay(w, h, SDL_YV12_OVERLAY, screen);
    SDL_LockSurface(screen);
    SDL_LockYUVOverlay(overlay);

    while (readFileLen > 0) {                /* decode loop */
        readFileLen = fread(readBuf, 1, sizeof(readBuf), myH264);   /* read raw data from the file */
        if (readFileLen <= 0) {
            printf("read over\n");
            break;
        } else {
            int handleLen = 0;
            int handleFileLen = readFileLen;
            while (handleFileLen > 0) {
                /* Look for an H.264 frame boundary. */
                int nLength = av_parser_parse2(avParserContext, codec_, &parseBuf, &parseBufLen,
                                               readBuf + handleLen, handleFileLen, 0, 0, 0);
                handleFileLen -= nLength;
                handleLen += nLength;
                if (parseBufLen <= 0)        /* parseBufLen > 0 means a complete frame was found */
                    continue;

                packet.size = parseBufLen;   /* length of the parsed frame */
                packet.data = parseBuf;      /* pointer to the parsed frame */
                if (frameCount > 100)
                    break;

                while (packet.size > 0) {
                    /* The actual decode. */
                    int decodeLen = avcodec_decode_video2(codec_, pFrame_, &frameFinished, &packet);
                    if (decodeLen < 0)
                        break;
                    packet.size -= decodeLen;
                    packet.data += decodeLen;
                    if (frameFinished > 0) {     /* a frame was decoded */
                        frameCount++;
                        printf("Frame count is %d\n", frameCount);

                        pgm_save(pFrame_->data[0], pFrame_->linesize[0],           /* Y plane */
                                 codec_->width, codec_->height, yuvfile);
                        pgm_save(pFrame_->data[1], pFrame_->linesize[1],           /* U plane */
                                 codec_->width / 2, codec_->height / 2, yuvfile);
                        pgm_save(pFrame_->data[2], pFrame_->linesize[2],           /* V plane */
                                 codec_->width / 2, codec_->height / 2, yuvfile);

                        /* With the YUV planes in hand, FFmpeg's conversion routines could also
                           turn them into RGB for other processing; here they go straight to SDL.
                           The YV12 overlay pitches are hard-coded (1280/640); normally
                           overlay->pitches[] should be used instead. */
                        int i;
                        for (i = 0; i < 576; i++)
                            memcpy(overlay->pixels[0] + i * 1280, pFrame_->data[0] + i * pFrame_->linesize[0], 720);
                        for (i = 0; i < 288; i++) {
                            memcpy(overlay->pixels[2] + i * 640, pFrame_->data[1] + i * pFrame_->linesize[1], 360);
                            memcpy(overlay->pixels[1] + i * 640, pFrame_->data[2] + i * pFrame_->linesize[2], 360);
                        }
                        SDL_UnlockYUVOverlay(overlay);
                        SDL_UnlockSurface(screen);
                        rect.w = w;
                        rect.h = h;
                        rect.x = rect.y = 0;
                        SDL_DisplayYUVOverlay(overlay, &rect);
                        SDL_Delay(40);
                    } else {
                        printf("failed to decodec\n");
                    }
                }
            }
        }
    }

    /* Cleanup. */
    avcodec_close(codec_);
    av_free(codec_);
    av_free_packet(&packet);
    av_frame_free(&pFrame_);
    SDL_FreeYUVOverlay(overlay);
    SDL_FreeSurface(screen);
    SDL_Quit();
    fclose(yuvfile);
    fclose(myH264);
}

The YUV422 (YUY2) display version is as follows:
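The key change from the 420 version is that every decoded planar YUV420P frame is repacked into a single packed YUY2 buffer (byte order Y0 U0 Y1 V0, two bytes per pixel, with each chroma sample shared by a horizontal pixel pair) before being copied into an SDL_YUY2_OVERLAY. A minimal sketch of that repacking, pulled out into a hypothetical helper for clarity; the full program below does the same thing inline:

/* Repack planar YUV420P (separate Y, U, V planes with their own strides)
 * into packed YUY2. dst must hold width*height*2 bytes. */
static void yuv420p_to_yuy2(const AVFrame *src, unsigned char *dst, int width, int height)
{
    int x, y, k = 0;
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            dst[k++] = src->data[0][y * src->linesize[0] + x];                 /* Y for every pixel */
            dst[k++] = (x % 2 == 0)
                     ? src->data[1][(y / 2) * src->linesize[1] + x / 2]        /* U on even columns */
                     : src->data[2][(y / 2) * src->linesize[2] + x / 2];       /* V on odd columns */
        }
    }
}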

/*
 * Build commands:
 *   arm-linux-gcc -o show2642 264showyuv2.c -I/usr/local/ffmpeg_arm/include/ -L/usr/local/ffmpeg_arm/lib/ -lswresample -lavformat -lavutil -lavcodec -lswscale -lx264 libSDL.a
 *   gcc -o test test.c -I/usr/local/ffmpeg/include/ -L/usr/local/ffmpeg/lib/ -lswresample -lavformat -lavutil -lavcodec -lswscale -lx264 -lSDL
 */
#include "stdio.h"
#include "stdlib.h"#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswresample/swresample.h"
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/fifo.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/timestamp.h"
#include "libavutil/bprint.h"
#include "libavutil/time.h"
#include "libavutil/threadmessage.h"
#include "SDL/SDL.h"//#include "libavfilter/avcodec.h"
#include "libavcodec/avcodec.h"#if HAVE_SYS_RESOURCE_H
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#elif HAVE_GETPROCESSTIMES
#include <windows.h>
#endif
#if HAVE_GETPROCESSMEMORYINFO
#include <windows.h>
#include <psapi.h>
#endif

#if HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#if HAVE_TERMIOS_H
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <termios.h>
#elif HAVE_KBHIT
#include <conio.h>
#endif

#if HAVE_PTHREADS
#include <pthread.h>
#endif

#include <time.h>
#include "libavutil/avassert.h"

#define MAX_LEN  (1024 * 50)

/* pgm_save below follows the example from the official FFmpeg documentation. */
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize, FILE *f)
{
    /* Writing the plane to disk is disabled here; re-enable the fwrite() to dump raw YUV. */
    int i;
    for (i = 0; i < ysize; i++) {
        /* fwrite(buf + i * wrap, 1, xsize, f); */
    }
}

int main()
{
    int w = 720;
    int h = 576;
    SDL_Rect rect;

    av_register_all();

    AVFrame *pFrame_ = NULL;

    /* Find the H.264 decoder. */
    AVCodec *videoCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!videoCodec) {
        printf("avcodec_find_decoder error\n");
        return -1;
    }

    /* Parser context, used below to locate frame boundaries in the raw stream. */
    AVCodecParserContext *avParserContext = av_parser_init(AV_CODEC_ID_H264);
    if (!avParserContext) {
        printf("av_parser_init error\n");
        return -1;
    }

    /* Decoder context. */
    AVCodecContext *codec_ = avcodec_alloc_context3(videoCodec);
    if (!codec_) {
        printf("avcodec_alloc_context3 error\n");
        return -1;
    }

    /* These parameters should come from the actual stream/application. */
    codec_->time_base.num = 1;
    codec_->frame_number  = 1;         /* one video frame per packet */
    codec_->codec_type    = AVMEDIA_TYPE_VIDEO;
    codec_->bit_rate      = 0;
    codec_->time_base.den = 25;        /* frame rate */
    codec_->width  = 720;              /* video width */
    codec_->height = 576;              /* video height */

    if (avcodec_open2(codec_, videoCodec, NULL) >= 0) {
        /* Allocate the frame once after the decoder is opened. */
        pFrame_ = av_frame_alloc();
        if (!pFrame_) {
            fprintf(stderr, "Could not allocate video frame\n");
            exit(1);
        }
    } else {
        printf("avcodec_open2 error\n");
        return -1;
    }

    AVPacket packet = {0};
    int frameFinished = 0;
    av_init_packet(&packet);
    packet.data = NULL;                /* will point at one complete H.264 frame */
    packet.size = 0;                   /* size of that frame */

    FILE *myH264 = fopen("1.264", "rb");        /* raw H.264 input file */
    if (myH264 == NULL) {
        perror("cant open 264 file\n");
        return -1;
    }
    FILE *yuvfile = fopen("my264.yuv", "wb");   /* decoded YUV output */
    if (yuvfile == NULL) {
        perror("cant open YUV file\n");
        return -1;
    }

    int readFileLen = 1;
    char readBuf[MAX_LEN];
    /* The parse buffer must live on the heap; a stack array of this size kept crashing. */
    unsigned char *parseBuf = malloc(20 * MAX_LEN);
    int parseBufLen = 0;
    int frameCount = 0;

    printf("begin...\n");
    printf("readBuf address is %p\n", (void *)readBuf);

    /* SDL init: packed YUY2 overlay this time. */
    SDL_Surface *screen = NULL;
    SDL_Init(SDL_INIT_VIDEO);
    screen = SDL_SetVideoMode(720, 576, 32, SDL_SWSURFACE);
    SDL_Overlay *overlay = SDL_CreateYUVOverlay(w, h, SDL_YUY2_OVERLAY, screen);
    SDL_LockSurface(screen);
    SDL_LockYUVOverlay(overlay);

    unsigned char yuv422[768 * 576 * 2];     /* packed YUY2 staging buffer */

    while (readFileLen > 0) {                /* decode loop */
        readFileLen = fread(readBuf, 1, sizeof(readBuf), myH264);
        if (readFileLen <= 0) {
            printf("read over\n");
            break;
        } else {
            int handleLen = 0;
            int handleFileLen = readFileLen;
            while (handleFileLen > 0) {
                /* Look for an H.264 frame boundary. */
                int nLength = av_parser_parse2(avParserContext, codec_, &parseBuf, &parseBufLen,
                                               readBuf + handleLen, handleFileLen, 0, 0, 0);
                handleFileLen -= nLength;
                handleLen += nLength;
                if (parseBufLen <= 0)        /* parseBufLen > 0 means a complete frame was found */
                    continue;

                packet.size = parseBufLen;
                packet.data = parseBuf;
                if (frameCount > 100)
                    break;

                while (packet.size > 0) {
                    int decodeLen = avcodec_decode_video2(codec_, pFrame_, &frameFinished, &packet);
                    if (decodeLen < 0)
                        break;
                    packet.size -= decodeLen;
                    packet.data += decodeLen;
                    if (frameFinished > 0) {     /* a frame was decoded */
                        frameCount++;
                        printf("Frame count is %d\n", frameCount);

                        pgm_save(pFrame_->data[0], pFrame_->linesize[0],           /* Y plane */
                                 codec_->width, codec_->height, yuvfile);
                        pgm_save(pFrame_->data[1], pFrame_->linesize[1],           /* U plane */
                                 codec_->width / 2, codec_->height / 2, yuvfile);
                        pgm_save(pFrame_->data[2], pFrame_->linesize[2],           /* V plane */
                                 codec_->width / 2, codec_->height / 2, yuvfile);

                        /* Repack planar YUV420P into packed YUY2 (Y0 U0 Y1 V0). */
                        int k = 0, y, x;
                        for (y = 0; y < 576; y++) {
                            for (x = 0; x < 720; x++) {
                                yuv422[k++] = pFrame_->data[0][y * pFrame_->linesize[0] + x];
                                yuv422[k++] = (x % 2 == 0)
                                            ? pFrame_->data[1][(y / 2) * pFrame_->linesize[1] + x / 2]
                                            : pFrame_->data[2][(y / 2) * pFrame_->linesize[2] + x / 2];
                            }
                        }
                        /* Assumes overlay->pitches[0] == width * 2. */
                        memcpy(overlay->pixels[0], yuv422, codec_->width * codec_->height * 2);

                        SDL_UnlockYUVOverlay(overlay);
                        SDL_UnlockSurface(screen);
                        rect.w = w;
                        rect.h = h;
                        rect.x = rect.y = 0;
                        SDL_DisplayYUVOverlay(overlay, &rect);
                        SDL_Delay(40);
                    } else {
                        printf("failed to decodec\n");
                    }
                }
            }
        }
    }

    /* Cleanup. */
    avcodec_close(codec_);
    av_free(codec_);
    av_free_packet(&packet);
    av_frame_free(&pFrame_);
    SDL_FreeYUVOverlay(overlay);
    SDL_FreeSurface(screen);
    SDL_Quit();
    fclose(yuvfile);
    fclose(myH264);
}

The same conversion implemented with sws_scale:
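The hand-written repacking loop can be delegated to libswscale. A minimal sketch of the pattern, not taken verbatim from the program below: convert_to_yuyv422 is a hypothetical helper, and for simplicity it creates and frees the scaler context on every call, whereas a real player should create the context once and reuse it.

#include <stdint.h>
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libswscale/swscale.h"

/* Convert one decoded YUV420P AVFrame to packed YUYV422.
 * Returns a newly allocated buffer of width*height*2 bytes (free with av_freep). */
static uint8_t *convert_to_yuyv422(const AVFrame *frame, int width, int height)
{
    struct SwsContext *sws = sws_getContext(width, height, AV_PIX_FMT_YUV420P,
                                            width, height, AV_PIX_FMT_YUYV422,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    uint8_t *dst_data[4];
    int dst_linesize[4];
    av_image_alloc(dst_data, dst_linesize, width, height, AV_PIX_FMT_YUYV422, 1);

    sws_scale(sws, (const uint8_t * const *)frame->data, frame->linesize,
              0, height, dst_data, dst_linesize);

    sws_freeContext(sws);
    return dst_data[0];   /* packed Y0 U0 Y1 V0, dst_linesize[0] bytes per row */
}

The full program below follows the same idea but keeps the destination buffer in an AVPicture filled with avpicture_fill().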

/*
 * Build commands:
 *   arm-linux-gcc -o show2642 264showyuv2.c -I/usr/local/ffmpeg_arm/include/ -L/usr/local/ffmpeg_arm/lib/ -lswresample -lavformat -lavutil -lavcodec -lswscale -lx264 libSDL.a
 *   gcc -o test test.c -I/usr/local/ffmpeg/include/ -L/usr/local/ffmpeg/lib/ -lswresample -lavformat -lavutil -lavcodec -lswscale -lx264 -lSDL
 */
#include "stdio.h"
#include "stdlib.h"#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswresample/swresample.h"
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/fifo.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/timestamp.h"
#include "libavutil/bprint.h"
#include "libavutil/time.h"
#include "libavutil/threadmessage.h"
#include "SDL/SDL.h"//#include "libavfilter/avcodec.h"
#include "libavcodec/avcodec.h"#if HAVE_SYS_RESOURCE_H
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#elif HAVE_GETPROCESSTIMES
#include <windows.h>
#endif
#if HAVE_GETPROCESSMEMORYINFO
#include <windows.h>
#include <psapi.h>
#endif

#if HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#if HAVE_TERMIOS_H
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <termios.h>
#elif HAVE_KBHIT
#include <conio.h>
#endif

#if HAVE_PTHREADS
#include <pthread.h>
#endif

#include <time.h>
#include "libavutil/avassert.h"

#define MAX_LEN  (1024 * 50)

/* pgm_save below follows the example from the official FFmpeg documentation. */
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize, FILE *f)
{
    /* Writing the plane to disk is disabled here; re-enable the fwrite() to dump raw YUV. */
    int i;
    for (i = 0; i < ysize; i++) {
        /* fwrite(buf + i * wrap, 1, xsize, f); */
    }
}

int main()
{
    int w = 720;
    int h = 576;
    SDL_Rect rect;

    av_register_all();

    AVFrame *pFrame_ = NULL;
    AVFrame *pFrameYUV = av_frame_alloc();       /* holds the converted YUYV422 picture */
    struct SwsContext *img_convert_ctx = NULL;

    /* Find the H.264 decoder. */
    AVCodec *videoCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!videoCodec) {
        printf("avcodec_find_decoder error\n");
        return -1;
    }

    /* Parser context, used below to locate frame boundaries in the raw stream. */
    AVCodecParserContext *avParserContext = av_parser_init(AV_CODEC_ID_H264);
    if (!avParserContext) {
        printf("av_parser_init error\n");
        return -1;
    }

    /* Decoder context. */
    AVCodecContext *codec_ = avcodec_alloc_context3(videoCodec);
    if (!codec_) {
        printf("avcodec_alloc_context3 error\n");
        return -1;
    }

    /* These parameters should come from the actual stream/application. */
    codec_->time_base.num = 1;
    codec_->frame_number  = 1;         /* one video frame per packet */
    codec_->codec_type    = AVMEDIA_TYPE_VIDEO;
    codec_->bit_rate      = 0;
    codec_->time_base.den = 25;        /* frame rate */
    codec_->width  = 720;              /* video width */
    codec_->height = 576;              /* video height */

    if (avcodec_open2(codec_, videoCodec, NULL) >= 0) {
        /* Allocate the frame once after the decoder is opened. */
        pFrame_ = av_frame_alloc();
        if (!pFrame_) {
            fprintf(stderr, "Could not allocate video frame\n");
            exit(1);
        }
    } else {
        printf("avcodec_open2 error\n");
        return -1;
    }

    AVPacket packet = {0};
    int frameFinished = 0;
    av_init_packet(&packet);
    packet.data = NULL;                /* will point at one complete H.264 frame */
    packet.size = 0;                   /* size of that frame */

    FILE *myH264 = fopen("1.264", "rb");        /* raw H.264 input file */
    if (myH264 == NULL) {
        perror("cant open 264 file\n");
        return -1;
    }
    FILE *yuvfile = fopen("my264.yuv", "wb");   /* decoded YUV output */
    if (yuvfile == NULL) {
        perror("cant open YUV file\n");
        return -1;
    }

    int readFileLen = 1;
    char readBuf[MAX_LEN];
    /* The parse buffer must live on the heap; a stack array of this size kept crashing. */
    unsigned char *parseBuf = malloc(20 * MAX_LEN);
    int parseBufLen = 0;
    int frameCount = 0;

    printf("begin...\n");
    printf("readBuf address is %p\n", (void *)readBuf);

    /* SDL init: packed YUY2 overlay. */
    SDL_Surface *screen = NULL;
    SDL_Init(SDL_INIT_VIDEO);
    screen = SDL_SetVideoMode(720, 576, 32, SDL_SWSURFACE);
    SDL_Overlay *overlay = SDL_CreateYUVOverlay(w, h, SDL_YUY2_OVERLAY, screen);
    SDL_LockSurface(screen);
    SDL_LockYUVOverlay(overlay);

    /* Packed YUYV422 destination buffer used by sws_scale. */
    int numBytes = avpicture_get_size(AV_PIX_FMT_YUYV422, codec_->width, codec_->height);
    uint8_t *yuv422 = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    avpicture_fill((AVPicture *)pFrameYUV, yuv422, AV_PIX_FMT_YUYV422, codec_->width, codec_->height);

    while (readFileLen > 0) {                /* decode loop */
        readFileLen = fread(readBuf, 1, sizeof(readBuf), myH264);
        if (readFileLen <= 0) {
            printf("read over\n");
            break;
        } else {
            int handleLen = 0;
            int handleFileLen = readFileLen;
            while (handleFileLen > 0) {
                /* Look for an H.264 frame boundary. */
                int nLength = av_parser_parse2(avParserContext, codec_, &parseBuf, &parseBufLen,
                                               readBuf + handleLen, handleFileLen, 0, 0, 0);
                handleFileLen -= nLength;
                handleLen += nLength;
                if (parseBufLen <= 0)
                    continue;

                packet.size = parseBufLen;
                packet.data = parseBuf;
                if (frameCount > 100)
                    break;

                while (packet.size > 0) {
                    int decodeLen = avcodec_decode_video2(codec_, pFrame_, &frameFinished, &packet);
                    if (decodeLen < 0)
                        break;
                    packet.size -= decodeLen;
                    packet.data += decodeLen;
                    if (frameFinished > 0) {
                        frameCount++;
                        printf("Frame count is %d\n", frameCount);

                        pgm_save(pFrame_->data[0], pFrame_->linesize[0],           /* Y plane */
                                 codec_->width, codec_->height, yuvfile);
                        pgm_save(pFrame_->data[1], pFrame_->linesize[1],           /* U plane */
                                 codec_->width / 2, codec_->height / 2, yuvfile);
                        pgm_save(pFrame_->data[2], pFrame_->linesize[2],           /* V plane */
                                 codec_->width / 2, codec_->height / 2, yuvfile);

                        /* Convert YUV420P to packed YUYV422 with libswscale.
                           Create the context only once; recreating it every frame leaks memory. */
                        if (img_convert_ctx == NULL)
                            img_convert_ctx = sws_getContext(codec_->width, codec_->height, codec_->pix_fmt,
                                                             codec_->width, codec_->height, AV_PIX_FMT_YUYV422,
                                                             SWS_BILINEAR, NULL, NULL, NULL);
                        sws_scale(img_convert_ctx, (const uint8_t * const *)pFrame_->data, pFrame_->linesize,
                                  0, codec_->height, pFrameYUV->data, pFrameYUV->linesize);

                        /* Assumes overlay->pitches[0] == width * 2. */
                        memcpy(overlay->pixels[0], yuv422, codec_->width * codec_->height * 2);

                        SDL_UnlockYUVOverlay(overlay);
                        SDL_UnlockSurface(screen);
                        rect.w = w;
                        rect.h = h;
                        rect.x = rect.y = 0;
                        SDL_DisplayYUVOverlay(overlay, &rect);
                        SDL_Delay(40);
                    } else {
                        printf("failed to decodec\n");
                    }
                }
            }
        }
    }

    /* Cleanup. */
    avcodec_close(codec_);
    av_free(codec_);
    av_free_packet(&packet);
    av_frame_free(&pFrame_);
    SDL_FreeYUVOverlay(overlay);
    SDL_FreeSurface(screen);
    SDL_Quit();
    fclose(yuvfile);
    fclose(myH264);
}

Reposted from: https://www.cnblogs.com/mao0504/p/5589743.html
