ffmpeg: demuxing_decode_video.c

Pixel formats supported by FFmpeg (enum AVPixelFormat, defined in libavutil/pixfmt.h):

enum AVPixelFormat {
    AV_PIX_FMT_NONE = -1,
    AV_PIX_FMT_YUV420P,   ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
    AV_PIX_FMT_YUYV422,   ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
    AV_PIX_FMT_RGB24,     ///< packed RGB 8:8:8, 24bpp, RGBRGB...
    AV_PIX_FMT_BGR24,     ///< packed RGB 8:8:8, 24bpp, BGRBGR...
    AV_PIX_FMT_YUV422P,   ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
    AV_PIX_FMT_YUV444P,   ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
    AV_PIX_FMT_YUV410P,   ///< planar YUV 4:1:0,  9bpp, (1 Cr & Cb sample per 4x4 Y samples)
    AV_PIX_FMT_YUV411P,   ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
    AV_PIX_FMT_GRAY8,     ///<        Y        ,  8bpp
    AV_PIX_FMT_MONOWHITE, ///<        Y        ,  1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
    AV_PIX_FMT_MONOBLACK, ///<        Y        ,  1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
    AV_PIX_FMT_PAL8,      ///< 8 bits with AV_PIX_FMT_RGB32 palette
    AV_PIX_FMT_YUVJ420P,  ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range
    AV_PIX_FMT_YUVJ422P,  ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range
    AV_PIX_FMT_YUVJ444P,  ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
    AV_PIX_FMT_UYVY422,   ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
    AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
    AV_PIX_FMT_BGR8,      ///< packed RGB 3:3:2,  8bpp, (msb)2B 3G 3R(lsb)
    AV_PIX_FMT_BGR4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
    AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1B 2G 1R(lsb)
    AV_PIX_FMT_RGB8,      ///< packed RGB 3:3:2,  8bpp, (msb)2R 3G 3B(lsb)
    AV_PIX_FMT_RGB4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
    AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1R 2G 1B(lsb)
    AV_PIX_FMT_NV12,      ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
    AV_PIX_FMT_NV21,      ///< as above, but U and V bytes are swapped
    AV_PIX_FMT_ARGB,      ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
    AV_PIX_FMT_RGBA,      ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
    AV_PIX_FMT_ABGR,      ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
    AV_PIX_FMT_BGRA,      ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
    AV_PIX_FMT_GRAY16BE,  ///<        Y        , 16bpp, big-endian
    AV_PIX_FMT_GRAY16LE,  ///<        Y        , 16bpp, little-endian
    AV_PIX_FMT_YUV440P,   ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
    AV_PIX_FMT_YUVJ440P,  ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
    AV_PIX_FMT_YUVA420P,  ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
    AV_PIX_FMT_RGB48BE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
    AV_PIX_FMT_RGB48LE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
    AV_PIX_FMT_RGB565BE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), big-endian
    AV_PIX_FMT_RGB565LE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), little-endian
    AV_PIX_FMT_RGB555BE,  ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian   , X=unused/undefined
    AV_PIX_FMT_RGB555LE,  ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
    AV_PIX_FMT_BGR565BE,  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), big-endian
    AV_PIX_FMT_BGR565LE,  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), little-endian
    AV_PIX_FMT_BGR555BE,  ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian   , X=unused/undefined
    AV_PIX_FMT_BGR555LE,  ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined
#if FF_API_VAAPI
    /** @name Deprecated pixel formats */
    /**@{*/
    AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
    AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
    AV_PIX_FMT_VAAPI_VLD,  ///< HW decoding through VA API, Picture.data[3] contains a VASurfaceID
    /**@}*/
    AV_PIX_FMT_VAAPI = AV_PIX_FMT_VAAPI_VLD,
#else
    /**
     *  Hardware acceleration through VA-API, data[3] contains a
     *  VASurfaceID.
     */
    AV_PIX_FMT_VAAPI,
#endif
    AV_PIX_FMT_YUV420P16LE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P16BE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV422P16LE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P16BE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P16LE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P16BE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_DXVA2_VLD,    ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
    AV_PIX_FMT_RGB444LE,  ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined
    AV_PIX_FMT_RGB444BE,  ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian,    X=unused/undefined
    AV_PIX_FMT_BGR444LE,  ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined
    AV_PIX_FMT_BGR444BE,  ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian,    X=unused/undefined
    AV_PIX_FMT_YA8,       ///< 8 bits gray, 8 bits alpha
    AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
    AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
    AV_PIX_FMT_BGR48BE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
    AV_PIX_FMT_BGR48LE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
    /**
     * The following 12 formats have the disadvantage of needing 1 format for each bit depth.
     * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.
     * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16
     * with the bpp stored separately is better.
     */
    AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_GBRP,      ///< planar GBR 4:4:4 24bpp
    AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP, // alias for #AV_PIX_FMT_GBRP
    AV_PIX_FMT_GBRP9BE,   ///< planar GBR 4:4:4 27bpp, big-endian
    AV_PIX_FMT_GBRP9LE,   ///< planar GBR 4:4:4 27bpp, little-endian
    AV_PIX_FMT_GBRP10BE,  ///< planar GBR 4:4:4 30bpp, big-endian
    AV_PIX_FMT_GBRP10LE,  ///< planar GBR 4:4:4 30bpp, little-endian
    AV_PIX_FMT_GBRP16BE,  ///< planar GBR 4:4:4 48bpp, big-endian
    AV_PIX_FMT_GBRP16LE,  ///< planar GBR 4:4:4 48bpp, little-endian
    AV_PIX_FMT_YUVA422P,  ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
    AV_PIX_FMT_YUVA444P,  ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
    AV_PIX_FMT_YUVA420P9BE,  ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
    AV_PIX_FMT_YUVA420P9LE,  ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
    AV_PIX_FMT_YUVA422P9BE,  ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
    AV_PIX_FMT_YUVA422P9LE,  ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
    AV_PIX_FMT_YUVA444P9BE,  ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
    AV_PIX_FMT_YUVA444P9LE,  ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
    AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
    AV_PIX_FMT_VDPAU,     ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface
    AV_PIX_FMT_XYZ12LE,      ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
    AV_PIX_FMT_XYZ12BE,      ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
    AV_PIX_FMT_NV16,         ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
    AV_PIX_FMT_NV20LE,       ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_NV20BE,       ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_RGBA64BE,     ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_RGBA64LE,     ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
    AV_PIX_FMT_BGRA64BE,     ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_BGRA64LE,     ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
    AV_PIX_FMT_YVYU422,   ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
    AV_PIX_FMT_YA16BE,       ///< 16 bits gray, 16 bits alpha (big-endian)
    AV_PIX_FMT_YA16LE,       ///< 16 bits gray, 16 bits alpha (little-endian)
    AV_PIX_FMT_GBRAP,        ///< planar GBRA 4:4:4:4 32bpp
    AV_PIX_FMT_GBRAP16BE,    ///< planar GBRA 4:4:4:4 64bpp, big-endian
    AV_PIX_FMT_GBRAP16LE,    ///< planar GBRA 4:4:4:4 64bpp, little-endian
    /**
     *  HW acceleration through QSV, data[3] contains a pointer to the
     *  mfxFrameSurface1 structure.
     */
    AV_PIX_FMT_QSV,
    /**
     * HW acceleration though MMAL, data[3] contains a pointer to the
     * MMAL_BUFFER_HEADER_T structure.
     */
    AV_PIX_FMT_MMAL,
    AV_PIX_FMT_D3D11VA_VLD,  ///< HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer
    /**
     * HW acceleration through CUDA. data[i] contain CUdeviceptr pointers
     * exactly as for system memory frames.
     */
    AV_PIX_FMT_CUDA,
    AV_PIX_FMT_0RGB,        ///< packed RGB 8:8:8, 32bpp, XRGBXRGB...   X=unused/undefined
    AV_PIX_FMT_RGB0,        ///< packed RGB 8:8:8, 32bpp, RGBXRGBX...   X=unused/undefined
    AV_PIX_FMT_0BGR,        ///< packed BGR 8:8:8, 32bpp, XBGRXBGR...   X=unused/undefined
    AV_PIX_FMT_BGR0,        ///< packed BGR 8:8:8, 32bpp, BGRXBGRX...   X=unused/undefined
    AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_GBRP12BE,    ///< planar GBR 4:4:4 36bpp, big-endian
    AV_PIX_FMT_GBRP12LE,    ///< planar GBR 4:4:4 36bpp, little-endian
    AV_PIX_FMT_GBRP14BE,    ///< planar GBR 4:4:4 42bpp, big-endian
    AV_PIX_FMT_GBRP14LE,    ///< planar GBR 4:4:4 42bpp, little-endian
    AV_PIX_FMT_YUVJ411P,    ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range
    AV_PIX_FMT_BAYER_BGGR8,    ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */
    AV_PIX_FMT_BAYER_RGGB8,    ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */
    AV_PIX_FMT_BAYER_GBRG8,    ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */
    AV_PIX_FMT_BAYER_GRBG8,    ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */
    AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */
    AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */
    AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */
    AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */
    AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */
    AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */
    AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */
    AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */
    AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing
    AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
    AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
    AV_PIX_FMT_YUV440P12LE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
    AV_PIX_FMT_YUV440P12BE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
    AV_PIX_FMT_AYUV64LE,    ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
    AV_PIX_FMT_AYUV64BE,    ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
    AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox
    AV_PIX_FMT_P010LE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian
    AV_PIX_FMT_P010BE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian
    AV_PIX_FMT_GBRAP12BE,  ///< planar GBR 4:4:4:4 48bpp, big-endian
    AV_PIX_FMT_GBRAP12LE,  ///< planar GBR 4:4:4:4 48bpp, little-endian
    AV_PIX_FMT_GBRAP10BE,  ///< planar GBR 4:4:4:4 40bpp, big-endian
    AV_PIX_FMT_GBRAP10LE,  ///< planar GBR 4:4:4:4 40bpp, little-endian
    AV_PIX_FMT_MEDIACODEC, ///< hardware decoding through MediaCodec
    AV_PIX_FMT_GRAY12BE,   ///<        Y        , 12bpp, big-endian
    AV_PIX_FMT_GRAY12LE,   ///<        Y        , 12bpp, little-endian
    AV_PIX_FMT_GRAY10BE,   ///<        Y        , 10bpp, big-endian
    AV_PIX_FMT_GRAY10LE,   ///<        Y        , 10bpp, little-endian
    AV_PIX_FMT_P016LE, ///< like NV12, with 16bpp per component, little-endian
    AV_PIX_FMT_P016BE, ///< like NV12, with 16bpp per component, big-endian
    /**
     * Hardware surfaces for Direct3D11.
     *
     * This is preferred over the legacy AV_PIX_FMT_D3D11VA_VLD. The new D3D11
     * hwaccel API and filtering support AV_PIX_FMT_D3D11 only.
     *
     * data[0] contains a ID3D11Texture2D pointer, and data[1] contains the
     * texture array index of the frame as intptr_t if the ID3D11Texture2D is
     * an array texture (or always 0 if it's a normal texture).
     */
    AV_PIX_FMT_D3D11,
    AV_PIX_FMT_GRAY9BE,   ///<        Y        , 9bpp, big-endian
    AV_PIX_FMT_GRAY9LE,   ///<        Y        , 9bpp, little-endian
    AV_PIX_FMT_GBRPF32BE,  ///< IEEE-754 single precision planar GBR 4:4:4,     96bpp, big-endian
    AV_PIX_FMT_GBRPF32LE,  ///< IEEE-754 single precision planar GBR 4:4:4,     96bpp, little-endian
    AV_PIX_FMT_GBRAPF32BE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian
    AV_PIX_FMT_GBRAPF32LE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian
    /**
     * DRM-managed buffers exposed through PRIME buffer sharing.
     *
     * data[0] points to an AVDRMFrameDescriptor.
     */
    AV_PIX_FMT_DRM_PRIME,
    /**
     * Hardware surfaces for OpenCL.
     *
     * data[i] contain 2D image objects (typed in C as cl_mem, used
     * in OpenCL as image2d_t) for each plane of the surface.
     */
    AV_PIX_FMT_OPENCL,
    AV_PIX_FMT_GRAY14BE,   ///<        Y        , 14bpp, big-endian
    AV_PIX_FMT_GRAY14LE,   ///<        Y        , 14bpp, little-endian
    AV_PIX_FMT_GRAYF32BE,  ///< IEEE-754 single precision Y, 32bpp, big-endian
    AV_PIX_FMT_GRAYF32LE,  ///< IEEE-754 single precision Y, 32bpp, little-endian
    AV_PIX_FMT_YUVA422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, big-endian
    AV_PIX_FMT_YUVA422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, little-endian
    AV_PIX_FMT_YUVA444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, big-endian
    AV_PIX_FMT_YUVA444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, little-endian
    AV_PIX_FMT_NV24,      ///< planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
    AV_PIX_FMT_NV42,      ///< as above, but U and V bytes are swapped
    AV_PIX_FMT_NB         ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
};
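
Only a handful of these formats come up in everyday decoding work, and instead of memorizing the table you can query a format's properties at run time through the pixel-format descriptor API in libavutil. A minimal sketch (AV_PIX_FMT_YUV420P is only an example choice; build it with pkg-config --cflags --libs libavutil like the program below):

#include <libavutil/pixdesc.h>
#include <stdio.h>

int main(void)
{
    enum AVPixelFormat fmt = AV_PIX_FMT_YUV420P;              /* example format */
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);

    /* name, component count, chroma subsampling and planar flag of the format */
    printf("name: %s, components: %d, log2 chroma w/h: %d/%d, planar: %d\n",
           av_get_pix_fmt_name(fmt), desc->nb_components,
           desc->log2_chroma_w, desc->log2_chroma_h,
           (desc->flags & AV_PIX_FMT_FLAG_PLANAR) != 0);
    return 0;
}

The full demuxing_decode_video.c source follows.
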
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavutil/pixdesc.h>
#include <libavutil/log.h>
#include <stdio.h>
#include <stdlib.h>

static uint8_t *video_dst_data[4] = {NULL};  /* one pointer per plane of the decoded picture */
static int      video_dst_linesize[4];       /* bytes per row (stride) of each plane */
static int      video_dst_bufsize = 0;
static FILE    *fp = NULL;

int decode_video_pkt(AVCodecContext *codec_context, AVPacket *pkt, AVFrame *frame,
                     int width, int height, enum AVPixelFormat pixel_fmt)
{
    int ret = 0;

    /* feed one packet to the decoder, then drain every frame it produces */
    ret = avcodec_send_packet(codec_context, pkt);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "fail to send packet.\n");
        goto fail;
    }

    while (ret >= 0) {
        ret = avcodec_receive_frame(codec_context, frame);
        if (ret < 0) {
            if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
                break;
            else
                goto fail;
        }

        if (frame->width != width || frame->height != height ||
            frame->format != pixel_fmt) {
            av_log(NULL, AV_LOG_ERROR,
                   "wrong format, height: %d, width: %d, format: %s\n",
                   frame->height, frame->width,
                   av_get_pix_fmt_name(frame->format));
            continue;
        }

        /* copy the decoded planes into the contiguous buffer and append it to the file */
        av_image_copy(video_dst_data, video_dst_linesize,
                      (const uint8_t **)frame->data, frame->linesize,
                      pixel_fmt, width, height);
        fwrite(video_dst_data[0], 1, video_dst_bufsize, fp);
    }
    return 0;

fail:
    return -1;
}

int main(int argc, char **argv)
{
    AVFormatContext *fmt_context = NULL;
    int ret = 0, frame_count = 0, count = 0;
    char *src_video_file = NULL;
    char *dst_decoded_file = NULL;
    AVStream *video_st = NULL;
    AVCodec *video_codec = NULL;
    AVCodecContext *codec_context = NULL;
    AVFrame *frame = NULL;
    AVPacket pkt;
    enum AVPixelFormat pixel_fmt;
    int width = 0;
    int height = 0;
    int video_stream_idx = 0;

    av_log_set_level(AV_LOG_INFO);

    if (argc != 4) {
        av_log(NULL, AV_LOG_ERROR,
               "usage: demuxing_decode_video input_video_file output_video_file frame_count\n");
        return -1;
    }
    src_video_file = argv[1];
    dst_decoded_file = argv[2];
    frame_count = atoi(argv[3]);

    ret = avformat_open_input(&fmt_context, src_video_file, NULL, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "avformat open input fail.\n");
        return -1;
    }
    av_dump_format(fmt_context, 0, src_video_file, 0);

    ret = avformat_find_stream_info(fmt_context, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "fail to find stream info.\n");
        goto fail;
    }

    ret = av_find_best_stream(fmt_context, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "fail to find best stream.\n");
        goto fail;
    }
    video_stream_idx = ret;
    video_st = fmt_context->streams[video_stream_idx];

    video_codec = avcodec_find_decoder(video_st->codecpar->codec_id);
    if (!video_codec) {
        av_log(NULL, AV_LOG_ERROR, "fail to find codec.\n");
        goto fail;
    }

    codec_context = avcodec_alloc_context3(video_codec);
    if (!codec_context) {
        av_log(NULL, AV_LOG_ERROR, "fail to alloc avcodec context.\n");
        goto fail;
    }

    ret = avcodec_parameters_to_context(codec_context, video_st->codecpar);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "fail to copy parameters.\n");
        goto fail;
    }

    ret = avcodec_open2(codec_context, video_codec, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "fail to open codec.\n");
        goto fail;
    }

    fp = fopen(dst_decoded_file, "wb");
    if (!fp) {
        av_log(NULL, AV_LOG_ERROR, "fail to open dst file.\n");
        goto fail;
    }

    frame = av_frame_alloc();
    if (!frame) {
        av_log(NULL, AV_LOG_ERROR, "fail to alloc frame.\n");
        goto fail;
    }

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    height = codec_context->height;
    width = codec_context->width;
    pixel_fmt = codec_context->pix_fmt;
    av_log(NULL, AV_LOG_INFO, "picture source format, width: %d, height: %d, format: %s\n",
           width, height, av_get_pix_fmt_name(pixel_fmt));

    /* one tightly packed (align = 1) buffer holding all planes of a decoded picture */
    ret = av_image_alloc(video_dst_data, video_dst_linesize,
                         width, height, pixel_fmt, 1);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "fail to alloc image.\n");
        goto fail;
    }
    video_dst_bufsize = ret;
    av_log(NULL, AV_LOG_INFO, "%s format, raw picture %d x %d x %d x %d, total %d\n",
           av_get_pix_fmt_name(pixel_fmt),
           video_dst_linesize[0], video_dst_linesize[1],
           video_dst_linesize[2], video_dst_linesize[3],
           video_dst_bufsize);

    while (av_read_frame(fmt_context, &pkt) >= 0) {
        if (pkt.stream_index != video_stream_idx) {
            av_packet_unref(&pkt);
            continue;
        }
        ret = decode_video_pkt(codec_context, &pkt, frame, width, height, pixel_fmt);
        av_packet_unref(&pkt);
        if (ret < 0)
            continue;
        count++;
        if (count == frame_count)
            break;
    }

    /* flush the decoder with an empty (drain) packet */
    pkt.data = NULL;
    pkt.size = 0;
    decode_video_pkt(codec_context, &pkt, frame, width, height, pixel_fmt);

fail:
    if (fmt_context)
        avformat_close_input(&fmt_context);
    if (codec_context)
        avcodec_free_context(&codec_context);
    if (frame)
        av_frame_free(&frame);
    if (video_dst_data[0])
        av_free(video_dst_data[0]);
    if (fp)
        fclose(fp);
    return 0;
}
gcc -o demuxing_decode_video demuxing_decode_video.c `pkg-config --cflags --libs libavformat libavutil libavcodec`
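
A possible way to run and check the result (a sketch, not from the original post: the file names are placeholders, and the pixel format and resolution passed to ffplay must match what the program printed for your input, assumed here to be yuv420p at 1280x720):

./demuxing_decode_video input.mp4 out.yuv 100
ffplay -f rawvideo -pixel_format yuv420p -video_size 1280x720 out.yuv
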

The video_dst_data[4] pointer array has four entries because a raw picture consists of at most four planes; the line size (stride, in bytes per row) of each plane is kept in the matching entry of video_dst_linesize[4].
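
As a concrete illustration (my own numbers, assuming a 1920x1080 yuv420p picture, which the original post does not specify): the Y plane is full resolution while Cb and Cr are subsampled 2x2 each, so av_image_alloc() with align = 1 should report linesizes of 1920, 960 and 960 bytes and a total buffer of 1920*1080*3/2 = 3110400 bytes, which is exactly the layout the program writes to the output file for every frame.

#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
#include <libavutil/pixfmt.h>
#include <stdio.h>

int main(void)
{
    uint8_t *data[4];
    int linesize[4];

    /* same call as in the program above, for an assumed 1920x1080 yuv420p picture */
    int size = av_image_alloc(data, linesize, 1920, 1080, AV_PIX_FMT_YUV420P, 1);
    if (size < 0)
        return 1;

    /* expected: linesize = {1920, 960, 960, 0}, total = 3110400 */
    printf("linesize: %d %d %d %d, total: %d\n",
           linesize[0], linesize[1], linesize[2], linesize[3], size);

    av_free(data[0]);  /* all planes live in the single buffer pointed to by data[0] */
    return 0;
}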
