/*
 * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

module ffmpeg.libavutil.pixfmt;

extern (C):
import ffmpeg;
@nogc nothrow:

/**
 * @file
 * pixel format definitions
 */

enum AVPALETTE_SIZE = 1024;
enum AVPALETTE_COUNT = 256;

/**
 * Pixel format.
 *
 * @note
 * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
 * color is put together as:
 *  (A << 24) | (R << 16) | (G << 8) | B
 * This is stored as BGRA on little-endian CPU architectures and ARGB on
 * big-endian CPUs.
 *
 * @note
 * If the resolution is not a multiple of the chroma subsampling factor
 * then the chroma plane resolution must be rounded up.
 *
 * @par
 * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized
 * image data is stored in AVFrame.data[0]. The palette is transported in
 * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
 * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is
 * also endian-specific). Note also that the individual RGB32 palette
 * components stored in AVFrame.data[1] should be in the range 0..255.
 * This is important as many custom PAL8 video codecs that were designed
 * to run on the IBM VGA graphics adapter use 6-bit palette components.
 *
 * @par
 * For all the 8 bits per pixel formats, an RGB32 palette is in data[1] like
 * for pal8. This palette is filled in automatically by the function
 * allocating the picture.
 */
enum AVPixelFormat
{
    AV_PIX_FMT_NONE = -1,
    AV_PIX_FMT_YUV420P = 0, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
    AV_PIX_FMT_YUYV422 = 1, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
    AV_PIX_FMT_RGB24 = 2, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
    AV_PIX_FMT_BGR24 = 3, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
    AV_PIX_FMT_YUV422P = 4, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
    AV_PIX_FMT_YUV444P = 5, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
    AV_PIX_FMT_YUV410P = 6, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
    AV_PIX_FMT_YUV411P = 7, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
    AV_PIX_FMT_GRAY8 = 8, ///< Y, 8bpp
    AV_PIX_FMT_MONOWHITE = 9, ///< Y, 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
    AV_PIX_FMT_MONOBLACK = 10, ///< Y, 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
    AV_PIX_FMT_PAL8 = 11, ///< 8 bits with AV_PIX_FMT_RGB32 palette
    AV_PIX_FMT_YUVJ420P = 12, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range
    AV_PIX_FMT_YUVJ422P = 13, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range
    AV_PIX_FMT_YUVJ444P = 14, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
    AV_PIX_FMT_UYVY422 = 15, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
    AV_PIX_FMT_UYYVYY411 = 16, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
    AV_PIX_FMT_BGR8 = 17, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
    AV_PIX_FMT_BGR4 = 18, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
    AV_PIX_FMT_BGR4_BYTE = 19, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
    AV_PIX_FMT_RGB8 = 20, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
    AV_PIX_FMT_RGB4 = 21, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
    AV_PIX_FMT_RGB4_BYTE = 22, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
    AV_PIX_FMT_NV12 = 23, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
    AV_PIX_FMT_NV21 = 24, ///< as above, but U and V bytes are swapped

    AV_PIX_FMT_ARGB = 25, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
    AV_PIX_FMT_RGBA = 26, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
    AV_PIX_FMT_ABGR = 27, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
    AV_PIX_FMT_BGRA = 28, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...

    AV_PIX_FMT_GRAY16BE = 29, ///< Y, 16bpp, big-endian
    AV_PIX_FMT_GRAY16LE = 30, ///< Y, 16bpp, little-endian
    AV_PIX_FMT_YUV440P = 31, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
    AV_PIX_FMT_YUVJ440P = 32, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
    AV_PIX_FMT_YUVA420P = 33, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
    AV_PIX_FMT_RGB48BE = 34, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
    AV_PIX_FMT_RGB48LE = 35, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian

    AV_PIX_FMT_RGB565BE = 36, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
    AV_PIX_FMT_RGB565LE = 37, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
    AV_PIX_FMT_RGB555BE = 38, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian, X=unused/undefined
    AV_PIX_FMT_RGB555LE = 39, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined

    AV_PIX_FMT_BGR565BE = 40, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
    AV_PIX_FMT_BGR565LE = 41, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
    AV_PIX_FMT_BGR555BE = 42, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian, X=unused/undefined
    AV_PIX_FMT_BGR555LE = 43, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined

    /** @name Deprecated pixel formats */
    /**@{*/
    AV_PIX_FMT_VAAPI_MOCO = 44, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
    AV_PIX_FMT_VAAPI_IDCT = 45, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
    AV_PIX_FMT_VAAPI_VLD = 46, ///< HW decoding through VA API, Picture.data[3] contains a VASurfaceID
    /**@}*/

    /**
     * Hardware acceleration through VA-API, data[3] contains a
     * VASurfaceID.
     */
    AV_PIX_FMT_VAAPI = AV_PIX_FMT_VAAPI_VLD,

    AV_PIX_FMT_YUV420P16LE = 47, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P16BE = 48, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV422P16LE = 49, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P16BE = 50, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P16LE = 51, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P16BE = 52, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_DXVA2_VLD = 53, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer

    AV_PIX_FMT_RGB444LE = 54, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined
    AV_PIX_FMT_RGB444BE = 55, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined
    AV_PIX_FMT_BGR444LE = 56, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined
    AV_PIX_FMT_BGR444BE = 57, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined
    AV_PIX_FMT_YA8 = 58, ///< 8 bits gray, 8 bits alpha

    AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
    AV_PIX_FMT_GRAY8A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8

    AV_PIX_FMT_BGR48BE = 59, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
    AV_PIX_FMT_BGR48LE = 60, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian

    /**
     * The following 12 formats have the disadvantage of needing 1 format for each bit depth.
     * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.
     * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
     */
    AV_PIX_FMT_YUV420P9BE = 61, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P9LE = 62, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P10BE = 63, ///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P10LE = 64, ///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV422P10BE = 65, ///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P10LE = 66, ///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P9BE = 67, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P9LE = 68, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P10BE = 69, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P10LE = 70, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P9BE = 71, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P9LE = 72, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_GBRP = 73, ///< planar GBR 4:4:4 24bpp
    AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP, ///< alias for AV_PIX_FMT_GBRP
    AV_PIX_FMT_GBRP9BE = 74, ///< planar GBR 4:4:4 27bpp, big-endian
    AV_PIX_FMT_GBRP9LE = 75, ///< planar GBR 4:4:4 27bpp, little-endian
    AV_PIX_FMT_GBRP10BE = 76, ///< planar GBR 4:4:4 30bpp, big-endian
    AV_PIX_FMT_GBRP10LE = 77, ///< planar GBR 4:4:4 30bpp, little-endian
    AV_PIX_FMT_GBRP16BE = 78, ///< planar GBR 4:4:4 48bpp, big-endian
    AV_PIX_FMT_GBRP16LE = 79, ///< planar GBR 4:4:4 48bpp, little-endian
    AV_PIX_FMT_YUVA422P = 80, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
    AV_PIX_FMT_YUVA444P = 81, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
    AV_PIX_FMT_YUVA420P9BE = 82, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
    AV_PIX_FMT_YUVA420P9LE = 83, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
    AV_PIX_FMT_YUVA422P9BE = 84, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
    AV_PIX_FMT_YUVA422P9LE = 85, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
    AV_PIX_FMT_YUVA444P9BE = 86, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
    AV_PIX_FMT_YUVA444P9LE = 87, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
    AV_PIX_FMT_YUVA420P10BE = 88, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA420P10LE = 89, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA422P10BE = 90, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA422P10LE = 91, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA444P10BE = 92, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA444P10LE = 93, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA420P16BE = 94, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA420P16LE = 95, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA422P16BE = 96, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA422P16LE = 97, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA444P16BE = 98, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA444P16LE = 99, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)

    AV_PIX_FMT_VDPAU = 100, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface

    AV_PIX_FMT_XYZ12LE = 101, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
    AV_PIX_FMT_XYZ12BE = 102, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
    AV_PIX_FMT_NV16 = 103, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
    AV_PIX_FMT_NV20LE = 104, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_NV20BE = 105, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian

    AV_PIX_FMT_RGBA64BE = 106, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_RGBA64LE = 107, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
    AV_PIX_FMT_BGRA64BE = 108, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_BGRA64LE = 109, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian

    AV_PIX_FMT_YVYU422 = 110, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb

    AV_PIX_FMT_YA16BE = 111, ///< 16 bits gray, 16 bits alpha (big-endian)
    AV_PIX_FMT_YA16LE = 112, ///< 16 bits gray, 16 bits alpha (little-endian)

    AV_PIX_FMT_GBRAP = 113, ///< planar GBRA 4:4:4:4 32bpp
    AV_PIX_FMT_GBRAP16BE = 114, ///< planar GBRA 4:4:4:4 64bpp, big-endian
    AV_PIX_FMT_GBRAP16LE = 115, ///< planar GBRA 4:4:4:4 64bpp, little-endian

    /**
     * HW acceleration through QSV, data[3] contains a pointer to the
     * mfxFrameSurface1 structure.
     */
    AV_PIX_FMT_QSV = 116,

    /**
     * HW acceleration through MMAL, data[3] contains a pointer to the
     * MMAL_BUFFER_HEADER_T structure.
     */
    AV_PIX_FMT_MMAL = 117,

    AV_PIX_FMT_D3D11VA_VLD = 118, ///< HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer

    /**
     * HW acceleration through CUDA. data[i] contain CUdeviceptr pointers
     * exactly as for system memory frames.
     */
    AV_PIX_FMT_CUDA = 119,

    AV_PIX_FMT_0RGB = 120, ///< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
    AV_PIX_FMT_RGB0 = 121, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
    AV_PIX_FMT_0BGR = 122, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
    AV_PIX_FMT_BGR0 = 123, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined

    AV_PIX_FMT_YUV420P12BE = 124, ///< planar YUV 4:2:0, 18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P12LE = 125, ///< planar YUV 4:2:0, 18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P14BE = 126, ///< planar YUV 4:2:0, 21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P14LE = 127, ///< planar YUV 4:2:0, 21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV422P12BE = 128, ///< planar YUV 4:2:2, 24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P12LE = 129, ///< planar YUV 4:2:2, 24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P14BE = 130, ///< planar YUV 4:2:2, 28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P14LE = 131, ///< planar YUV 4:2:2, 28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P12BE = 132, ///< planar YUV 4:4:4, 36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P12LE = 133, ///< planar YUV 4:4:4, 36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P14BE = 134, ///< planar YUV 4:4:4, 42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P14LE = 135, ///< planar YUV 4:4:4, 42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_GBRP12BE = 136, ///< planar GBR 4:4:4 36bpp, big-endian
    AV_PIX_FMT_GBRP12LE = 137, ///< planar GBR 4:4:4 36bpp, little-endian
    AV_PIX_FMT_GBRP14BE = 138, ///< planar GBR 4:4:4 42bpp, big-endian
    AV_PIX_FMT_GBRP14LE = 139, ///< planar GBR 4:4:4 42bpp, little-endian
    AV_PIX_FMT_YUVJ411P = 140, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range

    AV_PIX_FMT_BAYER_BGGR8 = 141, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
    AV_PIX_FMT_BAYER_RGGB8 = 142, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
    AV_PIX_FMT_BAYER_GBRG8 = 143, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
    AV_PIX_FMT_BAYER_GRBG8 = 144, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
    AV_PIX_FMT_BAYER_BGGR16LE = 145, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian
    AV_PIX_FMT_BAYER_BGGR16BE = 146, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian
    AV_PIX_FMT_BAYER_RGGB16LE = 147, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian
    AV_PIX_FMT_BAYER_RGGB16BE = 148, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian
    AV_PIX_FMT_BAYER_GBRG16LE = 149, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian
    AV_PIX_FMT_BAYER_GBRG16BE = 150, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian
    AV_PIX_FMT_BAYER_GRBG16LE = 151, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian
    AV_PIX_FMT_BAYER_GRBG16BE = 152, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian

    AV_PIX_FMT_XVMC = 153, ///< XVideo Motion Acceleration via common packet passing

    AV_PIX_FMT_YUV440P10LE = 154, ///< planar YUV 4:4:0, 20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
    AV_PIX_FMT_YUV440P10BE = 155, ///< planar YUV 4:4:0, 20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
    AV_PIX_FMT_YUV440P12LE = 156, ///< planar YUV 4:4:0, 24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
    AV_PIX_FMT_YUV440P12BE = 157, ///< planar YUV 4:4:0, 24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
    AV_PIX_FMT_AYUV64LE = 158, ///< packed AYUV 4:4:4, 64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
    AV_PIX_FMT_AYUV64BE = 159, ///< packed AYUV 4:4:4, 64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian

    AV_PIX_FMT_VIDEOTOOLBOX = 160, ///< hardware decoding through Videotoolbox

    AV_PIX_FMT_P010LE = 161, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian
    AV_PIX_FMT_P010BE = 162, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian

    AV_PIX_FMT_GBRAP12BE = 163, ///< planar GBR 4:4:4:4 48bpp, big-endian
    AV_PIX_FMT_GBRAP12LE = 164, ///< planar GBR 4:4:4:4 48bpp, little-endian

    AV_PIX_FMT_GBRAP10BE = 165, ///< planar GBR 4:4:4:4 40bpp, big-endian
    AV_PIX_FMT_GBRAP10LE = 166, ///< planar GBR 4:4:4:4 40bpp, little-endian

    AV_PIX_FMT_MEDIACODEC = 167, ///< hardware decoding through MediaCodec

    AV_PIX_FMT_GRAY12BE = 168, ///< Y, 12bpp, big-endian
    AV_PIX_FMT_GRAY12LE = 169, ///< Y, 12bpp, little-endian
    AV_PIX_FMT_GRAY10BE = 170, ///< Y, 10bpp, big-endian
    AV_PIX_FMT_GRAY10LE = 171, ///< Y, 10bpp, little-endian

    AV_PIX_FMT_P016LE = 172, ///< like NV12, with 16bpp per component, little-endian
    AV_PIX_FMT_P016BE = 173, ///< like NV12, with 16bpp per component, big-endian

    /**
     * Hardware surfaces for Direct3D11.
     *
     * This is preferred over the legacy AV_PIX_FMT_D3D11VA_VLD. The new D3D11
     * hwaccel API and filtering support AV_PIX_FMT_D3D11 only.
     *
     * data[0] contains a ID3D11Texture2D pointer, and data[1] contains the
     * texture array index of the frame as intptr_t if the ID3D11Texture2D is
     * an array texture (or always 0 if it's a normal texture).
     */
    AV_PIX_FMT_D3D11 = 174,

    AV_PIX_FMT_GRAY9BE = 175, ///< Y, 9bpp, big-endian
    AV_PIX_FMT_GRAY9LE = 176, ///< Y, 9bpp, little-endian

    AV_PIX_FMT_GBRPF32BE = 177, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian
    AV_PIX_FMT_GBRPF32LE = 178, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian
    AV_PIX_FMT_GBRAPF32BE = 179, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian
    AV_PIX_FMT_GBRAPF32LE = 180, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian

    /**
     * DRM-managed buffers exposed through PRIME buffer sharing.
     *
     * data[0] points to an AVDRMFrameDescriptor.
     */
    AV_PIX_FMT_DRM_PRIME = 181,

    /**
     * Hardware surfaces for OpenCL.
     *
     * data[i] contain 2D image objects (typed in C as cl_mem, used
     * in OpenCL as image2d_t) for each plane of the surface.
     */
    AV_PIX_FMT_OPENCL = 182,

    AV_PIX_FMT_GRAY14BE = 183, ///< Y, 14bpp, big-endian
    AV_PIX_FMT_GRAY14LE = 184, ///< Y, 14bpp, little-endian

    AV_PIX_FMT_GRAYF32BE = 185, ///< IEEE-754 single precision Y, 32bpp, big-endian
    AV_PIX_FMT_GRAYF32LE = 186, ///< IEEE-754 single precision Y, 32bpp, little-endian

    AV_PIX_FMT_NB = 187 ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
}
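
// Illustration (not part of the original header): the @note on AVPixelFormat
// above says that chroma plane resolutions are rounded up when the frame size
// is not a multiple of the chroma subsampling factor. A minimal sketch of that
// rule for a hypothetical 1919x1081 AV_PIX_FMT_YUV420P frame (2x2 subsampling):
unittest
{
    enum int lumaW = 1919, lumaH = 1081;
    enum int chromaW = (lumaW + 1) / 2; // round up, do not truncate
    enum int chromaH = (lumaH + 1) / 2;
    static assert(chromaW == 960 && chromaH == 541);
}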

extern (D) string AV_PIX_FMT_NE(T0, T1)(auto ref T0 be, auto ref T1 le)
{
    import std.conv : to;

    // This translation of the C AV_PIX_FMT_NE(be, le) macro always resolves
    // to the little-endian variant; the concrete aliases below are likewise
    // the little-endian mappings.
    return "AV_PIX_FMT_" ~ to!string(le);
}

enum AV_PIX_FMT_RGB32 = AVPixelFormat.AV_PIX_FMT_BGRA;
enum AV_PIX_FMT_RGB32_1 = AVPixelFormat.AV_PIX_FMT_ABGR;
enum AV_PIX_FMT_BGR32 = AVPixelFormat.AV_PIX_FMT_RGBA;
enum AV_PIX_FMT_BGR32_1 = AVPixelFormat.AV_PIX_FMT_ARGB;
enum AV_PIX_FMT_0RGB32 = AVPixelFormat.AV_PIX_FMT_BGR0;
enum AV_PIX_FMT_0BGR32 = AVPixelFormat.AV_PIX_FMT_RGB0;
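
// Illustration (not part of the original header): per the AVPixelFormat
// documentation above, an AV_PIX_FMT_RGB32 pixel is assembled as
// (A << 24) | (R << 16) | (G << 8) | B, which is why the alias resolves to
// the BGRA byte order on little-endian hosts. A minimal sketch:
unittest
{
    const uint a = 0xFF, r = 0x11, g = 0x22, b = 0x33;
    const uint pixel = (a << 24) | (r << 16) | (g << 8) | b;

    version (LittleEndian)
    {
        // In memory the bytes read B, G, R, A, i.e. the BGRA layout.
        auto bytes = cast(const(ubyte)*) &pixel;
        assert(bytes[0] == b && bytes[1] == g && bytes[2] == r && bytes[3] == a);
        static assert(AV_PIX_FMT_RGB32 == AVPixelFormat.AV_PIX_FMT_BGRA);
    }
}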

enum AV_PIX_FMT_GRAY9 = AVPixelFormat.AV_PIX_FMT_GRAY9LE;
enum AV_PIX_FMT_GRAY10 = AVPixelFormat.AV_PIX_FMT_GRAY10LE;
enum AV_PIX_FMT_GRAY12 = AVPixelFormat.AV_PIX_FMT_GRAY12LE;
enum AV_PIX_FMT_GRAY14 = AVPixelFormat.AV_PIX_FMT_GRAY14LE;
enum AV_PIX_FMT_GRAY16 = AVPixelFormat.AV_PIX_FMT_GRAY16LE;
enum AV_PIX_FMT_YA16 = AVPixelFormat.AV_PIX_FMT_YA16LE;
enum AV_PIX_FMT_RGB48 = AVPixelFormat.AV_PIX_FMT_RGB48LE;
enum AV_PIX_FMT_RGB565 = AVPixelFormat.AV_PIX_FMT_RGB565LE;
enum AV_PIX_FMT_RGB555 = AVPixelFormat.AV_PIX_FMT_RGB555LE;
enum AV_PIX_FMT_RGB444 = AVPixelFormat.AV_PIX_FMT_RGB444LE;
enum AV_PIX_FMT_RGBA64 = AVPixelFormat.AV_PIX_FMT_RGBA64LE;
enum AV_PIX_FMT_BGR48 = AVPixelFormat.AV_PIX_FMT_BGR48LE;
enum AV_PIX_FMT_BGR565 = AVPixelFormat.AV_PIX_FMT_BGR565LE;
enum AV_PIX_FMT_BGR555 = AVPixelFormat.AV_PIX_FMT_BGR555LE;
enum AV_PIX_FMT_BGR444 = AVPixelFormat.AV_PIX_FMT_BGR444LE;
enum AV_PIX_FMT_BGRA64 = AVPixelFormat.AV_PIX_FMT_BGRA64LE;

enum AV_PIX_FMT_YUV420P9 = AVPixelFormat.AV_PIX_FMT_YUV420P9LE;
enum AV_PIX_FMT_YUV422P9 = AVPixelFormat.AV_PIX_FMT_YUV422P9LE;
enum AV_PIX_FMT_YUV444P9 = AVPixelFormat.AV_PIX_FMT_YUV444P9LE;
enum AV_PIX_FMT_YUV420P10 = AVPixelFormat.AV_PIX_FMT_YUV420P10LE;
enum AV_PIX_FMT_YUV422P10 = AVPixelFormat.AV_PIX_FMT_YUV422P10LE;
enum AV_PIX_FMT_YUV440P10 = AVPixelFormat.AV_PIX_FMT_YUV440P10LE;
enum AV_PIX_FMT_YUV444P10 = AVPixelFormat.AV_PIX_FMT_YUV444P10LE;
enum AV_PIX_FMT_YUV420P12 = AVPixelFormat.AV_PIX_FMT_YUV420P12LE;
enum AV_PIX_FMT_YUV422P12 = AVPixelFormat.AV_PIX_FMT_YUV422P12LE;
enum AV_PIX_FMT_YUV440P12 = AVPixelFormat.AV_PIX_FMT_YUV440P12LE;
enum AV_PIX_FMT_YUV444P12 = AVPixelFormat.AV_PIX_FMT_YUV444P12LE;
enum AV_PIX_FMT_YUV420P14 = AVPixelFormat.AV_PIX_FMT_YUV420P14LE;
enum AV_PIX_FMT_YUV422P14 = AVPixelFormat.AV_PIX_FMT_YUV422P14LE;
enum AV_PIX_FMT_YUV444P14 = AVPixelFormat.AV_PIX_FMT_YUV444P14LE;
enum AV_PIX_FMT_YUV420P16 = AVPixelFormat.AV_PIX_FMT_YUV420P16LE;
enum AV_PIX_FMT_YUV422P16 = AVPixelFormat.AV_PIX_FMT_YUV422P16LE;
enum AV_PIX_FMT_YUV444P16 = AVPixelFormat.AV_PIX_FMT_YUV444P16LE;

enum AV_PIX_FMT_GBRP9 = AVPixelFormat.AV_PIX_FMT_GBRP9LE;
enum AV_PIX_FMT_GBRP10 = AVPixelFormat.AV_PIX_FMT_GBRP10LE;
enum AV_PIX_FMT_GBRP12 = AVPixelFormat.AV_PIX_FMT_GBRP12LE;
enum AV_PIX_FMT_GBRP14 = AVPixelFormat.AV_PIX_FMT_GBRP14LE;
enum AV_PIX_FMT_GBRP16 = AVPixelFormat.AV_PIX_FMT_GBRP16LE;
enum AV_PIX_FMT_GBRAP10 = AVPixelFormat.AV_PIX_FMT_GBRAP10LE;
enum AV_PIX_FMT_GBRAP12 = AVPixelFormat.AV_PIX_FMT_GBRAP12LE;
enum AV_PIX_FMT_GBRAP16 = AVPixelFormat.AV_PIX_FMT_GBRAP16LE;

enum AV_PIX_FMT_BAYER_BGGR16 = AVPixelFormat.AV_PIX_FMT_BAYER_BGGR16LE;
enum AV_PIX_FMT_BAYER_RGGB16 = AVPixelFormat.AV_PIX_FMT_BAYER_RGGB16LE;
enum AV_PIX_FMT_BAYER_GBRG16 = AVPixelFormat.AV_PIX_FMT_BAYER_GBRG16LE;
enum AV_PIX_FMT_BAYER_GRBG16 = AVPixelFormat.AV_PIX_FMT_BAYER_GRBG16LE;

enum AV_PIX_FMT_GBRPF32 = AVPixelFormat.AV_PIX_FMT_GBRPF32LE;
enum AV_PIX_FMT_GBRAPF32 = AVPixelFormat.AV_PIX_FMT_GBRAPF32LE;

enum AV_PIX_FMT_GRAYF32 = AVPixelFormat.AV_PIX_FMT_GRAYF32LE;

enum AV_PIX_FMT_YUVA420P9 = AVPixelFormat.AV_PIX_FMT_YUVA420P9LE;
enum AV_PIX_FMT_YUVA422P9 = AVPixelFormat.AV_PIX_FMT_YUVA422P9LE;
enum AV_PIX_FMT_YUVA444P9 = AVPixelFormat.AV_PIX_FMT_YUVA444P9LE;
enum AV_PIX_FMT_YUVA420P10 = AVPixelFormat.AV_PIX_FMT_YUVA420P10LE;
enum AV_PIX_FMT_YUVA422P10 = AVPixelFormat.AV_PIX_FMT_YUVA422P10LE;
enum AV_PIX_FMT_YUVA444P10 = AVPixelFormat.AV_PIX_FMT_YUVA444P10LE;
enum AV_PIX_FMT_YUVA420P16 = AVPixelFormat.AV_PIX_FMT_YUVA420P16LE;
enum AV_PIX_FMT_YUVA422P16 = AVPixelFormat.AV_PIX_FMT_YUVA422P16LE;
enum AV_PIX_FMT_YUVA444P16 = AVPixelFormat.AV_PIX_FMT_YUVA444P16LE;

enum AV_PIX_FMT_XYZ12 = AVPixelFormat.AV_PIX_FMT_XYZ12LE;
enum AV_PIX_FMT_NV20 = AVPixelFormat.AV_PIX_FMT_NV20LE;
enum AV_PIX_FMT_AYUV64 = AVPixelFormat.AV_PIX_FMT_AYUV64LE;
enum AV_PIX_FMT_P010 = AVPixelFormat.AV_PIX_FMT_P010LE;
enum AV_PIX_FMT_P016 = AVPixelFormat.AV_PIX_FMT_P016LE;
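
// Illustration (not part of the original header): AV_PIX_FMT_P010 stores each
// 10-bit sample in the upper bits of a 16-bit word with the low bits zeroed
// ("data in the high bits, zeros in the low bits" above), so a raw native-endian
// sample is converted to its numeric value with a shift. A minimal sketch using
// a hypothetical raw sample value:
unittest
{
    const ushort rawLuma = 0x8000;   // assumed raw P010 luma sample
    const value = rawLuma >> 6;      // 10-bit value in the range 0..1023
    assert(value == 0x200);
    assert((rawLuma & 0x3F) == 0);   // the 6 low bits carry no data
}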

/**
 * Chromaticity coordinates of the source primaries.
 * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.1.
 */
enum AVColorPrimaries
{
    AVCOL_PRI_RESERVED0 = 0,
    AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
    AVCOL_PRI_UNSPECIFIED = 2,
    AVCOL_PRI_RESERVED = 3,
    AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)

    AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
    AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
    AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above
    AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C
    AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020
    AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ)
    AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428,
    AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3
    AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3
    AVCOL_PRI_JEDEC_P22 = 22, ///< JEDEC P22 phosphors
    AVCOL_PRI_NB = 23 ///< Not part of ABI
}

/**
 * Color Transfer Characteristic.
 * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.2.
 */
enum AVColorTransferCharacteristic
{
    AVCOL_TRC_RESERVED0 = 0,
    AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361
    AVCOL_TRC_UNSPECIFIED = 2,
    AVCOL_TRC_RESERVED = 3,
    AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
    AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG
    AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
    AVCOL_TRC_SMPTE240M = 7,
    AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics"
    AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)"
    AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)"
    AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4
    AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut
    AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)
    AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system
    AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system
    AVCOL_TRC_SMPTE2084 = 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems
    AVCOL_TRC_SMPTEST2084 = AVCOL_TRC_SMPTE2084,
    AVCOL_TRC_SMPTE428 = 17, ///< SMPTE ST 428-1
    AVCOL_TRC_SMPTEST428_1 = AVCOL_TRC_SMPTE428,
    AVCOL_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid log-gamma"
    AVCOL_TRC_NB = 19 ///< Not part of ABI
}

/**
 * YUV colorspace type.
 * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.3.
 */
enum AVColorSpace
{
    AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
    AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
    AVCOL_SPC_UNSPECIFIED = 2,
    AVCOL_SPC_RESERVED = 3,
    AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
    AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
    AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
    AVCOL_SPC_SMPTE240M = 7, ///< functionally identical to above
    AVCOL_SPC_YCGCO = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
    AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO,
    AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system
    AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
    AVCOL_SPC_SMPTE2085 = 11, ///< SMPTE 2085, Y'D'zD'x
    AVCOL_SPC_CHROMA_DERIVED_NCL = 12, ///< Chromaticity-derived non-constant luminance system
    AVCOL_SPC_CHROMA_DERIVED_CL = 13, ///< Chromaticity-derived constant luminance system
    AVCOL_SPC_ICTCP = 14, ///< ITU-R BT.2100-0, ICtCp
    AVCOL_SPC_NB = 15 ///< Not part of ABI
}

/**
 * MPEG vs JPEG YUV range.
 */
enum AVColorRange
{
    AVCOL_RANGE_UNSPECIFIED = 0,
    AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges
    AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges
    AVCOL_RANGE_NB = 3 ///< Not part of ABI
}
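
// Illustration (not part of the original header): AVCOL_RANGE_MPEG describes the
// limited range spanning 219*2^(n-8) luma values, which for 8-bit video is the
// conventional 16..235 span, while AVCOL_RANGE_JPEG uses the full 0..2^n-1 range.
// A minimal sketch of that arithmetic:
unittest
{
    enum int bitDepth = 8;
    enum int lumaMin = 16 << (bitDepth - 8);                  // 16
    enum int lumaMax = lumaMin + 219 * (1 << (bitDepth - 8)); // 235
    enum int fullMax = (1 << bitDepth) - 1;                   // 255
    static assert(lumaMin == 16 && lumaMax == 235 && fullMax == 255);
}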

/**
 * Location of chroma samples.
 *
 * Illustration showing the location of the first (top left) chroma sample of the
 * image, the left shows only luma, the right
 * shows the location of the chroma sample, the 2 could be imagined to overlay
 * each other but are drawn separately due to limitations of ASCII
 *
 *                 1st 2nd       1st 2nd   horizontal luma sample positions
 *                  v   v         v   v
 *                 ______        ______
 * 1st luma line > |X   X ...    |3 4 X ...     X are luma samples,
 *                 |             |1 2           1-6 are possible chroma positions
 * 2nd luma line > |X   X ...    |5 6 X ...     0 is undefined/unknown position
 */
enum AVChromaLocation
{
    AVCHROMA_LOC_UNSPECIFIED = 0,
    AVCHROMA_LOC_LEFT = 1, ///< MPEG-2/4 4:2:0, H.264 default for 4:2:0
    AVCHROMA_LOC_CENTER = 2, ///< MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0
    AVCHROMA_LOC_TOPLEFT = 3, ///< ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2
    AVCHROMA_LOC_TOP = 4,
    AVCHROMA_LOC_BOTTOMLEFT = 5,
    AVCHROMA_LOC_BOTTOM = 6,
    AVCHROMA_LOC_NB = 7 ///< Not part of ABI
}

/* AVUTIL_PIXFMT_H */