FFmpeg  4.3
dpxenc.c
Go to the documentation of this file.
1 /*
2  * DPX (.dpx) image encoder
3  * Copyright (c) 2011 Peter Ross <pross@xvid.org>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/common.h"
23 #include "libavutil/intreadwrite.h"
24 #include "libavutil/imgutils.h"
25 #include "avcodec.h"
26 #include "internal.h"
27 
/* Per-stream private state for the DPX image encoder. */
28 typedef struct DPXContext {
/* NOTE(review): this listing appears truncated — members big_endian,
 * bits_per_component, num_components and descriptor are assigned in
 * encode_init() and read throughout this file, but their declarations
 * (orig. lines 29-32) are not visible here; confirm against the
 * original dpxenc.c. */
33  int planar; /* nonzero when the source pixel format is planar (AV_PIX_FMT_FLAG_PLANAR) */
34 } DPXContext;
35 
/* Initialize the DPX encoder: derive endianness, bit depth, component
 * count, planarity and the DPX image-element "descriptor" code from the
 * negotiated pixel format.
 * Returns 0 on success, -1 for an unsupported pixel format.
 * NOTE(review): this listing appears truncated — the function signature
 * (orig. line 36, "static av_cold int encode_init(AVCodecContext *avctx)")
 * and the declaration of `desc` (orig. line 39, presumably
 * av_pix_fmt_desc_get(avctx->pix_fmt)) are missing; confirm against the
 * original dpxenc.c. */
37 {
38  DPXContext *s = avctx->priv_data;
40 
41  s->big_endian = !!(desc->flags & AV_PIX_FMT_FLAG_BE);
42  s->bits_per_component = desc->comp[0].depth;
43  s->num_components = desc->nb_components;
/* Descriptor codes are DPX image-element descriptors: 50 = RGB,
 * 51 = RGBA (chosen when the format has an alpha plane). */
44  s->descriptor = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) ? 51 : 50;
45  s->planar = !!(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
46 
/* Per-format fixups; formats not listed fall through to the error path.
 * NOTE(review): several case labels (orig. lines 51-52, 56-62, e.g.
 * the GRAY16 and GBRP10/GBRP12 variants) are missing from this listing. */
47  switch (avctx->pix_fmt) {
48  case AV_PIX_FMT_ABGR:
49  s->descriptor = 52; /* DPX descriptor 52 = ABGR component order */
50  break;
53  case AV_PIX_FMT_GRAY8:
54  s->descriptor = 6; /* DPX descriptor 6 = luminance only */
55  break;
60  case AV_PIX_FMT_RGB24:
63  case AV_PIX_FMT_RGBA:
64  break;
65  case AV_PIX_FMT_RGB48LE:
66  case AV_PIX_FMT_RGB48BE:
/* Allow the caller to declare e.g. 10 significant bits inside a
 * 16-bit container; triggers the 10-bit packed writer below. */
67  if (avctx->bits_per_raw_sample)
68  s->bits_per_component = avctx->bits_per_raw_sample;
69  break;
70  default:
71  av_log(avctx, AV_LOG_INFO, "unsupported pixel format\n");
72  return -1;
73  }
74 
75  return 0;
76 }
77 
78 static av_always_inline void write16_internal(int big_endian, void *p, int value)
79 {
80  if (big_endian) AV_WB16(p, value);
81  else AV_WL16(p, value);
82 }
83 
84 static av_always_inline void write32_internal(int big_endian, void *p, int value)
85 {
86  if (big_endian) AV_WB32(p, value);
87  else AV_WL32(p, value);
88 }
89 
/* Convenience wrappers that pick the byte order from the DPX context;
 * both require a `DPXContext *s` to be in scope at the expansion site. */
90 #define write16(p, value) write16_internal(s->big_endian, p, value)
91 #define write32(p, value) write32_internal(s->big_endian, p, value)
92 
93 static void encode_rgb48_10bit(AVCodecContext *avctx, const AVFrame *pic,
94  uint8_t *dst)
95 {
96  DPXContext *s = avctx->priv_data;
97  const uint8_t *src = pic->data[0];
98  int x, y;
99 
100  for (y = 0; y < avctx->height; y++) {
101  for (x = 0; x < avctx->width; x++) {
102  int value;
103  if (s->big_endian) {
104  value = ((AV_RB16(src + 6*x + 4) & 0xFFC0U) >> 4)
105  | ((AV_RB16(src + 6*x + 2) & 0xFFC0U) << 6)
106  | ((AV_RB16(src + 6*x + 0) & 0xFFC0U) << 16);
107  } else {
108  value = ((AV_RL16(src + 6*x + 4) & 0xFFC0U) >> 4)
109  | ((AV_RL16(src + 6*x + 2) & 0xFFC0U) << 6)
110  | ((AV_RL16(src + 6*x + 0) & 0xFFC0U) << 16);
111  }
112  write32(dst, value);
113  dst += 4;
114  }
115  src += pic->linesize[0];
116  }
117 }
118 
119 static void encode_gbrp10(AVCodecContext *avctx, const AVFrame *pic, uint8_t *dst)
120 {
121  DPXContext *s = avctx->priv_data;
122  const uint8_t *src[3] = {pic->data[0], pic->data[1], pic->data[2]};
123  int x, y, i;
124 
125  for (y = 0; y < avctx->height; y++) {
126  for (x = 0; x < avctx->width; x++) {
127  int value;
128  if (s->big_endian) {
129  value = (AV_RB16(src[0] + 2*x) << 12)
130  | (AV_RB16(src[1] + 2*x) << 2)
131  | ((unsigned)AV_RB16(src[2] + 2*x) << 22);
132  } else {
133  value = (AV_RL16(src[0] + 2*x) << 12)
134  | (AV_RL16(src[1] + 2*x) << 2)
135  | ((unsigned)AV_RL16(src[2] + 2*x) << 22);
136  }
137  write32(dst, value);
138  dst += 4;
139  }
140  for (i = 0; i < 3; i++)
141  src[i] += pic->linesize[i];
142  }
143 }
144 
145 static void encode_gbrp12(AVCodecContext *avctx, const AVFrame *pic, uint16_t *dst)
146 {
147  DPXContext *s = avctx->priv_data;
148  const uint16_t *src[3] = {(uint16_t*)pic->data[0],
149  (uint16_t*)pic->data[1],
150  (uint16_t*)pic->data[2]};
151  int x, y, i, pad;
152  pad = avctx->width*6;
153  pad = (FFALIGN(pad, 4) - pad) >> 1;
154  for (y = 0; y < avctx->height; y++) {
155  for (x = 0; x < avctx->width; x++) {
156  uint16_t value[3];
157  if (s->big_endian) {
158  value[1] = AV_RB16(src[0] + x) << 4;
159  value[2] = AV_RB16(src[1] + x) << 4;
160  value[0] = AV_RB16(src[2] + x) << 4;
161  } else {
162  value[1] = AV_RL16(src[0] + x) << 4;
163  value[2] = AV_RL16(src[1] + x) << 4;
164  value[0] = AV_RL16(src[2] + x) << 4;
165  }
166  for (i = 0; i < 3; i++)
167  write16(dst++, value[i]);
168  }
169  for (i = 0; i < pad; i++)
170  *dst++ = 0;
171  for (i = 0; i < 3; i++)
172  src[i] += pic->linesize[i]/2;
173  }
174 }
175 
/* Encode one frame as a complete DPX file: allocate the packet, write the
 * 1664-byte generic DPX header, then append the pixel data packed
 * according to the bit depth chosen in encode_init().
 * Returns 0 on success, a negative error code on failure.
 * NOTE(review): this listing appears truncated — the signature line
 * (orig. line 176, "static int encode_frame(AVCodecContext *avctx,
 * AVPacket *pkt,") is missing; confirm against the original dpxenc.c. */
177  const AVFrame *frame, int *got_packet)
178 {
179  DPXContext *s = avctx->priv_data;
180  int size, ret, need_align, len;
181  uint8_t *buf;
182 
183 #define HEADER_SIZE 1664 /* DPX Generic header */
/* Compute the payload size. Rows are padded to 4-byte alignment;
 * need_align is the per-row pad in bytes (unused in the 10-bit case,
 * where each pixel already occupies exactly one 32-bit word). */
184  if (s->bits_per_component == 10)
185  size = avctx->height * avctx->width * 4;
186  else if (s->bits_per_component == 12) {
187  // 3 components, 12 bits put on 16 bits
188  len = avctx->width*6;
189  size = FFALIGN(len, 4);
190  need_align = size - len;
191  size *= avctx->height;
192  } else {
193  // N components, M bits
194  len = avctx->width * s->num_components * s->bits_per_component >> 3;
195  size = FFALIGN(len, 4);
196  need_align = size - len;
197  size *= avctx->height;
198  }
199  if ((ret = ff_alloc_packet2(avctx, pkt, size + HEADER_SIZE, 0)) < 0)
200  return ret;
201  buf = pkt->data;
202 
/* The header is mostly zero; only the fields below are filled in. */
203  memset(buf, 0, HEADER_SIZE);
204 
205  /* File information header */
206  write32(buf, MKBETAG('S','D','P','X'));
207  write32(buf + 4, HEADER_SIZE);
208  memcpy (buf + 8, "V1.0", 4);
209  write32(buf + 20, 1); /* new image */
210  write32(buf + 24, HEADER_SIZE);
/* Writing the encoder ident would break bit-exact comparisons. */
211  if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT))
212  memcpy (buf + 160, LIBAVCODEC_IDENT, FFMIN(sizeof(LIBAVCODEC_IDENT), 100));
213  write32(buf + 660, 0xFFFFFFFF); /* unencrypted */
214 
215  /* Image information header */
216  write16(buf + 768, 0); /* orientation; left to right, top to bottom */
217  write16(buf + 770, 1); /* number of elements */
218  write32(buf + 772, avctx->width);
219  write32(buf + 776, avctx->height);
220  buf[800] = s->descriptor;
221  buf[801] = 2; /* linear transfer */
222  buf[802] = 2; /* linear colorimetric */
223  buf[803] = s->bits_per_component;
224  write16(buf + 804, (s->bits_per_component == 10 || s->bits_per_component == 12) ?
225  1 : 0); /* packing method */
226  write32(buf + 808, HEADER_SIZE); /* data offset */
227 
228  /* Image source information header */
229  write32(buf + 1628, avctx->sample_aspect_ratio.num);
230  write32(buf + 1632, avctx->sample_aspect_ratio.den);
231 
232  switch(s->bits_per_component) {
233  case 8:
234  case 16:
/* 8/16-bit data is copied verbatim; if rows need padding, copy
 * row-by-row and zero-fill, otherwise bulk-copy the whole image. */
235  if (need_align) {
236  int j;
237  const uint8_t *src = frame->data[0];
238  uint8_t *dst = pkt->data + HEADER_SIZE;
239  size = (len + need_align) * avctx->height;
240  for (j=0; j<avctx->height; j++) {
241  memcpy(dst, src, len);
242  memset(dst + len, 0, need_align);
243  dst += len + need_align;
244  src += frame->linesize[0];
245  }
246  } else {
/* NOTE(review): the call head appears missing from this listing
 * (orig. line 247, presumably
 * "size = av_image_copy_to_buffer(pkt->data + HEADER_SIZE, size,");
 * confirm against the original dpxenc.c. */
248  (const uint8_t**)frame->data, frame->linesize,
249  avctx->pix_fmt,
250  avctx->width, avctx->height, 1);
251  }
252  if (size < 0)
253  return size;
254  break;
255  case 10:
256  if (s->planar)
257  encode_gbrp10(avctx, frame, buf + HEADER_SIZE);
258  else
259  encode_rgb48_10bit(avctx, frame, buf + HEADER_SIZE);
260  break;
261  case 12:
262  encode_gbrp12(avctx, frame, (uint16_t*)(buf + HEADER_SIZE));
263  break;
264  default:
265  av_log(avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n", s->bits_per_component);
266  return -1;
267  }
268 
269  size += HEADER_SIZE;
270 
/* Patch the total file size back into the header now that the final
 * payload size (possibly adjusted above) is known. */
271  write32(buf + 16, size); /* file size */
272 
/* NOTE(review): orig. line 273 is missing from this listing — likely
 * "pkt->flags |= AV_PKT_FLAG_KEY;" (every DPX image is a keyframe);
 * confirm against the original dpxenc.c. */
274  *got_packet = 1;
275 
276  return 0;
277 }
278 
/* Public codec registration for the DPX image encoder.
 * NOTE(review): this listing appears truncated — the opening line
 * (orig. line 279, "AVCodec ff_dpx_encoder = {") and the entries of the
 * pix_fmts array (orig. lines 288-295, the supported AVPixelFormat list
 * terminated by AV_PIX_FMT_NONE) are missing; confirm against the
 * original dpxenc.c. */
280  .name = "dpx",
281  .long_name = NULL_IF_CONFIG_SMALL("DPX (Digital Picture Exchange) image"),
282  .type = AVMEDIA_TYPE_VIDEO,
283  .id = AV_CODEC_ID_DPX,
284  .priv_data_size = sizeof(DPXContext),
285  .init = encode_init,
286  .encode2 = encode_frame,
287  .pix_fmts = (const enum AVPixelFormat[]){
296 };
AVCodec
AVCodec.
Definition: codec.h:190
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
AV_CODEC_ID_DPX
@ AV_CODEC_ID_DPX
Definition: codec_id.h:177
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
LIBAVCODEC_IDENT
#define LIBAVCODEC_IDENT
Definition: version.h:42
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
AV_PIX_FMT_GBRP10BE
@ AV_PIX_FMT_GBRP10BE
planar GBR 4:4:4 30bpp, big-endian
Definition: pixfmt.h:172
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
write16
#define write16(p, value)
Definition: dpxenc.c:90
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
AV_PIX_FMT_RGBA64BE
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:205
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:355
DPXContext::descriptor
int descriptor
Definition: dpxenc.c:32
AV_RB16
#define AV_RB16
Definition: intreadwrite.h:53
write16_internal
static av_always_inline void write16_internal(int big_endian, void *p, int value)
Definition: dpxenc.c:78
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:388
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
DPXContext::bits_per_component
int bits_per_component
Definition: dpxenc.c:30
encode_frame
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
Definition: dpxenc.c:176
AV_PIX_FMT_GRAY16BE
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
Definition: pixfmt.h:97
U
#define U(x)
Definition: vp56_arith.h:37
x
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo x
Definition: fate.txt:150
ff_dpx_encoder
AVCodec ff_dpx_encoder
Definition: dpxenc.c:279
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
AVRational::num
int num
Numerator.
Definition: rational.h:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:90
write32_internal
static av_always_inline void write32_internal(int big_endian, void *p, int value)
Definition: dpxenc.c:84
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_GBRP12LE
@ AV_PIX_FMT_GBRP12LE
planar GBR 4:4:4 36bpp, little-endian
Definition: pixfmt.h:255
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:177
write32
#define write32(p, value)
Definition: dpxenc.c:91
AV_RL16
#define AV_RL16
Definition: intreadwrite.h:42
AV_PIX_FMT_GBRP10LE
@ AV_PIX_FMT_GBRP10LE
planar GBR 4:4:4 30bpp, little-endian
Definition: pixfmt.h:173
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:405
DPXContext::planar
int planar
Definition: dpxenc.c:33
AV_PIX_FMT_RGB48LE
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:103
AV_PIX_FMT_RGBA64LE
@ AV_PIX_FMT_RGBA64LE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:206
src
#define src
Definition: vp8dsp.c:254
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
encode_gbrp12
static void encode_gbrp12(AVCodecContext *avctx, const AVFrame *pic, uint16_t *dst)
Definition: dpxenc.c:145
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
DPXContext::num_components
int num_components
Definition: dpxenc.c:31
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:419
DPXContext
Definition: dpxenc.c:28
desc
const char * desc
Definition: nvenc.c:79
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AVPacket::size
int size
Definition: packet.h:356
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:186
size
int size
Definition: twinvq_data.h:11134
MKBETAG
#define MKBETAG(a, b, c, d)
Definition: common.h:407
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:412
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:361
HEADER_SIZE
#define HEADER_SIZE
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
AV_PIX_FMT_RGB48BE
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:102
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
common.h
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
AV_PIX_FMT_FLAG_BE
#define AV_PIX_FMT_FLAG_BE
Pixel format is big-endian.
Definition: pixdesc.h:128
uint8_t
uint8_t
Definition: audio_convert.c:194
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
len
int len
Definition: vorbis_enc_data.h:452
AVCodecContext::height
int height
Definition: avcodec.h:699
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
avcodec.h
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AV_PIX_FMT_GBRP12BE
@ AV_PIX_FMT_GBRP12BE
planar GBR 4:4:4 36bpp, big-endian
Definition: pixfmt.h:254
AVCodecContext
main external API structure.
Definition: avcodec.h:526
encode_gbrp10
static void encode_gbrp10(AVCodecContext *avctx, const AVFrame *pic, uint8_t *dst)
Definition: dpxenc.c:119
encode_rgb48_10bit
static void encode_rgb48_10bit(AVCodecContext *avctx, const AVFrame *pic, uint8_t *dst)
Definition: dpxenc.c:93
pkt
static AVPacket pkt
Definition: demuxing_decoding.c:54
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:144
encode_init
static av_cold int encode_init(AVCodecContext *avctx)
Definition: dpxenc.c:36
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:333
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
av_image_copy_to_buffer
int av_image_copy_to_buffer(uint8_t *dst, int dst_size, const uint8_t *const src_data[4], const int src_linesize[4], enum AVPixelFormat pix_fmt, int width, int height, int align)
Copy image data from an image into a buffer.
Definition: imgutils.c:453
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:699
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ff_alloc_packet2
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:32
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:905
DPXContext::big_endian
int big_endian
Definition: dpxenc.c:29