FFmpeg  4.3
rtpdec_jpeg.c
/*
 * RTP JPEG-compressed Video Depacketizer, RFC 2435
 * Copyright (c) 2012 Samuel Pitoiset
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "avio_internal.h"
#include "rtpdec.h"
#include "rtpdec_formats.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/jpegtables.h"
#include "libavcodec/mjpeg.h"
#include "libavcodec/bytestream.h"

/**
 * RTP/JPEG specific private data.
 */
struct PayloadContext {
    AVIOContext *frame;         ///< current frame buffer
    uint32_t    timestamp;      ///< current frame timestamp
    int         hdr_size;       ///< size of the current frame header
    uint8_t     qtables[128][128];  ///< cached in-band quantization tables, indexed by q - 128
    uint8_t     qtables_len[128];   ///< lengths of the cached quantization tables
};

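/* Default luma and chroma quantizer basis tables from RFC 2435, Appendix A,
 * stored in zig-zag order. create_default_qtables() scales them by the Q
 * factor when no table is sent in-band. */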
static const uint8_t default_quantizers[128] = {
    /* luma table */
    16, 11, 12, 14, 12, 10, 16, 14,
    13, 14, 18, 17, 16, 19, 24, 40,
    26, 24, 22, 22, 24, 49, 35, 37,
    29, 40, 58, 51, 61, 60, 57, 51,
    56, 55, 64, 72, 92, 78, 64, 68,
    87, 69, 55, 56, 80, 109, 81, 87,
    95, 98, 103, 104, 103, 62, 77, 113,
    121, 112, 100, 120, 92, 101, 103, 99,

    /* chroma table */
    17, 18, 18, 24, 21, 24, 47, 26,
    26, 47, 99, 66, 56, 66, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99
};

static void jpeg_close_context(PayloadContext *jpeg)
{
    ffio_free_dyn_buf(&jpeg->frame);
}

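/* Write one Huffman table into a DHT segment: a class/id byte, the 16
 * code-length counts, then the symbol values. Returns the number of bytes
 * written (17 + number of values), which the caller adds to the DHT
 * segment length. */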
static int jpeg_create_huffman_table(PutByteContext *p, int table_class,
                                     int table_id, const uint8_t *bits_table,
                                     const uint8_t *value_table)
{
    int i, n = 0;

    bytestream2_put_byte(p, table_class << 4 | table_id);

    for (i = 1; i <= 16; i++) {
        n += bits_table[i];
        bytestream2_put_byte(p, bits_table[i]);
    }

    for (i = 0; i < n; i++) {
        bytestream2_put_byte(p, value_table[i]);
    }
    return n + 17;
}

static void jpeg_put_marker(PutByteContext *pbc, int code)
{
    bytestream2_put_byte(pbc, 0xff);
    bytestream2_put_byte(pbc, code);
}

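/* Build a complete JPEG frame header in buf: SOI, a JFIF APP0 segment, an
 * optional DRI segment, DQT with the supplied quantization tables, DHT with
 * the standard Huffman tables, SOF0 and SOS. The RTP payload data can then
 * be appended to form a JPEG image in interchange format. Returns the
 * header size in bytes. */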
static int jpeg_create_header(uint8_t *buf, int size, uint32_t type, uint32_t w,
                              uint32_t h, const uint8_t *qtable, int nb_qtable,
                              int dri)
{
    PutByteContext pbc;
    uint8_t *dht_size_ptr;
    int dht_size, i;

    bytestream2_init_writer(&pbc, buf, size);

    /* Convert from blocks to pixels. */
    w <<= 3;
    h <<= 3;

    /* SOI */
    jpeg_put_marker(&pbc, SOI);

    /* JFIF header */
    jpeg_put_marker(&pbc, APP0);
    bytestream2_put_be16(&pbc, 16);
    bytestream2_put_buffer(&pbc, "JFIF", 5);
    bytestream2_put_be16(&pbc, 0x0201);
    bytestream2_put_byte(&pbc, 0);
    bytestream2_put_be16(&pbc, 1);
    bytestream2_put_be16(&pbc, 1);
    bytestream2_put_byte(&pbc, 0);
    bytestream2_put_byte(&pbc, 0);

    if (dri) {
        jpeg_put_marker(&pbc, DRI);
        bytestream2_put_be16(&pbc, 4);
        bytestream2_put_be16(&pbc, dri);
    }

    /* DQT */
    jpeg_put_marker(&pbc, DQT);
    bytestream2_put_be16(&pbc, 2 + nb_qtable * (1 + 64));

    for (i = 0; i < nb_qtable; i++) {
        bytestream2_put_byte(&pbc, i);

        /* Each table is an array of 64 values given in zig-zag
         * order, identical to the format used in a JFIF DQT
         * marker segment. */
        bytestream2_put_buffer(&pbc, qtable + 64 * i, 64);
    }

    /* DHT */
    jpeg_put_marker(&pbc, DHT);
    dht_size_ptr = pbc.buffer;
    bytestream2_put_be16(&pbc, 0);

    dht_size  = 2;
    dht_size += jpeg_create_huffman_table(&pbc, 0, 0, avpriv_mjpeg_bits_dc_luminance,
                                          avpriv_mjpeg_val_dc);
    dht_size += jpeg_create_huffman_table(&pbc, 0, 1, avpriv_mjpeg_bits_dc_chrominance,
                                          avpriv_mjpeg_val_dc);
    dht_size += jpeg_create_huffman_table(&pbc, 1, 0, avpriv_mjpeg_bits_ac_luminance,
                                          avpriv_mjpeg_val_ac_luminance);
    dht_size += jpeg_create_huffman_table(&pbc, 1, 1, avpriv_mjpeg_bits_ac_chrominance,
                                          avpriv_mjpeg_val_ac_chrominance);
    AV_WB16(dht_size_ptr, dht_size);

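    /* Component sampling factors: the luma component uses 2x1 sampling for
     * type 0 (4:2:2) or 2x2 for type 1 (4:2:0), while both chroma
     * components always use 1x1. */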
    /* SOF0 */
    jpeg_put_marker(&pbc, SOF0);
    bytestream2_put_be16(&pbc, 17); /* size */
    bytestream2_put_byte(&pbc, 8); /* bits per component */
    bytestream2_put_be16(&pbc, h);
    bytestream2_put_be16(&pbc, w);
    bytestream2_put_byte(&pbc, 3); /* number of components */
    bytestream2_put_byte(&pbc, 1); /* component number */
    bytestream2_put_byte(&pbc, (2 << 4) | (type ? 2 : 1)); /* hsample/vsample */
    bytestream2_put_byte(&pbc, 0); /* matrix number */
    bytestream2_put_byte(&pbc, 2); /* component number */
    bytestream2_put_byte(&pbc, 1 << 4 | 1); /* hsample/vsample */
    bytestream2_put_byte(&pbc, nb_qtable == 2 ? 1 : 0); /* matrix number */
    bytestream2_put_byte(&pbc, 3); /* component number */
    bytestream2_put_byte(&pbc, 1 << 4 | 1); /* hsample/vsample */
    bytestream2_put_byte(&pbc, nb_qtable == 2 ? 1 : 0); /* matrix number */

    /* SOS */
    jpeg_put_marker(&pbc, SOS);
    bytestream2_put_be16(&pbc, 12); /* size */
    bytestream2_put_byte(&pbc, 3);  /* number of components */
    bytestream2_put_byte(&pbc, 1);  /* component number */
    bytestream2_put_byte(&pbc, 0);  /* DC/AC huffman table ids */
    bytestream2_put_byte(&pbc, 2);  /* component number */
    bytestream2_put_byte(&pbc, 17); /* DC/AC huffman table ids */
    bytestream2_put_byte(&pbc, 3);  /* component number */
    bytestream2_put_byte(&pbc, 17); /* DC/AC huffman table ids */
    bytestream2_put_byte(&pbc, 0);  /* start of spectral selection */
    bytestream2_put_byte(&pbc, 63); /* end of spectral selection */
    bytestream2_put_byte(&pbc, 0);  /* successive approximation */

    /* Return the length in bytes of the JPEG header. */
    return bytestream2_tell_p(&pbc);
}

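/* Derive luma and chroma quantization tables from a Q factor in the range
 * 1..99, following RFC 2435 Appendix A: the scale factor is S = 5000 / Q
 * for Q < 50 and S = 200 - 2 * Q otherwise, and each basis value is scaled
 * as (value * S + 50) / 100 and clipped to 1..255. For example, Q = 50
 * gives S = 100, which leaves the default tables unchanged. */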
static void create_default_qtables(uint8_t *qtables, uint8_t q)
{
    int factor = q;
    int i;
    uint16_t S;

    factor = av_clip(q, 1, 99);

    if (q < 50)
        S = 5000 / factor;
    else
        S = 200 - factor * 2;

    for (i = 0; i < 128; i++) {
        int val = (default_quantizers[i] * S + 50) / 100;

        /* Limit the quantizers to 1 <= q <= 255. */
        val = av_clip(val, 1, 255);
        qtables[i] = val;
    }
}

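/* Depacketize one RTP/JPEG packet (RFC 2435). Every packet starts with an
 * 8-byte main JPEG header:
 *   byte 0      type-specific (unused here)
 *   bytes 1-3   fragment byte offset into the frame data
 *   byte 4      type (decoder parameters)
 *   byte 5      Q (quantization factor or table id)
 *   byte 6      frame width in 8-pixel blocks
 *   byte 7      frame height in 8-pixel blocks
 * Fragments are appended to a dynamic buffer until the RTP marker bit
 * signals the end of the frame. */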
static int jpeg_parse_packet(AVFormatContext *ctx, PayloadContext *jpeg,
                             AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                             const uint8_t *buf, int len, uint16_t seq,
                             int flags)
{
    uint8_t type, q, width, height;
    const uint8_t *qtables = NULL;
    uint16_t qtable_len;
    uint32_t off;
    int ret, dri = 0;

    if (len < 8) {
        av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
        return AVERROR_INVALIDDATA;
    }

    /* Parse the main JPEG header. */
    off    = AV_RB24(buf + 1);  /* fragment byte offset */
    type   = AV_RB8(buf + 4);   /* id of jpeg decoder params */
    q      = AV_RB8(buf + 5);   /* quantization factor (or table id) */
    width  = AV_RB8(buf + 6);   /* frame width in 8 pixel blocks */
    height = AV_RB8(buf + 7);   /* frame height in 8 pixel blocks */
    buf += 8;
    len -= 8;

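    /* Type values 64..127 indicate that a 4-byte Restart Marker header
     * follows the main header; its first 16 bits carry the restart
     * interval, which is forwarded as a DRI segment. */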
    if (type & 0x40) {
        if (len < 4) {
            av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
            return AVERROR_INVALIDDATA;
        }
        dri   = AV_RB16(buf);
        buf  += 4;
        len  -= 4;
        type &= ~0x40;
    }
    if (type > 1) {
        avpriv_report_missing_feature(ctx, "RTP/JPEG type %"PRIu8, type);
        return AVERROR_PATCHWELCOME;
    }

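    /* Q values 1..99 select the default tables scaled by the Q factor;
     * values 128..255 mean the tables are carried in-band in a quantization
     * table header. Tables for Q values below 255 may be cached and reused
     * when later frames omit them. */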
    /* Parse the quantization table header. */
    if (off == 0) {
        /* Start of JPEG data packet. */
        uint8_t new_qtables[128];
        uint8_t hdr[1024];

        if (q > 127) {
            uint8_t precision;
            if (len < 4) {
                av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
                return AVERROR_INVALIDDATA;
            }

            /* The first byte is reserved for future use. */
            precision  = AV_RB8(buf + 1);  /* size of coefficients */
            qtable_len = AV_RB16(buf + 2); /* length in bytes */
            buf += 4;
            len -= 4;

            if (precision)
                av_log(ctx, AV_LOG_WARNING, "Only 8-bit precision is supported.\n");

            if (qtable_len > 0) {
                if (len < qtable_len) {
                    av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
                    return AVERROR_INVALIDDATA;
                }
                qtables = buf;
                buf += qtable_len;
                len -= qtable_len;
                if (q < 255) {
                    if (jpeg->qtables_len[q - 128] &&
                        (jpeg->qtables_len[q - 128] != qtable_len ||
                         memcmp(qtables, &jpeg->qtables[q - 128][0], qtable_len))) {
                        av_log(ctx, AV_LOG_WARNING,
                               "Quantization tables for q=%d changed\n", q);
                    } else if (!jpeg->qtables_len[q - 128] && qtable_len <= 128) {
                        memcpy(&jpeg->qtables[q - 128][0], qtables,
                               qtable_len);
                        jpeg->qtables_len[q - 128] = qtable_len;
                    }
                }
            } else {
                if (q == 255) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Invalid RTP/JPEG packet. Quantization tables not found.\n");
                    return AVERROR_INVALIDDATA;
                }
                if (!jpeg->qtables_len[q - 128]) {
                    av_log(ctx, AV_LOG_ERROR,
                           "No quantization tables known for q=%d yet.\n", q);
                    return AVERROR_INVALIDDATA;
                }
                qtables    = &jpeg->qtables[q - 128][0];
                qtable_len = jpeg->qtables_len[q - 128];
            }
        } else { /* q <= 127 */
            if (q == 0 || q > 99) {
                av_log(ctx, AV_LOG_ERROR, "Reserved q value %d\n", q);
                return AVERROR_INVALIDDATA;
            }
            create_default_qtables(new_qtables, q);
            qtables    = new_qtables;
            qtable_len = sizeof(new_qtables);
        }

        /* Skip the current frame in case the end packet
         * has been lost somewhere. */
        ffio_free_dyn_buf(&jpeg->frame);

        if ((ret = avio_open_dyn_buf(&jpeg->frame)) < 0)
            return ret;
        jpeg->timestamp = *timestamp;

        /* Generate frame and scan headers that can be prepended to the
         * RTP/JPEG data payload to produce a JPEG compressed image in
         * interchange format. */
        jpeg->hdr_size = jpeg_create_header(hdr, sizeof(hdr), type, width,
                                            height, qtables,
                                            qtable_len / 64, dri);

        /* Copy JPEG header to frame buffer. */
        avio_write(jpeg->frame, hdr, jpeg->hdr_size);
    }

    if (!jpeg->frame) {
        av_log(ctx, AV_LOG_ERROR,
               "Received packet without a start chunk; dropping frame.\n");
        return AVERROR(EAGAIN);
    }

    if (jpeg->timestamp != *timestamp) {
        /* Skip the current frame if the timestamp is incorrect.
         * A start packet has been lost somewhere. */
        ffio_free_dyn_buf(&jpeg->frame);
        av_log(ctx, AV_LOG_ERROR, "RTP timestamps don't match.\n");
        return AVERROR_INVALIDDATA;
    }

    if (off != avio_tell(jpeg->frame) - jpeg->hdr_size) {
        av_log(ctx, AV_LOG_ERROR,
               "Missing packets; dropping frame.\n");
        return AVERROR(EAGAIN);
    }

    /* Copy data to frame buffer. */
    avio_write(jpeg->frame, buf, len);

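    /* The RTP marker bit flags the last fragment of a frame: append the EOI
     * marker and hand the assembled JPEG image over as a packet. */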
    if (flags & RTP_FLAG_MARKER) {
        /* End of JPEG data packet. */
        uint8_t buf[2] = { 0xff, EOI };

        /* Put EOI marker. */
        avio_write(jpeg->frame, buf, sizeof(buf));

        /* Prepare the JPEG packet. */
        if ((ret = ff_rtp_finalize_packet(pkt, &jpeg->frame, st->index)) < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Error occurred when getting frame buffer.\n");
            return ret;
        }

        return 0;
    }

    return AVERROR(EAGAIN);
}

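/* Depacketizer registration: RFC 2435 JPEG video uses the static RTP
 * payload type 26 and is decoded by the MJPEG decoder. */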
const RTPDynamicProtocolHandler ff_jpeg_dynamic_handler = {
    .enc_name          = "JPEG",
    .codec_type        = AVMEDIA_TYPE_VIDEO,
    .codec_id          = AV_CODEC_ID_MJPEG,
    .priv_data_size    = sizeof(PayloadContext),
    .close             = jpeg_close_context,
    .parse_packet      = jpeg_parse_packet,
    .static_payload_id = 26,
};