FFmpeg 4.3
hapdec.c
1 /*
2  * Vidvox Hap decoder
3  * Copyright (C) 2015 Vittorio Giovara <vittorio.giovara@gmail.com>
4  * Copyright (C) 2015 Tom Butterworth <bangnoise@gmail.com>
5  *
6  * HapQA and HAPAlphaOnly added by Jokyo Images
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * Hap decoder
28  *
29  * Fourcc: Hap1, Hap5, HapY, HapA, HapM
30  *
31  * https://github.com/Vidvox/hap/blob/master/documentation/HapVideoDRAFT.md
32  */
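/* Bitstream layout notes (informational summary of the draft specification
 * linked above; not part of the original source):
 * - every Hap section starts with a four-byte header: a 24-bit little-endian
 *   size followed by an 8-bit section type; a zero size means an extra 32-bit
 *   little-endian size follows (see ff_hap_parse_section_header() in hap.c)
 * - in the texture section type byte, the low nibble selects the texture
 *   format (DXT1, DXT5, scaled YCoCg DXT5, RGTC1) and the high nibble the
 *   second-stage compressor (none, Snappy, or "complex" decode instructions),
 *   which is how hap_parse_frame_header() below masks it */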
33 
34 #include <stdint.h>
35 
36 #include "libavutil/imgutils.h"
37 
38 #include "avcodec.h"
39 #include "bytestream.h"
40 #include "hap.h"
41 #include "internal.h"
42 #include "snappy.h"
43 #include "texturedsp.h"
44 #include "thread.h"
45 
46 static int hap_parse_decode_instructions(HapContext *ctx, int size)
47 {
48  GetByteContext *gbc = &ctx->gbc;
49  int section_size;
50  enum HapSectionType section_type;
51  int is_first_table = 1, had_offsets = 0, had_compressors = 0, had_sizes = 0;
52  int i, ret;
53 
54  while (size > 0) {
55  int stream_remaining = bytestream2_get_bytes_left(gbc);
56  ret = ff_hap_parse_section_header(gbc, &section_size, &section_type);
57  if (ret != 0)
58  return ret;
59 
60  size -= stream_remaining - bytestream2_get_bytes_left(gbc);
61 
62  switch (section_type) {
63  case HAP_ST_COMPRESSOR_TABLE:
64  ret = ff_hap_set_chunk_count(ctx, section_size, is_first_table);
65  if (ret != 0)
66  return ret;
67  for (i = 0; i < section_size; i++) {
68  ctx->chunks[i].compressor = bytestream2_get_byte(gbc) << 4;
69  }
70  had_compressors = 1;
71  is_first_table = 0;
72  break;
73  case HAP_ST_SIZE_TABLE:
74  ret = ff_hap_set_chunk_count(ctx, section_size / 4, is_first_table);
75  if (ret != 0)
76  return ret;
77  for (i = 0; i < section_size / 4; i++) {
78  ctx->chunks[i].compressed_size = bytestream2_get_le32(gbc);
79  }
80  had_sizes = 1;
81  is_first_table = 0;
82  break;
83  case HAP_ST_OFFSET_TABLE:
84  ret = ff_hap_set_chunk_count(ctx, section_size / 4, is_first_table);
85  if (ret != 0)
86  return ret;
87  for (i = 0; i < section_size / 4; i++) {
88  ctx->chunks[i].compressed_offset = bytestream2_get_le32(gbc);
89  }
90  had_offsets = 1;
91  is_first_table = 0;
92  break;
93  default:
94  break;
95  }
96  size -= section_size;
97  }
98 
99  if (!had_sizes || !had_compressors)
100  return AVERROR_INVALIDDATA;
101 
102  /* The offsets table is optional. If not present then calculate offsets by
103  * summing the sizes of preceding chunks. */
104  if (!had_offsets) {
105  size_t running_size = 0;
106  for (i = 0; i < ctx->chunk_count; i++) {
107  ctx->chunks[i].compressed_offset = running_size;
108  running_size += ctx->chunks[i].compressed_size;
109  }
110  }
111 
112  return 0;
113 }
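/* Worked example (illustrative; byte values assume the HapSectionType and
 * HapCompressor numbering from hap.h): a frame split into two Snappy chunks
 * could carry decode instructions such as
 *   02 00 00 02                -> compressor table, 2 bytes follow
 *   0B 0B                      -> chunks 0 and 1 both Snappy (0x0B << 4 == 0xB0)
 *   08 00 00 03                -> size table, 8 bytes follow
 *   xx xx xx xx yy yy yy yy    -> two little-endian 32-bit compressed sizes
 * With no offset table present, the fallback above assigns offset 0 to the
 * first chunk and the first chunk's compressed size to the second. */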
114 
115 static int hap_can_use_tex_in_place(HapContext *ctx)
116 {
117  int i;
118  size_t running_offset = 0;
119  for (i = 0; i < ctx->chunk_count; i++) {
120  if (ctx->chunks[i].compressed_offset != running_offset
121  || ctx->chunks[i].compressor != HAP_COMP_NONE)
122  return 0;
123  running_offset += ctx->chunks[i].compressed_size;
124  }
125  return 1;
126 }
127 
128 static int hap_parse_frame_header(AVCodecContext *avctx)
129 {
130  HapContext *ctx = avctx->priv_data;
131  GetByteContext *gbc = &ctx->gbc;
132  int section_size;
133  enum HapSectionType section_type;
134  const char *compressorstr;
135  int i, ret;
136 
137  ret = ff_hap_parse_section_header(gbc, &ctx->texture_section_size, &section_type);
138  if (ret != 0)
139  return ret;
140 
141  if ((avctx->codec_tag == MKTAG('H','a','p','1') && (section_type & 0x0F) != HAP_FMT_RGBDXT1) ||
142  (avctx->codec_tag == MKTAG('H','a','p','5') && (section_type & 0x0F) != HAP_FMT_RGBADXT5) ||
143  (avctx->codec_tag == MKTAG('H','a','p','Y') && (section_type & 0x0F) != HAP_FMT_YCOCGDXT5) ||
144  (avctx->codec_tag == MKTAG('H','a','p','A') && (section_type & 0x0F) != HAP_FMT_RGTC1) ||
145  ((avctx->codec_tag == MKTAG('H','a','p','M') && (section_type & 0x0F) != HAP_FMT_RGTC1) &&
146  (section_type & 0x0F) != HAP_FMT_YCOCGDXT5)) {
147  av_log(avctx, AV_LOG_ERROR,
148  "Invalid texture format %#04x.\n", section_type & 0x0F);
149  return AVERROR_INVALIDDATA;
150  }
151 
152  switch (section_type & 0xF0) {
153  case HAP_COMP_NONE:
154  case HAP_COMP_SNAPPY:
155  ret = ff_hap_set_chunk_count(ctx, 1, 1);
156  if (ret == 0) {
157  ctx->chunks[0].compressor = section_type & 0xF0;
158  ctx->chunks[0].compressed_offset = 0;
159  ctx->chunks[0].compressed_size = ctx->texture_section_size;
160  }
161  if (ctx->chunks[0].compressor == HAP_COMP_NONE) {
162  compressorstr = "none";
163  } else {
164  compressorstr = "snappy";
165  }
166  break;
167  case HAP_COMP_COMPLEX:
168  ret = ff_hap_parse_section_header(gbc, &section_size, &section_type);
169  if (ret == 0 && section_type != HAP_ST_DECODE_INSTRUCTIONS)
170  ret = AVERROR_INVALIDDATA;
171  if (ret == 0)
172  ret = hap_parse_decode_instructions(ctx, section_size);
173  compressorstr = "complex";
174  break;
175  default:
176  ret = AVERROR_INVALIDDATA;
177  break;
178  }
179 
180  if (ret != 0)
181  return ret;
182 
183  /* Check the frame is valid and read the uncompressed chunk sizes */
184  ctx->tex_size = 0;
185  for (i = 0; i < ctx->chunk_count; i++) {
186  HapChunk *chunk = &ctx->chunks[i];
187 
188  /* Check the compressed buffer is valid */
189  if (chunk->compressed_offset + (uint64_t)chunk->compressed_size > bytestream2_get_bytes_left(gbc))
190  return AVERROR_INVALIDDATA;
191 
192  /* Chunks are unpacked sequentially, ctx->tex_size is the uncompressed
193  * size thus far */
194  chunk->uncompressed_offset = ctx->tex_size;
195 
196  /* Fill out uncompressed size */
197  if (chunk->compressor == HAP_COMP_SNAPPY) {
198  GetByteContext gbc_tmp;
199  int64_t uncompressed_size;
200  bytestream2_init(&gbc_tmp, gbc->buffer + chunk->compressed_offset,
201  chunk->compressed_size);
202  uncompressed_size = ff_snappy_peek_uncompressed_length(&gbc_tmp);
203  if (uncompressed_size < 0) {
204  return uncompressed_size;
205  }
206  chunk->uncompressed_size = uncompressed_size;
207  } else if (chunk->compressor == HAP_COMP_NONE) {
208  chunk->uncompressed_size = chunk->compressed_size;
209  } else {
210  return AVERROR_INVALIDDATA;
211  }
212  ctx->tex_size += chunk->uncompressed_size;
213  }
214 
215  av_log(avctx, AV_LOG_DEBUG, "%s compressor\n", compressorstr);
216 
217  return ret;
218 }
219 
220 static int decompress_chunks_thread(AVCodecContext *avctx, void *arg,
221  int chunk_nb, int thread_nb)
222 {
223  HapContext *ctx = avctx->priv_data;
224 
225  HapChunk *chunk = &ctx->chunks[chunk_nb];
226  GetByteContext gbc;
227  uint8_t *dst = ctx->tex_buf + chunk->uncompressed_offset;
228 
229  bytestream2_init(&gbc, ctx->gbc.buffer + chunk->compressed_offset, chunk->compressed_size);
230 
231  if (chunk->compressor == HAP_COMP_SNAPPY) {
232  int ret;
233  int64_t uncompressed_size = ctx->tex_size;
234 
235  /* Uncompress the frame */
236  ret = ff_snappy_uncompress(&gbc, dst, &uncompressed_size);
237  if (ret < 0) {
238  av_log(avctx, AV_LOG_ERROR, "Snappy uncompress error\n");
239  return ret;
240  }
241  } else if (chunk->compressor == HAP_COMP_NONE) {
242  bytestream2_get_buffer(&gbc, dst, chunk->compressed_size);
243  }
244 
245  return 0;
246 }
247 
248 static int decompress_texture_thread_internal(AVCodecContext *avctx, void *arg,
249  int slice, int thread_nb, int texture_num)
250 {
251  HapContext *ctx = avctx->priv_data;
252  AVFrame *frame = arg;
253  const uint8_t *d = ctx->tex_data;
254  int w_block = avctx->coded_width / TEXTURE_BLOCK_W;
255  int h_block = avctx->coded_height / TEXTURE_BLOCK_H;
256  int x, y;
257  int start_slice, end_slice;
258  int base_blocks_per_slice = h_block / ctx->slice_count;
259  int remainder_blocks = h_block % ctx->slice_count;
260 
261  /* When the frame height (in blocks) doesn't divide evenly between the
262  * number of slices, spread the remaining blocks evenly between the first
263  * slices */
264  start_slice = slice * base_blocks_per_slice;
265  /* Add any extra blocks (one per slice) that have been added before this slice */
266  start_slice += FFMIN(slice, remainder_blocks);
267 
268  end_slice = start_slice + base_blocks_per_slice;
269  /* Add an extra block if there are still remainder blocks to be accounted for */
270  if (slice < remainder_blocks)
271  end_slice++;
272 
273  for (y = start_slice; y < end_slice; y++) {
274  uint8_t *p = frame->data[0] + y * frame->linesize[0] * TEXTURE_BLOCK_H;
275  int off = y * w_block;
276  for (x = 0; x < w_block; x++) {
277  if (texture_num == 0) {
278  ctx->tex_fun(p + x * 4 * ctx->uncompress_pix_size, frame->linesize[0],
279  d + (off + x) * ctx->tex_rat);
280  } else {
281  ctx->tex_fun2(p + x * 4 * ctx->uncompress_pix_size, frame->linesize[0],
282  d + (off + x) * ctx->tex_rat2);
283  }
284  }
285  }
286 
287  return 0;
288 }
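/* Example of the slice split above: with h_block = 10 block rows and
 * ctx->slice_count = 4, base_blocks_per_slice = 2 and remainder_blocks = 2,
 * so the slices cover rows [0,3), [3,6), [6,8) and [8,10); the two leftover
 * rows are absorbed by the first two slices. */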
289 
290 static int decompress_texture_thread(AVCodecContext *avctx, void *arg,
291  int slice, int thread_nb)
292 {
293  return decompress_texture_thread_internal(avctx, arg, slice, thread_nb, 0);
294 }
295 
296 static int decompress_texture2_thread(AVCodecContext *avctx, void *arg,
297  int slice, int thread_nb)
298 {
299  return decompress_texture_thread_internal(avctx, arg, slice, thread_nb, 1);
300 }
301 
302 static int hap_decode(AVCodecContext *avctx, void *data,
303  int *got_frame, AVPacket *avpkt)
304 {
305  HapContext *ctx = avctx->priv_data;
306  ThreadFrame tframe;
307  int ret, i, t;
308  int section_size;
309  enum HapSectionType section_type;
310  int start_texture_section = 0;
311  int tex_rat[2] = {0, 0};
312 
313  bytestream2_init(&ctx->gbc, avpkt->data, avpkt->size);
314 
315  tex_rat[0] = ctx->tex_rat;
316 
317  /* check for multi texture header */
318  if (ctx->texture_count == 2) {
319  ret = ff_hap_parse_section_header(&ctx->gbc, &section_size, &section_type);
320  if (ret != 0)
321  return ret;
322  if ((section_type & 0x0F) != 0x0D) {
323  av_log(avctx, AV_LOG_ERROR, "Invalid section type in 2 textures mode %#04x.\n", section_type);
324  return AVERROR_INVALIDDATA;
325  }
326  start_texture_section = 4;
327  tex_rat[1] = ctx->tex_rat2;
328  }
329 
330  /* Get the output frame ready to receive data */
331  tframe.f = data;
332  ret = ff_thread_get_buffer(avctx, &tframe, 0);
333  if (ret < 0)
334  return ret;
335 
336  for (t = 0; t < ctx->texture_count; t++) {
337  bytestream2_seek(&ctx->gbc, start_texture_section, SEEK_SET);
338 
339  /* Check for section header */
340  ret = hap_parse_frame_header(avctx);
341  if (ret < 0)
342  return ret;
343 
344  if (ctx->tex_size != (avctx->coded_width / TEXTURE_BLOCK_W)
345  *(avctx->coded_height / TEXTURE_BLOCK_H)
346  *tex_rat[t]) {
347  av_log(avctx, AV_LOG_ERROR, "uncompressed size mismatches\n");
348  return AVERROR_INVALIDDATA;
349  }
350 
351  start_texture_section += ctx->texture_section_size + 4;
352 
353  if (avctx->codec->update_thread_context)
354  ff_thread_finish_setup(avctx);
355 
356  /* Unpack the DXT texture */
357  if (hap_can_use_tex_in_place(ctx)) {
358  int tex_size;
359  /* Only DXTC texture compression in a contiguous block */
360  ctx->tex_data = ctx->gbc.buffer;
361  tex_size = FFMIN(ctx->texture_section_size, bytestream2_get_bytes_left(&ctx->gbc));
362  if (tex_size < (avctx->coded_width / TEXTURE_BLOCK_W)
363  *(avctx->coded_height / TEXTURE_BLOCK_H)
364  *tex_rat[t]) {
365  av_log(avctx, AV_LOG_ERROR, "Insufficient data\n");
366  return AVERROR_INVALIDDATA;
367  }
368  } else {
369  /* Perform the second-stage decompression */
370  ret = av_reallocp(&ctx->tex_buf, ctx->tex_size);
371  if (ret < 0)
372  return ret;
373 
374  avctx->execute2(avctx, decompress_chunks_thread, NULL,
375  ctx->chunk_results, ctx->chunk_count);
376 
377  for (i = 0; i < ctx->chunk_count; i++) {
378  if (ctx->chunk_results[i] < 0)
379  return ctx->chunk_results[i];
380  }
381 
382  ctx->tex_data = ctx->tex_buf;
383  }
384 
385  /* Use the decompress function on the texture, one block per thread */
386  if (t == 0){
387  avctx->execute2(avctx, decompress_texture_thread, tframe.f, NULL, ctx->slice_count);
388  } else{
389  tframe.f = data;
390  avctx->execute2(avctx, decompress_texture2_thread, tframe.f, NULL, ctx->slice_count);
391  }
392  }
393 
394  /* Frame is ready to be output */
395  tframe.f->pict_type = AV_PICTURE_TYPE_I;
396  tframe.f->key_frame = 1;
397  *got_frame = 1;
398 
399  return avpkt->size;
400 }
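/* Size check example: for a 1280x720 Hap1 frame, coded_width / TEXTURE_BLOCK_W
 * is 320 and coded_height / TEXTURE_BLOCK_H is 180, so with tex_rat = 8 bytes
 * per DXT1 block the unpacked texture must be 320 * 180 * 8 = 460800 bytes;
 * Hap5 and HapY use 16 bytes per block, i.e. 921600 bytes. Anything else hits
 * the "uncompressed size mismatches" error above. */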
401 
402 static av_cold int hap_init(AVCodecContext *avctx)
403 {
404  HapContext *ctx = avctx->priv_data;
405  const char *texture_name;
406  int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
407 
408  if (ret < 0) {
409  av_log(avctx, AV_LOG_ERROR, "Invalid video size %dx%d.\n",
410  avctx->width, avctx->height);
411  return ret;
412  }
413 
414  /* Since codec is based on 4x4 blocks, size is aligned to 4 */
415  avctx->coded_width = FFALIGN(avctx->width, TEXTURE_BLOCK_W);
416  avctx->coded_height = FFALIGN(avctx->height, TEXTURE_BLOCK_H);
417 
418  ff_texturedsp_init(&ctx->dxtc);
419 
420  ctx->texture_count = 1;
421  ctx->uncompress_pix_size = 4;
422 
423  switch (avctx->codec_tag) {
424  case MKTAG('H','a','p','1'):
425  texture_name = "DXT1";
426  ctx->tex_rat = 8;
427  ctx->tex_fun = ctx->dxtc.dxt1_block;
428  avctx->pix_fmt = AV_PIX_FMT_RGB0;
429  break;
430  case MKTAG('H','a','p','5'):
431  texture_name = "DXT5";
432  ctx->tex_rat = 16;
433  ctx->tex_fun = ctx->dxtc.dxt5_block;
434  avctx->pix_fmt = AV_PIX_FMT_RGBA;
435  break;
436  case MKTAG('H','a','p','Y'):
437  texture_name = "DXT5-YCoCg-scaled";
438  ctx->tex_rat = 16;
439  ctx->tex_fun = ctx->dxtc.dxt5ys_block;
440  avctx->pix_fmt = AV_PIX_FMT_RGB0;
441  break;
442  case MKTAG('H','a','p','A'):
443  texture_name = "RGTC1";
444  ctx->tex_rat = 8;
445  ctx->tex_fun = ctx->dxtc.rgtc1u_gray_block;
446  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
447  ctx->uncompress_pix_size = 1;
448  break;
449  case MKTAG('H','a','p','M'):
450  texture_name = "DXT5-YCoCg-scaled / RGTC1";
451  ctx->tex_rat = 16;
452  ctx->tex_rat2 = 8;
453  ctx->tex_fun = ctx->dxtc.dxt5ys_block;
454  ctx->tex_fun2 = ctx->dxtc.rgtc1u_alpha_block;
455  avctx->pix_fmt = AV_PIX_FMT_RGBA;
456  ctx->texture_count = 2;
457  break;
458  default:
459  return AVERROR_DECODER_NOT_FOUND;
460  }
461 
462  av_log(avctx, AV_LOG_DEBUG, "%s texture\n", texture_name);
463 
464  ctx->slice_count = av_clip(avctx->thread_count, 1,
465  avctx->coded_height / TEXTURE_BLOCK_H);
466 
467  return 0;
468 }
469 
470 static av_cold int hap_close(AVCodecContext *avctx)
471 {
472  HapContext *ctx = avctx->priv_data;
473 
474  ff_hap_free_context(ctx);
475 
476  return 0;
477 }
478 
479 AVCodec ff_hap_decoder = {
480  .name = "hap",
481  .long_name = NULL_IF_CONFIG_SMALL("Vidvox Hap"),
482  .type = AVMEDIA_TYPE_VIDEO,
483  .id = AV_CODEC_ID_HAP,
484  .init = hap_init,
485  .decode = hap_decode,
486  .close = hap_close,
487  .priv_data_size = sizeof(HapContext),
488  .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS |
489  AV_CODEC_CAP_DR1,
490  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
491  FF_CODEC_CAP_INIT_CLEANUP,
492  .codec_tags = (const uint32_t []){
493  MKTAG('H','a','p','1'),
494  MKTAG('H','a','p','5'),
495  MKTAG('H','a','p','Y'),
496  MKTAG('H','a','p','A'),
497  MKTAG('H','a','p','M'),
498  FF_CODEC_TAGS_END,
499  },
500 };
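For reference, the sketch below shows one way to drive this decoder through the public libavcodec API. The helper name decode_hap_packet and the condensed error handling are illustrative, not part of FFmpeg; the codec_tag must carry the stream's fourcc because hap_init() selects the texture format from it.

#include <stdint.h>
#include <libavcodec/avcodec.h>

/* Decode one demuxed Hap packet. 'fourcc' must match the stream's tag,
 * e.g. MKTAG('H','a','p','1'). Returns a frame in RGB0/RGBA/GRAY8 depending
 * on the Hap flavour, or NULL on error. */
static AVFrame *decode_hap_packet(const AVPacket *pkt, int width, int height,
                                  uint32_t fourcc)
{
    const AVCodec *codec  = avcodec_find_decoder(AV_CODEC_ID_HAP);
    AVCodecContext *avctx = codec ? avcodec_alloc_context3(codec) : NULL;
    AVFrame *frame        = av_frame_alloc();

    if (!avctx || !frame)
        goto fail;

    avctx->width     = width;
    avctx->height    = height;
    avctx->codec_tag = fourcc;

    if (avcodec_open2(avctx, codec, NULL) < 0 ||
        avcodec_send_packet(avctx, pkt) < 0 ||
        avcodec_receive_frame(avctx, frame) < 0)
        goto fail;

    avcodec_free_context(&avctx);
    return frame;

fail:
    av_frame_free(&frame);
    avcodec_free_context(&avctx);
    return NULL;
}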