FFmpeg 2.6.9
mss2.c
1 /*
2  * Microsoft Screen 2 (aka Windows Media Video V9 Screen) decoder
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Microsoft Screen 2 (aka Windows Media Video V9 Screen) decoder
24  */
25 
26 #include "libavutil/avassert.h"
27 #include "error_resilience.h"
28 #include "internal.h"
29 #include "mpeg_er.h"
30 #include "msmpeg4data.h"
31 #include "qpeldsp.h"
32 #include "vc1.h"
33 #include "mss12.h"
34 #include "mss2dsp.h"
35 
36 typedef struct MSS2Context {
37  VC1Context v;
38  int split_position;
39  AVFrame *last_pic;
40  MSS12Context c;
41  MSS2DSPContext dsp;
42  QpelDSPContext qdsp;
43  SliceContext sc[2];
44 } MSS2Context;
45 
46 static void arith2_normalise(ArithCoder *c)
47 {
48  while ((c->high >> 15) - (c->low >> 15) < 2) {
49  if ((c->low ^ c->high) & 0x10000) {
50  c->high ^= 0x8000;
51  c->value ^= 0x8000;
52  c->low ^= 0x8000;
53  }
54  c->high = c->high << 8 & 0xFFFFFF | 0xFF;
55  c->value = c->value << 8 & 0xFFFFFF | bytestream2_get_byte(c->gbc.gB);
56  c->low = c->low << 8 & 0xFFFFFF;
57  }
58 }
59 
60 ARITH_GET_BIT(arith2)
61 
62 /* L. Stuiver and A. Moffat: "Piecewise Integer Mapping for Arithmetic Coding."
63  * In Proc. 8th Data Compression Conference (DCC '98), pp. 3-12, Mar. 1998 */
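/* Rough idea of the mapping below: the coder keeps a 24-bit interval
 * [low, high]. The alphabet total n is scaled by a power of two so that
 * n <= range < 2 * n; with split = 2 * n - range, values up to split map
 * to themselves and the remaining values share every second slot, so the
 * scaled value always lands in 0..n-1 without a division.
 * Worked example (made-up numbers): range = 300, n = 256 gives
 * split = 212; inputs 0..212 map to themselves, 213..299 map to 212..255. */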
64 
65 static int arith2_get_scaled_value(int value, int n, int range)
66 {
67  int split = (n << 1) - range;
68 
69  if (value > split)
70  return split + (value - split >> 1);
71  else
72  return value;
73 }
74 
75 static void arith2_rescale_interval(ArithCoder *c, int range,
76  int low, int high, int n)
77 {
78  int split = (n << 1) - range;
79 
80  if (high > split)
81  c->high = split + (high - split << 1);
82  else
83  c->high = high;
84 
85  c->high += c->low - 1;
86 
87  if (low > split)
88  c->low += split + (low - split << 1);
89  else
90  c->low += low;
91 }
92 
93 static int arith2_get_number(ArithCoder *c, int n)
94 {
95  int range = c->high - c->low + 1;
96  int scale = av_log2(range) - av_log2(n);
97  int val;
98 
99  if (n << scale > range)
100  scale--;
101 
102  n <<= scale;
103 
104  val = arith2_get_scaled_value(c->value - c->low, n, range) >> scale;
105 
106  arith2_rescale_interval(c, range, val << scale, (val + 1) << scale, n);
107 
108  arith2_normalise(c);
109 
110  return val;
111 }
112 
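/* Note on the tables consumed by arith2_get_prob (and, through
 * ARITH_GET_MODEL_SYM, by the model decoder): probs[] appears to hold
 * cumulative frequencies in decreasing order with probs[0] as the total;
 * the coded value is scaled into that total and the first entry not
 * larger than it gives the symbol index, which also selects the new
 * subinterval [probs[i], probs[i - 1]). */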
113 static int arith2_get_prob(ArithCoder *c, int16_t *probs)
114 {
115  int range = c->high - c->low + 1, n = *probs;
116  int scale = av_log2(range) - av_log2(n);
117  int i = 0, val;
118 
119  if (n << scale > range)
120  scale--;
121 
122  n <<= scale;
123 
124  val = arith2_get_scaled_value(c->value - c->low, n, range) >> scale;
125  while (probs[++i] > val) ;
126 
127  arith2_rescale_interval(c, range,
128  probs[i] << scale, probs[i - 1] << scale, n);
129 
130  return i;
131 }
132 
133 ARITH_GET_MODEL_SYM(arith2)
134 
135 static int arith2_get_consumed_bytes(ArithCoder *c)
136 {
137  int diff = (c->high >> 16) - (c->low >> 16);
138  int bp = bytestream2_tell(c->gbc.gB) - 3 << 3;
139  int bits = 1;
140 
141  while (!(diff & 0x80)) {
142  bits++;
143  diff <<= 1;
144  }
145 
146  return (bits + bp + 7 >> 3) + ((c->low >> 16) + 1 == c->high >> 16);
147 }
148 
149 static void arith2_init(ArithCoder *c, GetByteContext *gB)
150 {
151  c->low = 0;
152  c->high = 0xFFFFFF;
153  c->value = bytestream2_get_be24(gB);
154  c->gbc.gB = gB;
155  c->get_model_sym = arith2_get_model_sym;
156  c->get_number = arith2_get_number;
157 }
158 
159 static int decode_pal_v2(MSS12Context *ctx, const uint8_t *buf, int buf_size)
160 {
161  int i, ncol;
162  uint32_t *pal = ctx->pal + 256 - ctx->free_colours;
163 
164  if (!ctx->free_colours)
165  return 0;
166 
167  ncol = *buf++;
168  if (ncol > ctx->free_colours || buf_size < 2 + ncol * 3)
169  return AVERROR_INVALIDDATA;
170  for (i = 0; i < ncol; i++)
171  *pal++ = AV_RB24(buf + 3 * i);
172 
173  return 1 + ncol * 3;
174 }
175 
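/* decode_555 below is a small byte-wise RLE for RGB555 frames. Roughly:
 * an opcode byte b < 128 starts a literal pixel (value b << 8 | next
 * byte), b >= 130 repeats the previous symbol with b - 130 extra count
 * bytes, and b == 128 / 129 select the special symbols -1 ("copy the
 * pixel from the row above") and -2 ("skip"). Inter frames begin with an
 * update rectangle read via READ_PAIR. */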
176 static int decode_555(GetByteContext *gB, uint16_t *dst, int stride,
177  int keyframe, int w, int h)
178 {
179  int last_symbol = 0, repeat = 0, prev_avail = 0;
180 
181  if (!keyframe) {
182  int x, y, endx, endy, t;
183 
184 #define READ_PAIR(a, b) \
185  a = bytestream2_get_byte(gB) << 4; \
186  t = bytestream2_get_byte(gB); \
187  a |= t >> 4; \
188  b = (t & 0xF) << 8; \
189  b |= bytestream2_get_byte(gB); \
190 
191  READ_PAIR(x, endx)
192  READ_PAIR(y, endy)
193 
194  if (endx >= w || endy >= h || x > endx || y > endy)
195  return AVERROR_INVALIDDATA;
196  dst += x + stride * y;
197  w = endx - x + 1;
198  h = endy - y + 1;
199  if (y)
200  prev_avail = 1;
201  }
202 
203  do {
204  uint16_t *p = dst;
205  do {
206  if (repeat-- < 1) {
207  int b = bytestream2_get_byte(gB);
208  if (b < 128)
209  last_symbol = b << 8 | bytestream2_get_byte(gB);
210  else if (b > 129) {
211  repeat = 0;
212  while (b-- > 130) {
213  if (repeat >= (INT_MAX >> 8) - 1) {
214  av_log(NULL, AV_LOG_ERROR, "repeat overflow\n");
215  return AVERROR_INVALIDDATA;
216  }
217  repeat = (repeat << 8) + bytestream2_get_byte(gB) + 1;
218  }
219  if (last_symbol == -2) {
220  int skip = FFMIN((unsigned)repeat, dst + w - p);
221  repeat -= skip;
222  p += skip;
223  }
224  } else
225  last_symbol = 127 - b;
226  }
227  if (last_symbol >= 0)
228  *p = last_symbol;
229  else if (last_symbol == -1 && prev_avail)
230  *p = *(p - stride);
231  } while (++p < dst + w);
232  dst += stride;
233  prev_avail = 1;
234  } while (--h);
235 
236  return 0;
237 }
238 
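/* decode_rle below first rebuilds a canonical Huffman code: for each code
 * length the explicitly transmitted symbols are read, then the rest of
 * the 269/270-symbol alphabet is appended in increasing symbol order at
 * the smallest lengths that still complete the code (the surplus_codes
 * bookkeeping). Symbols 0..255 are palette indices written to both the
 * palette plane and, via pal[], the RGB plane; the remaining symbols
 * carry repeat counts and the same "copy from the row above" / "skip"
 * specials as decode_555. */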
239 static int decode_rle(GetBitContext *gb, uint8_t *pal_dst, int pal_stride,
240  uint8_t *rgb_dst, int rgb_stride, uint32_t *pal,
241  int keyframe, int kf_slipt, int slice, int w, int h)
242 {
243  uint8_t bits[270] = { 0 };
244  uint32_t codes[270];
245  VLC vlc;
246 
247  int current_length = 0, read_codes = 0, next_code = 0, current_codes = 0;
248  int remaining_codes, surplus_codes, i;
249 
250  const int alphabet_size = 270 - keyframe;
251 
252  int last_symbol = 0, repeat = 0, prev_avail = 0;
253 
254  if (!keyframe) {
255  int x, y, clipw, cliph;
256 
257  x = get_bits(gb, 12);
258  y = get_bits(gb, 12);
259  clipw = get_bits(gb, 12) + 1;
260  cliph = get_bits(gb, 12) + 1;
261 
262  if (x + clipw > w || y + cliph > h)
263  return AVERROR_INVALIDDATA;
264  pal_dst += pal_stride * y + x;
265  rgb_dst += rgb_stride * y + x * 3;
266  w = clipw;
267  h = cliph;
268  if (y)
269  prev_avail = 1;
270  } else {
271  if (slice > 0) {
272  pal_dst += pal_stride * kf_slipt;
273  rgb_dst += rgb_stride * kf_slipt;
274  prev_avail = 1;
275  h -= kf_slipt;
276  } else
277  h = kf_slipt;
278  }
279 
280  /* read explicit codes */
281  do {
282  while (current_codes--) {
283  int symbol = get_bits(gb, 8);
284  if (symbol >= 204 - keyframe)
285  symbol += 14 - keyframe;
286  else if (symbol > 189)
287  symbol = get_bits1(gb) + (symbol << 1) - 190;
288  if (bits[symbol])
289  return AVERROR_INVALIDDATA;
290  bits[symbol] = current_length;
291  codes[symbol] = next_code++;
292  read_codes++;
293  }
294  current_length++;
295  next_code <<= 1;
296  remaining_codes = (1 << current_length) - next_code;
297  current_codes = get_bits(gb, av_ceil_log2(remaining_codes + 1));
298  if (current_length > 22 || current_codes > remaining_codes)
299  return AVERROR_INVALIDDATA;
300  } while (current_codes != remaining_codes);
301 
302  remaining_codes = alphabet_size - read_codes;
303 
304  /* determine the minimum length to fit the rest of the alphabet */
305  while ((surplus_codes = (2 << current_length) -
306  (next_code << 1) - remaining_codes) < 0) {
307  current_length++;
308  next_code <<= 1;
309  }
310 
311  /* add the rest of the symbols lexicographically */
312  for (i = 0; i < alphabet_size; i++)
313  if (!bits[i]) {
314  if (surplus_codes-- == 0) {
315  current_length++;
316  next_code <<= 1;
317  }
318  bits[i] = current_length;
319  codes[i] = next_code++;
320  }
321 
322  if (next_code != 1 << current_length)
323  return AVERROR_INVALIDDATA;
324 
325  if (i = init_vlc(&vlc, 9, alphabet_size, bits, 1, 1, codes, 4, 4, 0))
326  return i;
327 
328  /* frame decode */
329  do {
330  uint8_t *pp = pal_dst;
331  uint8_t *rp = rgb_dst;
332  do {
333  if (repeat-- < 1) {
334  int b = get_vlc2(gb, vlc.table, 9, 3);
335  if (b < 256)
336  last_symbol = b;
337  else if (b < 268) {
338  b -= 256;
339  if (b == 11)
340  b = get_bits(gb, 4) + 10;
341 
342  if (!b)
343  repeat = 0;
344  else
345  repeat = get_bits(gb, b);
346 
347  repeat += (1 << b) - 1;
348 
349  if (last_symbol == -2) {
350  int skip = FFMIN(repeat, pal_dst + w - pp);
351  repeat -= skip;
352  pp += skip;
353  rp += skip * 3;
354  }
355  } else
356  last_symbol = 267 - b;
357  }
358  if (last_symbol >= 0) {
359  *pp = last_symbol;
360  AV_WB24(rp, pal[last_symbol]);
361  } else if (last_symbol == -1 && prev_avail) {
362  *pp = *(pp - pal_stride);
363  memcpy(rp, rp - rgb_stride, 3);
364  }
365  rp += 3;
366  } while (++pp < pal_dst + w);
367  pal_dst += pal_stride;
368  rgb_dst += rgb_stride;
369  prev_avail = 1;
370  } while (--h);
371 
372  ff_free_vlc(&vlc);
373  return 0;
374 }
375 
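/* Each coded WMV9 rectangle is decoded below as a self-contained VC-1
 * intra frame: the frame header is parsed, the blocks are decoded at the
 * rectangle's size, planes halved via the respic flags are upsampled
 * back, and the YUV result is blitted into the RGB output, either fully
 * or masked against the palette plane when wmv9_mask is set. */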
376 static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
377  int x, int y, int w, int h, int wmv9_mask)
378 {
379  MSS2Context *ctx = avctx->priv_data;
380  MSS12Context *c = &ctx->c;
381  VC1Context *v = avctx->priv_data;
382  MpegEncContext *s = &v->s;
383  AVFrame *f;
384  int ret;
385 
386  ff_mpeg_flush(avctx);
387 
388  if ((ret = init_get_bits8(&s->gb, buf, buf_size)) < 0)
389  return ret;
390 
391  s->loop_filter = avctx->skip_loop_filter < AVDISCARD_ALL;
392 
393  if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
394  av_log(v->s.avctx, AV_LOG_ERROR, "header error\n");
395  return AVERROR_INVALIDDATA;
396  }
397 
398  if (s->pict_type != AV_PICTURE_TYPE_I) {
399  av_log(v->s.avctx, AV_LOG_ERROR, "expected I-frame\n");
400  return AVERROR_INVALIDDATA;
401  }
402 
403  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
404 
405  if ((ret = ff_mpv_frame_start(s, avctx)) < 0) {
406  av_log(v->s.avctx, AV_LOG_ERROR, "ff_mpv_frame_start error\n");
407  avctx->pix_fmt = AV_PIX_FMT_RGB24;
408  return ret;
409  }
410 
411  ff_mpeg_er_frame_start(s);
412 
413  v->bits = buf_size * 8;
414 
415  v->end_mb_x = (w + 15) >> 4;
416  s->end_mb_y = (h + 15) >> 4;
417  if (v->respic & 1)
418  v->end_mb_x = v->end_mb_x + 1 >> 1;
419  if (v->respic & 2)
420  s->end_mb_y = s->end_mb_y + 1 >> 1;
421 
422  ff_vc1_decode_blocks(v);
423 
424  ff_er_frame_end(&s->er);
425 
426  ff_mpv_frame_end(s);
427 
428  f = s->current_picture.f;
429 
430  if (v->respic == 3) {
431  ctx->dsp.upsample_plane(f->data[0], f->linesize[0], w, h);
432  ctx->dsp.upsample_plane(f->data[1], f->linesize[1], w+1 >> 1, h+1 >> 1);
433  ctx->dsp.upsample_plane(f->data[2], f->linesize[2], w+1 >> 1, h+1 >> 1);
434  } else if (v->respic)
436  "Asymmetric WMV9 rectangle subsampling");
437 
438  av_assert0(f->linesize[1] == f->linesize[2]);
439 
440  if (wmv9_mask != -1)
441  ctx->dsp.mss2_blit_wmv9_masked(c->rgb_pic + y * c->rgb_stride + x * 3,
442  c->rgb_stride, wmv9_mask,
443  c->pal_pic + y * c->pal_stride + x,
444  c->pal_stride,
445  f->data[0], f->linesize[0],
446  f->data[1], f->data[2], f->linesize[1],
447  w, h);
448  else
449  ctx->dsp.mss2_blit_wmv9(c->rgb_pic + y * c->rgb_stride + x * 3,
450  c->rgb_stride,
451  f->data[0], f->linesize[0],
452  f->data[1], f->data[2], f->linesize[1],
453  w, h);
454 
455  avctx->pix_fmt = AV_PIX_FMT_RGB24;
456 
457  return 0;
458 }
459 
460 typedef struct Rectangle {
461  int coded, x, y, w, h;
462 } Rectangle;
463 
464 #define MAX_WMV9_RECTANGLES 20
465 #define ARITH2_PADDING 2
466 
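/* Rough packet layout, as parsed below: a short bit header (keyframe
 * flag, has_wmv9, optional motion-vector flag, RLE/555 mode bits, slice
 * split position), then an arithmetically coded list of WMV9 rectangles
 * when has_wmv9 is set, a palette update (keyframes) or a global motion
 * vector, the palette/RGB image layers (RLE or the shared MSS1/MSS2
 * arithmetic coder, possibly split into two slices), and finally one
 * embedded WMV9 bitstream per coded rectangle, each prefixed with a
 * 24-bit little-endian size. */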
467 static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
468  AVPacket *avpkt)
469 {
470  const uint8_t *buf = avpkt->data;
471  int buf_size = avpkt->size;
472  MSS2Context *ctx = avctx->priv_data;
473  MSS12Context *c = &ctx->c;
474  AVFrame *frame = data;
475  GetBitContext gb;
476  GetByteContext gB;
477  ArithCoder acoder;
478 
479  int keyframe, has_wmv9, has_mv, is_rle, is_555, ret;
480 
481  Rectangle wmv9rects[MAX_WMV9_RECTANGLES], *r;
482  int used_rects = 0, i, implicit_rect = 0, av_uninit(wmv9_mask);
483 
484  av_assert0(FF_INPUT_BUFFER_PADDING_SIZE >=
485  ARITH2_PADDING + (MIN_CACHE_BITS + 7) / 8);
486 
487  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
488  return ret;
489 
490  if (keyframe = get_bits1(&gb))
491  skip_bits(&gb, 7);
492  has_wmv9 = get_bits1(&gb);
493  has_mv = keyframe ? 0 : get_bits1(&gb);
494  is_rle = get_bits1(&gb);
495  is_555 = is_rle && get_bits1(&gb);
496  if (c->slice_split > 0)
497  ctx->split_position = c->slice_split;
498  else if (c->slice_split < 0) {
499  if (get_bits1(&gb)) {
500  if (get_bits1(&gb)) {
501  if (get_bits1(&gb))
502  ctx->split_position = get_bits(&gb, 16);
503  else
504  ctx->split_position = get_bits(&gb, 12);
505  } else
506  ctx->split_position = get_bits(&gb, 8) << 4;
507  } else {
508  if (keyframe)
509  ctx->split_position = avctx->height / 2;
510  }
511  } else
512  ctx->split_position = avctx->height;
513 
514  if (c->slice_split && (ctx->split_position < 1 - is_555 ||
515  ctx->split_position > avctx->height - 1))
516  return AVERROR_INVALIDDATA;
517 
518  align_get_bits(&gb);
519  buf += get_bits_count(&gb) >> 3;
520  buf_size -= get_bits_count(&gb) >> 3;
521 
522  if (buf_size < 1)
523  return AVERROR_INVALIDDATA;
524 
525  if (is_555 && (has_wmv9 || has_mv || c->slice_split && ctx->split_position))
526  return AVERROR_INVALIDDATA;
527 
528  avctx->pix_fmt = is_555 ? AV_PIX_FMT_RGB555 : AV_PIX_FMT_RGB24;
529  if (ctx->last_pic->format != avctx->pix_fmt)
530  av_frame_unref(ctx->last_pic);
531 
532  if (has_wmv9) {
533  bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
534  arith2_init(&acoder, &gB);
535 
536  implicit_rect = !arith2_get_bit(&acoder);
537 
538  while (arith2_get_bit(&acoder)) {
539  if (used_rects == MAX_WMV9_RECTANGLES)
540  return AVERROR_INVALIDDATA;
541  r = &wmv9rects[used_rects];
542  if (!used_rects)
543  r->x = arith2_get_number(&acoder, avctx->width);
544  else
545  r->x = arith2_get_number(&acoder, avctx->width -
546  wmv9rects[used_rects - 1].x) +
547  wmv9rects[used_rects - 1].x;
548  r->y = arith2_get_number(&acoder, avctx->height);
549  r->w = arith2_get_number(&acoder, avctx->width - r->x) + 1;
550  r->h = arith2_get_number(&acoder, avctx->height - r->y) + 1;
551  used_rects++;
552  }
553 
554  if (implicit_rect && used_rects) {
555  av_log(avctx, AV_LOG_ERROR, "implicit_rect && used_rects > 0\n");
556  return AVERROR_INVALIDDATA;
557  }
558 
559  if (implicit_rect) {
560  wmv9rects[0].x = 0;
561  wmv9rects[0].y = 0;
562  wmv9rects[0].w = avctx->width;
563  wmv9rects[0].h = avctx->height;
564 
565  used_rects = 1;
566  }
567  for (i = 0; i < used_rects; i++) {
568  if (!implicit_rect && arith2_get_bit(&acoder)) {
569  av_log(avctx, AV_LOG_ERROR, "Unexpected grandchildren\n");
570  return AVERROR_INVALIDDATA;
571  }
572  if (!i) {
573  wmv9_mask = arith2_get_bit(&acoder) - 1;
574  if (!wmv9_mask)
575  wmv9_mask = arith2_get_number(&acoder, 256);
576  }
577  wmv9rects[i].coded = arith2_get_number(&acoder, 2);
578  }
579 
580  buf += arith2_get_consumed_bytes(&acoder);
581  buf_size -= arith2_get_consumed_bytes(&acoder);
582  if (buf_size < 1)
583  return AVERROR_INVALIDDATA;
584  }
585 
586  c->mvX = c->mvY = 0;
587  if (keyframe && !is_555) {
588  if ((i = decode_pal_v2(c, buf, buf_size)) < 0)
589  return AVERROR_INVALIDDATA;
590  buf += i;
591  buf_size -= i;
592  } else if (has_mv) {
593  buf += 4;
594  buf_size -= 4;
595  if (buf_size < 1)
596  return AVERROR_INVALIDDATA;
597  c->mvX = AV_RB16(buf - 4) - avctx->width;
598  c->mvY = AV_RB16(buf - 2) - avctx->height;
599  }
600 
601  if (c->mvX < 0 || c->mvY < 0) {
602  FFSWAP(uint8_t *, c->pal_pic, c->last_pal_pic);
603 
604  if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
605  return ret;
606 
607  if (ctx->last_pic->data[0]) {
608  av_assert0(frame->linesize[0] == ctx->last_pic->linesize[0]);
609  c->last_rgb_pic = ctx->last_pic->data[0] +
610  ctx->last_pic->linesize[0] * (avctx->height - 1);
611  } else {
612  av_log(avctx, AV_LOG_ERROR, "Missing keyframe\n");
613  return AVERROR_INVALIDDATA;
614  }
615  } else {
616  if ((ret = ff_reget_buffer(avctx, ctx->last_pic)) < 0)
617  return ret;
618  if ((ret = av_frame_ref(frame, ctx->last_pic)) < 0)
619  return ret;
620 
621  c->last_rgb_pic = NULL;
622  }
623  c->rgb_pic = frame->data[0] +
624  frame->linesize[0] * (avctx->height - 1);
625  c->rgb_stride = -frame->linesize[0];
626 
627  frame->key_frame = keyframe;
628  frame->pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
629 
630  if (is_555) {
631  bytestream2_init(&gB, buf, buf_size);
632 
633  if (decode_555(&gB, (uint16_t *)c->rgb_pic, c->rgb_stride >> 1,
634  keyframe, avctx->width, avctx->height))
635  return AVERROR_INVALIDDATA;
636 
637  buf_size -= bytestream2_tell(&gB);
638  } else {
639  if (keyframe) {
640  c->corrupted = 0;
641  ff_mss12_slicecontext_reset(&ctx->sc[0]);
642  if (c->slice_split)
643  ff_mss12_slicecontext_reset(&ctx->sc[1]);
644  }
645  if (is_rle) {
646  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
647  return ret;
648  if (ret = decode_rle(&gb, c->pal_pic, c->pal_stride,
649  c->rgb_pic, c->rgb_stride, c->pal, keyframe,
650  ctx->split_position, 0,
651  avctx->width, avctx->height))
652  return ret;
653  align_get_bits(&gb);
654 
655  if (c->slice_split)
656  if (ret = decode_rle(&gb, c->pal_pic, c->pal_stride,
657  c->rgb_pic, c->rgb_stride, c->pal, keyframe,
658  ctx->split_position, 1,
659  avctx->width, avctx->height))
660  return ret;
661 
662  align_get_bits(&gb);
663  buf += get_bits_count(&gb) >> 3;
664  buf_size -= get_bits_count(&gb) >> 3;
665  } else if (!implicit_rect || wmv9_mask != -1) {
666  if (c->corrupted)
667  return AVERROR_INVALIDDATA;
668  bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
669  arith2_init(&acoder, &gB);
670  c->keyframe = keyframe;
671  if (c->corrupted = ff_mss12_decode_rect(&ctx->sc[0], &acoder, 0, 0,
672  avctx->width,
673  ctx->split_position))
674  return AVERROR_INVALIDDATA;
675 
676  buf += arith2_get_consumed_bytes(&acoder);
677  buf_size -= arith2_get_consumed_bytes(&acoder);
678  if (c->slice_split) {
679  if (buf_size < 1)
680  return AVERROR_INVALIDDATA;
681  bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
682  arith2_init(&acoder, &gB);
683  if (c->corrupted = ff_mss12_decode_rect(&ctx->sc[1], &acoder, 0,
684  ctx->split_position,
685  avctx->width,
686  avctx->height - ctx->split_position))
687  return AVERROR_INVALIDDATA;
688 
689  buf += arith2_get_consumed_bytes(&acoder);
690  buf_size -= arith2_get_consumed_bytes(&acoder);
691  }
692  } else
693  memset(c->pal_pic, 0, c->pal_stride * avctx->height);
694  }
695 
696  if (has_wmv9) {
697  for (i = 0; i < used_rects; i++) {
698  int x = wmv9rects[i].x;
699  int y = wmv9rects[i].y;
700  int w = wmv9rects[i].w;
701  int h = wmv9rects[i].h;
702  if (wmv9rects[i].coded) {
703  int WMV9codedFrameSize;
704  if (buf_size < 4 || !(WMV9codedFrameSize = AV_RL24(buf)))
705  return AVERROR_INVALIDDATA;
706  if (ret = decode_wmv9(avctx, buf + 3, buf_size - 3,
707  x, y, w, h, wmv9_mask))
708  return ret;
709  buf += WMV9codedFrameSize + 3;
710  buf_size -= WMV9codedFrameSize + 3;
711  } else {
712  uint8_t *dst = c->rgb_pic + y * c->rgb_stride + x * 3;
713  if (wmv9_mask != -1) {
714  ctx->dsp.mss2_gray_fill_masked(dst, c->rgb_stride,
715  wmv9_mask,
716  c->pal_pic + y * c->pal_stride + x,
717  c->pal_stride,
718  w, h);
719  } else {
720  do {
721  memset(dst, 0x80, w * 3);
722  dst += c->rgb_stride;
723  } while (--h);
724  }
725  }
726  }
727  }
728 
729  if (buf_size)
730  av_log(avctx, AV_LOG_WARNING, "buffer not fully consumed\n");
731 
732  if (c->mvX < 0 || c->mvY < 0) {
733  av_frame_unref(ctx->last_pic);
734  ret = av_frame_ref(ctx->last_pic, frame);
735  if (ret < 0)
736  return ret;
737  }
738 
739  *got_frame = 1;
740 
741  return avpkt->size;
742 }
743 
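/* The embedded WMV9 data carries only frame-level headers, so wmv9_init
 * below hand-fills a fixed Main-profile VC-1 sequence header (post-proc,
 * transform and motion flags, etc.) instead of parsing one from the
 * bitstream. */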
744 static av_cold int wmv9_init(AVCodecContext *avctx)
745 {
746  VC1Context *v = avctx->priv_data;
747  int ret;
748 
749  v->s.avctx = avctx;
750 
751  if ((ret = ff_vc1_init_common(v)) < 0)
752  return ret;
753  ff_vc1dsp_init(&v->vc1dsp);
754 
755  v->profile = PROFILE_MAIN;
756 
757  v->zz_8x4 = ff_wmv2_scantableA;
758  v->zz_4x8 = ff_wmv2_scantableB;
759  v->res_y411 = 0;
760  v->res_sprite = 0;
761 
762  v->frmrtq_postproc = 7;
763  v->bitrtq_postproc = 31;
764 
765  v->res_x8 = 0;
766  v->multires = 0;
767  v->res_fasttx = 1;
768 
769  v->fastuvmc = 0;
770 
771  v->extended_mv = 0;
772 
773  v->dquant = 1;
774  v->vstransform = 1;
775 
776  v->res_transtab = 0;
777 
778  v->overlap = 0;
779 
780  v->resync_marker = 0;
781  v->rangered = 0;
782 
783  v->s.max_b_frames = avctx->max_b_frames = 0;
784  v->quantizer_mode = 0;
785 
786  v->finterpflag = 0;
787 
788  v->res_rtm_flag = 1;
789 
790  ff_vc1_init_transposed_scantables(v);
791 
792  if ((ret = ff_msmpeg4_decode_init(avctx)) < 0 ||
793  (ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
794  return ret;
795 
796  /* error concealment */
797  v->s.me.qpel_put = v->s.qdsp.put_qpel_pixels_tab;
798  v->s.me.qpel_avg = v->s.qdsp.avg_qpel_pixels_tab;
799 
800  return 0;
801 }
802 
803 static av_cold int mss2_decode_end(AVCodecContext *avctx)
804 {
805  MSS2Context *const ctx = avctx->priv_data;
806 
807  av_frame_free(&ctx->last_pic);
808 
809  ff_mss12_decode_end(&ctx->c);
810  av_freep(&ctx->c.pal_pic);
811  av_freep(&ctx->c.last_pal_pic);
812  ff_vc1_decode_end(avctx);
813 
814  return 0;
815 }
816 
817 static av_cold int mss2_decode_init(AVCodecContext *avctx)
818 {
819  MSS2Context * const ctx = avctx->priv_data;
820  MSS12Context *c = &ctx->c;
821  int ret;
822  c->avctx = avctx;
823  if (ret = ff_mss12_decode_init(c, 1, &ctx->sc[0], &ctx->sc[1]))
824  return ret;
825  ctx->last_pic = av_frame_alloc();
826  c->pal_stride = c->mask_stride;
827  c->pal_pic = av_mallocz(c->pal_stride * avctx->height);
828  c->last_pal_pic = av_mallocz(c->pal_stride * avctx->height);
829  if (!c->pal_pic || !c->last_pal_pic || !ctx->last_pic) {
830  mss2_decode_end(avctx);
831  return AVERROR(ENOMEM);
832  }
833  if (ret = wmv9_init(avctx)) {
834  mss2_decode_end(avctx);
835  return ret;
836  }
837  ff_mss2dsp_init(&ctx->dsp);
838  ff_qpeldsp_init(&ctx->qdsp);
839 
840  avctx->pix_fmt = c->free_colours == 127 ? AV_PIX_FMT_RGB555
841  : AV_PIX_FMT_RGB24;
842 
843 
844  return 0;
845 }
846 
847 AVCodec ff_mss2_decoder = {
848  .name = "mss2",
849  .long_name = NULL_IF_CONFIG_SMALL("MS Windows Media Video V9 Screen"),
850  .type = AVMEDIA_TYPE_VIDEO,
851  .id = AV_CODEC_ID_MSS2,
852  .priv_data_size = sizeof(MSS2Context),
853  .init = mss2_decode_init,
854  .close = mss2_decode_end,
855  .decode = mss2_decode_frame,
856  .capabilities = CODEC_CAP_DR1,
857 };